Compare commits
309 Commits
SHA1 | Author | Date
---|---|---
768c6b4bef | |||
8bcc04c275 | |||
2fd822887f | |||
e2c8aa0847 | |||
9b049402c9 | |||
d0e1779893 | |||
929ffc5a4e | |||
1f63fb06f1 | |||
b49aa125c9 | |||
55836d133e | |||
277e402d55 | |||
0ab8312b23 | |||
bc4c5c5a97 | |||
1a9aa78129 | |||
798a6db915 | |||
0a4a3fd37e | |||
66242eab41 | |||
7f0d4f0656 | |||
acba8d6026 | |||
1ff9555099 | |||
72a13e2a72 | |||
74cdfc2213 | |||
7b8e5a9f47 | |||
80525ac862 | |||
c14f98c6fc | |||
c6edfc3944 | |||
b95c493d66 | |||
5871462241 | |||
53bb826375 | |||
c769bcc418 | |||
f06a4c7861 | |||
0cae099d12 | |||
4bc3653906 | |||
3e7050983a | |||
9f1bb75445 | |||
139bb32dba | |||
158f6f3725 | |||
e33f9ea6b5 | |||
473037db86 | |||
b0e14ea83c | |||
782a549613 | |||
c805f7dc4e | |||
782829152e | |||
da6f09afb8 | |||
004b1b9c3f | |||
2f8d0f88d6 | |||
177d241160 | |||
5323622842 | |||
c852923347 | |||
5dc4410d58 | |||
da4642d634 | |||
a264be1791 | |||
9aff121949 | |||
a7f4d1487a | |||
11e43e1654 | |||
82be47bc18 | |||
6498e4fbf6 | |||
9978971bd9 | |||
e28ac2c377 | |||
ef296aa7db | |||
43e7107f65 | |||
752fa29390 | |||
7bb7b42356 | |||
2a7fc744f9 | |||
90e3da0389 | |||
1a62bcee42 | |||
b83a4cae90 | |||
05ef21cd3b | |||
dfa27b04d7 | |||
880b04906e | |||
1fe0b1e516 | |||
f9fd4bd24c | |||
c55a11d160 | |||
92118de0e1 | |||
0d9802a2cd | |||
f6beede01b | |||
ff48ea20de | |||
dd9cb18d65 | |||
71932aed0a | |||
24dc6680e1 | |||
61d9d40e48 | |||
e9b40db319 | |||
316356861d | |||
e07c00710a | |||
bc47c80610 | |||
14baa511f0 | |||
e773faeb24 | |||
42847516a2 | |||
47e9a1ae4f | |||
549a154394 | |||
dca00d1bde | |||
45ce1b4f96 | |||
a9232c0633 | |||
3da254c745 | |||
9ba3ee9683 | |||
b0addba2a9 | |||
bb59525ff8 | |||
acd25124d4 | |||
d718ab2491 | |||
1860aacd1f | |||
d4bbb7f516 | |||
d1c0f4b4f1 | |||
b72b837ba2 | |||
fde85c96c0 | |||
121418dad2 | |||
f44f94fe23 | |||
55a4481022 | |||
e859ad37a8 | |||
1a28c7fc12 | |||
c706a07764 | |||
59568e5776 | |||
33ca8fa72a | |||
4bb66a81fb | |||
468c14b14f | |||
03e505897a | |||
5205eb382e | |||
b07b6e56fa | |||
bcc890e705 | |||
07d14f6f07 | |||
03b213e296 | |||
1bfce24c9f | |||
94b2565969 | |||
2896fdb603 | |||
50970bc8f9 | |||
10df45b173 | |||
d3b8129593 | |||
f7fb5aebac | |||
9311a6e356 | |||
8c706892df | |||
7f2b11756c | |||
f324547600 | |||
36e8977f1d | |||
b88db2689e | |||
1584ec220c | |||
fb366a7236 | |||
b903158543 | |||
9dad9c6333 | |||
a6658b9d75 | |||
a97feedcc1 | |||
8021bce41f | |||
d8fa19336c | |||
191483cf9f | |||
1eb8314d42 | |||
88eeb817e4 | |||
b777126bd2 | |||
89d78dcfcf | |||
1cf142c193 | |||
3e29325410 | |||
4dc98c3dbd | |||
9caad645e2 | |||
6cb76ac326 | |||
0001e5c0a1 | |||
ab32d13da1 | |||
cefe46e981 | |||
f4d70e78b6 | |||
d130adf582 | |||
1e6285e64e | |||
e3c90c3807 | |||
85750307aa | |||
0ee4a5e799 | |||
55cb9cf681 | |||
d3af7e0653 | |||
729a24d557 | |||
55b92c16da | |||
835bacce4f | |||
ccb7b1a698 | |||
85dbdeb4c3 | |||
397f9f11c5 | |||
a11986ad1d | |||
a4d373f0af | |||
52eea215ce | |||
6f48aafd3a | |||
2d94c09aee | |||
9699b61679 | |||
8865bfbd59 | |||
5f80c1d37d | |||
f616f5dec6 | |||
db1003b5f8 | |||
f52ff777b7 | |||
4314a29953 | |||
e560fff840 | |||
5ac747ea7d | |||
f522dc1e18 | |||
486812bf54 | |||
7df8f76df1 | |||
bbe4990e80 | |||
a5baaf790d | |||
0a36ed1b8c | |||
b7ad240375 | |||
2cc71f2d55 | |||
3125c74681 | |||
d5b1dee8d6 | |||
4b33a2a1b8 | |||
58e6a5c281 | |||
7eb61074ab | |||
9b2edbaa9b | |||
e8659b45c7 | |||
a9553cb401 | |||
800c409698 | |||
b6f484ddee | |||
3c39fee5a8 | |||
560f34d1f6 | |||
dbda50941a | |||
f1e68ac25c | |||
95029b9b05 | |||
a789bf4761 | |||
d2e7ffa8b9 | |||
0914519f6a | |||
43cd5f3730 | |||
d396a5f45a | |||
76a7071dba | |||
133baa8ce6 | |||
5df3510fde | |||
357339273f | |||
2500881e0b | |||
0013bfff4e | |||
f13498b428 | |||
b567138170 | |||
653982cae5 | |||
605f4906ba | |||
d27f24e312 | |||
c9c1cb5c9c | |||
1cc6493ccf | |||
ae47862be2 | |||
8590184df7 | |||
d840bbab08 | |||
63314de516 | |||
c47a6e12c7 | |||
7937c45ba4 | |||
813b11ac56 | |||
ad6883b66a | |||
a8f4c4e297 | |||
6d68e94e4e | |||
5dd40d7d88 | |||
3f58177670 | |||
edfd65b115 | |||
51da66ec84 | |||
ba36308d69 | |||
ee450b2dd0 | |||
84b28fb261 | |||
1586b86797 | |||
8f065e487e | |||
953eadd983 | |||
a4a792facd | |||
055f808f98 | |||
0404878445 | |||
053907f8a4 | |||
f76dcc1f05 | |||
823bc138cd | |||
18f746b025 | |||
c81adaf901 | |||
2d12ddd0f6 | |||
bee36cc8d0 | |||
f7aee67023 | |||
c021727009 | |||
6653136e1d | |||
06c40c807c | |||
9b262b4915 | |||
cc2d3ecfd7 | |||
92743499bf | |||
aa6a00a03e | |||
bd19f7c4cb | |||
988bf65ba4 | |||
d5b03bd824 | |||
6a72dab111 | |||
56e8319a6d | |||
aed1e51ef1 | |||
f4278d61df | |||
a5c3ae3cef | |||
05c052e212 | |||
dc05bb648a | |||
800b65b2f6 | |||
ae1a0f57c5 | |||
df7c44bd0c | |||
3e29cfd712 | |||
202031538f | |||
29ff1b925d | |||
5a91db6e62 | |||
94ba700e58 | |||
1964c6ec29 | |||
4dd6591bfd | |||
163217815b | |||
37c182cd5d | |||
0c68f27ac3 | |||
5fb8da9b35 | |||
74d9fd1e4f | |||
e71206c578 | |||
0141c80238 | |||
ed928cfdf7 | |||
2fd319ab7a | |||
7813a1decd | |||
93e4ed1f75 | |||
a70f31b3da | |||
2d25227d0a | |||
fc7bfd0f67 | |||
2996291b37 | |||
3e80b9231c | |||
78231a8682 | |||
ace711e7f1 | |||
c9cbc39ec9 | |||
606a392d50 | |||
c67596ceb4 | |||
9a42cc7555 | |||
2e5ef2a802 | |||
8c8e2c4b2b | |||
0578801f99 | |||
6141e1410a | |||
4fc86807ff | |||
d2a2eba69e |
14
.buildkite/env/secrets.ejson
vendored
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
|
||||
"environment": {
|
||||
"CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
|
||||
"CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
|
||||
"GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
|
||||
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
|
||||
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
|
||||
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
|
||||
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
|
||||
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
|
||||
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
|
||||
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
|
||||
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
|
||||
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
|
||||
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
|
||||
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
|
||||
}
|
||||
}
|
||||
|
@@ -1,18 +0,0 @@
root: ./docs/src

structure:
  readme: introduction.md
  summary: SUMMARY.md

redirects:
  wallet: ./wallet-guide/README.md
  wallet/app-wallets: ./wallet-guide/apps.md
  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
  wallet/cli-wallets: ./wallet-guide/cli.md
  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
  wallet/support: ./wallet-guide/support.md
4
.gitignore
vendored
@@ -23,3 +23,7 @@ log-*/
/.idea/
/solana.iml
/.vscode/

# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
33
.mergify.yml
@@ -1,9 +1,40 @@
# Validate your changes with:
#
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
#
# https://doc.mergify.io/
pull_request_rules:
  - name: automatic merge (squash) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author≠@dont-squash-my-commits
    actions:
      merge:
        method: squash
  # Join the dont-squash-my-commits group if you won't like your commits squashed
  - name: automatic merge (rebase) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author=@dont-squash-my-commits
    actions:
      merge:
        method: rebase
  - name: remove automerge label on CI failure
    conditions:
      - label=automerge
      - "#status-failure!=0"
    actions:
      label:
        remove:
          - automerge
      comment:
        message: automerge label removed due to a CI failure
  - name: remove outdated reviews
    conditions:
      - base=master
94
.travis.yml
@@ -1,18 +1,3 @@
|
||||
os:
|
||||
- osx
|
||||
- windows
|
||||
|
||||
language: rust
|
||||
rust:
|
||||
- stable
|
||||
|
||||
install:
|
||||
- source ci/rust-version.sh
|
||||
|
||||
script:
|
||||
- source ci/env.sh
|
||||
- ci/publish-tarball.sh
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
@@ -23,21 +8,64 @@ notifications:
|
||||
on_success: change
|
||||
secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=
|
||||
|
||||
deploy:
|
||||
- provider: s3
|
||||
access_key_id: $AWS_ACCESS_KEY_ID
|
||||
secret_access_key: $AWS_SECRET_ACCESS_KEY
|
||||
bucket: release.solana.com
|
||||
region: us-west-1
|
||||
skip_cleanup: true
|
||||
acl: public_read
|
||||
local_dir: travis-s3-upload
|
||||
on:
|
||||
all_branches: true
|
||||
- provider: releases
|
||||
api_key: $GITHUB_TOKEN
|
||||
skip_cleanup: true
|
||||
file_glob: true
|
||||
file: travis-release-upload/*
|
||||
on:
|
||||
tags: true
|
||||
os: linux
|
||||
dist: bionic
|
||||
language: minimal
|
||||
|
||||
jobs:
|
||||
include:
|
||||
- &release-artifacts
|
||||
if: type = push
|
||||
name: "macOS release artifacts"
|
||||
os: osx
|
||||
language: rust
|
||||
rust:
|
||||
- stable
|
||||
install:
|
||||
- source ci/rust-version.sh
|
||||
script:
|
||||
- source ci/env.sh
|
||||
- ci/publish-tarball.sh
|
||||
deploy:
|
||||
- provider: s3
|
||||
access_key_id: $AWS_ACCESS_KEY_ID
|
||||
secret_access_key: $AWS_SECRET_ACCESS_KEY
|
||||
bucket: release.solana.com
|
||||
region: us-west-1
|
||||
skip_cleanup: true
|
||||
acl: public_read
|
||||
local_dir: travis-s3-upload
|
||||
on:
|
||||
all_branches: true
|
||||
- provider: releases
|
||||
token: $GITHUB_TOKEN
|
||||
skip_cleanup: true
|
||||
file_glob: true
|
||||
file: travis-release-upload/*
|
||||
on:
|
||||
tags: true
|
||||
- <<: *release-artifacts
|
||||
name: "Windows release artifacts"
|
||||
os: windows
|
||||
|
||||
# docs pull request or commit
|
||||
- name: "docs"
|
||||
if: type IN (push, pull_request) OR tag IS present
|
||||
language: node_js
|
||||
node_js:
|
||||
- "node"
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- ~/.npm
|
||||
|
||||
before_install:
|
||||
- .travis/affects.sh docs/ .travis || travis_terminate 0
|
||||
- cd docs/
|
||||
- source .travis/before_install.sh
|
||||
|
||||
script:
|
||||
- source .travis/script.sh
|
||||
|
25
.travis/affects.sh
Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
#
# Check if files in the commit range match one or more prefixes
#

# Always run the job if we are on a tagged release
if [[ -n "$TRAVIS_TAG" ]]; then
  exit 0
fi

(
  set -x
  git diff --name-only "$TRAVIS_COMMIT_RANGE"
)

for file in $(git diff --name-only "$TRAVIS_COMMIT_RANGE"); do
  for prefix in "$@"; do
    if [[ $file =~ ^"$prefix" ]]; then
      exit 0
    fi
  done
done

echo "No modifications to $*"
exit 1
3095
Cargo.lock
generated
File diff suppressed because it is too large

Cargo.toml
@@ -25,6 +25,7 @@ members = [
    "log-analyzer",
    "merkle-tree",
    "stake-o-matic",
    "storage-bigtable",
    "streamer",
    "measure",
    "metrics",
@@ -52,6 +53,7 @@ members = [
    "sys-tuner",
    "tokens",
    "transaction-status",
    "account-decoder",
    "upload-perf",
    "net-utils",
    "version",
@@ -63,6 +65,4 @@ members = [

exclude = [
    "programs/bpf",
    "programs/move_loader",
    "programs/librapay",
]
@@ -116,7 +116,8 @@ There are three release channels that map to branches as follows:

1. After the new release has been tagged, update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1) with:
   ```
   scripts/increment-cargo-version.sh patch
   $ scripts/increment-cargo-version.sh patch
   $ ./scripts/cargo-for-all-lock-files.sh tree
   ```
1. Rebuild to get an updated version of `Cargo.lock`:
   ```
29
account-decoder/Cargo.toml
Normal file
@@ -0,0 +1,29 @@
[package]
name = "solana-account-decoder"
version = "1.2.21"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[dependencies]
bincode = "1.2.1"
base64 = "0.12.3"
bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-config-program = { path = "../programs/config", version = "1.2.21" }
solana-sdk = { path = "../sdk", version = "1.2.21" }
solana-stake-program = { path = "../programs/stake", version = "1.2.21" }
solana-vote-program = { path = "../programs/vote", version = "1.2.21" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.54"
thiserror = "1.0"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
115
account-decoder/src/lib.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
|
||||
pub mod parse_account_data;
|
||||
pub mod parse_config;
|
||||
pub mod parse_nonce;
|
||||
pub mod parse_stake;
|
||||
pub mod parse_sysvar;
|
||||
pub mod parse_token;
|
||||
pub mod parse_vote;
|
||||
pub mod validator_info;
|
||||
|
||||
use crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount};
|
||||
use solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey};
|
||||
use std::str::FromStr;
|
||||
|
||||
pub type StringAmount = String;
|
||||
|
||||
/// A duplicate representation of an Account for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiAccount {
|
||||
pub lamports: u64,
|
||||
pub data: UiAccountData,
|
||||
pub owner: String,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: Epoch,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", untagged)]
|
||||
pub enum UiAccountData {
|
||||
Binary(String),
|
||||
Json(ParsedAccount),
|
||||
Binary64(String),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum UiAccountEncoding {
|
||||
Binary,
|
||||
JsonParsed,
|
||||
Binary64,
|
||||
}
|
||||
|
||||
impl UiAccount {
|
||||
pub fn encode(
|
||||
pubkey: &Pubkey,
|
||||
account: Account,
|
||||
encoding: UiAccountEncoding,
|
||||
additional_data: Option<AccountAdditionalData>,
|
||||
) -> Self {
|
||||
let data = match encoding {
|
||||
UiAccountEncoding::Binary => {
|
||||
UiAccountData::Binary(bs58::encode(account.data).into_string())
|
||||
}
|
||||
UiAccountEncoding::Binary64 => UiAccountData::Binary64(base64::encode(account.data)),
|
||||
UiAccountEncoding::JsonParsed => {
|
||||
if let Ok(parsed_data) =
|
||||
parse_account_data(pubkey, &account.owner, &account.data, additional_data)
|
||||
{
|
||||
UiAccountData::Json(parsed_data)
|
||||
} else {
|
||||
UiAccountData::Binary64(base64::encode(account.data))
|
||||
}
|
||||
}
|
||||
};
|
||||
UiAccount {
|
||||
lamports: account.lamports,
|
||||
data,
|
||||
owner: account.owner.to_string(),
|
||||
executable: account.executable,
|
||||
rent_epoch: account.rent_epoch,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> Option<Account> {
|
||||
let data = match &self.data {
|
||||
UiAccountData::Json(_) => None,
|
||||
UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
|
||||
UiAccountData::Binary64(blob) => base64::decode(blob).ok(),
|
||||
}?;
|
||||
Some(Account {
|
||||
lamports: self.lamports,
|
||||
data,
|
||||
owner: Pubkey::from_str(&self.owner).ok()?,
|
||||
executable: self.executable,
|
||||
rent_epoch: self.rent_epoch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiFeeCalculator {
|
||||
pub lamports_per_signature: StringAmount,
|
||||
}
|
||||
|
||||
impl From<FeeCalculator> for UiFeeCalculator {
|
||||
fn from(fee_calculator: FeeCalculator) -> Self {
|
||||
Self {
|
||||
lamports_per_signature: fee_calculator.lamports_per_signature.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for UiFeeCalculator {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lamports_per_signature: "0".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
141
account-decoder/src/parse_account_data.rs
Normal file
@@ -0,0 +1,141 @@
|
||||
use crate::{
|
||||
parse_config::parse_config,
|
||||
parse_nonce::parse_nonce,
|
||||
parse_stake::parse_stake,
|
||||
parse_sysvar::parse_sysvar,
|
||||
parse_token::{parse_token, spl_token_id_v1_0},
|
||||
parse_vote::parse_vote,
|
||||
};
|
||||
use inflector::Inflector;
|
||||
use serde_json::Value;
|
||||
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
|
||||
use std::collections::HashMap;
|
||||
use thiserror::Error;
|
||||
|
||||
lazy_static! {
|
||||
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
|
||||
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
|
||||
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
|
||||
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
|
||||
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
|
||||
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
|
||||
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
|
||||
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
|
||||
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
|
||||
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
|
||||
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
|
||||
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
|
||||
m
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ParseAccountError {
|
||||
#[error("{0:?} account not parsable")]
|
||||
AccountNotParsable(ParsableAccount),
|
||||
|
||||
#[error("Program not parsable")]
|
||||
ProgramNotParsable,
|
||||
|
||||
#[error("Additional data required to parse: {0}")]
|
||||
AdditionalDataMissing(String),
|
||||
|
||||
#[error("Instruction error")]
|
||||
InstructionError(#[from] InstructionError),
|
||||
|
||||
#[error("Serde json error")]
|
||||
SerdeJsonError(#[from] serde_json::error::Error),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ParsedAccount {
|
||||
pub program: String,
|
||||
pub parsed: Value,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum ParsableAccount {
|
||||
Config,
|
||||
Nonce,
|
||||
SplToken,
|
||||
Stake,
|
||||
Sysvar,
|
||||
Vote,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AccountAdditionalData {
|
||||
pub spl_token_decimals: Option<u8>,
|
||||
}
|
||||
|
||||
pub fn parse_account_data(
|
||||
pubkey: &Pubkey,
|
||||
program_id: &Pubkey,
|
||||
data: &[u8],
|
||||
additional_data: Option<AccountAdditionalData>,
|
||||
) -> Result<ParsedAccount, ParseAccountError> {
|
||||
let program_name = PARSABLE_PROGRAM_IDS
|
||||
.get(program_id)
|
||||
.ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
|
||||
let additional_data = additional_data.unwrap_or_default();
|
||||
let parsed_json = match program_name {
|
||||
ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
|
||||
ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
|
||||
ParsableAccount::SplToken => {
|
||||
serde_json::to_value(parse_token(data, additional_data.spl_token_decimals)?)?
|
||||
}
|
||||
ParsableAccount::Stake => serde_json::to_value(parse_stake(data)?)?,
|
||||
ParsableAccount::Sysvar => serde_json::to_value(parse_sysvar(data, pubkey)?)?,
|
||||
ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
|
||||
};
|
||||
Ok(ParsedAccount {
|
||||
program: format!("{:?}", program_name).to_kebab_case(),
|
||||
parsed: parsed_json,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
};
|
||||
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_parse_account_data() {
|
||||
let account_pubkey = Pubkey::new_rand();
|
||||
let other_program = Pubkey::new_rand();
|
||||
let data = vec![0; 4];
|
||||
assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());
|
||||
|
||||
let vote_state = VoteState::default();
|
||||
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state));
|
||||
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
|
||||
let parsed = parse_account_data(
|
||||
&account_pubkey,
|
||||
&solana_vote_program::id(),
|
||||
&vote_account_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(parsed.program, "vote".to_string());
|
||||
|
||||
let nonce_data = Versions::new_current(State::Initialized(Data::default()));
|
||||
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
|
||||
let parsed = parse_account_data(
|
||||
&account_pubkey,
|
||||
&system_program::id(),
|
||||
&nonce_account_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(parsed.program, "nonce".to_string());
|
||||
}
|
||||
}
|
146
account-decoder/src/parse_config.rs
Normal file
@@ -0,0 +1,146 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
validator_info,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use serde_json::Value;
|
||||
use solana_config_program::{get_config_data, ConfigKeys};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::config::Config as StakeConfig;
|
||||
|
||||
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
|
||||
let parsed_account = if pubkey == &solana_stake_program::config::id() {
|
||||
get_config_data(data)
|
||||
.ok()
|
||||
.and_then(|data| deserialize::<StakeConfig>(data).ok())
|
||||
.map(|config| ConfigAccountType::StakeConfig(config.into()))
|
||||
} else {
|
||||
deserialize::<ConfigKeys>(data).ok().and_then(|key_list| {
|
||||
if !key_list.keys.is_empty() && key_list.keys[0].0 == validator_info::id() {
|
||||
parse_config_data::<String>(data, key_list.keys).and_then(|validator_info| {
|
||||
Some(ConfigAccountType::ValidatorInfo(UiConfig {
|
||||
keys: validator_info.keys,
|
||||
config_data: serde_json::from_str(&validator_info.config_data).ok()?,
|
||||
}))
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
};
|
||||
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::Config,
|
||||
))
|
||||
}
|
||||
|
||||
fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConfig<T>>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
|
||||
let keys = keys
|
||||
.iter()
|
||||
.map(|key| UiConfigKey {
|
||||
pubkey: key.0.to_string(),
|
||||
signer: key.1,
|
||||
})
|
||||
.collect();
|
||||
Some(UiConfig { keys, config_data })
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum ConfigAccountType {
|
||||
StakeConfig(UiStakeConfig),
|
||||
ValidatorInfo(UiConfig<Value>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiConfigKey {
|
||||
pub pubkey: String,
|
||||
pub signer: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeConfig {
|
||||
pub warmup_cooldown_rate: f64,
|
||||
pub slash_penalty: u8,
|
||||
}
|
||||
|
||||
impl From<StakeConfig> for UiStakeConfig {
|
||||
fn from(config: StakeConfig) -> Self {
|
||||
Self {
|
||||
warmup_cooldown_rate: config.warmup_cooldown_rate,
|
||||
slash_penalty: config.slash_penalty,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiConfig<T> {
|
||||
pub keys: Vec<UiConfigKey>,
|
||||
pub config_data: T,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::validator_info::ValidatorInfo;
|
||||
use serde_json::json;
|
||||
use solana_config_program::create_config_account;
|
||||
|
||||
#[test]
|
||||
fn test_parse_config() {
|
||||
let stake_config = StakeConfig {
|
||||
warmup_cooldown_rate: 0.25,
|
||||
slash_penalty: 50,
|
||||
};
|
||||
let stake_config_account = create_config_account(vec![], &stake_config, 10);
|
||||
assert_eq!(
|
||||
parse_config(
|
||||
&stake_config_account.data,
|
||||
&solana_stake_program::config::id()
|
||||
)
|
||||
.unwrap(),
|
||||
ConfigAccountType::StakeConfig(UiStakeConfig {
|
||||
warmup_cooldown_rate: 0.25,
|
||||
slash_penalty: 50,
|
||||
}),
|
||||
);
|
||||
|
||||
let validator_info = ValidatorInfo {
|
||||
info: serde_json::to_string(&json!({
|
||||
"name": "Solana",
|
||||
}))
|
||||
.unwrap(),
|
||||
};
|
||||
let info_pubkey = Pubkey::new_rand();
|
||||
let validator_info_config_account = create_config_account(
|
||||
vec![(validator_info::id(), false), (info_pubkey, true)],
|
||||
&validator_info,
|
||||
10,
|
||||
);
|
||||
assert_eq!(
|
||||
parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(),
|
||||
ConfigAccountType::ValidatorInfo(UiConfig {
|
||||
keys: vec![
|
||||
UiConfigKey {
|
||||
pubkey: validator_info::id().to_string(),
|
||||
signer: false,
|
||||
},
|
||||
UiConfigKey {
|
||||
pubkey: info_pubkey.to_string(),
|
||||
signer: true,
|
||||
}
|
||||
],
|
||||
config_data: serde_json::from_str(r#"{"name":"Solana"}"#).unwrap(),
|
||||
}),
|
||||
);
|
||||
|
||||
let bad_data = vec![0; 4];
|
||||
assert!(parse_config(&bad_data, &info_pubkey).is_err());
|
||||
}
|
||||
}
|
67
account-decoder/src/parse_nonce.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
nonce::{state::Versions, State},
|
||||
};
|
||||
|
||||
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
||||
let nonce_state: Versions = bincode::deserialize(data)
|
||||
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
|
||||
let nonce_state = nonce_state.convert_to_current();
|
||||
match nonce_state {
|
||||
State::Uninitialized => Ok(UiNonceState::Uninitialized),
|
||||
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
|
||||
authority: data.authority.to_string(),
|
||||
blockhash: data.blockhash.to_string(),
|
||||
fee_calculator: data.fee_calculator.into(),
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
/// A duplicate representation of NonceState for pretty JSON serialization
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum UiNonceState {
|
||||
Uninitialized,
|
||||
Initialized(UiNonceData),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiNonceData {
|
||||
pub authority: String,
|
||||
pub blockhash: String,
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
},
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_parse_nonce() {
|
||||
let nonce_data = Versions::new_current(State::Initialized(Data::default()));
|
||||
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
|
||||
assert_eq!(
|
||||
parse_nonce(&nonce_account_data).unwrap(),
|
||||
UiNonceState::Initialized(UiNonceData {
|
||||
authority: Pubkey::default().to_string(),
|
||||
blockhash: Hash::default().to_string(),
|
||||
fee_calculator: UiFeeCalculator {
|
||||
lamports_per_signature: 0.to_string(),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
let bad_data = vec![0; 4];
|
||||
assert!(parse_nonce(&bad_data).is_err());
|
||||
}
|
||||
}
|
236
account-decoder/src/parse_stake.rs
Normal file
@@ -0,0 +1,236 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use solana_sdk::clock::{Epoch, UnixTimestamp};
|
||||
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
|
||||
|
||||
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
||||
let stake_state: StakeState = deserialize(data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Stake))?;
|
||||
let parsed_account = match stake_state {
|
||||
StakeState::Uninitialized => StakeAccountType::Uninitialized,
|
||||
StakeState::Initialized(meta) => StakeAccountType::Initialized(UiStakeAccount {
|
||||
meta: meta.into(),
|
||||
stake: None,
|
||||
}),
|
||||
StakeState::Stake(meta, stake) => StakeAccountType::Delegated(UiStakeAccount {
|
||||
meta: meta.into(),
|
||||
stake: Some(stake.into()),
|
||||
}),
|
||||
StakeState::RewardsPool => StakeAccountType::RewardsPool,
|
||||
};
|
||||
Ok(parsed_account)
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum StakeAccountType {
|
||||
Uninitialized,
|
||||
Initialized(UiStakeAccount),
|
||||
Delegated(UiStakeAccount),
|
||||
RewardsPool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeAccount {
|
||||
pub meta: UiMeta,
|
||||
pub stake: Option<UiStake>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiMeta {
|
||||
pub rent_exempt_reserve: StringAmount,
|
||||
pub authorized: UiAuthorized,
|
||||
pub lockup: UiLockup,
|
||||
}
|
||||
|
||||
impl From<Meta> for UiMeta {
|
||||
fn from(meta: Meta) -> Self {
|
||||
Self {
|
||||
rent_exempt_reserve: meta.rent_exempt_reserve.to_string(),
|
||||
authorized: meta.authorized.into(),
|
||||
lockup: meta.lockup.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiLockup {
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
pub epoch: Epoch,
|
||||
pub custodian: String,
|
||||
}
|
||||
|
||||
impl From<Lockup> for UiLockup {
|
||||
fn from(lockup: Lockup) -> Self {
|
||||
Self {
|
||||
unix_timestamp: lockup.unix_timestamp,
|
||||
epoch: lockup.epoch,
|
||||
custodian: lockup.custodian.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiAuthorized {
|
||||
pub staker: String,
|
||||
pub withdrawer: String,
|
||||
}
|
||||
|
||||
impl From<Authorized> for UiAuthorized {
|
||||
fn from(authorized: Authorized) -> Self {
|
||||
Self {
|
||||
staker: authorized.staker.to_string(),
|
||||
withdrawer: authorized.withdrawer.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStake {
|
||||
pub delegation: UiDelegation,
|
||||
pub credits_observed: u64,
|
||||
}
|
||||
|
||||
impl From<Stake> for UiStake {
|
||||
fn from(stake: Stake) -> Self {
|
||||
Self {
|
||||
delegation: stake.delegation.into(),
|
||||
credits_observed: stake.credits_observed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiDelegation {
|
||||
pub voter: String,
|
||||
pub stake: StringAmount,
|
||||
pub activation_epoch: StringAmount,
|
||||
pub deactivation_epoch: StringAmount,
|
||||
pub warmup_cooldown_rate: f64,
|
||||
}
|
||||
|
||||
impl From<Delegation> for UiDelegation {
|
||||
fn from(delegation: Delegation) -> Self {
|
||||
Self {
|
||||
voter: delegation.voter_pubkey.to_string(),
|
||||
stake: delegation.stake.to_string(),
|
||||
activation_epoch: delegation.activation_epoch.to_string(),
|
||||
deactivation_epoch: delegation.deactivation_epoch.to_string(),
|
||||
warmup_cooldown_rate: delegation.warmup_cooldown_rate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
|
||||
#[test]
|
||||
fn test_parse_stake() {
|
||||
let stake_state = StakeState::Uninitialized;
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Uninitialized
|
||||
);
|
||||
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let custodian = Pubkey::new_rand();
|
||||
let authorized = Authorized::auto(&pubkey);
|
||||
let lockup = Lockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian,
|
||||
};
|
||||
let meta = Meta {
|
||||
rent_exempt_reserve: 42,
|
||||
authorized,
|
||||
lockup,
|
||||
};
|
||||
|
||||
let stake_state = StakeState::Initialized(meta);
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Initialized(UiStakeAccount {
|
||||
meta: UiMeta {
|
||||
rent_exempt_reserve: 42.to_string(),
|
||||
authorized: UiAuthorized {
|
||||
staker: pubkey.to_string(),
|
||||
withdrawer: pubkey.to_string(),
|
||||
},
|
||||
lockup: UiLockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian: custodian.to_string(),
|
||||
}
|
||||
},
|
||||
stake: None,
|
||||
})
|
||||
);
|
||||
|
||||
let voter_pubkey = Pubkey::new_rand();
|
||||
let stake = Stake {
|
||||
delegation: Delegation {
|
||||
voter_pubkey,
|
||||
stake: 20,
|
||||
activation_epoch: 2,
|
||||
deactivation_epoch: std::u64::MAX,
|
||||
warmup_cooldown_rate: 0.25,
|
||||
},
|
||||
credits_observed: 10,
|
||||
};
|
||||
|
||||
let stake_state = StakeState::Stake(meta, stake);
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Delegated(UiStakeAccount {
|
||||
meta: UiMeta {
|
||||
rent_exempt_reserve: 42.to_string(),
|
||||
authorized: UiAuthorized {
|
||||
staker: pubkey.to_string(),
|
||||
withdrawer: pubkey.to_string(),
|
||||
},
|
||||
lockup: UiLockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian: custodian.to_string(),
|
||||
}
|
||||
},
|
||||
stake: Some(UiStake {
|
||||
delegation: UiDelegation {
|
||||
voter: voter_pubkey.to_string(),
|
||||
stake: 20.to_string(),
|
||||
activation_epoch: 2.to_string(),
|
||||
deactivation_epoch: std::u64::MAX.to_string(),
|
||||
warmup_cooldown_rate: 0.25,
|
||||
},
|
||||
credits_observed: 10,
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
let stake_state = StakeState::RewardsPool;
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::RewardsPool
|
||||
);
|
||||
|
||||
let bad_data = vec![1, 2, 3, 4];
|
||||
assert!(parse_stake(&bad_data).is_err());
|
||||
}
|
||||
}
|
328
account-decoder/src/parse_sysvar.rs
Normal file
@@ -0,0 +1,328 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, UiFeeCalculator,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use bv::BitVec;
|
||||
use solana_sdk::{
|
||||
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
||||
epoch_schedule::EpochSchedule,
|
||||
pubkey::Pubkey,
|
||||
rent::Rent,
|
||||
slot_hashes::SlotHashes,
|
||||
slot_history::{self, SlotHistory},
|
||||
stake_history::{StakeHistory, StakeHistoryEntry},
|
||||
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
|
||||
};
|
||||
|
||||
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
|
||||
let parsed_account = {
|
||||
if pubkey == &sysvar::clock::id() {
|
||||
deserialize::<Clock>(data)
|
||||
.ok()
|
||||
.map(|clock| SysvarAccountType::Clock(clock.into()))
|
||||
} else if pubkey == &sysvar::epoch_schedule::id() {
|
||||
deserialize(data).ok().map(SysvarAccountType::EpochSchedule)
|
||||
} else if pubkey == &sysvar::fees::id() {
|
||||
deserialize::<Fees>(data)
|
||||
.ok()
|
||||
.map(|fees| SysvarAccountType::Fees(fees.into()))
|
||||
} else if pubkey == &sysvar::recent_blockhashes::id() {
|
||||
deserialize::<RecentBlockhashes>(data)
|
||||
.ok()
|
||||
.map(|recent_blockhashes| {
|
||||
let recent_blockhashes = recent_blockhashes
|
||||
.iter()
|
||||
.map(|entry| UiRecentBlockhashesEntry {
|
||||
blockhash: entry.blockhash.to_string(),
|
||||
fee_calculator: entry.fee_calculator.clone().into(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::RecentBlockhashes(recent_blockhashes)
|
||||
})
|
||||
} else if pubkey == &sysvar::rent::id() {
|
||||
deserialize::<Rent>(data)
|
||||
.ok()
|
||||
.map(|rent| SysvarAccountType::Rent(rent.into()))
|
||||
} else if pubkey == &sysvar::rewards::id() {
|
||||
deserialize::<Rewards>(data)
|
||||
.ok()
|
||||
.map(|rewards| SysvarAccountType::Rewards(rewards.into()))
|
||||
} else if pubkey == &sysvar::slot_hashes::id() {
|
||||
deserialize::<SlotHashes>(data).ok().map(|slot_hashes| {
|
||||
let slot_hashes = slot_hashes
|
||||
.iter()
|
||||
.map(|slot_hash| UiSlotHashEntry {
|
||||
slot: slot_hash.0,
|
||||
hash: slot_hash.1.to_string(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::SlotHashes(slot_hashes)
|
||||
})
|
||||
} else if pubkey == &sysvar::slot_history::id() {
|
||||
deserialize::<SlotHistory>(data).ok().map(|slot_history| {
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
next_slot: slot_history.next_slot,
|
||||
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
|
||||
})
|
||||
})
|
||||
} else if pubkey == &sysvar::stake_history::id() {
|
||||
deserialize::<StakeHistory>(data).ok().map(|stake_history| {
|
||||
let stake_history = stake_history
|
||||
.iter()
|
||||
.map(|entry| UiStakeHistoryEntry {
|
||||
epoch: entry.0,
|
||||
stake_history: entry.1.clone(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::StakeHistory(stake_history)
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::Sysvar,
|
||||
))
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum SysvarAccountType {
|
||||
Clock(UiClock),
|
||||
EpochSchedule(EpochSchedule),
|
||||
Fees(UiFees),
|
||||
RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
|
||||
Rent(UiRent),
|
||||
Rewards(UiRewards),
|
||||
SlotHashes(Vec<UiSlotHashEntry>),
|
||||
SlotHistory(UiSlotHistory),
|
||||
StakeHistory(Vec<UiStakeHistoryEntry>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiClock {
|
||||
pub slot: Slot,
|
||||
pub epoch: Epoch,
|
||||
pub leader_schedule_epoch: Epoch,
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
}
|
||||
|
||||
impl From<Clock> for UiClock {
|
||||
fn from(clock: Clock) -> Self {
|
||||
Self {
|
||||
slot: clock.slot,
|
||||
epoch: clock.epoch,
|
||||
leader_schedule_epoch: clock.leader_schedule_epoch,
|
||||
unix_timestamp: clock.unix_timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiFees {
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
impl From<Fees> for UiFees {
|
||||
fn from(fees: Fees) -> Self {
|
||||
Self {
|
||||
fee_calculator: fees.fee_calculator.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRent {
|
||||
pub lamports_per_byte_year: StringAmount,
|
||||
pub exemption_threshold: f64,
|
||||
pub burn_percent: u8,
|
||||
}
|
||||
|
||||
impl From<Rent> for UiRent {
|
||||
fn from(rent: Rent) -> Self {
|
||||
Self {
|
||||
lamports_per_byte_year: rent.lamports_per_byte_year.to_string(),
|
||||
exemption_threshold: rent.exemption_threshold,
|
||||
burn_percent: rent.burn_percent,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRewards {
|
||||
pub validator_point_value: f64,
|
||||
}
|
||||
|
||||
impl From<Rewards> for UiRewards {
|
||||
fn from(rewards: Rewards) -> Self {
|
||||
Self {
|
||||
validator_point_value: rewards.validator_point_value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRecentBlockhashesEntry {
|
||||
pub blockhash: String,
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiSlotHashEntry {
|
||||
pub slot: Slot,
|
||||
pub hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiSlotHistory {
|
||||
pub next_slot: Slot,
|
||||
pub bits: String,
|
||||
}
|
||||
|
||||
struct SlotHistoryBits(BitVec<u64>);
|
||||
|
||||
impl std::fmt::Debug for SlotHistoryBits {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
for i in 0..slot_history::MAX_ENTRIES {
|
||||
if self.0.get(i) {
|
||||
write!(f, "1")?;
|
||||
} else {
|
||||
write!(f, "0")?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeHistoryEntry {
|
||||
pub epoch: Epoch,
|
||||
pub stake_history: StakeHistoryEntry,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
sysvar::{recent_blockhashes::IterItem, Sysvar},
|
||||
};
|
||||
use std::iter::FromIterator;
|
||||
|
||||
#[test]
|
||||
fn test_parse_sysvars() {
|
||||
let clock_sysvar = Clock::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
|
||||
SysvarAccountType::Clock(UiClock::default()),
|
||||
);
|
||||
|
||||
let epoch_schedule = EpochSchedule {
|
||||
slots_per_epoch: 12,
|
||||
leader_schedule_slot_offset: 0,
|
||||
warmup: false,
|
||||
first_normal_epoch: 1,
|
||||
first_normal_slot: 12,
|
||||
};
|
||||
let epoch_schedule_sysvar = epoch_schedule.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
|
||||
SysvarAccountType::EpochSchedule(epoch_schedule),
|
||||
);
|
||||
|
||||
let fees_sysvar = Fees::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
|
||||
SysvarAccountType::Fees(UiFees::default()),
|
||||
);
|
||||
|
||||
let hash = Hash::new(&[1; 32]);
|
||||
let fee_calculator = FeeCalculator {
|
||||
lamports_per_signature: 10,
|
||||
};
|
||||
let recent_blockhashes =
|
||||
RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
|
||||
let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(
|
||||
&recent_blockhashes_sysvar.data,
|
||||
&sysvar::recent_blockhashes::id()
|
||||
)
|
||||
.unwrap(),
|
||||
SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
|
||||
blockhash: hash.to_string(),
|
||||
fee_calculator: fee_calculator.into(),
|
||||
}]),
|
||||
);
|
||||
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 10,
|
||||
exemption_threshold: 2.0,
|
||||
burn_percent: 5,
|
||||
};
|
||||
let rent_sysvar = rent.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
|
||||
SysvarAccountType::Rent(rent.into()),
|
||||
);
|
||||
|
||||
let rewards_sysvar = Rewards::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
|
||||
SysvarAccountType::Rewards(UiRewards::default()),
|
||||
);
|
||||
|
||||
let mut slot_hashes = SlotHashes::default();
|
||||
slot_hashes.add(1, hash);
|
||||
let slot_hashes_sysvar = slot_hashes.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
|
||||
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
|
||||
slot: 1,
|
||||
hash: hash.to_string(),
|
||||
}]),
|
||||
);
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(42);
|
||||
let slot_history_sysvar = slot_history.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
next_slot: slot_history.next_slot,
|
||||
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
|
||||
}),
|
||||
);
|
||||
|
||||
let mut stake_history = StakeHistory::default();
|
||||
let stake_history_entry = StakeHistoryEntry {
|
||||
effective: 10,
|
||||
activating: 2,
|
||||
deactivating: 3,
|
||||
};
|
||||
stake_history.add(1, stake_history_entry.clone());
|
||||
let stake_history_sysvar = stake_history.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
|
||||
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
|
||||
epoch: 1,
|
||||
stake_history: stake_history_entry,
|
||||
}]),
|
||||
);
|
||||
|
||||
let bad_pubkey = Pubkey::new_rand();
|
||||
assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
|
||||
|
||||
let bad_data = vec![0; 4];
|
||||
assert!(parse_sysvar(&bad_data, &sysvar::stake_history::id()).is_err());
|
||||
}
|
||||
}
|
257
account-decoder/src/parse_token.rs
Normal file
@@ -0,0 +1,257 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use spl_token_v1_0::{
|
||||
option::COption,
|
||||
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
|
||||
state::{unpack, Account, Mint, Multisig},
|
||||
};
|
||||
use std::{mem::size_of, str::FromStr};
|
||||
|
||||
// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_id_v1_0() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
|
||||
}
|
||||
|
||||
// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_v1_0_native_mint() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
|
||||
}
|
||||
|
||||
pub fn parse_token(
|
||||
data: &[u8],
|
||||
mint_decimals: Option<u8>,
|
||||
) -> Result<TokenAccountType, ParseAccountError> {
|
||||
let mut data = data.to_vec();
|
||||
if data.len() == size_of::<Account>() {
|
||||
let account: Account = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
let decimals = mint_decimals.ok_or_else(|| {
|
||||
ParseAccountError::AdditionalDataMissing(
|
||||
"no mint_decimals provided to parse spl-token account".to_string(),
|
||||
)
|
||||
})?;
|
||||
Ok(TokenAccountType::Account(UiTokenAccount {
|
||||
mint: account.mint.to_string(),
|
||||
owner: account.owner.to_string(),
|
||||
token_amount: token_amount_to_ui_amount(account.amount, decimals),
|
||||
delegate: match account.delegate {
|
||||
COption::Some(pubkey) => Some(pubkey.to_string()),
|
||||
COption::None => None,
|
||||
},
|
||||
is_initialized: account.is_initialized,
|
||||
is_native: account.is_native,
|
||||
delegated_amount: token_amount_to_ui_amount(account.delegated_amount, decimals),
|
||||
}))
|
||||
} else if data.len() == size_of::<Mint>() {
|
||||
let mint: Mint = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
Ok(TokenAccountType::Mint(UiMint {
|
||||
owner: match mint.owner {
|
||||
COption::Some(pubkey) => Some(pubkey.to_string()),
|
||||
COption::None => None,
|
||||
},
|
||||
decimals: mint.decimals,
|
||||
is_initialized: mint.is_initialized,
|
||||
}))
|
||||
} else if data.len() == size_of::<Multisig>() {
|
||||
let multisig: Multisig = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
Ok(TokenAccountType::Multisig(UiMultisig {
|
||||
num_required_signers: multisig.m,
|
||||
num_valid_signers: multisig.n,
|
||||
is_initialized: multisig.is_initialized,
|
||||
signers: multisig
|
||||
.signers
|
||||
.iter()
|
||||
.filter_map(|pubkey| {
|
||||
if pubkey != &SplTokenPubkey::default() {
|
||||
Some(pubkey.to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
}))
|
||||
} else {
|
||||
Err(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::SplToken,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum TokenAccountType {
|
||||
Account(UiTokenAccount),
|
||||
Mint(UiMint),
|
||||
Multisig(UiMultisig),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiTokenAccount {
|
||||
pub mint: String,
|
||||
pub owner: String,
|
||||
pub token_amount: UiTokenAmount,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegate: Option<String>,
|
||||
pub is_initialized: bool,
|
||||
pub is_native: bool,
|
||||
#[serde(skip_serializing_if = "UiTokenAmount::is_zero")]
|
||||
pub delegated_amount: UiTokenAmount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiTokenAmount {
|
||||
pub ui_amount: f64,
|
||||
pub decimals: u8,
|
||||
pub amount: StringAmount,
|
||||
}
|
||||
|
||||
impl UiTokenAmount {
|
||||
fn is_zero(&self) -> bool {
|
||||
if let Ok(amount) = self.amount.parse::<u64>() {
|
||||
amount == 0
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
|
||||
// Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
|
||||
let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
|
||||
UiTokenAmount {
|
||||
ui_amount: amount_decimals,
|
||||
decimals,
|
||||
amount: amount.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiMint {
|
||||
pub owner: Option<String>,
|
||||
pub decimals: u8,
|
||||
pub is_initialized: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiMultisig {
|
||||
pub num_required_signers: u8,
|
||||
pub num_valid_signers: u8,
|
||||
pub is_initialized: bool,
|
||||
pub signers: Vec<String>,
|
||||
}
|
||||
|
||||
pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
|
||||
if data.len() == size_of::<Account>() {
|
||||
Some(Pubkey::new(&data[0..32]))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
#[cfg(test)]
mod test {
    use super::*;
    use spl_token_v1_0::state::unpack_unchecked;

    #[test]
    fn test_parse_token() {
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;
        account.owner = owner_pubkey;
        account.amount = 42;
        account.is_initialized = true;
        assert!(parse_token(&account_data, None).is_err());
        assert_eq!(
            parse_token(&account_data, Some(2)).unwrap(),
            TokenAccountType::Account(UiTokenAccount {
                mint: mint_pubkey.to_string(),
                owner: owner_pubkey.to_string(),
                token_amount: UiTokenAmount {
                    ui_amount: 0.42,
                    decimals: 2,
                    amount: "42".to_string()
                },
                delegate: None,
                is_initialized: true,
                is_native: false,
                delegated_amount: UiTokenAmount {
                    ui_amount: 0.0,
                    decimals: 2,
                    amount: "0".to_string()
                },
            }),
        );

        let mut mint_data = [0; size_of::<Mint>()];
        let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
        mint.owner = COption::Some(owner_pubkey);
        mint.decimals = 3;
        mint.is_initialized = true;
        assert_eq!(
            parse_token(&mint_data, None).unwrap(),
            TokenAccountType::Mint(UiMint {
                owner: Some(owner_pubkey.to_string()),
                decimals: 3,
                is_initialized: true,
            }),
        );

        let signer1 = SplTokenPubkey::new(&[1; 32]);
        let signer2 = SplTokenPubkey::new(&[2; 32]);
        let signer3 = SplTokenPubkey::new(&[3; 32]);
        let mut multisig_data = [0; size_of::<Multisig>()];
        let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
        let mut signers = [SplTokenPubkey::default(); 11];
        signers[0] = signer1;
        signers[1] = signer2;
        signers[2] = signer3;
        multisig.m = 2;
        multisig.n = 3;
        multisig.is_initialized = true;
        multisig.signers = signers;
        assert_eq!(
            parse_token(&multisig_data, None).unwrap(),
            TokenAccountType::Multisig(UiMultisig {
                num_required_signers: 2,
                num_valid_signers: 3,
                is_initialized: true,
                signers: vec![
                    signer1.to_string(),
                    signer2.to_string(),
                    signer3.to_string()
                ],
            }),
        );

        let bad_data = vec![0; 4];
        assert!(parse_token(&bad_data, None).is_err());
    }

    #[test]
    fn test_get_token_account_mint() {
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;

        let expected_mint_pubkey = Pubkey::new(&[2; 32]);
        assert_eq!(
            get_token_account_mint(&account_data),
            Some(expected_mint_pubkey)
        );
    }
}
144  account-decoder/src/parse_vote.rs  Normal file
@ -0,0 +1,144 @@
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
    clock::{Epoch, Slot},
    pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
    let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
    let epoch_credits = vote_state
        .epoch_credits()
        .iter()
        .map(|(epoch, credits, previous_credits)| UiEpochCredits {
            epoch: *epoch,
            credits: credits.to_string(),
            previous_credits: previous_credits.to_string(),
        })
        .collect();
    let votes = vote_state
        .votes
        .iter()
        .map(|lockout| UiLockout {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        })
        .collect();
    let authorized_voters = vote_state
        .authorized_voters()
        .iter()
        .map(|(epoch, authorized_voter)| UiAuthorizedVoters {
            epoch: *epoch,
            authorized_voter: authorized_voter.to_string(),
        })
        .collect();
    let prior_voters = vote_state
        .prior_voters()
        .buf()
        .iter()
        .filter(|(pubkey, _, _)| pubkey != &Pubkey::default())
        .map(
            |(authorized_pubkey, epoch_of_last_authorized_switch, target_epoch)| UiPriorVoters {
                authorized_pubkey: authorized_pubkey.to_string(),
                epoch_of_last_authorized_switch: *epoch_of_last_authorized_switch,
                target_epoch: *target_epoch,
            },
        )
        .collect();
    Ok(VoteAccountType::Vote(UiVoteState {
        node_pubkey: vote_state.node_pubkey.to_string(),
        authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
        commission: vote_state.commission,
        votes,
        root_slot: vote_state.root_slot,
        authorized_voters,
        prior_voters,
        epoch_credits,
        last_timestamp: vote_state.last_timestamp,
    }))
}
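A minimal standalone sketch of calling `parse_vote` directly; it mirrors the test at the bottom of this file and assumes the caller already holds the raw vote-account bytes (for example from an RPC account query):

// Illustrative only: serialize a default VoteState and parse it back into the
// UI representation defined below.
fn roundtrip_default_vote_state() -> Result<VoteAccountType, ParseAccountError> {
    use solana_vote_program::vote_state::VoteStateVersions;
    let mut data = vec![0; VoteState::size_of()];
    let versioned = VoteStateVersions::Current(Box::new(VoteState::default()));
    VoteState::serialize(&versioned, &mut data).expect("serialize VoteState");
    parse_vote(&data)
}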
/// A wrapper enum for consistency across programs
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum VoteAccountType {
    Vote(UiVoteState),
}

/// A duplicate representation of VoteState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiVoteState {
    node_pubkey: String,
    authorized_withdrawer: String,
    commission: u8,
    votes: Vec<UiLockout>,
    root_slot: Option<Slot>,
    authorized_voters: Vec<UiAuthorizedVoters>,
    prior_voters: Vec<UiPriorVoters>,
    epoch_credits: Vec<UiEpochCredits>,
    last_timestamp: BlockTimestamp,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiLockout {
    slot: Slot,
    confirmation_count: u32,
}

impl From<&Lockout> for UiLockout {
    fn from(lockout: &Lockout) -> Self {
        Self {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiAuthorizedVoters {
    epoch: Epoch,
    authorized_voter: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiPriorVoters {
    authorized_pubkey: String,
    epoch_of_last_authorized_switch: Epoch,
    target_epoch: Epoch,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
    epoch: Epoch,
    credits: StringAmount,
    previous_credits: StringAmount,
}
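`credits` and `previous_credits` use `StringAmount` rather than bare integers so that values above 2^53 survive JSON consumers that parse numbers as doubles. A hypothetical check of that behavior, assuming `serde_json` is available as a dev-dependency (it is not part of this diff):

#[cfg(test)]
mod epoch_credits_example {
    use super::*;

    // Hypothetical example: large u64 credits stay exact because they are
    // serialized as strings, not JSON numbers.
    #[test]
    fn credits_serialize_as_strings() {
        let credits = UiEpochCredits {
            epoch: 400,
            credits: std::u64::MAX.to_string(),
            previous_credits: "0".to_string(),
        };
        let json = serde_json::to_string(&credits).unwrap();
        assert!(json.contains(&format!("\"{}\"", std::u64::MAX)));
    }
}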
#[cfg(test)]
mod test {
    use super::*;
    use solana_vote_program::vote_state::VoteStateVersions;

    #[test]
    fn test_parse_vote() {
        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let mut expected_vote_state = UiVoteState::default();
        expected_vote_state.node_pubkey = Pubkey::default().to_string();
        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
        assert_eq!(
            parse_vote(&vote_account_data).unwrap(),
            VoteAccountType::Vote(expected_vote_state)
        );

        let bad_data = vec![0; 4];
        assert!(parse_vote(&bad_data).is_err());
    }
}
18  account-decoder/src/validator_info.rs  Normal file
@ -0,0 +1,18 @@
use solana_config_program::ConfigState;

pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;

solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");

#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
    pub info: String,
}

impl ConfigState for ValidatorInfo {
    fn max_space() -> u64 {
        MAX_VALIDATOR_INFO
    }
}
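For context on how these limits are meant to be used: `info` holds a JSON document whose individual fields are expected to respect the short/long length caps, and the whole serialized struct must fit within `MAX_VALIDATOR_INFO` bytes. A hypothetical builder sketch (field names and `serde_json` usage are illustrative assumptions, not dictated by this file):

// Hypothetical helper, not part of the diff: validate field lengths before
// packing them into the `info` JSON string.
fn build_info(name: &str, details: &str) -> Option<ValidatorInfo> {
    if name.len() > MAX_SHORT_FIELD_LENGTH || details.len() > MAX_LONG_FIELD_LENGTH {
        return None;
    }
    let info = serde_json::json!({ "name": name, "details": details }).to_string();
    Some(ValidatorInfo { info })
}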
@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.2.0"
version = "1.2.21"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.2.0" }
solana-runtime = { path = "../runtime", version = "1.2.0" }
solana-measure = { path = "../measure", version = "1.2.0" }
solana-sdk = { path = "../sdk", version = "1.2.0" }
solana-logger = { path = "../logger", version = "1.2.21" }
solana-runtime = { path = "../runtime", version = "1.2.21" }
solana-measure = { path = "../measure", version = "1.2.21" }
solana-sdk = { path = "../sdk", version = "1.2.21" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.2.0"
version = "1.2.21"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -13,16 +13,16 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
solana-streamer = { path = "../streamer", version = "1.2.0" }
solana-perf = { path = "../perf", version = "1.2.0" }
solana-ledger = { path = "../ledger", version = "1.2.0" }
solana-logger = { path = "../logger", version = "1.2.0" }
solana-runtime = { path = "../runtime", version = "1.2.0" }
solana-measure = { path = "../measure", version = "1.2.0" }
solana-sdk = { path = "../sdk", version = "1.2.0" }
solana-version = { path = "../version", version = "1.2.0" }
solana-core = { path = "../core", version = "1.2.21" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
solana-streamer = { path = "../streamer", version = "1.2.21" }
solana-perf = { path = "../perf", version = "1.2.21" }
solana-ledger = { path = "../ledger", version = "1.2.21" }
solana-logger = { path = "../logger", version = "1.2.21" }
solana-runtime = { path = "../runtime", version = "1.2.21" }
solana-measure = { path = "../measure", version = "1.2.21" }
solana-sdk = { path = "../sdk", version = "1.2.21" }
solana-version = { path = "../version", version = "1.2.21" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@ -169,7 +169,7 @@ fn main() {
    let (verified_sender, verified_receiver) = unbounded();
    let (vote_sender, vote_receiver) = unbounded();
    let bank0 = Bank::new(&genesis_config);
    let mut bank_forks = BankForks::new(0, bank0);
    let mut bank_forks = BankForks::new(bank0);
    let mut bank = bank_forks.working_bank();

    info!("threads: {} txs: {}", num_threads, total_num_transactions);
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -18,21 +18,21 @@ rand = "0.7.0"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.53"
|
||||
serde_yaml = "0.8.12"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
|
||||
solana-core = { path = "../core", version = "1.2.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.2.0" }
|
||||
solana-client = { path = "../client", version = "1.2.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.2.0" }
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.0" }
|
||||
solana-version = { path = "../version", version = "1.2.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
|
||||
solana-core = { path = "../core", version = "1.2.21" }
|
||||
solana-genesis = { path = "../genesis", version = "1.2.21" }
|
||||
solana-client = { path = "../client", version = "1.2.21" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.21" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.2.21" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.21" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.21" }
|
||||
solana-version = { path = "../version", version = "1.2.21" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.2.21" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -14,6 +14,7 @@ use solana_metrics::datapoint_info;
|
||||
use solana_sdk::{
|
||||
client::{Client, SyncClient},
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
timing::{duration_as_ms, duration_as_s},
|
||||
@ -449,7 +450,7 @@ fn swapper<T>(
|
||||
}
|
||||
account_group = (account_group + 1) % account_groups as usize;
|
||||
|
||||
let (blockhash, _fee_calculator) = client
|
||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
|
||||
.expect("Failed to get blockhash");
|
||||
let to_swap_txs: Vec<_> = to_swap
|
||||
@ -457,16 +458,14 @@ fn swapper<T>(
|
||||
.map(|(signer, swap, profit)| {
|
||||
let s: &Keypair = &signer;
|
||||
let owner = &signer.pubkey();
|
||||
Transaction::new_signed_instructions(
|
||||
&[s],
|
||||
&[exchange_instruction::swap_request(
|
||||
owner,
|
||||
&swap.0.pubkey,
|
||||
&swap.1.pubkey,
|
||||
&profit,
|
||||
)],
|
||||
blockhash,
|
||||
)
|
||||
let instruction = exchange_instruction::swap_request(
|
||||
owner,
|
||||
&swap.0.pubkey,
|
||||
&swap.1.pubkey,
|
||||
&profit,
|
||||
);
|
||||
let message = Message::new(&[instruction], Some(&s.pubkey()));
|
||||
Transaction::new(&[s], message, blockhash)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@ -577,7 +576,7 @@ fn trader<T>(
|
||||
}
|
||||
account_group = (account_group + 1) % account_groups as usize;
|
||||
|
||||
let (blockhash, _fee_calculator) = client
|
||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
|
||||
.expect("Failed to get blockhash");
|
||||
|
||||
@ -588,28 +587,26 @@ fn trader<T>(
|
||||
let owner_pubkey = &owner.pubkey();
|
||||
let trade_pubkey = &trade.pubkey();
|
||||
let space = mem::size_of::<ExchangeState>() as u64;
|
||||
Transaction::new_signed_instructions(
|
||||
&[owner.as_ref(), trade],
|
||||
&[
|
||||
system_instruction::create_account(
|
||||
owner_pubkey,
|
||||
trade_pubkey,
|
||||
1,
|
||||
space,
|
||||
&id(),
|
||||
),
|
||||
exchange_instruction::trade_request(
|
||||
owner_pubkey,
|
||||
trade_pubkey,
|
||||
*side,
|
||||
pair,
|
||||
tokens,
|
||||
price,
|
||||
src,
|
||||
),
|
||||
],
|
||||
blockhash,
|
||||
)
|
||||
let instructions = [
|
||||
system_instruction::create_account(
|
||||
owner_pubkey,
|
||||
trade_pubkey,
|
||||
1,
|
||||
space,
|
||||
&id(),
|
||||
),
|
||||
exchange_instruction::trade_request(
|
||||
owner_pubkey,
|
||||
trade_pubkey,
|
||||
*side,
|
||||
pair,
|
||||
tokens,
|
||||
price,
|
||||
src,
|
||||
),
|
||||
];
|
||||
let message = Message::new(&instructions, Some(&owner_pubkey));
|
||||
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@ -747,13 +744,9 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
|
||||
let mut to_fund_txs: Vec<_> = chunk
|
||||
.par_iter()
|
||||
.map(|(k, m)| {
|
||||
(
|
||||
k.clone(),
|
||||
Transaction::new_unsigned_instructions(&system_instruction::transfer_many(
|
||||
&k.pubkey(),
|
||||
&m,
|
||||
)),
|
||||
)
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
|
||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||
(k.clone(), Transaction::new_unsigned(message))
|
||||
})
|
||||
.collect();
|
||||
|
||||
@ -776,7 +769,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
|
||||
to_fund_txs.len(),
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = client
|
||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
|
||||
.expect("blockhash");
|
||||
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
|
||||
@ -848,9 +841,10 @@ pub fn create_token_accounts<T: Client>(
|
||||
);
|
||||
let request_ix =
|
||||
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
|
||||
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
|
||||
(
|
||||
(from_keypair, new_keypair),
|
||||
Transaction::new_unsigned_instructions(&[create_ix, request_ix]),
|
||||
Transaction::new_unsigned(message),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
@ -868,7 +862,7 @@ pub fn create_token_accounts<T: Client>(
|
||||
|
||||
let mut retries = 0;
|
||||
while !to_create_txs.is_empty() {
|
||||
let (blockhash, _fee_calculator) = client
|
||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
|
||||
.expect("Failed to get blockhash");
|
||||
to_create_txs
|
||||
@ -997,7 +991,7 @@ pub fn airdrop_lamports<T: Client>(
|
||||
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
let (blockhash, _fee_calculator) = client
|
||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
|
||||
.expect("Failed to get blockhash");
|
||||
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
|
||||
|
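The hunks above replace `Transaction::new_signed_instructions` with an explicit `Message::new` followed by `Transaction::new`, naming the fee payer when the message is built and supplying signers when the transaction is constructed. A minimal standalone sketch of that pattern using a plain system transfer (keypair, destination and blockhash are placeholders, not values from this diff):

use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

// Sketch of the Message-then-Transaction construction adopted throughout this diff.
fn build_transfer(payer: &Keypair, to: &Pubkey, lamports: u64, blockhash: Hash) -> Transaction {
    let instruction = system_instruction::transfer(&payer.pubkey(), to, lamports);
    // The fee payer is declared while building the message...
    let message = Message::new(&[instruction], Some(&payer.pubkey()));
    // ...and the signers are supplied when the transaction is created.
    Transaction::new(&[payer], message, blockhash)
}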
@ -2,18 +2,18 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.0" }
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-version = { path = "../version", version = "1.2.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.21" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-version = { path = "../version", version = "1.2.21" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -14,28 +14,23 @@ log = "0.4.8"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.53"
|
||||
serde_yaml = "0.8.12"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
|
||||
solana-core = { path = "../core", version = "1.2.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.2.0" }
|
||||
solana-client = { path = "../client", version = "1.2.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.0" }
|
||||
solana-librapay = { path = "../programs/librapay", version = "1.2.0", optional = true }
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.0" }
|
||||
solana-measure = { path = "../measure", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.0" }
|
||||
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.0", optional = true }
|
||||
solana-version = { path = "../version", version = "1.2.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
|
||||
solana-core = { path = "../core", version = "1.2.21" }
|
||||
solana-genesis = { path = "../genesis", version = "1.2.21" }
|
||||
solana-client = { path = "../client", version = "1.2.21" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.21" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.21" }
|
||||
solana-measure = { path = "../measure", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.21" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.21" }
|
||||
solana-version = { path = "../version", version = "1.2.21" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
serial_test_derive = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }
|
||||
|
||||
[features]
|
||||
move = ["solana-librapay", "solana-move-loader-program"]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.2.21" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -4,8 +4,6 @@ use rayon::prelude::*;
|
||||
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
#[cfg(feature = "move")]
|
||||
use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{self, datapoint_info};
|
||||
use solana_sdk::{
|
||||
@ -14,6 +12,7 @@ use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_instruction, system_transaction,
|
||||
@ -36,9 +35,6 @@ use std::{
|
||||
const MAX_TX_QUEUE_AGE: u64 =
|
||||
MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
use solana_librapay::librapay_transaction;
|
||||
|
||||
pub const MAX_SPENDS_PER_TX: u64 = 4;
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -50,12 +46,12 @@ pub type Result<T> = std::result::Result<T, BenchTpsError>;
|
||||
|
||||
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
|
||||
|
||||
type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
|
||||
|
||||
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
|
||||
loop {
|
||||
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
|
||||
Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
|
||||
Ok((blockhash, fee_calculator, _last_valid_slot)) => {
|
||||
return (blockhash, fee_calculator)
|
||||
}
|
||||
Err(err) => {
|
||||
info!("Couldn't get recent blockhash: {:?}", err);
|
||||
sleep(Duration::from_secs(1));
|
||||
@ -119,7 +115,6 @@ fn generate_chunked_transfers(
|
||||
threads: usize,
|
||||
duration: Duration,
|
||||
sustained: bool,
|
||||
libra_args: Option<LibraKeys>,
|
||||
) {
|
||||
// generate and send transactions for the specified duration
|
||||
let start = Instant::now();
|
||||
@ -134,7 +129,6 @@ fn generate_chunked_transfers(
|
||||
&dest_keypair_chunks[chunk_index],
|
||||
threads,
|
||||
reclaim_lamports_back_to_source_account,
|
||||
&libra_args,
|
||||
);
|
||||
|
||||
// In sustained mode, overlap the transfers with generation. This has higher average
|
||||
@ -202,12 +196,7 @@ where
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn do_bench_tps<T>(
|
||||
client: Arc<T>,
|
||||
config: Config,
|
||||
gen_keypairs: Vec<Keypair>,
|
||||
libra_args: Option<LibraKeys>,
|
||||
) -> u64
|
||||
pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
|
||||
where
|
||||
T: 'static + Client + Send + Sync,
|
||||
{
|
||||
@ -291,7 +280,6 @@ where
|
||||
threads,
|
||||
duration,
|
||||
sustained,
|
||||
libra_args,
|
||||
);
|
||||
|
||||
// Stop the sampling threads so it will collect the stats
|
||||
@ -337,52 +325,6 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
fn generate_move_txs(
|
||||
source: &[&Keypair],
|
||||
dest: &VecDeque<&Keypair>,
|
||||
reclaim: bool,
|
||||
move_keypairs: &[Keypair],
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_id: &Pubkey,
|
||||
blockhash: &Hash,
|
||||
) -> Vec<(Transaction, u64)> {
|
||||
let count = move_keypairs.len() / 2;
|
||||
let source_move = &move_keypairs[..count];
|
||||
let dest_move = &move_keypairs[count..];
|
||||
let pairs: Vec<_> = if !reclaim {
|
||||
source_move
|
||||
.iter()
|
||||
.zip(dest_move.iter())
|
||||
.zip(source.iter())
|
||||
.collect()
|
||||
} else {
|
||||
dest_move
|
||||
.iter()
|
||||
.zip(source_move.iter())
|
||||
.zip(dest.iter())
|
||||
.collect()
|
||||
};
|
||||
|
||||
pairs
|
||||
.par_iter()
|
||||
.map(|((from, to), payer)| {
|
||||
(
|
||||
librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
libra_mint_id,
|
||||
&payer,
|
||||
&from,
|
||||
&to.pubkey(),
|
||||
1,
|
||||
*blockhash,
|
||||
),
|
||||
timestamp(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_system_txs(
|
||||
source: &[&Keypair],
|
||||
dest: &VecDeque<&Keypair>,
|
||||
@ -413,7 +355,6 @@ fn generate_txs(
|
||||
dest: &VecDeque<&Keypair>,
|
||||
threads: usize,
|
||||
reclaim: bool,
|
||||
libra_args: &Option<LibraKeys>,
|
||||
) {
|
||||
let blockhash = *blockhash.read().unwrap();
|
||||
let tx_count = source.len();
|
||||
@ -423,33 +364,7 @@ fn generate_txs(
|
||||
);
|
||||
let signing_start = Instant::now();
|
||||
|
||||
let transactions = if let Some((
|
||||
_libra_genesis_keypair,
|
||||
_libra_pay_program_id,
|
||||
_libra_mint_program_id,
|
||||
_libra_keys,
|
||||
)) = libra_args
|
||||
{
|
||||
#[cfg(not(feature = "move"))]
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
{
|
||||
generate_move_txs(
|
||||
source,
|
||||
dest,
|
||||
reclaim,
|
||||
&_libra_keys,
|
||||
_libra_pay_program_id,
|
||||
&_libra_genesis_keypair.pubkey(),
|
||||
&blockhash,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
generate_system_txs(source, dest, reclaim, &blockhash)
|
||||
};
|
||||
let transactions = generate_system_txs(source, dest, reclaim, &blockhash);
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
@ -650,10 +565,9 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
|
||||
.par_iter()
|
||||
.map(|(k, t)| {
|
||||
let tx = Transaction::new_unsigned_instructions(
|
||||
&system_instruction::transfer_many(&k.pubkey(), &t),
|
||||
);
|
||||
(*k, tx)
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
|
||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||
(*k, Transaction::new_unsigned(message))
|
||||
})
|
||||
.collect();
|
||||
make_txs.stop();
|
||||
@ -952,181 +866,13 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
|
||||
(rnd.gen_n_keypairs(total_keys), extra)
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
fn fund_move_keys<T: Client>(
|
||||
client: &T,
|
||||
funding_key: &Keypair,
|
||||
keypairs: &[Keypair],
|
||||
total: u64,
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_program_id: &Pubkey,
|
||||
libra_genesis_key: &Keypair,
|
||||
) {
|
||||
let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||
|
||||
info!("creating the libra funding account..");
|
||||
let libra_funding_key = Keypair::new();
|
||||
let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
|
||||
client
|
||||
.send_message(&[funding_key, &libra_funding_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("minting to funding keypair");
|
||||
let tx = librapay_transaction::mint_tokens(
|
||||
&libra_mint_program_id,
|
||||
funding_key,
|
||||
libra_genesis_key,
|
||||
&libra_funding_key.pubkey(),
|
||||
total,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_message(&[funding_key, libra_genesis_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("creating {} move accounts...", keypairs.len());
|
||||
let total_len = keypairs.len();
|
||||
let create_len = 5;
|
||||
let mut funding_time = Measure::start("funding_time");
|
||||
for (i, keys) in keypairs.chunks(create_len).enumerate() {
|
||||
if client
|
||||
.get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
|
||||
.unwrap_or(0)
|
||||
> 0
|
||||
{
|
||||
// already created these accounts.
|
||||
break;
|
||||
}
|
||||
|
||||
let keypairs: Vec<_> = keys.iter().map(|k| k).collect();
|
||||
let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
|
||||
let ser_size = bincode::serialized_size(&tx).unwrap();
|
||||
let mut keys = vec![funding_key];
|
||||
keys.extend(&keypairs);
|
||||
client.send_message(&keys, tx.message).unwrap();
|
||||
|
||||
if i % 10 == 0 {
|
||||
info!(
|
||||
"created {} accounts of {} (size {})",
|
||||
i,
|
||||
total_len / create_len,
|
||||
ser_size,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const NUM_FUNDING_KEYS: usize = 10;
|
||||
let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
let pubkey_amounts: Vec<_> = funding_keys
|
||||
.iter()
|
||||
.map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
|
||||
.collect();
|
||||
let tx = Transaction::new_signed_instructions(
|
||||
&[funding_key],
|
||||
&system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
|
||||
blockhash,
|
||||
);
|
||||
client.send_message(&[funding_key], tx.message).unwrap();
|
||||
let mut balance = 0;
|
||||
for _ in 0..20 {
|
||||
if let Ok(balance_) = client
|
||||
.get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
|
||||
{
|
||||
if balance_ > 0 {
|
||||
balance = balance_;
|
||||
break;
|
||||
}
|
||||
}
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
assert!(balance > 0);
|
||||
info!(
|
||||
"funded multiple funding accounts with {:?} lanports",
|
||||
balance
|
||||
);
|
||||
|
||||
let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
for (i, key) in libra_funding_keys.iter().enumerate() {
|
||||
let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
|
||||
client
|
||||
.send_message(&[&funding_keys[i], &key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_genesis_key.pubkey(),
|
||||
&funding_keys[i],
|
||||
&libra_funding_key,
|
||||
&key.pubkey(),
|
||||
total / NUM_FUNDING_KEYS as u64,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_message(&[&funding_keys[i], &libra_funding_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("funded libra funding key {}", i);
|
||||
}
|
||||
|
||||
let keypair_count = keypairs.len();
|
||||
let amount = total / (keypair_count as u64);
|
||||
for (i, keys) in keypairs[..keypair_count]
|
||||
.chunks(NUM_FUNDING_KEYS)
|
||||
.enumerate()
|
||||
{
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_genesis_key.pubkey(),
|
||||
&funding_keys[j],
|
||||
&libra_funding_keys[j],
|
||||
&key.pubkey(),
|
||||
amount,
|
||||
blockhash,
|
||||
);
|
||||
|
||||
let _sig = client
|
||||
.async_send_transaction(tx.clone())
|
||||
.expect("create_account in generate_and_fund_keypairs");
|
||||
}
|
||||
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let mut times = 0;
|
||||
loop {
|
||||
let balance =
|
||||
librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
|
||||
if balance >= amount {
|
||||
break;
|
||||
} else if times > 20 {
|
||||
info!("timed out.. {} key: {} balance: {}", i, j, balance);
|
||||
break;
|
||||
} else {
|
||||
times += 1;
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"funded group {} of {}",
|
||||
i + 1,
|
||||
keypairs.len() / NUM_FUNDING_KEYS
|
||||
);
|
||||
blockhash = get_recent_blockhash(client).0;
|
||||
}
|
||||
|
||||
funding_time.stop();
|
||||
info!("done funding keys, took {} ms", funding_time.as_ms());
|
||||
}
|
||||
|
||||
pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
client: Arc<T>,
|
||||
faucet_addr: Option<SocketAddr>,
|
||||
funding_key: &Keypair,
|
||||
keypair_count: usize,
|
||||
lamports_per_account: u64,
|
||||
use_move: bool,
|
||||
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
|
||||
) -> Result<Vec<Keypair>> {
|
||||
info!("Creating {} keypairs...", keypair_count);
|
||||
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
|
||||
info!("Get lamports...");
|
||||
@ -1139,12 +885,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
let last_key = keypairs[keypair_count - 1].pubkey();
|
||||
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
let mut move_keypairs_ret = None;
|
||||
|
||||
#[cfg(not(feature = "move"))]
|
||||
let move_keypairs_ret = None;
|
||||
|
||||
// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
|
||||
// start another bench-tps run without re-funding all of the keypairs, check if the
|
||||
// keypairs still have at least 80% of the expected funds. That should be enough to
|
||||
@ -1155,10 +895,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
let max_fee = fee_rate_governor.max_lamports_per_signature;
|
||||
let extra_fees = extra * max_fee;
|
||||
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
|
||||
let mut total = lamports_per_account * total_keypairs + extra_fees;
|
||||
if use_move {
|
||||
total *= 3;
|
||||
}
|
||||
let total = lamports_per_account * total_keypairs + extra_fees;
|
||||
|
||||
let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
|
||||
info!(
|
||||
@ -1170,40 +907,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
{
|
||||
if use_move {
|
||||
let libra_genesis_keypair =
|
||||
create_genesis(&funding_key, client.as_ref(), 10_000_000);
|
||||
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
|
||||
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
|
||||
|
||||
// Generate another set of keypairs for move accounts.
|
||||
// Still fund the solana ones which will be used for fees.
|
||||
let seed = [0u8; 32];
|
||||
let mut rnd = GenKeys::new(seed);
|
||||
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
|
||||
fund_move_keys(
|
||||
client.as_ref(),
|
||||
funding_key,
|
||||
&move_keypairs,
|
||||
total / 3,
|
||||
&libra_pay_program_id,
|
||||
&libra_mint_program_id,
|
||||
&libra_genesis_keypair,
|
||||
);
|
||||
move_keypairs_ret = Some((
|
||||
libra_genesis_keypair,
|
||||
libra_pay_program_id,
|
||||
libra_mint_program_id,
|
||||
move_keypairs,
|
||||
));
|
||||
|
||||
// Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
|
||||
total /= 3;
|
||||
}
|
||||
}
|
||||
|
||||
fund_keys(
|
||||
client,
|
||||
funding_key,
|
||||
@ -1217,7 +920,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
|
||||
keypairs.truncate(keypair_count);
|
||||
|
||||
Ok((keypairs, move_keypairs_ret))
|
||||
Ok(keypairs)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -1241,11 +944,11 @@ mod tests {
|
||||
config.duration = Duration::from_secs(5);
|
||||
|
||||
let keypair_count = config.tx_count * config.keypair_multiplier;
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
|
||||
.unwrap();
|
||||
|
||||
do_bench_tps(client, config, keypairs, None);
|
||||
do_bench_tps(client, config, keypairs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1256,9 +959,8 @@ mod tests {
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
|
||||
.unwrap();
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(
|
||||
@ -1280,9 +982,8 @@ mod tests {
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
|
||||
.unwrap();
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
|
||||
|
@ -23,7 +23,6 @@ pub struct Config {
|
||||
pub read_from_client_file: bool,
|
||||
pub target_lamports_per_signature: u64,
|
||||
pub multi_client: bool,
|
||||
pub use_move: bool,
|
||||
pub num_lamports_per_account: u64,
|
||||
pub target_slots_per_epoch: u64,
|
||||
}
|
||||
@ -46,7 +45,6 @@ impl Default for Config {
|
||||
read_from_client_file: false,
|
||||
target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
|
||||
multi_client: true,
|
||||
use_move: false,
|
||||
num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
|
||||
target_slots_per_epoch: 0,
|
||||
}
|
||||
@ -109,11 +107,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
|
||||
.long("sustained")
|
||||
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("use-move")
|
||||
.long("use-move")
|
||||
.help("Use Move language transactions to perform transfers."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no-multi-client")
|
||||
.long("no-multi-client")
|
||||
@ -263,7 +256,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
|
||||
args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
|
||||
}
|
||||
|
||||
args.use_move = matches.is_present("use-move");
|
||||
args.multi_client = !matches.is_present("no-multi-client");
|
||||
|
||||
if let Some(v) = matches.value_of("num_lamports_per_account") {
|
||||
|
@ -29,7 +29,6 @@ fn main() {
|
||||
write_to_client_file,
|
||||
read_from_client_file,
|
||||
target_lamports_per_signature,
|
||||
use_move,
|
||||
multi_client,
|
||||
num_lamports_per_account,
|
||||
..
|
||||
@ -86,7 +85,7 @@ fn main() {
|
||||
Arc::new(get_client(&nodes))
|
||||
};
|
||||
|
||||
let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
|
||||
let keypairs = if *read_from_client_file {
|
||||
let path = Path::new(&client_ids_and_stake_file);
|
||||
let file = File::open(path).unwrap();
|
||||
|
||||
@ -115,8 +114,8 @@ fn main() {
|
||||
// Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
|
||||
// This prevents the amount of storage needed for bench-tps accounts from creeping up
|
||||
// across multiple runs.
|
||||
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
|
||||
(keypairs, None)
|
||||
keypairs.sort_by_key(|x| x.pubkey().to_string());
|
||||
keypairs
|
||||
} else {
|
||||
generate_and_fund_keypairs(
|
||||
client.clone(),
|
||||
@ -124,7 +123,6 @@ fn main() {
|
||||
&id,
|
||||
keypair_count,
|
||||
*num_lamports_per_account,
|
||||
*use_move,
|
||||
)
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("Error could not fund keys: {:?}", e);
|
||||
@ -132,5 +130,5 @@ fn main() {
|
||||
})
|
||||
};
|
||||
|
||||
do_bench_tps(client, cli_config, keypairs, move_keypairs);
|
||||
do_bench_tps(client, cli_config, keypairs);
|
||||
}
|
||||
|
@ -6,17 +6,11 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
|
||||
use solana_core::validator::ValidatorConfig;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
|
||||
#[cfg(feature = "move")]
|
||||
use solana_sdk::move_loader::solana_move_loader_program;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::sync::{mpsc::channel, Arc};
|
||||
use std::time::Duration;
|
||||
|
||||
fn test_bench_tps_local_cluster(config: Config) {
|
||||
#[cfg(feature = "move")]
|
||||
let native_instruction_processors = vec![solana_move_loader_program()];
|
||||
|
||||
#[cfg(not(feature = "move"))]
|
||||
let native_instruction_processors = vec![];
|
||||
|
||||
solana_logger::setup();
|
||||
@ -48,17 +42,16 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
let lamports_per_account = 100;
|
||||
|
||||
let keypair_count = config.tx_count * config.keypair_multiplier;
|
||||
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
|
||||
let keypairs = generate_and_fund_keypairs(
|
||||
client.clone(),
|
||||
Some(faucet_addr),
|
||||
&config.id,
|
||||
keypair_count,
|
||||
lamports_per_account,
|
||||
config.use_move,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let _total = do_bench_tps(client, config, keypairs, move_keypairs);
|
||||
let _total = do_bench_tps(client, config, keypairs);
|
||||
|
||||
#[cfg(not(debug_assertions))]
|
||||
assert!(_total > 100);
|
||||
@ -73,14 +66,3 @@ fn test_bench_tps_local_cluster_solana() {
|
||||
|
||||
test_bench_tps_local_cluster(config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_bench_tps_local_cluster_move() {
|
||||
let mut config = Config::default();
|
||||
config.tx_count = 100;
|
||||
config.duration = Duration::from_secs(10);
|
||||
config.use_move = true;
|
||||
|
||||
test_bench_tps_local_cluster(config);
|
||||
}
|
||||
|
254  ci/buildkite-pipeline.sh  Executable file
@ -0,0 +1,254 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Builds a buildkite pipeline based on the environment variables
|
||||
#
|
||||
|
||||
set -e
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
output_file=${1:-/dev/stderr}
|
||||
|
||||
if [[ -n $CI_PULL_REQUEST ]]; then
|
||||
IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
|
||||
if [[ ${#affected_files[*]} -eq 0 ]]; then
|
||||
echo "Unable to determine the files affected by this PR"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
affected_files=()
|
||||
fi
|
||||
|
||||
annotate() {
|
||||
if [[ -n $BUILDKITE ]]; then
|
||||
buildkite-agent annotate "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
# Checks if a CI pull request affects one or more path patterns. Each
|
||||
# pattern argument is checked in series. If one of them found to be affected,
|
||||
# return immediately as such.
|
||||
#
|
||||
# Bash regular expressions are permitted in the pattern:
|
||||
# affects .rs$ -- any file or directory ending in .rs
|
||||
# affects .rs -- also matches foo.rs.bar
|
||||
# affects ^snap/ -- anything under the snap/ subdirectory
|
||||
# affects snap/ -- also matches foo/snap/
|
||||
# Any pattern starting with the ! character will be negated:
|
||||
# affects !^docs/ -- anything *not* under the docs/ subdirectory
|
||||
#
|
||||
affects() {
|
||||
if [[ -z $CI_PULL_REQUEST ]]; then
|
||||
# affected_files metadata is not currently available for non-PR builds so assume
|
||||
# the worst (affected)
|
||||
return 0
|
||||
fi
|
||||
# Assume everything needs to be tested when any Dockerfile changes
|
||||
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
|
||||
if [[ ${pattern:0:1} = "!" ]]; then
|
||||
for file in "${affected_files[@]}"; do
|
||||
if [[ ! $file =~ ${pattern:1} ]]; then
|
||||
return 0 # affected
|
||||
fi
|
||||
done
|
||||
else
|
||||
for file in "${affected_files[@]}"; do
|
||||
if [[ $file =~ $pattern ]]; then
|
||||
return 0 # affected
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
return 1 # not affected
|
||||
}
|
||||
|
||||
|
||||
# Checks if a CI pull request affects anything other than the provided path patterns
|
||||
#
|
||||
# Syntax is the same as `affects()` except that the negation prefix is not
|
||||
# supported
|
||||
#
|
||||
affects_other_than() {
|
||||
if [[ -z $CI_PULL_REQUEST ]]; then
|
||||
# affected_files metadata is not currently available for non-PR builds so assume
|
||||
# the worst (affected)
|
||||
return 0
|
||||
fi
|
||||
|
||||
for file in "${affected_files[@]}"; do
|
||||
declare matched=false
|
||||
for pattern in "$@"; do
|
||||
if [[ $file =~ $pattern ]]; then
|
||||
matched=true
|
||||
fi
|
||||
done
|
||||
if ! $matched; then
|
||||
return 0 # affected
|
||||
fi
|
||||
done
|
||||
|
||||
return 1 # not affected
|
||||
}
|
||||
|
||||
|
||||
start_pipeline() {
|
||||
echo "# $*" > "$output_file"
|
||||
echo "steps:" >> "$output_file"
|
||||
}
|
||||
|
||||
command_step() {
|
||||
cat >> "$output_file" <<EOF
|
||||
- name: "$1"
|
||||
command: "$2"
|
||||
timeout_in_minutes: $3
|
||||
artifact_paths: "log-*.txt"
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
trigger_secondary_step() {
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- trigger: "solana-secondary"
|
||||
branches: "!pull/*"
|
||||
async: true
|
||||
build:
|
||||
message: "${BUILDKITE_MESSAGE}"
|
||||
commit: "${BUILDKITE_COMMIT}"
|
||||
branch: "${BUILDKITE_BRANCH}"
|
||||
env:
|
||||
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
|
||||
EOF
|
||||
}
|
||||
|
||||
wait_step() {
|
||||
echo " - wait" >> "$output_file"
|
||||
}
|
||||
|
||||
all_test_steps() {
|
||||
command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
|
||||
wait_step
|
||||
|
||||
# Coverage...
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-coverage.sh \
|
||||
^scripts/coverage.sh \
|
||||
; then
|
||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
|
||||
wait_step
|
||||
else
|
||||
annotate --style info --context test-coverage \
|
||||
"Coverage skipped as no .rs files were modified"
|
||||
fi
|
||||
|
||||
# Full test suite
|
||||
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
||||
wait_step
|
||||
|
||||
# Perf test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-perf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf"
|
||||
timeout_in_minutes: 40
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"Stable-perf skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Benches...
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-coverage.sh \
|
||||
^ci/test-bench.sh \
|
||||
; then
|
||||
command_step bench "ci/test-bench.sh" 30
|
||||
else
|
||||
annotate --style info --context test-bench \
|
||||
"Bench skipped as no .rs files were modified"
|
||||
fi
|
||||
|
||||
command_step "local-cluster" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||
45
|
||||
}
|
||||
|
||||
pull_or_push_steps() {
|
||||
command_step sanity "ci/test-sanity.sh" 5
|
||||
wait_step
|
||||
|
||||
# Check for any .sh file changes
|
||||
if affects .sh$; then
|
||||
command_step shellcheck "ci/shellcheck.sh" 5
|
||||
wait_step
|
||||
fi
|
||||
|
||||
# Run the full test suite by default, skipping only if modifications are local
|
||||
# to some particular areas of the tree
|
||||
if affects_other_than ^.buildkite ^.travis .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
|
||||
all_test_steps
|
||||
fi
|
||||
|
||||
# web3.js, explorer and docs changes run on Travis...
|
||||
}
|
||||
|
||||
|
||||
if [[ -n $BUILDKITE_TAG ]]; then
|
||||
start_pipeline "Tag pipeline for $BUILDKITE_TAG"
|
||||
|
||||
annotate --style info --context release-tag \
|
||||
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
|
||||
|
||||
# Jump directly to the secondary build to publish release artifacts quickly
|
||||
trigger_secondary_step
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
|
||||
echo "+++ Affected files in this PR"
|
||||
for file in "${affected_files[@]}"; do
|
||||
echo "- $file"
|
||||
done
|
||||
|
||||
start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"
|
||||
|
||||
# Add helpful link back to the corresponding Github Pull Request
|
||||
annotate --style info --context pr-backlink \
|
||||
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
|
||||
|
||||
if [[ $GITHUB_USER = "dependabot-preview[bot]" ]]; then
|
||||
command_step dependabot "ci/dependabot-pr.sh" 5
|
||||
wait_step
|
||||
fi
|
||||
pull_or_push_steps
|
||||
exit 0
|
||||
fi
|
||||
|
||||
start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
|
||||
pull_or_push_steps
|
||||
wait_step
|
||||
trigger_secondary_step
|
||||
exit 0
|
@ -5,9 +5,6 @@ steps:
|
||||
- command: "ci/publish-tarball.sh"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball"
|
||||
- command: "ci/publish-docs.sh"
|
||||
timeout_in_minutes: 15
|
||||
name: "publish docs"
|
||||
- command: "ci/publish-bpf-sdk.sh"
|
||||
timeout_in_minutes: 5
|
||||
name: "publish bpf sdk"
|
||||
@ -19,6 +16,3 @@ steps:
|
||||
timeout_in_minutes: 240
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
# - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
|
||||
# name: "move"
|
||||
# timeout_in_minutes: 20
|
||||
|
@ -5,6 +5,9 @@
|
||||
# Release tags use buildkite-release.yml instead
|
||||
|
||||
steps:
|
||||
- command: "ci/test-sanity.sh"
|
||||
name: "sanity"
|
||||
timeout_in_minutes: 5
|
||||
- command: "ci/dependabot-pr.sh"
|
||||
name: "dependabot"
|
||||
timeout_in_minutes: 5
|
||||
|
@ -67,6 +67,7 @@ ARGS+=(
|
||||
--env BUILDKITE_JOB_ID
|
||||
--env CI
|
||||
--env CI_BRANCH
|
||||
--env CI_BASE_BRANCH
|
||||
--env CI_TAG
|
||||
--env CI_BUILD_ID
|
||||
--env CI_COMMIT
|
||||
|
@ -8,10 +8,11 @@ if [[ -n $CI ]]; then
|
||||
export CI=1
|
||||
if [[ -n $TRAVIS ]]; then
|
||||
export CI_BRANCH=$TRAVIS_BRANCH
|
||||
export CI_BASE_BRANCH=$TRAVIS_BRANCH
|
||||
export CI_BUILD_ID=$TRAVIS_BUILD_ID
|
||||
export CI_COMMIT=$TRAVIS_COMMIT
|
||||
export CI_JOB_ID=$TRAVIS_JOB_ID
|
||||
if $TRAVIS_PULL_REQUEST; then
|
||||
if [[ $TRAVIS_PULL_REQUEST != false ]]; then
|
||||
export CI_PULL_REQUEST=true
|
||||
else
|
||||
export CI_PULL_REQUEST=
|
||||
@ -28,8 +29,10 @@ if [[ -n $CI ]]; then
|
||||
# to how solana-ci-gate is used to trigger PR builds rather than using the
|
||||
# standard Buildkite PR trigger.
|
||||
if [[ $CI_BRANCH =~ pull/* ]]; then
|
||||
export CI_BASE_BRANCH=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
|
||||
export CI_PULL_REQUEST=true
|
||||
else
|
||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_PULL_REQUEST=
|
||||
fi
|
||||
export CI_OS_NAME=linux
|
||||
|
@ -1,32 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
echo --- build docs
|
||||
(
|
||||
set -x
|
||||
. ci/rust-version.sh stable
|
||||
ci/docker-run.sh "$rust_stable_docker_image" docs/build.sh
|
||||
)
|
||||
|
||||
echo --- update gitbook-cage
|
||||
if [[ -n $CI_BRANCH ]]; then
|
||||
(
|
||||
# make a local commit for the svgs and generated/updated markdown
|
||||
set -x
|
||||
git add -f docs/src
|
||||
if ! git diff-index --quiet HEAD; then
|
||||
git config user.email maintainers@solana.com
|
||||
git config user.name "$(basename "$0")"
|
||||
git commit -m "gitbook-cage update $(date -Is)"
|
||||
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
|
||||
# pop off the local commit
|
||||
git reset --hard HEAD~
|
||||
fi
|
||||
)
|
||||
else
|
||||
echo CI_BRANCH not set
|
||||
fi
|
||||
|
||||
exit 0
|
@ -45,7 +45,7 @@ linux)
|
||||
TARGET=x86_64-unknown-linux-gnu
|
||||
;;
|
||||
windows)
|
||||
TARGET=x86_64-pc-windows-gnu
|
||||
TARGET=x86_64-pc-windows-msvc
|
||||
;;
|
||||
*)
|
||||
echo CI_OS_NAME unset
|
||||
|
@ -7,7 +7,7 @@ source multinode-demo/common.sh
|
||||
|
||||
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
|
||||
|
||||
timeout 15 ./run.sh &
|
||||
timeout 120 ./run.sh &
|
||||
pid=$!
|
||||
|
||||
attempts=20
|
||||
@ -19,10 +19,16 @@ while [[ ! -f config/run/init-completed ]]; do
|
||||
fi
|
||||
done
|
||||
|
||||
snapshot_slot=1
|
||||
|
||||
# wait a bit longer than snapshot_slot
|
||||
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
|
||||
sleep 1
|
||||
done
|
||||
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
|
||||
|
||||
wait $pid
|
||||
|
||||
$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
|
||||
$solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" config/snapshot-ledger
|
||||
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
|
||||
$solana_ledger_tool verify --ledger config/snapshot-ledger
|
||||
|
@ -27,5 +27,5 @@ Alternatively, you can source it from within a script:
|
||||
local PATCH=0
|
||||
local SPECIAL=""
|
||||
|
||||
semverParseInto "1.2.3" MAJOR MINOR PATCH SPECIAL
|
||||
semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
|
||||
semverParseInto "1.2.21" MAJOR MINOR PATCH SPECIAL
|
||||
semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
|
||||
|
@@ -6,20 +6,24 @@ cd "$(dirname "$0")/.."
source ci/_
source ci/rust-version.sh stable
source ci/rust-version.sh nightly
eval "$(ci/channel-info.sh)"

export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"

# Look for failed mergify.io backports
_ git show HEAD --check --oneline

if _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets; then
true
# Only force up-to-date lock files on edge
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
if _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets; then
true
else
check_status=$?
echo "Some Cargo.lock is outdated; please update them as well"
echo "protip: you can use ./scripts/cargo-for-all-lock-files.sh update ..."
exit "$check_status"
fi
else
check_status=$?
echo "Some Cargo.lock is outdated; please update them as well"
echo "protip: you can use ./scripts/cargo-for-all-lock-files.sh update ..."
exit "$check_status"
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi

_ cargo +"$rust_stable" fmt --all -- --check
@@ -29,10 +33,7 @@ _ cargo +"$rust_stable" clippy --workspace -- --deny=warnings

_ cargo +"$rust_stable" audit --version
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh
_ ci/check-ssh-keys.sh

{
cd programs/bpf
@@ -1 +0,0 @@
test-stable.sh
ci/test-sanity.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

source ci/_

(
echo --- git diff --check
set -x
# Look for failed mergify.io backports by searching leftover conflict markers
# Also check for any trailing whitespaces!
git fetch origin "$CI_BASE_BRANCH"
git diff "$(git merge-base HEAD "origin/$CI_BASE_BRANCH")..HEAD" --check --oneline
)

echo

_ ci/nits.sh
_ ci/check-ssh-keys.sh

echo --- ok
@@ -39,15 +39,14 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
rm -rf target/xargo # Issue #3105

# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
# on machines with 2gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))
NPROC=$((NPROC>14 ? 14 : NPROC))

echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
ci/affects-files.sh \
@@ -93,27 +92,6 @@ test-stable-perf)
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
;;
test-move)
ci/affects-files.sh \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable.sh \
^ci/test-move.sh \
^programs/move_loader \
^programs/librapay \
^logger/ \
^runtime/ \
^sdk/ \
|| {
annotate --style info \
"Skipped $testName as no relevant files were modified"
exit 0
}
_ cargo +"$rust_stable" test --manifest-path programs/move_loader/Cargo.toml ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path programs/librapay/Cargo.toml ${V:+--verbose} -- --nocapture
exit 0
;;
test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
exit 1
fi

if [[ -z $CI_REPO_SLUG ]]; then
echo Error: CI_REPO_SLUG not defined
exit 1
fi
# Force CI_REPO_SLUG since sometimes
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
# artifact upload to fail
CI_REPO_SLUG=solana-labs/solana
#if [[ -z $CI_REPO_SLUG ]]; then
# echo Error: CI_REPO_SLUG not defined
# exit 1
#fi

releaseId=$( \
curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
@@ -38,6 +42,7 @@ echo "Github release id for $CI_TAG is $releaseId"
for file in "$@"; do
echo "--- Uploading $file to tag $CI_TAG of $CI_REPO_SLUG"
curl \
--verbose \
--data-binary @"$file" \
-H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.2.0"
version = "1.2.21"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.0" }
solana-sdk = { path = "../sdk", version = "1.2.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.21" }
solana-sdk = { path = "../sdk", version = "1.2.21" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@ -6,50 +6,86 @@ use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Signature},
|
||||
};
|
||||
use std::fmt::Display;
|
||||
use std::str::FromStr;
|
||||
|
||||
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
U: FromStr,
|
||||
U::Err: Display,
|
||||
{
|
||||
string
|
||||
.as_ref()
|
||||
.parse::<U>()
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("error parsing '{}': {}", string, err))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as type T.
|
||||
// Takes a String to avoid second type parameter when used as a clap validator
|
||||
pub fn is_parsable<T>(string: String) -> Result<(), String>
|
||||
where
|
||||
T: FromStr,
|
||||
T::Err: Display,
|
||||
{
|
||||
is_parsable_generic::<T, String>(string)
|
||||
}
|
||||
|
||||
// Return an error if a pubkey cannot be parsed.
|
||||
pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
match string.parse::<Pubkey>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
pub fn is_pubkey<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Pubkey, _>(string)
|
||||
}
|
||||
|
||||
// Return an error if a hash cannot be parsed.
|
||||
pub fn is_hash(string: String) -> Result<(), String> {
|
||||
match string.parse::<Hash>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
pub fn is_hash<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Hash, _>(string)
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed.
|
||||
pub fn is_keypair(string: String) -> Result<(), String> {
|
||||
read_keypair_file(&string)
|
||||
pub fn is_keypair<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
read_keypair_file(string.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed
|
||||
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
|
||||
if string.as_str() == ASK_KEYWORD {
|
||||
pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
if string.as_ref() == ASK_KEYWORD {
|
||||
return Ok(());
|
||||
}
|
||||
read_keypair_file(&string)
|
||||
read_keypair_file(string.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey string or keypair file location
|
||||
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
|
||||
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
|
||||
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
|
||||
// produce a pubkey()
|
||||
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
|
||||
match parse_keypair_path(&string) {
|
||||
pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
match parse_keypair_path(string.as_ref()) {
|
||||
KeypairUrl::Filepath(path) => is_keypair(path),
|
||||
_ => Ok(()),
|
||||
}
|
||||
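The refactor in this hunk makes every clap validator generic over T: AsRef<str> + Display and funnels parsing through a single is_parsable_generic helper instead of taking an owned String. A minimal sketch of how such a validator plugs into a clap 2.x argument follows; the argument name and the local is_slot stand-in are illustrative, not part of this diff.

use clap::{App, Arg};
use std::fmt::Display;

// Stand-in for the generic validators above: anything string-like is accepted
// and parsing is delegated to FromStr for the target type (a u64 slot here).
fn is_slot<T: AsRef<str> + Display>(slot: T) -> Result<(), String> {
    slot.as_ref()
        .parse::<u64>()
        .map(|_| ())
        .map_err(|err| format!("error parsing '{}': {}", slot, err))
}

fn main() {
    // The same function works on &str in tests and on the owned String that
    // clap 2.x hands to validators, since both satisfy AsRef<str> + Display.
    assert!(is_slot("42").is_ok());
    assert!(is_slot("not-a-slot").is_err());

    let matches = App::new("example")
        .arg(
            Arg::with_name("slot")
                .long("slot")
                .takes_value(true)
                .validator(is_slot::<String>),
        )
        .get_matches_from(vec!["example", "--slot", "42"]);
    assert_eq!(matches.value_of("slot"), Some("42"));
}

Keeping the validators generic also removes the allocation the old code needed just to satisfy a String parameter, as in the previous is_pubkey(string.clone()) call inside is_pubkey_or_keypair.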
@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
|
||||
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
|
||||
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
|
||||
// also provided and correct happens in parsing, not in validation.
|
||||
pub fn is_valid_signer(string: String) -> Result<(), String> {
|
||||
pub fn is_valid_signer<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_valid_pubkey(string)
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey=signature string
|
||||
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
let mut signer = string.split('=');
|
||||
pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
let mut signer = string.as_ref().split('=');
|
||||
match Pubkey::from_str(
|
||||
signer
|
||||
.next()
|
||||
@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
}
|
||||
|
||||
// Return an error if a url cannot be parsed.
|
||||
pub fn is_url(string: String) -> Result<(), String> {
|
||||
match url::Url::parse(&string) {
|
||||
pub fn is_url<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
match url::Url::parse(string.as_ref()) {
|
||||
Ok(url) => {
|
||||
if url.has_host() {
|
||||
Ok(())
|
||||
@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_slot(slot: String) -> Result<(), String> {
|
||||
slot.parse::<Slot>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
pub fn is_slot<T>(slot: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Slot, _>(slot)
|
||||
}
|
||||
|
||||
pub fn is_port(port: String) -> Result<(), String> {
|
||||
port.parse::<u16>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
pub fn is_port<T>(port: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<u16, _>(port)
|
||||
}
|
||||
|
||||
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
percentage
|
||||
.as_ref()
|
||||
.parse::<u8>()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_amount(amount: String) -> Result<(), String> {
|
||||
if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
|
||||
pub fn is_amount<T>(amount: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!(
|
||||
@ -147,8 +201,14 @@ pub fn is_amount(amount: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_amount_or_all(amount: String) -> Result<(), String> {
|
||||
if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() || amount == "ALL" {
|
||||
pub fn is_amount_or_all<T>(amount: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
if amount.as_ref().parse::<u64>().is_ok()
|
||||
|| amount.as_ref().parse::<f64>().is_ok()
|
||||
|| amount.as_ref() == "ALL"
|
||||
{
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!(
|
||||
@ -158,14 +218,20 @@ pub fn is_amount_or_all(amount: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
|
||||
DateTime::parse_from_rfc3339(&value)
|
||||
pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
DateTime::parse_from_rfc3339(value.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
}
|
||||
|
||||
pub fn is_derivation(value: String) -> Result<(), String> {
|
||||
let value = value.replace("'", "");
|
||||
pub fn is_derivation<T>(value: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
let value = value.as_ref().replace("'", "");
|
||||
let mut parts = value.split('/');
|
||||
let account = parts.next().unwrap();
|
||||
account
|
||||
@ -197,14 +263,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_is_derivation() {
|
||||
assert_eq!(is_derivation("2".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("65537".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0/2".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
|
||||
assert!(is_derivation("a".to_string()).is_err());
|
||||
assert!(is_derivation("4294967296".to_string()).is_err());
|
||||
assert!(is_derivation("a/b".to_string()).is_err());
|
||||
assert!(is_derivation("0/4294967296".to_string()).is_err());
|
||||
assert_eq!(is_derivation("2"), Ok(()));
|
||||
assert_eq!(is_derivation("0"), Ok(()));
|
||||
assert_eq!(is_derivation("65537"), Ok(()));
|
||||
assert_eq!(is_derivation("0/2"), Ok(()));
|
||||
assert_eq!(is_derivation("0'/2'"), Ok(()));
|
||||
assert!(is_derivation("a").is_err());
|
||||
assert!(is_derivation("4294967296").is_err());
|
||||
assert!(is_derivation("a/b").is_err());
|
||||
assert!(is_derivation("0/4294967296").is_err());
|
||||
}
|
||||
}
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.0"
version = "1.2.21"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -1,6 +1,6 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::io;
use std::{collections::HashMap, io};
use url::Url;

lazy_static! {
@@ -17,6 +17,8 @@ pub struct Config {
pub json_rpc_url: String,
pub websocket_url: String,
pub keypair_path: String,
#[serde(default)]
pub address_labels: HashMap<String, String>,
}

impl Default for Config {
@@ -36,6 +38,7 @@ impl Default for Config {
json_rpc_url,
websocket_url,
keypair_path,
address_labels: HashMap::new(),
}
}
}
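The new address_labels field above is marked #[serde(default)], so configuration files written before this change, which have no such key, still deserialize to a Config with an empty map. A small sketch of that behaviour with serde_yaml; the trimmed struct below is a stand-in for the real Config, not a copy of it.

use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;

// Trimmed stand-in for cli-config's Config, keeping only the fields needed to
// show that a missing address_labels key falls back to an empty HashMap.
#[derive(Serialize, Deserialize, Debug)]
struct Config {
    json_rpc_url: String,
    keypair_path: String,
    #[serde(default)]
    address_labels: HashMap<String, String>,
}

fn main() -> Result<(), serde_yaml::Error> {
    // An older config file with no address_labels key still parses cleanly.
    let old = "json_rpc_url: http://127.0.0.1:8899\nkeypair_path: /tmp/id.json\n";
    let config: Config = serde_yaml::from_str(old)?;
    assert!(config.address_labels.is_empty());

    // A newer file may carry labels keyed by base58 address.
    let new = "json_rpc_url: http://127.0.0.1:8899\n\
               keypair_path: /tmp/id.json\n\
               address_labels:\n  '11111111111111111111111111111111': System Program\n";
    let config: Config = serde_yaml::from_str(new)?;
    assert_eq!(
        config.address_labels["11111111111111111111111111111111"],
        "System Program"
    );
    Ok(())
}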
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -27,28 +27,29 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.0" }
|
||||
solana-client = { path = "../client", version = "1.2.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.0" }
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
|
||||
solana-version = { path = "../version", version = "1.2.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.21" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.21" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.21" }
|
||||
solana-client = { path = "../client", version = "1.2.21" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.21" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.21" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.21" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.21" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.21" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.21" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.21" }
|
||||
solana-version = { path = "../version", version = "1.2.21" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.21" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.21" }
|
||||
thiserror = "1.0.19"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.2.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
|
||||
solana-core = { path = "../core", version = "1.2.21" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.21" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@ -88,11 +88,11 @@ mod tests {
|
||||
let pubkey0 = Pubkey::new(&[0; 32]);
|
||||
let pubkey1 = Pubkey::new(&[1; 32]);
|
||||
let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
|
||||
let message0 = Message::new(&[ix0]);
|
||||
let message0 = Message::new(&[ix0], Some(&pubkey0));
|
||||
|
||||
let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
|
||||
let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1);
|
||||
let message1 = Message::new(&[ix0, ix1]);
|
||||
let message1 = Message::new(&[ix0, ix1], Some(&pubkey0));
|
||||
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(RpcRequest::GetBalance, account_balance_response.clone());
|
||||
@ -176,13 +176,13 @@ mod tests {
|
||||
let pubkey0 = Pubkey::new(&[0; 32]);
|
||||
let pubkey1 = Pubkey::new(&[1; 32]);
|
||||
let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
|
||||
let message0 = Message::new(&[ix0]);
|
||||
let message0 = Message::new(&[ix0], Some(&pubkey0));
|
||||
assert_eq!(calculate_fee(&fee_calculator, &[&message0]), 1);
|
||||
|
||||
// Two messages, additive fees.
|
||||
let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
|
||||
let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1);
|
||||
let message1 = Message::new(&[ix0, ix1]);
|
||||
let message1 = Message::new(&[ix0, ix1], Some(&pubkey0));
|
||||
assert_eq!(calculate_fee(&fee_calculator, &[&message0, &message1]), 3);
|
||||
}
|
||||
|
||||
|
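The fee tests above now build each Message with an explicit fee payer and expect fees to add across a batch: with one lamport per signature, a single-signer transfer costs 1, and a message whose two transfers require both parties to sign costs 2, for a total of 3. A rough sketch of that arithmetic, mirroring (not replacing) the SDK's own fee calculation:

use solana_sdk::{message::Message, pubkey::Pubkey, system_instruction};

// Each message pays lamports_per_signature for every required signature; a
// batch fee is just the per-message fees summed.
fn batch_fee(lamports_per_signature: u64, messages: &[&Message]) -> u64 {
    messages
        .iter()
        .map(|m| lamports_per_signature * u64::from(m.header.num_required_signatures))
        .sum()
}

fn main() {
    let payer = Pubkey::new(&[0; 32]);
    let other = Pubkey::new(&[1; 32]);

    // One transfer signed only by the payer: 1 required signature.
    let ix0 = system_instruction::transfer(&payer, &other, 1);
    let message0 = Message::new(&[ix0], Some(&payer));

    // Two transfers in opposite directions: both parties must sign.
    let ix0 = system_instruction::transfer(&payer, &other, 1);
    let ix1 = system_instruction::transfer(&other, &payer, 1);
    let message1 = Message::new(&[ix0, ix1], Some(&payer));

    assert_eq!(batch_fee(1, &[&message0]), 1);
    assert_eq!(batch_fee(1, &[&message0, &message1]), 3);
}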
cli/src/cli.rs (270 lines changed)
@ -2,7 +2,7 @@ use crate::{
|
||||
checks::*,
|
||||
cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
|
||||
cluster_query::*,
|
||||
display::println_name_value,
|
||||
display::{new_spinner_progress_bar, println_name_value, println_transaction},
|
||||
nonce::{self, *},
|
||||
offline::{blockhash_query::BlockhashQuery, *},
|
||||
spend_utils::*,
|
||||
@ -15,6 +15,7 @@ use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use log::*;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_budget_program::budget_instruction::{self, BudgetError};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg_with_default, COMMITMENT_ARG},
|
||||
@ -27,8 +28,8 @@ use solana_clap_utils::{
|
||||
use solana_client::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::RpcLargestAccountsFilter,
|
||||
rpc_response::{RpcAccount, RpcKeyedAccount},
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
@ -37,17 +38,18 @@ use solana_faucet::faucet_mock::request_airdrop_transaction;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
clock::{Epoch, Slot},
|
||||
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
|
||||
commitment_config::CommitmentConfig,
|
||||
decode_error::DecodeError,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
loader_instruction,
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
program_utils::DecodeError,
|
||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||
signature::{Keypair, Signature, Signer, SignerError},
|
||||
signers::Signers,
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@ -56,7 +58,7 @@ use solana_stake_program::{
|
||||
stake_instruction::LockupArgs,
|
||||
stake_state::{Lockup, StakeAuthorize},
|
||||
};
|
||||
use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
|
||||
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
|
||||
use solana_vote_program::vote_state::VoteAuthorize;
|
||||
use std::{
|
||||
error,
|
||||
@ -244,8 +246,8 @@ pub enum CliCommand {
|
||||
},
|
||||
TransactionHistory {
|
||||
address: Pubkey,
|
||||
end_slot: Option<Slot>, // None == latest slot
|
||||
slot_limit: Option<u64>, // None == search full history
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
@ -323,6 +325,16 @@ pub enum CliCommand {
|
||||
lamports: u64,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
MergeStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
source_stake_account_pubkey: Pubkey,
|
||||
stake_authority: SignerIndex,
|
||||
sign_only: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
ShowStakeHistory {
|
||||
use_lamports_unit: bool,
|
||||
},
|
||||
@ -370,6 +382,7 @@ pub enum CliCommand {
|
||||
},
|
||||
// Vote Commands
|
||||
CreateVoteAccount {
|
||||
vote_account: SignerIndex,
|
||||
seed: Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: Option<Pubkey>,
|
||||
@ -385,7 +398,7 @@ pub enum CliCommand {
|
||||
vote_account_pubkey: Pubkey,
|
||||
destination_account_pubkey: Pubkey,
|
||||
withdraw_authority: SignerIndex,
|
||||
lamports: u64,
|
||||
withdraw_amount: SpendAmount,
|
||||
},
|
||||
VoteAuthorize {
|
||||
vote_account_pubkey: Pubkey,
|
||||
@ -395,6 +408,12 @@ pub enum CliCommand {
|
||||
VoteUpdateValidator {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
VoteUpdateCommission {
|
||||
vote_account_pubkey: Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
// Wallet Commands
|
||||
Address,
|
||||
@ -686,6 +705,9 @@ pub fn parse_command(
|
||||
("split-stake", Some(matches)) => {
|
||||
parse_split_stake(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("merge-stake", Some(matches)) => {
|
||||
parse_merge_stake(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("stake-authorize", Some(matches)) => {
|
||||
parse_stake_authorize(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
@ -709,6 +731,9 @@ pub fn parse_command(
|
||||
("vote-update-validator", Some(matches)) => {
|
||||
parse_vote_update_validator(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("vote-update-commission", Some(matches)) => {
|
||||
parse_vote_update_commission(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("vote-authorize-voter", Some(matches)) => parse_vote_authorize(
|
||||
matches,
|
||||
default_signer_path,
|
||||
@ -1152,14 +1177,14 @@ fn process_confirm(
|
||||
if let Some(transaction_status) = status {
|
||||
if config.verbose {
|
||||
match rpc_client
|
||||
.get_confirmed_transaction(signature, TransactionEncoding::Binary)
|
||||
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
|
||||
{
|
||||
Ok(confirmed_transaction) => {
|
||||
println!(
|
||||
"\nTransaction executed in slot {}:",
|
||||
confirmed_transaction.slot
|
||||
);
|
||||
crate::display::println_transaction(
|
||||
println_transaction(
|
||||
&confirmed_transaction
|
||||
.transaction
|
||||
.transaction
|
||||
@ -1189,7 +1214,7 @@ fn process_confirm(
|
||||
}
|
||||
|
||||
fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
|
||||
crate::display::println_transaction(transaction, &None, "");
|
||||
println_transaction(transaction, &None, "");
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@ -1205,7 +1230,7 @@ fn process_show_account(
|
||||
let cli_account = CliAccount {
|
||||
keyed_account: RpcKeyedAccount {
|
||||
pubkey: account_pubkey.to_string(),
|
||||
account: RpcAccount::encode(account),
|
||||
account: UiAccount::encode(account_pubkey, account, UiAccountEncoding::Binary, None),
|
||||
},
|
||||
use_lamports_unit,
|
||||
};
|
||||
@ -1227,6 +1252,103 @@ fn process_show_account(
|
||||
Ok(account_string)
|
||||
}
|
||||
|
||||
fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
rpc_client: &RpcClient,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
// Send all transactions
|
||||
let mut transactions_signatures = vec![];
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if cfg!(not(test)) {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let signature = rpc_client
|
||||
.send_transaction_with_config(
|
||||
&transaction,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
transactions_signatures.push((transaction, signature));
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions sent",
|
||||
transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
|
||||
transactions_signatures = transactions_signatures
|
||||
.into_iter()
|
||||
.filter(|(_transaction, signature)| {
|
||||
if let Some(signature) = signature {
|
||||
if let Ok(status) = rpc_client.get_signature_status(&signature) {
|
||||
if rpc_client
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(0)
|
||||
> 1
|
||||
{
|
||||
return false;
|
||||
} else {
|
||||
return match status {
|
||||
None => true,
|
||||
Some(result) => result.is_err(),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
})
|
||||
.collect();
|
||||
|
||||
if transactions_signatures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err("Transactions failed".into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator) = rpc_client
|
||||
.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
|
||||
transactions = vec![];
|
||||
for (mut transaction, _) in transactions_signatures.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@ -1252,7 +1374,7 @@ fn process_deploy(
|
||||
program_data.len() as u64,
|
||||
&bpf_loader::id(),
|
||||
);
|
||||
let message = Message::new(&[ix]);
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut create_account_tx = Transaction::new_unsigned(message);
|
||||
create_account_tx.try_sign(&[config.signers[0], &program_id], blockhash)?;
|
||||
messages.push(&create_account_tx.message);
|
||||
@ -1265,7 +1387,7 @@ fn process_deploy(
|
||||
(i * DATA_CHUNK_SIZE) as u32,
|
||||
chunk.to_vec(),
|
||||
);
|
||||
let message = Message::new_with_payer(&[instruction], Some(&signers[0].pubkey()));
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
@ -1275,7 +1397,7 @@ fn process_deploy(
|
||||
}
|
||||
|
||||
let instruction = loader_instruction::finalize(&program_id.pubkey(), &bpf_loader::id());
|
||||
let message = Message::new_with_payer(&[instruction], Some(&signers[0].pubkey()));
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
let mut finalize_tx = Transaction::new_unsigned(message);
|
||||
finalize_tx.try_sign(&signers, blockhash)?;
|
||||
messages.push(&finalize_tx.message);
|
||||
@ -1294,15 +1416,18 @@ fn process_deploy(
|
||||
})?;
|
||||
|
||||
trace!("Writing program data");
|
||||
rpc_client
|
||||
.send_and_confirm_transactions_with_spinner(write_transactions, &signers)
|
||||
.map_err(|_| {
|
||||
CliError::DynamicProgramError("Data writes to program account failed".to_string())
|
||||
})?;
|
||||
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|
||||
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
|
||||
)?;
|
||||
|
||||
trace!("Finalizing program account");
|
||||
rpc_client
|
||||
.send_and_confirm_transaction_with_spinner(&finalize_tx)
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&finalize_tx,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
CliError::DynamicProgramError(format!("Finalizing program account failed: {}", e))
|
||||
})?;
|
||||
@ -1349,7 +1474,7 @@ fn process_pay(
|
||||
if let Some(nonce_account) = &nonce_account {
|
||||
Message::new_with_nonce(vec![ix], None, nonce_account, &nonce_authority.pubkey())
|
||||
} else {
|
||||
Message::new(&[ix])
|
||||
Message::new(&[ix], Some(&config.signers[0].pubkey()))
|
||||
}
|
||||
};
|
||||
|
||||
@ -1396,7 +1521,7 @@ fn process_pay(
|
||||
cancelable,
|
||||
lamports,
|
||||
);
|
||||
Message::new(&ixs)
|
||||
Message::new(&ixs, Some(&config.signers[0].pubkey()))
|
||||
};
|
||||
let (message, _) = resolve_spend_tx_and_check_account_balance(
|
||||
rpc_client,
|
||||
@ -1442,7 +1567,7 @@ fn process_pay(
|
||||
cancelable,
|
||||
lamports,
|
||||
);
|
||||
Message::new(&ixs)
|
||||
Message::new(&ixs, Some(&config.signers[0].pubkey()))
|
||||
};
|
||||
let (message, _) = resolve_spend_tx_and_check_account_balance(
|
||||
rpc_client,
|
||||
@ -1478,7 +1603,7 @@ fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -
|
||||
pubkey,
|
||||
&config.signers[0].pubkey(),
|
||||
);
|
||||
let message = Message::new(&[ix]);
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -1501,7 +1626,7 @@ fn process_time_elapsed(
|
||||
let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let ix = budget_instruction::apply_timestamp(&config.signers[0].pubkey(), pubkey, to, dt);
|
||||
let message = Message::new(&[ix]);
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -1530,11 +1655,6 @@ fn process_transfer(
|
||||
) -> ProcessResult {
|
||||
let from = config.signers[from];
|
||||
|
||||
check_unique_pubkeys(
|
||||
(&from.pubkey(), "cli keypair".to_string()),
|
||||
(to, "to".to_string()),
|
||||
)?;
|
||||
|
||||
let (recent_blockhash, fee_calculator) =
|
||||
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
|
||||
|
||||
@ -1552,7 +1672,7 @@ fn process_transfer(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
}
|
||||
};
|
||||
|
||||
@ -1599,7 +1719,7 @@ fn process_witness(
|
||||
let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let ix = budget_instruction::apply_signature(&config.signers[0].pubkey(), pubkey, to);
|
||||
let message = Message::new(&[ix]);
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -1655,7 +1775,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
seed,
|
||||
program_id,
|
||||
} => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
|
||||
CliCommand::Fees => process_fees(&rpc_client),
|
||||
CliCommand::Fees => process_fees(&rpc_client, config),
|
||||
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, config, *slot),
|
||||
CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
|
||||
CliCommand::GetEpochInfo { commitment_config } => {
|
||||
@ -1717,9 +1837,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
|
||||
CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
|
||||
before,
|
||||
limit,
|
||||
} => process_transaction_history(&rpc_client, config, address, *before, *limit),
|
||||
|
||||
// Nonce Commands
|
||||
|
||||
@ -1889,6 +2009,27 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*lamports,
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::MergeStake {
|
||||
stake_account_pubkey,
|
||||
source_stake_account_pubkey,
|
||||
stake_authority,
|
||||
sign_only,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority,
|
||||
fee_payer,
|
||||
} => process_merge_stake(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&source_stake_account_pubkey,
|
||||
*stake_authority,
|
||||
*sign_only,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
*nonce_authority,
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::ShowStakeAccount {
|
||||
pubkey: stake_account_pubkey,
|
||||
use_lamports_unit,
|
||||
@ -1990,6 +2131,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
|
||||
// Create vote account
|
||||
CliCommand::CreateVoteAccount {
|
||||
vote_account,
|
||||
seed,
|
||||
identity_account,
|
||||
authorized_voter,
|
||||
@ -1998,6 +2140,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_create_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
*vote_account,
|
||||
seed,
|
||||
*identity_account,
|
||||
authorized_voter,
|
||||
@ -2018,14 +2161,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority,
|
||||
lamports,
|
||||
withdraw_amount,
|
||||
destination_account_pubkey,
|
||||
} => process_withdraw_from_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
vote_account_pubkey,
|
||||
*withdraw_authority,
|
||||
*lamports,
|
||||
*withdraw_amount,
|
||||
destination_account_pubkey,
|
||||
),
|
||||
CliCommand::VoteAuthorize {
|
||||
@ -2042,11 +2185,24 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account,
|
||||
withdraw_authority,
|
||||
} => process_vote_update_validator(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*new_identity_account,
|
||||
*withdraw_authority,
|
||||
),
|
||||
CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority,
|
||||
} => process_vote_update_commission(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*commission,
|
||||
*withdraw_authority,
|
||||
),
|
||||
|
||||
// Wallet Commands
|
||||
@ -3241,6 +3397,7 @@ mod tests {
|
||||
// Success cases
|
||||
let mut config = CliConfig::default();
|
||||
config.rpc_client = Some(RpcClient::new_mock("succeeds".to_string()));
|
||||
config.json_rpc_url = "http://127.0.0.1:8899".to_string();
|
||||
|
||||
let keypair = Keypair::new();
|
||||
let pubkey = keypair.pubkey().to_string();
|
||||
@ -3274,6 +3431,7 @@ mod tests {
|
||||
let bob_pubkey = bob_keypair.pubkey();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@ -3299,6 +3457,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
@ -3328,10 +3487,10 @@ mod tests {
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let stake_account_pubkey = Pubkey::new_rand();
|
||||
let to_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: to_pubkey,
|
||||
lamports: 100,
|
||||
withdraw_authority: 0,
|
||||
@ -3346,9 +3505,9 @@ mod tests {
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let stake_account_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
stake_account_pubkey,
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
@ -3359,10 +3518,10 @@ mod tests {
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let stake_account_pubkey = Pubkey::new_rand();
|
||||
let split_stake_account = Keypair::new();
|
||||
config.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
stake_account_pubkey,
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
@ -3377,6 +3536,23 @@ mod tests {
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_account_pubkey = Pubkey::new_rand();
|
||||
let source_stake_account_pubkey = Pubkey::new_rand();
|
||||
let merge_stake_account = Keypair::new();
|
||||
config.command = CliCommand::MergeStake {
|
||||
stake_account_pubkey,
|
||||
source_stake_account_pubkey,
|
||||
stake_authority: 1,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &merge_stake_account];
|
||||
let result = process_command(&config);
|
||||
assert!(dbg!(result).is_ok());
|
||||
|
||||
config.command = CliCommand::GetSlot {
|
||||
commitment_config: CommitmentConfig::default(),
|
||||
};
|
||||
@ -3499,6 +3675,7 @@ mod tests {
|
||||
let bob_keypair = Keypair::new();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@ -3518,6 +3695,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 1,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@ -482,7 +482,7 @@ impl fmt::Display for CliKeyedStakeState {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliStakeState {
|
||||
pub stake_type: CliStakeType,
|
||||
pub total_stake: u64,
|
||||
pub account_balance: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@ -497,6 +497,16 @@ pub struct CliStakeState {
|
||||
pub lockup: Option<CliLockup>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
#[serde(skip_serializing)]
|
||||
pub current_epoch: Epoch,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub rent_exempt_reserve: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub active_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub activating_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub deactivating_stake: Option<u64>,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeState {
|
||||
@ -522,52 +532,122 @@ impl fmt::Display for CliStakeState {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
writeln!(
|
||||
f,
|
||||
"Balance: {}",
|
||||
build_balance_message(self.account_balance, self.use_lamports_unit, true)
|
||||
)?;
|
||||
|
||||
if let Some(rent_exempt_reserve) = self.rent_exempt_reserve {
|
||||
writeln!(
|
||||
f,
|
||||
"Rent Exempt Reserve: {}",
|
||||
build_balance_message(rent_exempt_reserve, self.use_lamports_unit, true)
|
||||
)?;
|
||||
}
|
||||
|
||||
match self.stake_type {
|
||||
CliStakeType::RewardsPool => writeln!(f, "Stake account is a rewards pool")?,
|
||||
CliStakeType::Uninitialized => writeln!(f, "Stake account is uninitialized")?,
|
||||
CliStakeType::Initialized => {
|
||||
writeln!(
|
||||
f,
|
||||
"Total Stake: {}",
|
||||
build_balance_message(self.total_stake, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(f, "Stake account is undelegated")?;
|
||||
show_authorized(f, self.authorized.as_ref().unwrap())?;
|
||||
show_lockup(f, self.lockup.as_ref().unwrap())?;
|
||||
}
|
||||
CliStakeType::Stake => {
|
||||
writeln!(
|
||||
f,
|
||||
"Total Stake: {}",
|
||||
build_balance_message(self.total_stake, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Delegated Stake: {}",
|
||||
build_balance_message(
|
||||
self.delegated_stake.unwrap(),
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
)
|
||||
)?;
|
||||
if let Some(delegated_vote_account_address) = &self.delegated_vote_account_address {
|
||||
let show_delegation = {
|
||||
self.active_stake.is_some()
|
||||
|| self.activating_stake.is_some()
|
||||
|| self.deactivating_stake.is_some()
|
||||
|| self
|
||||
.deactivation_epoch
|
||||
.map(|de| de > self.current_epoch)
|
||||
.unwrap_or(true)
|
||||
};
|
||||
if show_delegation {
|
||||
let delegated_stake = self.delegated_stake.unwrap();
|
||||
writeln!(
|
||||
f,
|
||||
"Delegated Vote Account Address: {}",
|
||||
delegated_vote_account_address
|
||||
)?;
|
||||
}
|
||||
writeln!(
|
||||
f,
|
||||
"Stake activates starting from epoch: {}",
|
||||
self.activation_epoch.unwrap()
|
||||
)?;
|
||||
if let Some(deactivation_epoch) = self.deactivation_epoch {
|
||||
writeln!(
|
||||
f,
|
||||
"Stake deactivates starting from epoch: {}",
|
||||
deactivation_epoch
|
||||
"Delegated Stake: {}",
|
||||
build_balance_message(delegated_stake, self.use_lamports_unit, true)
|
||||
)?;
|
||||
if self
|
||||
.deactivation_epoch
|
||||
.map(|d| self.current_epoch <= d)
|
||||
.unwrap_or(true)
|
||||
{
|
||||
let active_stake = self.active_stake.unwrap_or(0);
|
||||
writeln!(
|
||||
f,
|
||||
"Active Stake: {}",
|
||||
build_balance_message(active_stake, self.use_lamports_unit, true),
|
||||
)?;
|
||||
let activating_stake = self.activating_stake.or_else(|| {
|
||||
if self.active_stake.is_none() {
|
||||
Some(delegated_stake)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
if let Some(activating_stake) = activating_stake {
|
||||
writeln!(
|
||||
f,
|
||||
"Activating Stake: {}",
|
||||
build_balance_message(
|
||||
activating_stake,
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
),
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Stake activates starting from epoch: {}",
|
||||
self.activation_epoch.unwrap()
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(deactivation_epoch) = self.deactivation_epoch {
|
||||
if self.current_epoch > deactivation_epoch {
|
||||
let deactivating_stake = self.deactivating_stake.or(self.active_stake);
|
||||
if let Some(deactivating_stake) = deactivating_stake {
|
||||
writeln!(
|
||||
f,
|
||||
"Inactive Stake: {}",
|
||||
build_balance_message(
|
||||
delegated_stake - deactivating_stake,
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
),
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Deactivating Stake: {}",
|
||||
build_balance_message(
|
||||
deactivating_stake,
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
writeln!(
|
||||
f,
|
||||
"Stake deactivates starting from epoch: {}",
|
||||
deactivation_epoch
|
||||
)?;
|
||||
}
|
||||
if let Some(delegated_vote_account_address) =
|
||||
&self.delegated_vote_account_address
|
||||
{
|
||||
writeln!(
|
||||
f,
|
||||
"Delegated Vote Account Address: {}",
|
||||
delegated_vote_account_address
|
||||
)?;
|
||||
}
|
||||
} else {
|
||||
writeln!(f, "Stake account is undelegated")?;
|
||||
}
|
||||
show_authorized(f, self.authorized.as_ref().unwrap())?;
|
||||
show_lockup(f, self.lockup.as_ref().unwrap())?;
|
||||
@ -900,6 +980,7 @@ impl fmt::Display for CliSignOnlyData {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliSignature {
|
||||
pub signature: String,
|
||||
}
|
||||
@ -913,6 +994,7 @@ impl fmt::Display for CliSignature {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliAccountBalances {
|
||||
pub accounts: Vec<RpcAccountBalance>,
|
||||
}
|
||||
@ -937,6 +1019,7 @@ impl fmt::Display for CliAccountBalances {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliSupply {
|
||||
pub total: u64,
|
||||
pub circulating: u64,
|
||||
@ -981,3 +1064,25 @@ impl fmt::Display for CliSupply {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFees {
|
||||
pub slot: Slot,
|
||||
pub blockhash: String,
|
||||
pub lamports_per_signature: u64,
|
||||
pub last_valid_slot: Slot,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliFees {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Lamports per signature:",
|
||||
&self.lamports_per_signature.to_string(),
|
||||
)?;
|
||||
writeln_name_value(f, "Last valid slot:", &self.last_valid_slot.to_string())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
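process_fees now routes its result through the configured output format instead of hand-formatting a string, so the same CliFees value can render either as the name/value lines above or as JSON. A small sketch of the struct outside the CLI; writeln_name_value is internal to the crate, so plain writeln! stands in for it here, and the field values are made up.

use serde_derive::{Deserialize, Serialize};
use std::fmt;

type Slot = u64;

// Stand-in for cli_output::CliFees: same shape, with Display reduced to plain
// writeln! instead of the crate-internal writeln_name_value helper.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct CliFees {
    slot: Slot,
    blockhash: String,
    lamports_per_signature: u64,
    last_valid_slot: Slot,
}

impl fmt::Display for CliFees {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Blockhash: {}", self.blockhash)?;
        writeln!(f, "Lamports per signature: {}", self.lamports_per_signature)?;
        writeln!(f, "Last valid slot: {}", self.last_valid_slot)?;
        Ok(())
    }
}

fn main() {
    let fees = CliFees {
        slot: 12345,
        blockhash: "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM".to_string(),
        lamports_per_signature: 5000,
        last_valid_slot: 12645,
    };
    // Human-readable, as the default output format would print it...
    print!("{}", fees);
    // ...or machine-readable, as the JSON output format would serialize it.
    println!("{}", serde_json::to_string_pretty(&fees).unwrap());
}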
@ -1,12 +1,11 @@
|
||||
use crate::{
|
||||
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
|
||||
cli_output::*,
|
||||
display::println_name_value,
|
||||
display::{new_spinner_progress_bar, println_name_value},
|
||||
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
|
||||
};
|
||||
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
@ -17,7 +16,6 @@ use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
|
||||
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@ -28,8 +26,13 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
signature::Signature,
|
||||
system_instruction, system_program,
|
||||
sysvar::{self, Sysvar},
|
||||
sysvar::{
|
||||
self,
|
||||
stake_history::{self, StakeHistory},
|
||||
Sysvar,
|
||||
},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
@ -253,9 +256,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-history")
|
||||
.about("Show historical transactions affecting the given address, \
|
||||
ordered based on the slot in which they were confirmed in \
|
||||
from lowest to highest slot")
|
||||
.about("Show historical transactions affecting the given address \
|
||||
from newest to oldest")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("address")
|
||||
.index(1)
|
||||
@ -263,26 +265,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Account address"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("end_slot")
|
||||
.takes_value(false)
|
||||
.value_name("SLOT")
|
||||
.index(2)
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Slot to start from [default: latest slot at maximum commitment]"
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.takes_value(true)
|
||||
.value_name("NUMBER OF SLOTS")
|
||||
.value_name("LIMIT")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Limit the search to this many slots"
|
||||
),
|
||||
),
|
||||
.default_value("1000")
|
||||
.help("Maximum number of transaction signatures to return"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("before")
|
||||
.long("before")
|
||||
.value_name("TRANSACTION_SIGNATURE")
|
||||
.takes_value(true)
|
||||
.help("Start with the first signature older than this one"),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -450,28 +448,27 @@ pub fn parse_transaction_history(
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
|
||||
let end_slot = value_t!(matches, "end_slot", Slot).ok();
|
||||
let slot_limit = value_t!(matches, "limit", u64).ok();
|
||||
|
||||
let before = match matches.value_of("before") {
|
||||
Some(signature) => Some(
|
||||
signature
|
||||
.parse()
|
||||
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
|
||||
),
|
||||
None => None,
|
||||
};
|
||||
let limit = value_t_or_exit!(matches, "limit", usize);
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
before,
|
||||
limit,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new process bar for processing that will take an unknown amount of time
|
||||
fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
||||
|
||||
pub fn process_catchup(
|
||||
rpc_client: &RpcClient,
|
||||
node_pubkey: &Pubkey,
|
||||
@ -597,13 +594,16 @@ pub fn process_cluster_version(rpc_client: &RpcClient) -> ProcessResult {
|
||||
Ok(remote_version.solana_core)
|
||||
}
|
||||
|
||||
pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
Ok(format!(
|
||||
"blockhash: {}\nlamports per signature: {}",
|
||||
recent_blockhash, fee_calculator.lamports_per_signature
|
||||
))
|
||||
pub fn process_fees(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
|
||||
let result = rpc_client.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
|
||||
let (recent_blockhash, fee_calculator, last_valid_slot) = result.value;
|
||||
let fees = CliFees {
|
||||
slot: result.context.slot,
|
||||
blockhash: recent_blockhash.to_string(),
|
||||
lamports_per_signature: fee_calculator.lamports_per_signature,
|
||||
last_valid_slot,
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&fees))
|
||||
}
|
||||
|
||||
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
@ -938,7 +938,7 @@ pub fn process_ping(
|
||||
|
||||
let build_message = |lamports| {
|
||||
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
|
||||
Message::new(&[ix])
|
||||
Message::new(&[ix], Some(&config.signers[0].pubkey()))
|
||||
};
|
||||
let (message, _) = resolve_spend_tx_and_check_account_balance(
|
||||
rpc_client,
|
||||
@ -1181,8 +1181,13 @@ pub fn process_show_stakes(
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Fetching stake accounts...");
|
||||
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
progress_bar.finish_and_clear();
|
||||
|
||||
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
|
||||
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
|
||||
for (stake_pubkey, stake_account) in all_stake_accounts {
|
||||
if let Ok(stake_state) = stake_account.state() {
|
||||
@ -1195,6 +1200,7 @@ pub fn process_show_stakes(
|
||||
stake_account.lamports,
|
||||
&stake_state,
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
),
|
||||
});
|
||||
}
|
||||
@ -1211,6 +1217,7 @@ pub fn process_show_stakes(
|
||||
stake_account.lamports,
|
||||
&stake_state,
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
),
|
||||
});
|
||||
}
|
||||
@ -1272,41 +1279,36 @@ pub fn process_show_validators(
|
||||
|
||||
pub fn process_transaction_history(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
address: &Pubkey,
|
||||
end_slot: Option<Slot>, // None == use latest slot
|
||||
slot_limit: Option<u64>,
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
) -> ProcessResult {
|
||||
let end_slot = {
|
||||
if let Some(end_slot) = end_slot {
|
||||
end_slot
|
||||
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
|
||||
address,
|
||||
before,
|
||||
Some(limit),
|
||||
)?;
|
||||
|
||||
let transactions_found = format!("{} transactions found", results.len());
|
||||
|
||||
for result in results {
|
||||
if config.verbose {
|
||||
println!(
|
||||
"{} [slot={} status={}] {}",
|
||||
result.signature,
|
||||
result.slot,
|
||||
match result.err {
|
||||
None => "Confirmed".to_string(),
|
||||
Some(err) => format!("Failed: {:?}", err),
|
||||
},
|
||||
result.memo.unwrap_or_else(|| "".to_string()),
|
||||
);
|
||||
} else {
|
||||
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
|
||||
println!("{}", result.signature);
|
||||
}
|
||||
};
|
||||
let mut start_slot = match slot_limit {
|
||||
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
|
||||
None => rpc_client.minimum_ledger_slot()?,
|
||||
};
|
||||
|
||||
println!(
|
||||
"Transactions affecting {} within slots [{},{}]",
|
||||
address, start_slot, end_slot
|
||||
);
|
||||
|
||||
let mut transaction_count = 0;
|
||||
while start_slot < end_slot {
|
||||
let signatures = rpc_client.get_confirmed_signatures_for_address(
|
||||
address,
|
||||
start_slot,
|
||||
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
|
||||
)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
}
|
||||
transaction_count += signatures.len();
|
||||
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
|
||||
}
|
||||
Ok(format!("{} transactions found", transaction_count))
|
||||
Ok(transactions_found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -1,10 +1,11 @@
use crate::cli::SettingType;
use console::style;
use indicatif::{ProgressBar, ProgressStyle};
use solana_sdk::{
    hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
    transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use solana_transaction_status::UiTransactionStatusMeta;
use std::{fmt, io};
|
||||
|
||||
// Pretty print a "name value"
|
||||
@ -67,7 +68,7 @@ pub fn println_signers(
|
||||
pub fn write_transaction<W: io::Write>(
|
||||
w: &mut W,
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
transaction_status: &Option<UiTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) -> io::Result<()> {
|
||||
let message = &transaction.message;
|
||||
@ -190,7 +191,7 @@ pub fn write_transaction<W: io::Write>(
|
||||
|
||||
pub fn println_transaction(
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
transaction_status: &Option<UiTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) {
|
||||
let mut w = Vec::new();
|
||||
@ -200,3 +201,12 @@ pub fn println_transaction(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new process bar for processing that will take an unknown amount of time
|
||||
pub fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
||||
|
@ -437,7 +437,7 @@ pub fn process_authorize_nonce_account(
|
||||
|
||||
let nonce_authority = config.signers[nonce_authority];
|
||||
let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
|
||||
@ -491,7 +491,7 @@ pub fn process_create_nonce_account(
|
||||
lamports,
|
||||
)
|
||||
};
|
||||
Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()))
|
||||
Message::new(&ixs, Some(&config.signers[0].pubkey()))
|
||||
};
|
||||
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
@ -560,7 +560,7 @@ pub fn process_new_nonce(
|
||||
let nonce_authority = config.signers[nonce_authority];
|
||||
let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -619,7 +619,7 @@ pub fn process_withdraw_from_nonce_account(
|
||||
destination_account_pubkey,
|
||||
lamports,
|
||||
);
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
|
@ -106,9 +106,10 @@ mod tests {
|
||||
use crate::{nonce::nonce_arg, offline::blockhash_query::BlockhashQuery};
|
||||
use clap::App;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcAccount, RpcFeeCalculator, RpcResponseContext},
|
||||
rpc_response::{Response, RpcFeeCalculator, RpcResponseContext},
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account, fee_calculator::FeeCalculator, hash::hash, nonce, system_program,
|
||||
@ -344,7 +345,12 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
|
||||
let rpc_nonce_account = RpcAccount::encode(nonce_account);
|
||||
let rpc_nonce_account = UiAccount::encode(
|
||||
&nonce_pubkey,
|
||||
nonce_account,
|
||||
UiAccountEncoding::Binary64,
|
||||
None,
|
||||
);
|
||||
let get_account_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!(Some(rpc_nonce_account)),
|
||||
|
319
cli/src/stake.rs
@ -12,7 +12,7 @@ use crate::{
|
||||
};
|
||||
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, offline::*, ArgConstant};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_client::{rpc_client::RpcClient, rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
@ -264,6 +264,29 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(fee_payer_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("merge-stake")
|
||||
.about("Merges one stake account into another")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("stake_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("STAKE_ACCOUNT_ADDRESS")
|
||||
.required(true),
|
||||
"Stake account to merge into")
|
||||
)
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("source_stake_account_pubkey")
|
||||
.index(2)
|
||||
.value_name("SOURCE_STAKE_ACCOUNT_ADDRESS")
|
||||
.required(true),
|
||||
"Source stake account for the merge. If successful, this stake account will no longer exist after the merge")
|
||||
)
|
||||
.arg(stake_authority_arg())
|
||||
.offline_args()
|
||||
.arg(nonce_arg())
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(fee_payer_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-stake")
|
||||
.about("Withdraw the unstaked SOL from the stake account")
|
||||
@ -606,6 +629,47 @@ pub fn parse_split_stake(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_merge_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
|
||||
let source_stake_account_pubkey = pubkey_of(matches, "source_stake_account_pubkey").unwrap();
|
||||
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
let blockhash_query = BlockhashQuery::new_from_matches(matches);
|
||||
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
|
||||
let (stake_authority, stake_authority_pubkey) =
|
||||
signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
|
||||
|
||||
let mut bulk_signers = vec![stake_authority, fee_payer];
|
||||
if nonce_account.is_some() {
|
||||
bulk_signers.push(nonce_authority);
|
||||
}
|
||||
let signer_info =
|
||||
generate_unique_signers(bulk_signers, matches, default_signer_path, wallet_manager)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::MergeStake {
|
||||
stake_account_pubkey,
|
||||
source_stake_account_pubkey,
|
||||
stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
|
||||
sign_only,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
|
||||
fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_stake_deactivate_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
@ -825,7 +889,7 @@ pub fn process_create_stake_account(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
}
|
||||
};
|
||||
|
||||
@ -924,7 +988,7 @@ pub fn process_stake_authorize(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -978,7 +1042,7 @@ pub fn process_deactivate_stake_account(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -1041,7 +1105,7 @@ pub fn process_withdraw_stake(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -1175,7 +1239,100 @@ pub fn process_split_stake(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
if sign_only {
|
||||
tx.try_partial_sign(&config.signers, recent_blockhash)?;
|
||||
return_signers(&tx, &config)
|
||||
} else {
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
if let Some(nonce_account) = &nonce_account {
|
||||
let nonce_account = rpc_client.get_account(nonce_account)?;
|
||||
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
|
||||
}
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&tx.message.account_keys[0],
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<StakeError>(result, &config)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn process_merge_stake(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
stake_account_pubkey: &Pubkey,
|
||||
source_stake_account_pubkey: &Pubkey,
|
||||
stake_authority: SignerIndex,
|
||||
sign_only: bool,
|
||||
blockhash_query: &BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
fee_payer: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let fee_payer = config.signers[fee_payer];
|
||||
|
||||
check_unique_pubkeys(
|
||||
(&fee_payer.pubkey(), "fee-payer keypair".to_string()),
|
||||
(&stake_account_pubkey, "stake_account".to_string()),
|
||||
)?;
|
||||
check_unique_pubkeys(
|
||||
(&fee_payer.pubkey(), "fee-payer keypair".to_string()),
|
||||
(
|
||||
&source_stake_account_pubkey,
|
||||
"source_stake_account".to_string(),
|
||||
),
|
||||
)?;
|
||||
check_unique_pubkeys(
|
||||
(&stake_account_pubkey, "stake_account".to_string()),
|
||||
(
|
||||
&source_stake_account_pubkey,
|
||||
"source_stake_account".to_string(),
|
||||
),
|
||||
)?;
|
||||
|
||||
let stake_authority = config.signers[stake_authority];
|
||||
|
||||
if !sign_only {
|
||||
for stake_account_address in &[stake_account_pubkey, source_stake_account_pubkey] {
|
||||
if let Ok(stake_account) = rpc_client.get_account(stake_account_address) {
|
||||
if stake_account.owner != solana_stake_program::id() {
|
||||
return Err(CliError::BadParameter(format!(
|
||||
"Account {} is not a stake account",
|
||||
stake_account_address
|
||||
))
|
||||
.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let (recent_blockhash, fee_calculator) =
|
||||
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
|
||||
|
||||
let ixs = stake_instruction::merge(
|
||||
&stake_account_pubkey,
|
||||
&source_stake_account_pubkey,
|
||||
&stake_authority.pubkey(),
|
||||
);
|
||||
|
||||
let nonce_authority = config.signers[nonce_authority];
|
||||
|
||||
let message = if let Some(nonce_account) = &nonce_account {
|
||||
Message::new_with_nonce(
|
||||
ixs,
|
||||
Some(&fee_payer.pubkey()),
|
||||
nonce_account,
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -1232,7 +1389,7 @@ pub fn process_stake_set_lockup(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -1256,53 +1413,85 @@ pub fn process_stake_set_lockup(
|
||||
}
|
||||
}
|
||||
|
||||
fn u64_some_if_not_zero(n: u64) -> Option<u64> {
|
||||
if n > 0 {
|
||||
Some(n)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_stake_state(
|
||||
stake_lamports: u64,
|
||||
account_balance: u64,
|
||||
stake_state: &StakeState,
|
||||
use_lamports_unit: bool,
|
||||
stake_history: &StakeHistory,
|
||||
) -> CliStakeState {
|
||||
match stake_state {
|
||||
StakeState::Stake(
|
||||
Meta {
|
||||
authorized, lockup, ..
|
||||
rent_exempt_reserve,
|
||||
authorized,
|
||||
lockup,
|
||||
},
|
||||
stake,
|
||||
) => CliStakeState {
|
||||
stake_type: CliStakeType::Stake,
|
||||
total_stake: stake_lamports,
|
||||
delegated_stake: Some(stake.delegation.stake),
|
||||
delegated_vote_account_address: if stake.delegation.voter_pubkey != Pubkey::default() {
|
||||
Some(stake.delegation.voter_pubkey.to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
|
||||
stake.delegation.activation_epoch
|
||||
} else {
|
||||
0
|
||||
}),
|
||||
deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
|
||||
Some(stake.delegation.deactivation_epoch)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
authorized: Some(authorized.into()),
|
||||
lockup: Some(lockup.into()),
|
||||
use_lamports_unit,
|
||||
},
|
||||
) => {
|
||||
// The first entry in stake history is the previous epoch, so +1 for current
|
||||
let current_epoch = stake_history.iter().next().unwrap().0 + 1;
|
||||
let (active_stake, activating_stake, deactivating_stake) = stake
|
||||
.delegation
|
||||
.stake_activating_and_deactivating(current_epoch, Some(stake_history));
|
||||
CliStakeState {
|
||||
stake_type: CliStakeType::Stake,
|
||||
account_balance,
|
||||
delegated_stake: Some(stake.delegation.stake),
|
||||
delegated_vote_account_address: if stake.delegation.voter_pubkey
|
||||
!= Pubkey::default()
|
||||
{
|
||||
Some(stake.delegation.voter_pubkey.to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
|
||||
stake.delegation.activation_epoch
|
||||
} else {
|
||||
0
|
||||
}),
|
||||
deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
|
||||
Some(stake.delegation.deactivation_epoch)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
authorized: Some(authorized.into()),
|
||||
lockup: Some(lockup.into()),
|
||||
use_lamports_unit,
|
||||
current_epoch,
|
||||
rent_exempt_reserve: Some(*rent_exempt_reserve),
|
||||
active_stake: u64_some_if_not_zero(active_stake),
|
||||
activating_stake: u64_some_if_not_zero(activating_stake),
|
||||
deactivating_stake: u64_some_if_not_zero(deactivating_stake),
|
||||
}
|
||||
}
|
||||
StakeState::RewardsPool => CliStakeState {
|
||||
stake_type: CliStakeType::RewardsPool,
|
||||
account_balance,
|
||||
..CliStakeState::default()
|
||||
},
|
||||
StakeState::Uninitialized => CliStakeState {
|
||||
account_balance,
|
||||
..CliStakeState::default()
|
||||
},
|
||||
StakeState::Uninitialized => CliStakeState::default(),
|
||||
StakeState::Initialized(Meta {
|
||||
authorized, lockup, ..
|
||||
rent_exempt_reserve,
|
||||
authorized,
|
||||
lockup,
|
||||
}) => CliStakeState {
|
||||
stake_type: CliStakeType::Initialized,
|
||||
total_stake: stake_lamports,
|
||||
account_balance,
|
||||
authorized: Some(authorized.into()),
|
||||
lockup: Some(lockup.into()),
|
||||
use_lamports_unit,
|
||||
rent_exempt_reserve: Some(*rent_exempt_reserve),
|
||||
..CliStakeState::default()
|
||||
},
|
||||
}
|
||||
@ -1324,7 +1513,18 @@ pub fn process_show_stake_account(
|
||||
}
|
||||
match stake_account.state() {
|
||||
Ok(stake_state) => {
|
||||
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
let stake_history =
|
||||
StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
|
||||
let state = build_stake_state(
|
||||
stake_account.lamports,
|
||||
&stake_state,
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
);
|
||||
Ok(config.output_format.formatted_string(&state))
|
||||
}
|
||||
Err(err) => Err(CliError::RpcRequestError(format!(
|
||||
@ -1399,13 +1599,15 @@ pub fn process_delegate_stake(
|
||||
"Unable to delegate. Vote account has no root slot".to_string(),
|
||||
)),
|
||||
Some(root_slot) => {
|
||||
let slot = rpc_client.get_slot()?;
|
||||
if root_slot + solana_sdk::clock::DEFAULT_SLOTS_PER_TURN < slot {
|
||||
Err(CliError::BadParameter(
|
||||
format!(
|
||||
"Unable to delegate. Vote account root slot ({}) is too old, the current slot is {}", root_slot, slot
|
||||
)
|
||||
))
|
||||
let min_root_slot = rpc_client
|
||||
.get_slot()?
|
||||
.saturating_sub(DELINQUENT_VALIDATOR_SLOT_DISTANCE);
|
||||
if root_slot < min_root_slot {
|
||||
Err(CliError::DynamicProgramError(format!(
|
||||
"Unable to delegate. Vote account appears delinquent \
|
||||
because its current root slot, {}, is less than {}",
|
||||
root_slot, min_root_slot
|
||||
)))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
@ -1440,7 +1642,7 @@ pub fn process_delegate_stake(
|
||||
&nonce_authority.pubkey(),
|
||||
)
|
||||
} else {
|
||||
Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
|
||||
Message::new(&ixs, Some(&fee_payer.pubkey()))
|
||||
};
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
|
||||
@ -2878,5 +3080,34 @@ mod tests {
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
// Test MergeStake SubCommand
|
||||
let (keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let stake_account_keypair = Keypair::new();
|
||||
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let source_stake_account_pubkey = Pubkey::new_rand();
|
||||
let test_merge_stake_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"merge-stake",
|
||||
&keypair_file,
|
||||
&source_stake_account_pubkey.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_merge_stake_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::MergeStake {
|
||||
stake_account_pubkey: stake_account_keypair.pubkey(),
|
||||
source_stake_account_pubkey,
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -6,9 +6,10 @@ use crate::{
|
||||
use bincode::deserialize;
|
||||
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use reqwest::blocking::Client;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
use solana_account_decoder::validator_info::{
|
||||
self, ValidatorInfo, MAX_LONG_FIELD_LENGTH, MAX_SHORT_FIELD_LENGTH,
|
||||
};
|
||||
use solana_clap_utils::{
|
||||
input_parsers::pubkey_of,
|
||||
input_validators::{is_pubkey, is_url},
|
||||
@ -27,23 +28,6 @@ use solana_sdk::{
|
||||
};
|
||||
use std::{error, sync::Arc};
|
||||
|
||||
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
|
||||
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
|
||||
pub const MAX_VALIDATOR_INFO: u64 = 576;
|
||||
|
||||
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
|
||||
|
||||
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
|
||||
pub struct ValidatorInfo {
|
||||
info: String,
|
||||
}
|
||||
|
||||
impl ConfigState for ValidatorInfo {
|
||||
fn max_space() -> u64 {
|
||||
MAX_VALIDATOR_INFO
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a validator details are longer than the max length.
|
||||
pub fn check_details_length(string: String) -> Result<(), String> {
|
||||
if string.len() > MAX_LONG_FIELD_LENGTH {
|
||||
@ -289,7 +273,7 @@ pub fn process_set_validator_info(
|
||||
.iter()
|
||||
.filter(|(_, account)| {
|
||||
let key_list: ConfigKeys = deserialize(&account.data).map_err(|_| false).unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.find(|(pubkey, account)| {
|
||||
let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
|
||||
@ -328,7 +312,10 @@ pub fn process_set_validator_info(
|
||||
};
|
||||
|
||||
let build_message = |lamports| {
|
||||
let keys = vec![(id(), false), (config.signers[0].pubkey(), true)];
|
||||
let keys = vec![
|
||||
(validator_info::id(), false),
|
||||
(config.signers[0].pubkey(), true),
|
||||
];
|
||||
if balance == 0 {
|
||||
println!(
|
||||
"Publishing info for Validator {:?}",
|
||||
@ -346,7 +333,7 @@ pub fn process_set_validator_info(
|
||||
keys,
|
||||
&validator_info,
|
||||
)]);
|
||||
Message::new(&instructions)
|
||||
Message::new(&instructions, Some(&config.signers[0].pubkey()))
|
||||
} else {
|
||||
println!(
|
||||
"Updating Validator {:?} info at: {:?}",
|
||||
@ -359,7 +346,7 @@ pub fn process_set_validator_info(
|
||||
keys,
|
||||
&validator_info,
|
||||
)];
|
||||
Message::new_with_payer(&instructions, Some(&config.signers[0].pubkey()))
|
||||
Message::new(&instructions, Some(&config.signers[0].pubkey()))
|
||||
}
|
||||
};
|
||||
|
||||
@ -400,7 +387,7 @@ pub fn process_get_validator_info(
|
||||
let key_list: ConfigKeys = deserialize(&validator_info_account.data)
|
||||
.map_err(|_| false)
|
||||
.unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
@ -502,7 +489,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_validator_info() {
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let keys = vec![(id(), false), (pubkey, true)];
|
||||
let keys = vec![(validator_info::id(), false), (pubkey, true)];
|
||||
let config = ConfigKeys { keys };
|
||||
|
||||
let mut info = Map::new();
|
||||
|
196
cli/src/vote.rs
@ -16,8 +16,9 @@ use solana_clap_utils::{
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account, commitment_config::CommitmentConfig, message::Message, pubkey::Pubkey,
|
||||
system_instruction::SystemError, transaction::Transaction,
|
||||
account::Account, commitment_config::CommitmentConfig, message::Message,
|
||||
native_token::lamports_to_sol, pubkey::Pubkey, system_instruction::SystemError,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_instruction::{self, withdraw, VoteError},
|
||||
@ -161,6 +162,35 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-commission")
|
||||
.about("Update the vote account's commission")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.required(true),
|
||||
"Vote account to update. "),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("commission")
|
||||
.index(2)
|
||||
.value_name("PERCENTAGE")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_percentage)
|
||||
.help("The new commission")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-account")
|
||||
.about("Show the contents of a vote account")
|
||||
@ -203,8 +233,8 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.value_name("AMOUNT")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_amount)
|
||||
.help("The amount to withdraw, in SOL"),
|
||||
.validator(is_amount_or_all)
|
||||
.help("The amount to withdraw, in SOL; accepts keyword ALL"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
@ -223,7 +253,7 @@ pub fn parse_create_vote_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let (identity_account, identity_pubkey) =
|
||||
signer_of(matches, "identity_account", wallet_manager)?;
|
||||
@ -241,6 +271,7 @@ pub fn parse_create_vote_account(
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
|
||||
seed,
|
||||
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
|
||||
authorized_voter,
|
||||
@ -290,7 +321,8 @@ pub fn parse_vote_update_validator(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (new_identity_account, new_identity_pubkey) =
|
||||
signer_of(matches, "new_identity_account", wallet_manager)?;
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
@ -304,6 +336,36 @@ pub fn parse_vote_update_validator(
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_vote_update_commission(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let commission = value_t_or_exit!(matches, "commission", u8);
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, authorized_withdrawer],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@ -336,7 +398,8 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let destination_account_pubkey =
|
||||
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let withdraw_amount = SpendAmount::new_from_matches(matches, "amount");
|
||||
|
||||
let (withdraw_authority, withdraw_authority_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
@ -353,7 +416,7 @@ pub fn parse_withdraw_from_vote_account(
|
||||
vote_account_pubkey,
|
||||
destination_account_pubkey,
|
||||
withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(),
|
||||
lamports,
|
||||
withdraw_amount,
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@ -362,13 +425,14 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account: SignerIndex,
|
||||
seed: &Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
) -> ProcessResult {
|
||||
let vote_account = config.signers[1];
|
||||
let vote_account = config.signers[vote_account];
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
let vote_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
@ -417,7 +481,7 @@ pub fn process_create_vote_account(
|
||||
lamports,
|
||||
)
|
||||
};
|
||||
Message::new(&ixs)
|
||||
Message::new(&ixs, Some(&config.signers[0].pubkey()))
|
||||
};
|
||||
|
||||
if let Ok(vote_account) = rpc_client.get_account(&vote_account_address) {
|
||||
@ -475,7 +539,7 @@ pub fn process_vote_authorize(
|
||||
vote_authorize, // vote or withdraw
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -493,8 +557,9 @@ pub fn process_vote_update_validator(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let new_identity_account = config.signers[new_identity_account];
|
||||
let new_identity_pubkey = new_identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
@ -508,7 +573,35 @@ pub fn process_vote_update_validator(
|
||||
&new_identity_pubkey,
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.signers[0].pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<VoteError>(result, &config)
|
||||
}
|
||||
|
||||
pub fn process_vote_update_commission(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_commission(
|
||||
vote_account_pubkey,
|
||||
&authorized_withdrawer.pubkey(),
|
||||
commission,
|
||||
)];
|
||||
|
||||
let message = Message::new(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -600,12 +693,28 @@ pub fn process_withdraw_from_vote_account(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
withdraw_authority: SignerIndex,
|
||||
lamports: u64,
|
||||
withdraw_amount: SpendAmount,
|
||||
destination_account_pubkey: &Pubkey,
|
||||
) -> ProcessResult {
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let withdraw_authority = config.signers[withdraw_authority];
|
||||
|
||||
let current_balance = rpc_client.get_balance(&vote_account_pubkey)?;
|
||||
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?;
|
||||
|
||||
let lamports = match withdraw_amount {
|
||||
SpendAmount::All => current_balance.saturating_sub(minimum_balance),
|
||||
SpendAmount::Some(withdraw_amount) => {
|
||||
if current_balance.saturating_sub(withdraw_amount) < minimum_balance {
|
||||
return Err(CliError::BadParameter(format!(
|
||||
"Withdraw amount too large. The vote account balance must be at least {} SOL to remain rent exempt", lamports_to_sol(minimum_balance)
|
||||
))
|
||||
.into());
|
||||
}
|
||||
withdraw_amount
|
||||
}
|
||||
};
|
||||
|
||||
let ix = withdraw(
|
||||
vote_account_pubkey,
|
||||
&withdraw_authority.pubkey(),
|
||||
@ -613,7 +722,7 @@ pub fn process_withdraw_from_vote_account(
|
||||
destination_account_pubkey,
|
||||
);
|
||||
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut transaction = Transaction::new_unsigned(message);
|
||||
transaction.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -716,6 +825,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -744,6 +854,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -776,6 +887,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(authed),
|
||||
@ -806,6 +918,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -833,6 +946,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@ -842,6 +956,28 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_update_commission = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-update-commission",
|
||||
&pubkey_string,
|
||||
"42",
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_update_commission, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey: pubkey,
|
||||
commission: 42,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(read_keypair_file(&keypair_file).unwrap()),
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromVoteAccount subcommand
|
||||
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
@ -862,7 +998,33 @@ mod tests {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 0,
|
||||
lamports: 42_000_000_000
|
||||
withdraw_amount: SpendAmount::Some(42_000_000_000),
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromVoteAccount subcommand
|
||||
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-from-vote-account",
|
||||
&keypair_file,
|
||||
&pubkey_string,
|
||||
"ALL",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(
|
||||
&test_withdraw_from_vote_account,
|
||||
&default_keypair_file,
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 0,
|
||||
withdraw_amount: SpendAmount::All,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
@ -893,7 +1055,7 @@ mod tests {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 1,
|
||||
lamports: 42_000_000_000
|
||||
withdraw_amount: SpendAmount::Some(42_000_000_000),
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
|
@ -57,6 +57,7 @@ fn test_stake_delegation_force() {
|
||||
let vote_keypair = Keypair::new();
|
||||
config.signers = vec![&default_signer, &vote_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
|
@@ -1,5 +1,9 @@
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_cli::test_utils::check_balance;
use solana_cli::{
    cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
    offline::{blockhash_query::BlockhashQuery, *},
    spend_utils::SpendAmount,
    test_utils::check_balance,
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
@ -45,6 +49,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
let vote_account_pubkey = vote_account_keypair.pubkey();
|
||||
config.signers = vec![&default_signer, &vote_account_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
@ -64,6 +69,23 @@ fn test_vote_authorize_and_withdraw() {
|
||||
.max(1);
|
||||
check_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Transfer in some more SOL
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(1_000),
|
||||
to: vote_account_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let expected_balance = expected_balance + 1_000;
|
||||
check_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer
|
||||
let withdraw_authority = Keypair::new();
|
||||
config.signers = vec![&default_signer];
|
||||
@ -86,7 +108,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
lamports: 100,
|
||||
withdraw_amount: SpendAmount::Some(100),
|
||||
destination_account_pubkey: destination_account,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
@ -99,6 +121,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.21" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.21" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.21" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@ -31,7 +32,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.1.0"
|
||||
jsonrpc-http-server = "14.1.0"
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
pub mod rpc_sender;
|
||||
|
@ -2,8 +2,12 @@ use crate::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
http_sender::HttpSender,
|
||||
mock_sender::{MockSender, Mocks},
|
||||
rpc_config::RpcLargestAccountsConfig,
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_config::RpcAccountInfoConfig,
|
||||
rpc_config::{
|
||||
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
|
||||
RpcSendTransactionConfig, RpcTokenAccountsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
@ -11,6 +15,15 @@ use bincode::serialize;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_account_decoder::{
|
||||
parse_token::{
|
||||
get_token_account_mint, parse_token, TokenAccountType, UiMint, UiMultisig, UiTokenAccount,
|
||||
UiTokenAmount,
|
||||
},
|
||||
UiAccount,
|
||||
UiAccountData::{Binary, Binary64},
|
||||
UiAccountEncoding,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{
|
||||
@ -22,18 +35,17 @@ use solana_sdk::{
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
hash::Hash,
|
||||
inflation::Inflation,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
signers::Signers,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
error,
|
||||
collections::HashMap,
|
||||
net::SocketAddr,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
@ -95,10 +107,20 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
|
||||
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
|
||||
}
|
||||
|
||||
pub fn send_transaction_with_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
|
||||
let signature_base58_str: String =
|
||||
self.send(RpcRequest::SendTransaction, json!([serialized_encoded]))?;
|
||||
let signature_base58_str: String = self.send(
|
||||
RpcRequest::SendTransaction,
|
||||
json!([serialized_encoded, config]),
|
||||
)?;
|
||||
|
||||
let signature = signature_base58_str
|
||||
.parse::<Signature>()
|
||||
@ -122,7 +144,7 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
sig_verify: bool,
|
||||
) -> RpcResult<TransactionStatus> {
|
||||
) -> RpcResult<RpcSimulateTransactionResult> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
self.send(
|
||||
RpcRequest::SimulateTransaction,
|
||||
@ -230,13 +252,13 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, TransactionEncoding::Json)
|
||||
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
|
||||
}
|
||||
@ -274,10 +296,36 @@ impl RpcClient {
|
||||
Ok(signatures)
|
||||
}
|
||||
|
||||
    pub fn get_confirmed_signatures_for_address2(
        &self,
        address: &Pubkey,
    ) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
        self.get_confirmed_signatures_for_address2_with_config(address, None, None)
    }

    pub fn get_confirmed_signatures_for_address2_with_config(
        &self,
        address: &Pubkey,
        before: Option<Signature>,
        limit: Option<usize>,
    ) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
        let config = RpcGetConfirmedSignaturesForAddress2Config {
            before: before.map(|signature| signature.to_string()),
            limit,
        };

        let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
            RpcRequest::GetConfirmedSignaturesForAddress2,
            json!([address.to_string(), config]),
        )?;

        Ok(result)
    }
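A hedged usage sketch (not part of the diff) showing how a caller might page backwards through an address's history with the new before/limit parameters; the batch size of 1,000 and the surrounding variables are illustrative assumptions:

    // Hypothetical caller, assuming `rpc_client: RpcClient` and `address: Pubkey` are in scope.
    let mut before: Option<Signature> = None;
    loop {
        let batch = rpc_client
            .get_confirmed_signatures_for_address2_with_config(&address, before, Some(1_000))?;
        if batch.is_empty() {
            break;
        }
        for status in &batch {
            println!("{} (slot {})", status.signature, status.slot);
        }
        // The returned signatures are strings; parse the oldest one to continue paging.
        before = batch
            .last()
            .and_then(|status| status.signature.parse::<Signature>().ok());
    }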
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: TransactionEncoding,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<ConfirmedTransaction> {
|
||||
self.send(
|
||||
RpcRequest::GetConfirmedTransaction,
|
||||
@ -346,8 +394,12 @@ impl RpcClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_inflation(&self) -> ClientResult<Inflation> {
|
||||
self.send(RpcRequest::GetInflation, Value::Null)
|
||||
pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
|
||||
self.send(RpcRequest::GetInflationGovernor, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
|
||||
self.send(RpcRequest::GetInflationRate, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
|
||||
@ -403,96 +455,6 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
&self,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
// Send all transactions
|
||||
let mut transactions_signatures = vec![];
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if cfg!(not(test)) {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let signature = self.send_transaction(&transaction).ok();
|
||||
transactions_signatures.push((transaction, signature));
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions sent",
|
||||
transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
|
||||
transactions_signatures = transactions_signatures
|
||||
.into_iter()
|
||||
.filter(|(_transaction, signature)| {
|
||||
if let Some(signature) = signature {
|
||||
if let Ok(status) = self.get_signature_status(&signature) {
|
||||
if self
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(0)
|
||||
> 1
|
||||
{
|
||||
return false;
|
||||
} else {
|
||||
return match status {
|
||||
None => true,
|
||||
Some(result) => result.is_err(),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
})
|
||||
.collect();
|
||||
|
||||
if transactions_signatures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err(RpcError::ForUser("Transactions failed".to_string()).into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator) =
|
||||
self.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
|
||||
transactions = vec![];
|
||||
for (mut transaction, _) in transactions_signatures.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resign_transaction<T: Signers>(
|
||||
&self,
|
||||
tx: &mut Transaction,
|
||||
@ -515,9 +477,13 @@ impl RpcClient {
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<Account>> {
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Binary64),
|
||||
commitment: Some(commitment_config),
|
||||
};
|
||||
let response = self.sender.send(
|
||||
RpcRequest::GetAccountInfo,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
json!([pubkey.to_string(), config]),
|
||||
);
|
||||
|
||||
response
|
||||
@ -529,10 +495,19 @@ impl RpcClient {
|
||||
}
|
||||
let Response {
|
||||
context,
|
||||
value: rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<RpcAccount>>>(result_json)?;
|
||||
value: mut rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
|
||||
if let Some(ref mut account) = rpc_account {
|
||||
if let Binary(_) = &account.data {
|
||||
let tmp = Binary64(String::new());
|
||||
match std::mem::replace(&mut account.data, tmp) {
|
||||
Binary(new_data) => account.data = Binary64(new_data),
|
||||
_ => panic!("should have gotten binary here."),
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("Response account {:?} {:?}", pubkey, rpc_account);
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode().ok());
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account,
|
||||
@ -588,17 +563,7 @@ impl RpcClient {
|
||||
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let accounts: Vec<RpcKeyedAccount> =
|
||||
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
RpcRequest::GetProgramAccounts,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((pubkey, account.decode().unwrap()));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
|
||||
}
|
||||
|
||||
/// Request the transaction count.
|
||||
@ -614,26 +579,46 @@ impl RpcClient {
|
||||
    }

    pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
        Ok(self
        let (blockhash, fee_calculator, _last_valid_slot) = self
            .get_recent_blockhash_with_commitment(CommitmentConfig::default())?
            .value)
            .value;
        Ok((blockhash, fee_calculator))
    }

    pub fn get_recent_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> RpcResult<(Hash, FeeCalculator)> {
        let Response {
    ) -> RpcResult<(Hash, FeeCalculator, Slot)> {
        let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
            context,
            value:
                RpcFees {
                    blockhash,
                    fee_calculator,
                    last_valid_slot,
                },
        }) =
            self.send::<Response<RpcFees>>(RpcRequest::GetFees, json!([commitment_config]))
        {
            (context, blockhash, fee_calculator, last_valid_slot)
        } else if let Ok(Response {
            context,
            value:
                RpcBlockhashFeeCalculator {
                    blockhash,
                    fee_calculator,
                },
        } = self.send::<Response<RpcBlockhashFeeCalculator>>(
        }) = self.send::<Response<RpcBlockhashFeeCalculator>>(
            RpcRequest::GetRecentBlockhash,
            json!([commitment_config]),
        )?;
        ) {
            (context, blockhash, fee_calculator, 0)
        } else {
            return Err(ClientError::new_with_request(
                RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
                RpcRequest::GetRecentBlockhash,
            ));
        };

        let blockhash = blockhash.parse().map_err(|_| {
            ClientError::new_with_request(
@ -643,7 +628,7 @@ impl RpcClient {
        })?;
        Ok(Response {
            context,
            value: (blockhash, fee_calculator),
            value: (blockhash, fee_calculator, last_valid_slot),
        })
    }
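As a caller-side illustration of the widened return value, here is a hedged sketch; it assumes RpcClient::new(url) from earlier releases and a validator at the default local RPC port, neither of which is shown in this diff.

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    // Hypothetical endpoint; substitute your own cluster URL.
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    match client.get_recent_blockhash_with_commitment(CommitmentConfig::default()) {
        Ok(response) => {
            // The value now also carries the last slot in which the blockhash is valid.
            let (blockhash, fee_calculator, last_valid_slot) = response.value;
            println!(
                "blockhash {} costs {} lamports/signature, valid through slot {}",
                blockhash, fee_calculator.lamports_per_signature, last_valid_slot
            );
        }
        Err(err) => eprintln!("getFees/getRecentBlockhash failed: {}", err),
    }
}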
|
||||
|
||||
@ -651,12 +636,28 @@
        &self,
        blockhash: &Hash,
    ) -> ClientResult<Option<FeeCalculator>> {
        let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
        Ok(self
            .get_fee_calculator_for_blockhash_with_commitment(
                blockhash,
                CommitmentConfig::default(),
            )?
            .value)
    }

    pub fn get_fee_calculator_for_blockhash_with_commitment(
        &self,
        blockhash: &Hash,
        commitment_config: CommitmentConfig,
    ) -> RpcResult<Option<FeeCalculator>> {
        let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
            RpcRequest::GetFeeCalculatorForBlockhash,
            json!([blockhash.to_string()]),
            json!([blockhash.to_string(), commitment_config]),
        )?;

        Ok(value.map(|rf| rf.fee_calculator))
        Ok(Response {
            context,
            value: value.map(|rf| rf.fee_calculator),
        })
    }

    pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
@ -709,6 +710,256 @@ impl RpcClient {
|
||||
        Ok(hash)
    }

    pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
        Ok(self
            .get_token_account_with_commitment(pubkey, CommitmentConfig::default())?
            .value)
    }

    pub fn get_token_account_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> RpcResult<Option<UiTokenAccount>> {
        let Response {
            context,
            value: account,
        } = self.get_account_with_commitment(pubkey, commitment_config)?;

        if account.is_none() {
            return Err(RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into());
        }
        let account = account.unwrap();
        let mint = get_token_account_mint(&account.data)
            .and_then(|mint_pubkey| {
                self.get_token_mint_with_commitment(&mint_pubkey, commitment_config)
                    .ok()
                    .map(|response| response.value)
                    .flatten()
            })
            .ok_or_else(|| {
                Into::<ClientError>::into(RpcError::ForUser(format!(
                    "AccountNotFound: mint for token acccount pubkey={}",
                    pubkey
                )))
            })?;

        Ok(Response {
            context,
            value: match parse_token(&account.data, Some(mint.decimals)) {
                Ok(TokenAccountType::Account(ui_token_account)) => Some(ui_token_account),
                _ => None,
            },
        })
    }
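A hedged usage sketch for the new token-account getter; RpcClient::new(url) is assumed from earlier releases, and the random pubkey is a placeholder only.

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    // Placeholder address; a real SPL Token account pubkey would go here.
    let token_account = Pubkey::new_rand();
    match client.get_token_account(&token_account) {
        Ok(Some(_ui_token_account)) => println!("found a parsable SPL Token account"),
        Ok(None) => println!("account data did not parse as a token account"),
        Err(err) => eprintln!("lookup failed: {}", err),
    }
}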
|
||||
|
||||
pub fn get_token_mint(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMint>> {
|
||||
Ok(self
|
||||
.get_token_mint_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_mint_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<UiMint>> {
|
||||
let Response {
|
||||
context,
|
||||
value: account,
|
||||
} = self.get_account_with_commitment(pubkey, commitment_config)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account
|
||||
.map(|account| match parse_token(&account.data, None) {
|
||||
Ok(TokenAccountType::Mint(ui_token_mint)) => Some(ui_token_mint),
|
||||
_ => None,
|
||||
})
|
||||
.flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_multisig(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMultisig>> {
|
||||
Ok(self
|
||||
.get_token_multisig_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_multisig_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<UiMultisig>> {
|
||||
let Response {
|
||||
context,
|
||||
value: account,
|
||||
} = self.get_account_with_commitment(pubkey, commitment_config)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account
|
||||
.map(|account| match parse_token(&account.data, None) {
|
||||
Ok(TokenAccountType::Multisig(ui_token_multisig)) => Some(ui_token_multisig),
|
||||
_ => None,
|
||||
})
|
||||
.flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountBalance,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate_with_commitment(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
let Response {
|
||||
context,
|
||||
value: accounts,
|
||||
} = self.send(
|
||||
RpcRequest::GetTokenAccountsByDelegate,
|
||||
json!([
|
||||
delegate.to_string(),
|
||||
token_account_filter,
|
||||
commitment_config
|
||||
]),
|
||||
)?;
|
||||
let pubkey_accounts = self.accounts_to_token_accounts(
|
||||
commitment_config,
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetTokenAccountsByDelegate)?,
|
||||
);
|
||||
Ok(Response {
|
||||
context,
|
||||
value: pubkey_accounts,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner_with_commitment(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
let Response {
|
||||
context,
|
||||
value: accounts,
|
||||
} = self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([owner.to_string(), token_account_filter, commitment_config]),
|
||||
)?;
|
||||
let pubkey_accounts = self.accounts_to_token_accounts(
|
||||
commitment_config,
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetTokenAccountsByDelegate)?,
|
||||
);
|
||||
Ok(Response {
|
||||
context,
|
||||
value: pubkey_accounts,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_supply_with_commitment(
|
||||
&self,
|
||||
mint: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenSupply,
|
||||
json!([mint.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
    fn accounts_to_token_accounts(
        &self,
        commitment_config: CommitmentConfig,
        pubkey_accounts: Vec<(Pubkey, Account)>,
    ) -> Vec<(Pubkey, UiTokenAccount)> {
        let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
        pubkey_accounts
            .into_iter()
            .filter_map(|(pubkey, account)| {
                let mint_pubkey = get_token_account_mint(&account.data)?;
                let decimals = mint_decimals.get(&mint_pubkey).cloned().or_else(|| {
                    let mint = self
                        .get_token_mint_with_commitment(&mint_pubkey, commitment_config)
                        .ok()
                        .map(|response| response.value)
                        .flatten()?;
                    mint_decimals.insert(mint_pubkey, mint.decimals);
                    Some(mint.decimals)
                })?;
                match parse_token(&account.data, Some(decimals)) {
                    Ok(TokenAccountType::Account(ui_token_account)) => {
                        Some((pubkey, ui_token_account))
                    }
                    _ => None,
                }
            })
            .collect()
    }
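The helper above memoizes mint decimals so that many token accounts sharing one mint trigger only a single mint lookup. A minimal, dependency-free sketch of that caching pattern (the names are illustrative, not part of the client API):

use std::collections::HashMap;

fn cached_decimals(
    cache: &mut HashMap<u64, u8>,
    mint: u64,
    fetch: impl Fn(u64) -> Option<u8>,
) -> Option<u8> {
    // Serve repeat lookups from the cache; fall back to the expensive fetch once per mint.
    if let Some(decimals) = cache.get(&mint) {
        return Some(*decimals);
    }
    let decimals = fetch(mint)?;
    cache.insert(mint, decimals);
    Some(decimals)
}

fn main() {
    let mut cache = HashMap::new();
    let fetch = |mint: u64| Some((mint % 10) as u8); // stand-in for an RPC round trip
    assert_eq!(cached_decimals(&mut cache, 42, fetch), Some(2));
    assert_eq!(cached_decimals(&mut cache, 42, fetch), Some(2)); // second call hits the cache
    println!("cached {} mint(s)", cache.len());
}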
|
||||
|
||||
pub fn poll_balance_with_timeout_and_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
@ -913,6 +1164,17 @@ impl RpcClient {
|
||||
pub fn send_and_confirm_transaction_with_spinner(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
transaction,
|
||||
RpcSendTransactionConfig::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let mut confirmations = 0;
|
||||
|
||||
@ -928,7 +1190,7 @@ impl RpcClient {
|
||||
));
|
||||
let mut status_retries = 15;
|
||||
let (signature, status) = loop {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
let signature = self.send_transaction_with_config(transaction, config.clone())?;
|
||||
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self
|
||||
@ -1039,6 +1301,31 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_keyed_accounts(
    accounts: Vec<RpcKeyedAccount>,
    request: RpcRequest,
) -> ClientResult<Vec<(Pubkey, Account)>> {
    let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
    for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
        let pubkey = pubkey.parse().map_err(|_| {
            ClientError::new_with_request(
                RpcError::ParseError("Pubkey".to_string()).into(),
                request,
            )
        })?;
        pubkey_accounts.push((
            pubkey,
            account.decode().ok_or_else(|| {
                ClientError::new_with_request(
                    RpcError::ParseError("Account from rpc".to_string()).into(),
                    request,
                )
            })?,
        ));
    }
    Ok(pubkey_accounts)
}
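A hedged caller sketch for the get_program_accounts path that now funnels through this helper; RpcClient::new(url) is assumed from earlier releases and the program id is a placeholder.

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() {
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let program_id = Pubkey::new_rand(); // placeholder; use a real program id
    match client.get_program_accounts(&program_id) {
        Ok(accounts) => {
            for (pubkey, account) in accounts {
                println!("{}: {} lamports, {} data bytes", pubkey, account.lamports, account.data.len());
            }
        }
        Err(err) => eprintln!("getProgramAccounts failed: {}", err),
    }
}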
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -1,4 +1,6 @@
use solana_sdk::commitment_config::CommitmentConfig;
use crate::rpc_filter::RpcFilterType;
use solana_account_decoder::UiAccountEncoding;
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -6,7 +8,13 @@ pub struct RpcSignatureStatusConfig {
    pub search_transaction_history: bool,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSendTransactionConfig {
    pub skip_preflight: bool,
}

#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
    pub sig_verify: bool,
@ -26,3 +34,41 @@ pub struct RpcLargestAccountsConfig {
    pub commitment: Option<CommitmentConfig>,
    pub filter: Option<RpcLargestAccountsFilter>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcStakeConfig {
    pub epoch: Option<Epoch>,
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccountInfoConfig {
    pub encoding: Option<UiAccountEncoding>,
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcProgramAccountsConfig {
    pub filters: Option<Vec<RpcFilterType>>,
    #[serde(flatten)]
    pub account_config: RpcAccountInfoConfig,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcTokenAccountsFilter {
    Mint(String),
    ProgramId(String),
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
    pub before: Option<String>, // Signature as base-58 string
    pub limit: Option<usize>,
}
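A hedged sketch of how these config types compose; it assumes the crate exposes rpc_config and rpc_filter as the file paths suggest, that bs58 and serde_json are available as dependencies, and that the offset and size values are illustrative only.

use solana_account_decoder::UiAccountEncoding;
use solana_client::{
    rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
    rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
};
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    let config = RpcProgramAccountsConfig {
        // Keep only 165-byte accounts whose first byte equals 1 (both values illustrative).
        filters: Some(vec![
            RpcFilterType::DataSize(165),
            RpcFilterType::Memcmp(Memcmp {
                offset: 0,
                bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1u8]).into_string()),
                encoding: None,
            }),
        ]),
        account_config: RpcAccountInfoConfig {
            encoding: Some(UiAccountEncoding::Binary64),
            commitment: Some(CommitmentConfig::default()),
        },
    };
    // The camelCase rename attributes control the wire format sent to the node.
    println!("{}", serde_json::to_string(&config).unwrap());
}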
|
||||
|
client/src/rpc_filter.rs (new file, 143 lines)
@ -0,0 +1,143 @@
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcFilterType {
|
||||
DataSize(u64),
|
||||
Memcmp(Memcmp),
|
||||
}
|
||||
|
||||
impl RpcFilterType {
|
||||
pub fn verify(&self) -> Result<(), RpcFilterError> {
|
||||
match self {
|
||||
RpcFilterType::DataSize(_) => Ok(()),
|
||||
RpcFilterType::Memcmp(compare) => {
|
||||
let encoding = compare.encoding.as_ref().unwrap_or(&MemcmpEncoding::Binary);
|
||||
match encoding {
|
||||
MemcmpEncoding::Binary => {
|
||||
let MemcmpEncodedBytes::Binary(bytes) = &compare.bytes;
|
||||
bs58::decode(&bytes)
|
||||
.into_vec()
|
||||
.map(|_| ())
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RpcFilterError {
|
||||
#[error("bs58 decode error")]
|
||||
DecodeError(#[from] bs58::decode::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum MemcmpEncoding {
|
||||
Binary,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", untagged)]
|
||||
pub enum MemcmpEncodedBytes {
|
||||
Binary(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Memcmp {
|
||||
/// Data offset to begin match
|
||||
pub offset: usize,
|
||||
/// Bytes, encoded with specified encoding, or default Binary
|
||||
pub bytes: MemcmpEncodedBytes,
|
||||
/// Optional encoding specification
|
||||
pub encoding: Option<MemcmpEncoding>,
|
||||
}
|
||||
|
||||
impl Memcmp {
|
||||
pub fn bytes_match(&self, data: &[u8]) -> bool {
|
||||
match &self.bytes {
|
||||
MemcmpEncodedBytes::Binary(bytes) => {
|
||||
let bytes = bs58::decode(bytes).into_vec();
|
||||
if bytes.is_err() {
|
||||
return false;
|
||||
}
|
||||
let bytes = bytes.unwrap();
|
||||
if self.offset > data.len() {
|
||||
return false;
|
||||
}
|
||||
if data[self.offset..].len() < bytes.len() {
|
||||
return false;
|
||||
}
|
||||
data[self.offset..self.offset + bytes.len()] == bytes[..]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_bytes_match() {
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
// Exact match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2, 3, 4, 5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Incorrect partial match of data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Bytes overrun data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4, 5, 6]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset outside data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 6,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Invalid base-58 fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary("III".to_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
}
|
||||
}
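A small hedged example tying verify() and bytes_match() together outside the test module; it assumes the module is reachable as solana_client::rpc_filter and that bs58 is available.

use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType};

fn main() {
    let account_data = vec![7u8, 8, 9, 10];
    let filter = RpcFilterType::Memcmp(Memcmp {
        offset: 1,
        bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![8u8, 9]).into_string()),
        encoding: None,
    });
    // verify() only checks that the encoded bytes are valid base-58.
    assert!(filter.verify().is_ok());
    if let RpcFilterType::Memcmp(compare) = &filter {
        // bytes_match() decodes the bytes and compares them against the data at `offset`.
        assert!(compare.bytes_match(&account_data));
    }
    println!("filter verified and matched");
}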
|
@ -1,4 +1,5 @@
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
|
||||
@ -13,18 +14,22 @@ pub enum RpcRequest {
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetGenesisHash,
|
||||
GetIdentity,
|
||||
GetInflation,
|
||||
GetLargestAccounts,
|
||||
GetLeaderSchedule,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetFees,
|
||||
GetGenesisHash,
|
||||
GetIdentity,
|
||||
GetInflationGovernor,
|
||||
GetInflationRate,
|
||||
GetLargestAccounts,
|
||||
GetLeaderSchedule,
|
||||
GetMinimumBalanceForRentExemption,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
@ -33,17 +38,20 @@ pub enum RpcRequest {
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetSupply,
|
||||
GetTokenAccountBalance,
|
||||
GetTokenAccountsByDelegate,
|
||||
GetTokenAccountsByOwner,
|
||||
GetTokenSupply,
|
||||
GetTotalSupply,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
GetVoteAccounts,
|
||||
MinimumLedgerSlot,
|
||||
RegisterNode,
|
||||
RequestAirdrop,
|
||||
SendTransaction,
|
||||
SimulateTransaction,
|
||||
SignVote,
|
||||
GetMinimumBalanceForRentExemption,
|
||||
MinimumLedgerSlot,
|
||||
}
|
||||
|
||||
impl fmt::Display for RpcRequest {
|
||||
@ -58,18 +66,22 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
RpcRequest::GetIdentity => "getIdentity",
|
||||
RpcRequest::GetInflation => "getInflation",
|
||||
RpcRequest::GetLargestAccounts => "getLargestAccounts",
|
||||
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
|
||||
RpcRequest::GetProgramAccounts => "getProgramAccounts",
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetFees => "getFees",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
RpcRequest::GetIdentity => "getIdentity",
|
||||
RpcRequest::GetInflationGovernor => "getInflationGovernor",
|
||||
RpcRequest::GetInflationRate => "getInflationRate",
|
||||
RpcRequest::GetLargestAccounts => "getLargestAccounts",
|
||||
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
|
||||
RpcRequest::GetProgramAccounts => "getProgramAccounts",
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
@ -78,17 +90,20 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetSupply => "getSupply",
|
||||
RpcRequest::GetTokenAccountBalance => "getTokenAccountBalance",
|
||||
RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
|
||||
RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
|
||||
RpcRequest::GetTokenSupply => "getTokenSupply",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
RpcRequest::GetVoteAccounts => "getVoteAccounts",
|
||||
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
|
||||
RpcRequest::RegisterNode => "registerNode",
|
||||
RpcRequest::RequestAirdrop => "requestAirdrop",
|
||||
RpcRequest::SendTransaction => "sendTransaction",
|
||||
RpcRequest::SimulateTransaction => "simulateTransaction",
|
||||
RpcRequest::SignVote => "signVote",
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
|
||||
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
|
||||
};
|
||||
|
||||
write!(f, "{}", method)
|
||||
@ -98,6 +113,11 @@ impl fmt::Display for RpcRequest {
|
||||
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
|
||||
|
||||
// Validators that are this number of slots behind are considered delinquent
|
||||
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
|
||||
|
||||
impl RpcRequest {
|
||||
pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
|
||||
@ -123,9 +143,16 @@ pub enum RpcError {
|
||||
ForUser(String), /* "direct-to-user message" */
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum TokenAccountsFilter {
|
||||
Mint(Pubkey),
|
||||
ProgramId(Pubkey),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::rpc_config::RpcTokenAccountsFilter;
|
||||
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
|
||||
|
||||
#[test]
|
||||
@ -144,10 +171,6 @@ mod tests {
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getEpochInfo");
|
||||
|
||||
let test_request = RpcRequest::GetInflation;
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getInflation");
|
||||
|
||||
let test_request = RpcRequest::GetRecentBlockhash;
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getRecentBlockhash");
|
||||
@ -194,5 +217,16 @@ mod tests {
|
||||
let request =
|
||||
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
|
||||
assert_eq!(request["params"], json!([addr, commitment_config]));
|
||||
|
||||
// Test request with CommitmentConfig and params
|
||||
let test_request = RpcRequest::GetTokenAccountsByOwner;
|
||||
let mint = Pubkey::new_rand();
|
||||
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
|
||||
let request = test_request
|
||||
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));
|
||||
assert_eq!(
|
||||
request["params"],
|
||||
json!([addr, token_account_filter, commitment_config])
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -1,12 +1,13 @@
|
||||
use crate::{client_error, rpc_request::RpcError};
|
||||
use crate::client_error;
|
||||
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
pubkey::Pubkey,
|
||||
inflation::Inflation,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
|
||||
use std::{collections::HashMap, net::SocketAddr};
|
||||
|
||||
pub type RpcResult<T> = client_error::Result<Response<T>>;
|
||||
|
||||
@ -35,6 +36,14 @@ pub struct RpcBlockhashFeeCalculator {
|
||||
pub fee_calculator: FeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFees {
|
||||
pub blockhash: String,
|
||||
pub fee_calculator: FeeCalculator,
|
||||
pub last_valid_slot: Slot,
|
||||
}
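For reference, a hedged sketch of the JSON shape this struct is meant to deserialize, assuming FeeCalculator keeps its usual camelCase lamportsPerSignature representation; the concrete values are made up.

use solana_client::rpc_response::RpcFees;

fn main() {
    let json = r#"{
        "blockhash": "4sGjMW1sUnHzSxGspuhpqLDx6wiyjNtZAMdL4VZHirAn",
        "feeCalculator": { "lamportsPerSignature": 5000 },
        "lastValidSlot": 1234
    }"#;
    let fees: RpcFees = serde_json::from_str(json).expect("RpcFees JSON should deserialize");
    println!("{} is valid through slot {}", fees.blockhash, fees.last_valid_slot);
}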
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFeeCalculator {
|
||||
@ -47,11 +56,42 @@ pub struct RpcFeeRateGovernor {
|
||||
pub fee_rate_governor: FeeRateGovernor,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcInflationGovernor {
|
||||
pub initial: f64,
|
||||
pub terminal: f64,
|
||||
pub taper: f64,
|
||||
pub foundation: f64,
|
||||
pub foundation_term: f64,
|
||||
}
|
||||
|
||||
impl From<Inflation> for RpcInflationGovernor {
|
||||
fn from(inflation: Inflation) -> Self {
|
||||
Self {
|
||||
initial: inflation.initial,
|
||||
terminal: inflation.terminal,
|
||||
taper: inflation.taper,
|
||||
foundation: inflation.foundation,
|
||||
foundation_term: inflation.foundation_term,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcInflationRate {
|
||||
pub total: f64,
|
||||
pub validator: f64,
|
||||
pub foundation: f64,
|
||||
pub epoch: Epoch,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcKeyedAccount {
|
||||
pub pubkey: String,
|
||||
pub account: RpcAccount,
|
||||
pub account: UiAccount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
@ -60,43 +100,6 @@ pub struct RpcSignatureResult {
|
||||
pub err: Option<TransactionError>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccount {
|
||||
pub lamports: u64,
|
||||
pub data: String,
|
||||
pub owner: String,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: Epoch,
|
||||
}
|
||||
|
||||
impl RpcAccount {
|
||||
pub fn encode(account: Account) -> Self {
|
||||
RpcAccount {
|
||||
lamports: account.lamports,
|
||||
data: bs58::encode(account.data.clone()).into_string(),
|
||||
owner: account.owner.to_string(),
|
||||
executable: account.executable,
|
||||
rent_epoch: account.rent_epoch,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> std::result::Result<Account, RpcError> {
|
||||
Ok(Account {
|
||||
lamports: self.lamports,
|
||||
data: bs58::decode(self.data.clone()).into_vec().map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account data".to_string())
|
||||
})?,
|
||||
owner: Pubkey::from_str(&self.owner).map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account owner".to_string())
|
||||
})?,
|
||||
executable: self.executable,
|
||||
rent_epoch: self.rent_epoch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct RpcContactInfo {
|
||||
/// Pubkey of the node as a base-58 string
|
||||
@ -171,6 +174,13 @@ pub struct RpcSignatureConfirmation {
|
||||
pub status: Result<()>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSimulateTransactionResult {
|
||||
pub err: Option<TransactionError>,
|
||||
pub logs: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStorageTurn {
|
||||
@ -193,3 +203,54 @@ pub struct RpcSupply {
|
||||
pub non_circulating: u64,
|
||||
pub non_circulating_accounts: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum StakeActivationState {
|
||||
Activating,
|
||||
Active,
|
||||
Deactivating,
|
||||
Inactive,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStakeActivation {
|
||||
pub state: StakeActivationState,
|
||||
pub active: u64,
|
||||
pub inactive: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTokenAccountBalance {
|
||||
pub address: String,
|
||||
#[serde(flatten)]
|
||||
pub amount: UiTokenAmount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedTransactionStatusWithSignature {
|
||||
pub signature: String,
|
||||
pub slot: Slot,
|
||||
pub err: Option<TransactionError>,
|
||||
pub memo: Option<String>,
|
||||
}
|
||||
|
||||
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
|
||||
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
|
||||
let ConfirmedTransactionStatusWithSignature {
|
||||
signature,
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
} = value;
|
||||
Self {
|
||||
signature: signature.to_string(),
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -9,7 +9,7 @@ use log::*;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
client::{AsyncClient, Client, SyncClient},
|
||||
clock::MAX_PROCESSING_AGE,
|
||||
clock::{Slot, MAX_PROCESSING_AGE},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_info::EpochInfo,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
@ -357,7 +357,7 @@ impl Client for ThinClient {
|
||||
}
|
||||
|
||||
impl SyncClient for ThinClient {
|
||||
fn send_message<T: Signers>(
|
||||
fn send_and_confirm_message<T: Signers>(
|
||||
&self,
|
||||
keypairs: &T,
|
||||
message: Message,
|
||||
@ -368,16 +368,16 @@ impl SyncClient for ThinClient {
|
||||
Ok(signature)
|
||||
}
|
||||
|
||||
fn send_instruction(
|
||||
fn send_and_confirm_instruction(
|
||||
&self,
|
||||
keypair: &Keypair,
|
||||
instruction: Instruction,
|
||||
) -> TransportResult<Signature> {
|
||||
let message = Message::new(&[instruction]);
|
||||
self.send_message(&[keypair], message)
|
||||
let message = Message::new(&[instruction], Some(&keypair.pubkey()));
|
||||
self.send_and_confirm_message(&[keypair], message)
|
||||
}
|
||||
|
||||
fn transfer(
|
||||
fn transfer_and_confirm(
|
||||
&self,
|
||||
lamports: u64,
|
||||
keypair: &Keypair,
|
||||
@ -385,7 +385,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Signature> {
|
||||
let transfer_instruction =
|
||||
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
|
||||
self.send_instruction(keypair, transfer_instruction)
|
||||
self.send_and_confirm_instruction(keypair, transfer_instruction)
|
||||
}
|
||||
|
||||
fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> {
|
||||
@ -427,13 +427,15 @@ impl SyncClient for ThinClient {
|
||||
}
|
||||
|
||||
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::default())
|
||||
let (blockhash, fee_calculator, _last_valid_slot) =
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
|
||||
Ok((blockhash, fee_calculator))
|
||||
}
|
||||
|
||||
fn get_recent_blockhash_with_commitment(
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> TransportResult<(Hash, FeeCalculator)> {
|
||||
) -> TransportResult<(Hash, FeeCalculator, Slot)> {
|
||||
let index = self.optimizer.experiment();
|
||||
let now = Instant::now();
|
||||
let recent_blockhash =
|
||||
@ -441,7 +443,7 @@ impl SyncClient for ThinClient {
|
||||
match recent_blockhash {
|
||||
Ok(Response { value, .. }) => {
|
||||
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
|
||||
Ok(value)
|
||||
Ok((value.0, value.1, value.2))
|
||||
}
|
||||
Err(e) => {
|
||||
self.optimizer.report(index, std::u64::MAX);
|
||||
@ -609,7 +611,7 @@ impl AsyncClient for ThinClient {
|
||||
instruction: Instruction,
|
||||
recent_blockhash: Hash,
|
||||
) -> TransportResult<Signature> {
|
||||
let message = Message::new(&[instruction]);
|
||||
let message = Message::new(&[instruction], Some(&keypair.pubkey()));
|
||||
self.async_send_message(&[keypair], message, recent_blockhash)
|
||||
}
|
||||
fn async_transfer(
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.0"
|
||||
version = "1.2.21"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -21,6 +21,7 @@ byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.4"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
@ -41,35 +42,38 @@ regex = "1.3.7"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
|
||||
solana-client = { path = "../client", version = "1.2.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.0" }
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
solana-ledger = { path = "../ledger", version = "1.2.0" }
|
||||
solana-logger = { path = "../logger", version = "1.2.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.0" }
|
||||
solana-measure = { path = "../measure", version = "1.2.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
|
||||
solana-perf = { path = "../perf", version = "1.2.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.0" }
|
||||
solana-version = { path = "../version", version = "1.2.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.0" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.21" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.21" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.21" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.21" }
|
||||
solana-client = { path = "../client", version = "1.2.21" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.21" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.21" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.21" }
|
||||
solana-logger = { path = "../logger", version = "1.2.21" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.21" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.21" }
|
||||
solana-measure = { path = "../measure", version = "1.2.21" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.21" }
|
||||
solana-perf = { path = "../perf", version = "1.2.21" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.21" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.21" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.21" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.2.21" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.21" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.21" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.21" }
|
||||
solana-version = { path = "../version", version = "1.2.21" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.21" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.21" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.21" }
|
||||
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.0" }
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -19,6 +19,7 @@ use solana_perf::test_tx::test_tx;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::genesis_config::GenesisConfig;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::message::Message;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::signature::Signature;
|
||||
@ -117,9 +118,8 @@ fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
|
||||
let to_key = Pubkey::new_rand();
|
||||
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
|
||||
}
|
||||
let mut new = Transaction::new_unsigned_instructions(&instructions);
|
||||
new.sign(&[&from_key], hash);
|
||||
new
|
||||
let message = Message::new(&instructions, Some(&from_key.pubkey()));
|
||||
Transaction::new(&[&from_key], message, hash)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
@ -3,6 +3,7 @@
|
||||
extern crate test;
|
||||
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
|
||||
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
@ -47,7 +48,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&last_datapoint,
|
||||
&mut 0,
|
||||
&mut TransmitShredsStats::default(),
|
||||
)
|
||||
.unwrap();
|
||||
});
|
||||
|
@ -12,9 +12,10 @@ const NUM_ENTRIES: usize = 800;
|
||||
|
||||
#[bench]
|
||||
fn bench_poh_verify_ticks(bencher: &mut Bencher) {
|
||||
solana_logger::setup();
|
||||
let zero = Hash::default();
|
||||
let mut cur_hash = hash(&zero.as_ref());
|
||||
let start = *&cur_hash;
|
||||
let start_hash = hash(&zero.as_ref());
|
||||
let mut cur_hash = start_hash;
|
||||
|
||||
let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
|
||||
for _ in 0..NUM_ENTRIES {
|
||||
@ -22,15 +23,15 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
|
||||
}
|
||||
|
||||
bencher.iter(|| {
|
||||
ticks.verify(&start);
|
||||
assert!(ticks.verify(&start_hash));
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
|
||||
let zero = Hash::default();
|
||||
let mut cur_hash = hash(&zero.as_ref());
|
||||
let start = *&cur_hash;
|
||||
let start_hash = hash(&zero.as_ref());
|
||||
let mut cur_hash = start_hash;
|
||||
|
||||
let keypair1 = Keypair::new();
|
||||
let pubkey1 = keypair1.pubkey();
|
||||
@ -42,6 +43,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
|
||||
}
|
||||
|
||||
bencher.iter(|| {
|
||||
ticks.verify(&start);
|
||||
assert!(ticks.verify(&start_hash));
|
||||
})
|
||||
}
|
||||
|
@ -49,7 +49,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let bank_forks = BankForks::new(0, bank0);
|
||||
let bank_forks = BankForks::new(bank0);
|
||||
let bank = bank_forks.working_bank();
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
|
@ -15,23 +15,34 @@ pub struct AccountsBackgroundService {
|
||||
}
|
||||
|
||||
const INTERVAL_MS: u64 = 100;
|
||||
const SHRUNKEN_ACCOUNT_PER_SEC: usize = 250;
|
||||
const SHRUNKEN_ACCOUNT_PER_INTERVAL: usize =
|
||||
SHRUNKEN_ACCOUNT_PER_SEC / (1000 / INTERVAL_MS as usize);
|
||||
const CLEAN_INTERVAL_SLOTS: u64 = 100;
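A standalone arithmetic check of the shrink budget derived from these constants (values copied from above; the check itself is illustrative only):

const INTERVAL_MS: u64 = 100;
const SHRUNKEN_ACCOUNT_PER_SEC: usize = 250;
const SHRUNKEN_ACCOUNT_PER_INTERVAL: usize =
    SHRUNKEN_ACCOUNT_PER_SEC / (1000 / INTERVAL_MS as usize);

fn main() {
    // 1000 ms / 100 ms = 10 intervals per second, so 250 / 10 = 25 accounts per interval.
    assert_eq!(SHRUNKEN_ACCOUNT_PER_INTERVAL, 25);
    println!("per-interval shrink budget: {}", SHRUNKEN_ACCOUNT_PER_INTERVAL);
}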
|
||||
|
||||
impl AccountsBackgroundService {
|
||||
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
|
||||
info!("AccountsBackgroundService active");
|
||||
let exit = exit.clone();
|
||||
let mut consumed_budget = 0;
|
||||
let mut last_cleaned_slot = 0;
|
||||
let t_background = Builder::new()
|
||||
.name("solana-accounts-background".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
|
||||
bank.process_dead_slots();
|
||||
|
||||
// Currently, given INTERVAL_MS, we process 1 slot/100 ms
|
||||
bank.process_stale_slot();
|
||||
consumed_budget = bank
|
||||
.process_stale_slot_with_budget(consumed_budget, SHRUNKEN_ACCOUNT_PER_INTERVAL);
|
||||
|
||||
if bank.block_height() - last_cleaned_slot > CLEAN_INTERVAL_SLOTS {
|
||||
bank.clean_accounts();
|
||||
last_cleaned_slot = bank.block_height();
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(INTERVAL_MS));
|
||||
})
|
||||
|
@ -177,6 +177,7 @@ mod tests {
|
||||
use crate::cluster_info::make_accounts_hashes_message;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use solana_ledger::bank_forks::CompressionType;
|
||||
use solana_ledger::snapshot_utils::SnapshotVersion;
|
||||
use solana_sdk::{
|
||||
hash::hash,
|
||||
signature::{Keypair, Signer},
|
||||
@ -239,6 +240,7 @@ mod tests {
|
||||
tar_output_file: PathBuf::from("."),
|
||||
storages: vec![],
|
||||
compression: CompressionType::Bzip2,
|
||||
snapshot_version: SnapshotVersion::default(),
|
||||
};
|
||||
|
||||
AccountsHashVerifier::process_accounts_package(
|
||||
|
core/src/bank_weight_fork_choice.rs (new file, 152 lines)
@ -0,0 +1,152 @@
|
||||
use crate::{
|
||||
consensus::{ComputedBankState, Tower},
|
||||
fork_choice::ForkChoice,
|
||||
progress_map::{ForkStats, ProgressMap},
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::timing;
|
||||
use std::time::Instant;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BankWeightForkChoice {}
|
||||
|
||||
impl ForkChoice for BankWeightForkChoice {
|
||||
fn compute_bank_stats(
|
||||
&mut self,
|
||||
bank: &Bank,
|
||||
_tower: &Tower,
|
||||
progress: &mut ProgressMap,
|
||||
computed_bank_state: &ComputedBankState,
|
||||
) {
|
||||
let bank_slot = bank.slot();
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let parent_weight = bank
|
||||
.parent()
|
||||
.and_then(|b| progress.get(&b.slot()))
|
||||
.map(|x| x.fork_stats.fork_weight)
|
||||
.unwrap_or(0);
|
||||
|
||||
let stats = progress
|
||||
.get_fork_stats_mut(bank_slot)
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
let ComputedBankState { bank_weight, .. } = computed_bank_state;
|
||||
stats.weight = *bank_weight;
|
||||
stats.fork_weight = stats.weight + parent_weight;
|
||||
}
|
||||
|
||||
// Returns:
|
||||
// 1) The heaviest overall bank
|
||||
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
|
||||
// switching proof to vote for)
|
||||
fn select_forks(
|
||||
&self,
|
||||
frozen_banks: &[Arc<Bank>],
|
||||
tower: &Tower,
|
||||
progress: &ProgressMap,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
_bank_forks: &RwLock<BankForks>,
|
||||
) -> (Arc<Bank>, Option<Arc<Bank>>) {
|
||||
let tower_start = Instant::now();
|
||||
assert!(!frozen_banks.is_empty());
|
||||
let num_frozen_banks = frozen_banks.len();
|
||||
|
||||
trace!("frozen_banks {}", frozen_banks.len());
|
||||
let num_old_banks = frozen_banks
|
||||
.iter()
|
||||
.filter(|b| b.slot() < tower.root().unwrap_or(0))
|
||||
.count();
|
||||
|
||||
let last_vote = tower.last_vote().slots.last().cloned();
|
||||
let mut heaviest_bank_on_same_fork = None;
|
||||
let mut heaviest_same_fork_weight = 0;
|
||||
let stats: Vec<&ForkStats> = frozen_banks
|
||||
.iter()
|
||||
.map(|bank| {
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let stats = progress
|
||||
.get_fork_stats(bank.slot())
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
if let Some(last_vote) = last_vote {
|
||||
if ancestors
|
||||
.get(&bank.slot())
|
||||
.expect("Entry in frozen banks must exist in ancestors")
|
||||
.contains(&last_vote)
|
||||
{
|
||||
// Descendant of last vote cannot be locked out
|
||||
assert!(!stats.is_locked_out);
|
||||
|
||||
// ancestors(slot) should not contain the slot itself,
|
||||
// so we should never get the same bank as the last vote
|
||||
assert_ne!(bank.slot(), last_vote);
|
||||
// highest weight, lowest slot first. frozen_banks is sorted
|
||||
// from least slot to greatest slot, so if two banks have
|
||||
// the same fork weight, the lower slot will be picked
|
||||
if stats.fork_weight > heaviest_same_fork_weight {
|
||||
heaviest_bank_on_same_fork = Some(bank.clone());
|
||||
heaviest_same_fork_weight = stats.fork_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stats
|
||||
})
|
||||
.collect();
|
||||
let num_not_recent = stats.iter().filter(|s| !s.is_recent).count();
|
||||
let num_has_voted = stats.iter().filter(|s| s.has_voted).count();
|
||||
let num_empty = stats.iter().filter(|s| s.is_empty).count();
|
||||
let num_threshold_failure = stats.iter().filter(|s| !s.vote_threshold).count();
|
||||
let num_votable_threshold_failure = stats
|
||||
.iter()
|
||||
.filter(|s| s.is_recent && !s.has_voted && !s.vote_threshold)
|
||||
.count();
|
||||
|
||||
let mut candidates: Vec<_> = frozen_banks.iter().zip(stats.iter()).collect();
|
||||
|
||||
//highest weight, lowest slot first
|
||||
candidates.sort_by_key(|b| (b.1.fork_weight, 0i64 - b.0.slot() as i64));
|
||||
let rv = candidates
|
||||
.last()
|
||||
.expect("frozen banks was nonempty so candidates must also be nonempty");
|
||||
let ms = timing::duration_as_ms(&tower_start.elapsed());
|
||||
let weights: Vec<(u128, u64, u64)> = candidates
|
||||
.iter()
|
||||
.map(|x| (x.1.weight, x.0.slot(), x.1.block_height))
|
||||
.collect();
|
||||
debug!(
|
||||
"@{:?} tower duration: {:?} len: {}/{} weights: {:?}",
|
||||
timing::timestamp(),
|
||||
ms,
|
||||
candidates.len(),
|
||||
stats.iter().filter(|s| !s.has_voted).count(),
|
||||
weights,
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-select_forks",
|
||||
("frozen_banks", num_frozen_banks as i64, i64),
|
||||
("not_recent", num_not_recent as i64, i64),
|
||||
("has_voted", num_has_voted as i64, i64),
|
||||
("old_banks", num_old_banks as i64, i64),
|
||||
("empty_banks", num_empty as i64, i64),
|
||||
("threshold_failure", num_threshold_failure as i64, i64),
|
||||
(
|
||||
"votable_threshold_failure",
|
||||
num_votable_threshold_failure as i64,
|
||||
i64
|
||||
),
|
||||
("tower_duration", ms as i64, i64),
|
||||
);
|
||||
|
||||
(rv.0.clone(), heaviest_bank_on_same_fork)
|
||||
}
|
||||
}
|
@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
|
||||
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;
|
||||
|
||||
/// Transaction forwarding
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;
|
||||
|
||||
// Fixed thread size seems to be fastest on GCP setup
|
||||
pub const NUM_THREADS: u32 = 4;
|
||||
@ -509,12 +509,12 @@ impl BankingStage {
|
||||
// expires.
|
||||
let txs = batch.transactions();
|
||||
let pre_balances = if transaction_status_sender.is_some() {
|
||||
bank.collect_balances(txs)
|
||||
bank.collect_balances(batch)
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let (mut loaded_accounts, results, mut retryable_txs, tx_count, signature_count) =
|
||||
bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE);
|
||||
bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE, None);
|
||||
load_execute_time.stop();
|
||||
|
||||
let freeze_lock = bank.freeze_lock();
|
||||
@ -545,10 +545,11 @@ impl BankingStage {
|
||||
.processing_results;
|
||||
|
||||
if let Some(sender) = transaction_status_sender {
|
||||
let post_balances = bank.collect_balances(txs);
|
||||
let post_balances = bank.collect_balances(batch);
|
||||
send_transaction_status_batch(
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
transaction_statuses,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
sender,
|
||||
|
@ -35,7 +35,7 @@ use std::{
|
||||
};
|
||||
|
||||
mod broadcast_fake_shreds_run;
|
||||
pub(crate) mod broadcast_metrics;
|
||||
pub mod broadcast_metrics;
|
||||
pub(crate) mod broadcast_utils;
|
||||
mod fail_entry_verification_broadcast_run;
|
||||
mod standard_broadcast_run;
|
||||
@ -374,13 +374,14 @@ pub fn broadcast_shreds(
|
||||
peers_and_stakes: &[(u64, usize)],
|
||||
peers: &[ContactInfo],
|
||||
last_datapoint_submit: &Arc<AtomicU64>,
|
||||
send_mmsg_total: &mut u64,
|
||||
transmit_stats: &mut TransmitShredsStats,
|
||||
) -> Result<()> {
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
if broadcast_len == 0 {
|
||||
update_peer_stats(1, 1, last_datapoint_submit);
|
||||
return Ok(());
|
||||
}
|
||||
let mut shred_select = Measure::start("shred_select");
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
@ -389,6 +390,8 @@ pub fn broadcast_shreds(
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
})
|
||||
.collect();
|
||||
shred_select.stop();
|
||||
transmit_stats.shred_select += shred_select.as_us();
|
||||
|
||||
let mut sent = 0;
|
||||
let mut send_mmsg_time = Measure::start("send_mmsg");
|
||||
@ -401,7 +404,7 @@ pub fn broadcast_shreds(
|
||||
}
|
||||
}
|
||||
send_mmsg_time.stop();
|
||||
*send_mmsg_total += send_mmsg_time.as_us();
|
||||
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
|
||||
|
||||
let num_live_peers = num_live_peers(&peers);
|
||||
update_peer_stats(
|
||||
|
@ -29,11 +29,12 @@ impl ProcessShredsStats {
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct TransmitShredsStats {
|
||||
pub(crate) transmit_elapsed: u64,
|
||||
pub(crate) send_mmsg_elapsed: u64,
|
||||
pub(crate) get_peers_elapsed: u64,
|
||||
pub(crate) num_shreds: usize,
|
||||
pub struct TransmitShredsStats {
|
||||
pub transmit_elapsed: u64,
|
||||
pub send_mmsg_elapsed: u64,
|
||||
pub get_peers_elapsed: u64,
|
||||
pub shred_select: u64,
|
||||
pub num_shreds: usize,
|
||||
}
|
||||
|
||||
impl BroadcastStats for TransmitShredsStats {
|
||||
@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
|
||||
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
|
||||
self.get_peers_elapsed += new_stats.get_peers_elapsed;
|
||||
self.num_shreds += new_stats.num_shreds;
|
||||
self.shred_select += new_stats.shred_select;
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
datapoint_info!(
|
||||
@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
|
||||
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
|
||||
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
("shred_select", self.shred_select as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -176,15 +179,16 @@ mod test {
    }

    #[test]
-   fn test_update() {
+   fn test_update_broadcast() {
        let start = Instant::now();
        let mut slot_broadcast_stats = SlotBroadcastStats::default();
        slot_broadcast_stats.update(
            &TransmitShredsStats {
                transmit_elapsed: 1,
-               get_peers_elapsed: 1,
-               send_mmsg_elapsed: 1,
-               num_shreds: 1,
+               get_peers_elapsed: 2,
+               send_mmsg_elapsed: 3,
+               shred_select: 4,
+               num_shreds: 5,
            },
            &Some(BroadcastShredBatchInfo {
                slot: 0,
@@ -198,16 +202,18 @@ mod test {
        assert_eq!(slot_0_stats.num_batches, 1);
        assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
        assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

        slot_broadcast_stats.update(
            &TransmitShredsStats {
-               transmit_elapsed: 1,
-               get_peers_elapsed: 1,
-               send_mmsg_elapsed: 1,
-               num_shreds: 1,
+               transmit_elapsed: 7,
+               get_peers_elapsed: 8,
+               send_mmsg_elapsed: 9,
+               shred_select: 10,
+               num_shreds: 11,
            },
            &None,
        );
@@ -217,9 +223,10 @@ mod test {
        assert_eq!(slot_0_stats.num_batches, 1);
        assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
        assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-       assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+       assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

        // If another batch is given, then total number of batches == num_expected_batches == 2,
        // so the batch should be purged from the HashMap
@@ -228,6 +235,7 @@ mod test {
                transmit_elapsed: 1,
                get_peers_elapsed: 1,
                send_mmsg_elapsed: 1,
+               shred_select: 1,
                num_shreds: 1,
            },
            &Some(BroadcastShredBatchInfo {
@@ -137,14 +137,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
        // Broadcast data
        let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);

-       let mut send_mmsg_total = 0;
        broadcast_shreds(
            sock,
            &shreds,
            &peers_and_stakes,
            &peers,
            &Arc::new(AtomicU64::new(0)),
-           &mut send_mmsg_total,
+           &mut TransmitShredsStats::default(),
        )?;

        Ok(())
@@ -9,6 +9,7 @@ use solana_ledger::{
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
+use std::sync::RwLock;
use std::time::Duration;

#[derive(Clone)]
@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
    shred_version: u16,
    last_datapoint_submit: Arc<AtomicU64>,
    num_batches: usize,
+   broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
+   last_peer_update: Arc<AtomicU64>,
}

+#[derive(Default)]
+struct BroadcastPeerCache {
+    peers: Vec<ContactInfo>,
+    peers_and_stakes: Vec<(u64, usize)>,
+}
+
impl StandardBroadcastRun {
@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
            shred_version,
            last_datapoint_submit: Arc::new(AtomicU64::new(0)),
            num_batches: 0,
+           broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
+           last_peer_update: Arc::new(AtomicU64::new(0)),
        }
    }

@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
        shreds: Arc<Vec<Shred>>,
        broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
    ) -> Result<()> {
+       const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
        trace!("Broadcasting {:?} shreds", shreds.len());
        // Get the list of peers to broadcast to
-       let get_peers_start = Instant::now();
-       let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
-       let get_peers_elapsed = get_peers_start.elapsed();
+       let mut get_peers_time = Measure::start("broadcast::get_peers");
+       let now = timestamp();
+       let last = self.last_peer_update.load(Ordering::Relaxed);
+       if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
+           && self
+               .last_peer_update
+               .compare_and_swap(now, last, Ordering::Relaxed)
+               == last
+       {
+           let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
+           let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
+           w_broadcast_peer_cache.peers = peers;
+           w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
+       }
+       get_peers_time.stop();
+       let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();

+       let mut transmit_stats = TransmitShredsStats::default();
        // Broadcast the shreds
-       let transmit_start = Instant::now();
-       let mut send_mmsg_total = 0;
+       let mut transmit_time = Measure::start("broadcast_shreds");
        broadcast_shreds(
            sock,
            &shreds,
-           &peers_and_stakes,
-           &peers,
+           &r_broadcast_peer_cache.peers_and_stakes,
+           &r_broadcast_peer_cache.peers,
            &self.last_datapoint_submit,
-           &mut send_mmsg_total,
+           &mut transmit_stats,
        )?;
-       let transmit_elapsed = transmit_start.elapsed();
-       let new_transmit_shreds_stats = TransmitShredsStats {
-           transmit_elapsed: duration_as_us(&transmit_elapsed),
-           get_peers_elapsed: duration_as_us(&get_peers_elapsed),
-           send_mmsg_elapsed: send_mmsg_total,
-           num_shreds: shreds.len(),
-       };
+       drop(r_broadcast_peer_cache);
+       transmit_time.stop();
+
+       transmit_stats.transmit_elapsed = transmit_time.as_us();
+       transmit_stats.get_peers_elapsed = get_peers_time.as_us();
+       transmit_stats.num_shreds = shreds.len();

        // Process metrics
-       self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
+       self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
        Ok(())
    }
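Note: the hunk above replaces the per-call `get_broadcast_peers` lookup with a cached peer list that is refreshed at most once per `BROADCAST_PEER_UPDATE_INTERVAL_MS`. A minimal, self-contained sketch of that throttled-refresh pattern is shown below; the names (`PeerCache`, `maybe_refresh`, `now_ms`) and the use of the non-deprecated `compare_exchange` in place of `compare_and_swap` are illustrative assumptions, not taken from the diff:

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc, RwLock,
};
use std::time::{SystemTime, UNIX_EPOCH};

const UPDATE_INTERVAL_MS: u64 = 1_000;

#[derive(Default)]
struct PeerCache {
    peers: Vec<String>, // stand-in for Vec<ContactInfo>
}

fn now_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

// Refresh the shared cache at most once per interval; only the thread that
// wins the compare_exchange on `last_update` pays the refresh cost, everyone
// else keeps reading the existing snapshot through the RwLock.
fn maybe_refresh(cache: &Arc<RwLock<PeerCache>>, last_update: &Arc<AtomicU64>) {
    let now = now_ms();
    let last = last_update.load(Ordering::Relaxed);
    if now.saturating_sub(last) > UPDATE_INTERVAL_MS
        && last_update
            .compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    {
        let mut w_cache = cache.write().unwrap();
        w_cache.peers = vec!["peer-a".to_string(), "peer-b".to_string()];
    }
}

fn main() {
    let cache = Arc::new(RwLock::new(PeerCache::default()));
    let last_update = Arc::new(AtomicU64::new(0));
    maybe_refresh(&cache, &last_update);
    println!("{} peers cached", cache.read().unwrap().peers.len());
}
```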
File diff suppressed because it is too large
@@ -1,16 +1,18 @@
use crate::{
    cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
-   consensus::VOTE_THRESHOLD_SIZE,
+   commitment::VOTE_THRESHOLD_SIZE,
+   consensus::PubkeyVotes,
    crds_value::CrdsValueLabel,
    poh_recorder::PohRecorder,
    pubkey_references::LockedPubkeyReferences,
+   replay_stage::ReplayVotesReceiver,
    result::{Error, Result},
    rpc_subscriptions::RpcSubscriptions,
    sigverify,
    verified_vote_packets::VerifiedVotePackets,
};
use crossbeam_channel::{
-   unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
+   unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select, Sender as CrossbeamSender,
};
use itertools::izip;
use log::*;
@@ -30,7 +32,7 @@ use solana_sdk::{
};
use solana_vote_program::vote_instruction::VoteInstruction;
use std::{
-   collections::{HashMap, HashSet},
+   collections::HashMap,
    sync::{
        atomic::{AtomicBool, Ordering},
        {Arc, Mutex, RwLock},
@@ -40,16 +42,22 @@ use std::{
};

// Map from a vote account to the authorized voter for an epoch
-pub type VerifiedVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
-pub type VerifiedVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
+pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
+pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
+pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec<Slot>)>;
+pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec<Slot>)>;

#[derive(Default)]
pub struct SlotVoteTracker {
-   voted: HashSet<Arc<Pubkey>>,
+   // Maps pubkeys that have voted for this slot
+   // to whether or not we've seen the vote on gossip.
+   // True if seen on gossip, false if only seen in replay.
+   voted: HashMap<Arc<Pubkey>, bool>,
    updates: Option<Vec<Arc<Pubkey>>>,
    total_stake: u64,
+   gossip_only_stake: u64,
}

impl SlotVoteTracker {
@@ -126,7 +134,7 @@ impl VoteTracker {

        let mut w_slot_vote_tracker = slot_vote_tracker.write().unwrap();

-       w_slot_vote_tracker.voted.insert(pubkey.clone());
+       w_slot_vote_tracker.voted.insert(pubkey.clone(), true);
        if let Some(ref mut updates) = w_slot_vote_tracker.updates {
            updates.push(pubkey.clone())
        } else {
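Note: `SlotVoteTracker::voted` changes from a set of pubkeys to a map whose value records whether the vote has been seen on gossip (`true`) or only in replay (`false`). A small illustrative sketch of that bookkeeping, using plain `String` keys instead of `Arc<Pubkey>` (the type name `SlotVotes` and the method names are assumptions for the example):

```rust
use std::collections::HashMap;

/// Tracks, per validator, whether its vote for a slot was seen on gossip.
#[derive(Default)]
struct SlotVotes {
    voted: HashMap<String, bool>,
}

impl SlotVotes {
    // A gossip vote always marks the key true.
    fn record_gossip_vote(&mut self, pubkey: &str) {
        self.voted.insert(pubkey.to_string(), true);
    }

    // A replay vote only inserts `false` if the key is absent, so it never
    // downgrades a vote that was already observed on gossip.
    fn record_replay_vote(&mut self, pubkey: &str) {
        self.voted.entry(pubkey.to_string()).or_insert(false);
    }
}

fn main() {
    let mut votes = SlotVotes::default();
    votes.record_replay_vote("validator-1");
    votes.record_gossip_vote("validator-1");
    votes.record_replay_vote("validator-1");
    // Still true: the later replay vote did not overwrite the gossip flag.
    assert_eq!(votes.voted.get("validator-1"), Some(&true));
}
```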
@ -202,15 +210,18 @@ impl ClusterInfoVoteListener {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
sender: CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: CrossbeamSender<Vec<Packets>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Self {
|
||||
let exit_ = exit.clone();
|
||||
|
||||
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
|
||||
let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) =
|
||||
unbounded();
|
||||
let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded();
|
||||
let listen_thread = Builder::new()
|
||||
.name("solana-cluster_info_vote_listener".to_string())
|
||||
@ -218,7 +229,7 @@ impl ClusterInfoVoteListener {
|
||||
let _ = Self::recv_loop(
|
||||
exit_,
|
||||
&cluster_info,
|
||||
verified_vote_packets_sender,
|
||||
verified_vote_label_packets_sender,
|
||||
verified_vote_transactions_sender,
|
||||
);
|
||||
})
|
||||
@ -231,9 +242,9 @@ impl ClusterInfoVoteListener {
|
||||
.spawn(move || {
|
||||
let _ = Self::bank_send_loop(
|
||||
exit_,
|
||||
verified_vote_packets_receiver,
|
||||
verified_vote_label_packets_receiver,
|
||||
poh_recorder,
|
||||
&sender,
|
||||
&verified_packets_sender,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@ -248,6 +259,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker,
|
||||
&bank_forks,
|
||||
subscriptions,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@ -267,7 +280,7 @@ impl ClusterInfoVoteListener {
|
||||
fn recv_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: &ClusterInfo,
|
||||
verified_vote_packets_sender: VerifiedVotePacketsSender,
|
||||
verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender,
|
||||
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
|
||||
) -> Result<()> {
|
||||
let mut last_ts = 0;
|
||||
@ -282,7 +295,7 @@ impl ClusterInfoVoteListener {
|
||||
if !votes.is_empty() {
|
||||
let (vote_txs, packets) = Self::verify_votes(votes, labels);
|
||||
verified_vote_transactions_sender.send(vote_txs)?;
|
||||
verified_vote_packets_sender.send(packets)?;
|
||||
verified_vote_label_packets_sender.send(packets)?;
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
|
||||
@ -322,9 +335,9 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
fn bank_send_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
verified_vote_packets_receiver: VerifiedVotePacketsReceiver,
|
||||
verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
) -> Result<()> {
|
||||
let mut verified_vote_packets = VerifiedVotePackets::default();
|
||||
let mut time_since_lock = Instant::now();
|
||||
@ -334,9 +347,10 @@ impl ClusterInfoVoteListener {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(e) = verified_vote_packets
|
||||
.get_and_process_vote_packets(&verified_vote_packets_receiver, &mut update_version)
|
||||
{
|
||||
if let Err(e) = verified_vote_packets.get_and_process_vote_packets(
|
||||
&verified_vote_label_packets_receiver,
|
||||
&mut update_version,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
@ -353,7 +367,7 @@ impl ClusterInfoVoteListener {
|
||||
if let Some(bank) = bank {
|
||||
let last_version = bank.last_vote_sync.load(Ordering::Relaxed);
|
||||
let (new_version, msgs) = verified_vote_packets.get_latest_votes(last_version);
|
||||
packets_sender.send(msgs)?;
|
||||
verified_packets_sender.send(msgs)?;
|
||||
bank.last_vote_sync.compare_and_swap(
|
||||
last_version,
|
||||
new_version,
|
||||
@ -371,6 +385,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@ -380,19 +396,18 @@ impl ClusterInfoVoteListener {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
|
||||
|
||||
if let Err(e) = Self::get_and_process_votes(
|
||||
&vote_txs_receiver,
|
||||
&vote_tracker,
|
||||
root_bank.slot(),
|
||||
subscriptions.clone(),
|
||||
epoch_stakes,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
}
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::ReadyTimeoutError => (),
|
||||
_ => {
|
||||
error!("thread {:?} error {:?}", thread::current().name(), e);
|
||||
}
|
||||
@ -407,6 +422,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
Self::get_and_process_votes(
|
||||
vote_txs_receiver,
|
||||
@ -414,6 +431,8 @@ impl ClusterInfoVoteListener {
|
||||
last_root,
|
||||
subscriptions,
|
||||
None,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
)
|
||||
}
|
||||
|
||||
@ -423,19 +442,41 @@ impl ClusterInfoVoteListener {
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
|
||||
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
|
||||
vote_txs.extend(new_txs);
|
||||
let mut sel = Select::new();
|
||||
sel.recv(vote_txs_receiver);
|
||||
sel.recv(replay_votes_receiver);
|
||||
let mut remaining_wait_time = 200;
|
||||
loop {
|
||||
if remaining_wait_time == 0 {
|
||||
break;
|
||||
}
|
||||
let start = Instant::now();
|
||||
// Wait for one of the receivers to be ready. `ready_timeout`
|
||||
// will return if channels either have something, or are
|
||||
// disconnected. `ready_timeout` can wake up spuriously,
|
||||
// hence the loop
|
||||
let _ = sel.ready_timeout(Duration::from_millis(remaining_wait_time))?;
|
||||
let vote_txs: Vec<_> = vote_txs_receiver.try_iter().flatten().collect();
|
||||
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
|
||||
if !vote_txs.is_empty() || !replay_votes.is_empty() {
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
verified_vote_sender,
|
||||
&replay_votes,
|
||||
);
|
||||
break;
|
||||
} else {
|
||||
remaining_wait_time = remaining_wait_time
|
||||
.saturating_sub(std::cmp::max(start.elapsed().as_millis() as u64, 1));
|
||||
}
|
||||
}
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -445,10 +486,11 @@ impl ClusterInfoVoteListener {
|
||||
root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes: &[Arc<PubkeyVotes>],
|
||||
) {
|
||||
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
|
||||
let mut diff: HashMap<Slot, HashMap<Arc<Pubkey>, bool>> = HashMap::new();
|
||||
{
|
||||
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
|
||||
for tx in vote_txs {
|
||||
if let (Some(vote_pubkey), Some(vote_instruction)) = tx
|
||||
.message
|
||||
@ -502,25 +544,33 @@ impl ClusterInfoVoteListener {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't insert if we already have marked down this pubkey
|
||||
// voting for this slot
|
||||
let maybe_slot_tracker =
|
||||
all_slot_trackers.read().unwrap().get(&slot).cloned();
|
||||
if let Some(slot_tracker) = maybe_slot_tracker {
|
||||
if slot_tracker.read().unwrap().voted.contains(vote_pubkey) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(vote_pubkey);
|
||||
diff.entry(slot).or_default().insert(unduplicated_pubkey);
|
||||
diff.entry(slot)
|
||||
.or_default()
|
||||
.insert(unduplicated_pubkey, true);
|
||||
}
|
||||
|
||||
subscriptions.notify_vote(&vote);
|
||||
let _ = verified_vote_sender.send((*vote_pubkey, vote.slots));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, slot_diff) in diff {
|
||||
// Process the replay votes
|
||||
for votes in replay_votes {
|
||||
for (pubkey, slot) in votes.iter() {
|
||||
if *slot <= root {
|
||||
continue;
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(pubkey);
|
||||
diff.entry(*slot)
|
||||
.or_default()
|
||||
.entry(unduplicated_pubkey)
|
||||
.or_default();
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, mut slot_diff) in diff {
|
||||
let slot_tracker = vote_tracker
|
||||
.slot_vote_trackers
|
||||
.read()
|
||||
@ -528,15 +578,55 @@ impl ClusterInfoVoteListener {
|
||||
.get(&slot)
|
||||
.cloned();
|
||||
if let Some(slot_tracker) = slot_tracker {
|
||||
{
|
||||
let r_slot_tracker = slot_tracker.read().unwrap();
|
||||
// Only keep the pubkeys we haven't seen voting for this slot
|
||||
slot_diff.retain(|pubkey, seen_in_gossip_above| {
|
||||
let seen_in_gossip_previously = r_slot_tracker.voted.get(pubkey);
|
||||
let is_new = seen_in_gossip_previously.is_none();
|
||||
if is_new && !*seen_in_gossip_above {
|
||||
// If this vote wasn't seen in gossip, then it must be a
|
||||
// replay vote, and we haven't sent a notification for
|
||||
// those yet
|
||||
let _ = verified_vote_sender.send((**pubkey, vec![slot]));
|
||||
}
|
||||
|
||||
// `is_new_from_gossip` means we observed a vote for this slot
|
||||
// for the first time in gossip
|
||||
let is_new_from_gossip =
|
||||
!seen_in_gossip_previously.cloned().unwrap_or(false)
|
||||
&& *seen_in_gossip_above;
|
||||
is_new || is_new_from_gossip
|
||||
});
|
||||
}
|
||||
let mut w_slot_tracker = slot_tracker.write().unwrap();
|
||||
if w_slot_tracker.updates.is_none() {
|
||||
w_slot_tracker.updates = Some(vec![]);
|
||||
}
|
||||
let mut current_stake = 0;
|
||||
for pubkey in slot_diff {
|
||||
Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
|
||||
let mut gossip_only_stake = 0;
|
||||
for (pubkey, seen_in_gossip_above) in slot_diff {
|
||||
let is_new = !w_slot_tracker.voted.contains_key(&pubkey);
|
||||
Self::sum_stake(
|
||||
&mut current_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
// By this point we know if the vote was seen in gossip above,
|
||||
// it was not seen in gossip at any point in the past, so it's
|
||||
// safe to pass this in here as an overall indicator of whether
|
||||
// this vote is new
|
||||
seen_in_gossip_above,
|
||||
is_new,
|
||||
);
|
||||
|
||||
w_slot_tracker.voted.insert(pubkey.clone());
|
||||
// From the `slot_diff.retain` earlier, we know because there are
|
||||
// no other writers to `slot_vote_tracker` that
|
||||
// `is_new || is_new_from_gossip`. In both cases we want to record
|
||||
// `is_new_from_gossip` for the `pubkey` entry.
|
||||
w_slot_tracker
|
||||
.voted
|
||||
.insert(pubkey.clone(), seen_in_gossip_above);
|
||||
w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
|
||||
}
|
||||
Self::notify_for_stake_change(
|
||||
@ -547,20 +637,33 @@ impl ClusterInfoVoteListener {
|
||||
slot,
|
||||
);
|
||||
w_slot_tracker.total_stake += current_stake;
|
||||
w_slot_tracker.gossip_only_stake += gossip_only_stake
|
||||
} else {
|
||||
let mut total_stake = 0;
|
||||
let voted: HashSet<_> = slot_diff
|
||||
let mut gossip_only_stake = 0;
|
||||
let voted: HashMap<_, _> = slot_diff
|
||||
.into_iter()
|
||||
.map(|pubkey| {
|
||||
Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
|
||||
pubkey
|
||||
.map(|(pubkey, seen_in_gossip_above)| {
|
||||
if !seen_in_gossip_above {
|
||||
let _ = verified_vote_sender.send((*pubkey, vec![slot]));
|
||||
}
|
||||
Self::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
seen_in_gossip_above,
|
||||
true,
|
||||
);
|
||||
(pubkey, seen_in_gossip_above)
|
||||
})
|
||||
.collect();
|
||||
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
|
||||
let new_slot_tracker = SlotVoteTracker {
|
||||
voted: voted.clone(),
|
||||
updates: Some(voted.into_iter().collect()),
|
||||
updates: Some(voted.keys().cloned().collect()),
|
||||
voted,
|
||||
total_stake,
|
||||
gossip_only_stake,
|
||||
};
|
||||
vote_tracker
|
||||
.slot_vote_trackers
|
||||
@@ -588,10 +691,26 @@ impl ClusterInfoVoteListener {
        }
    }

-   fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
+   fn sum_stake(
+       sum: &mut u64,
+       gossip_only_stake: &mut u64,
+       epoch_stakes: Option<&EpochStakes>,
+       pubkey: &Pubkey,
+       is_new_from_gossip: bool,
+       is_new: bool,
+   ) {
+       if !is_new_from_gossip && !is_new {
+           return;
+       }
+
        if let Some(stakes) = epoch_stakes {
            if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
-               *sum += vote_account.0;
+               if is_new {
+                   *sum += vote_account.0;
+               }
+               if is_new_from_gossip {
+                   *gossip_only_stake += vote_account.0;
+               }
            }
        }
    }
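Note: `sum_stake` now splits the accounting: a vote adds to the slot's total stake only the first time the pubkey is seen voting at all (`is_new`), and adds to `gossip_only_stake` only when it is newly observed on gossip (`is_new_from_gossip`). A hedged, standalone sketch of that rule, with the stake amount passed in directly instead of being looked up from `EpochStakes`:

```rust
fn sum_stake(
    total_stake: &mut u64,
    gossip_only_stake: &mut u64,
    stake: u64,
    is_new_from_gossip: bool,
    is_new: bool,
) {
    if !is_new_from_gossip && !is_new {
        return;
    }
    if is_new {
        *total_stake += stake;
    }
    if is_new_from_gossip {
        *gossip_only_stake += stake;
    }
}

fn main() {
    let (mut total, mut gossip_only) = (0u64, 0u64);
    // First seen in replay only: counts toward total, not gossip-only.
    sum_stake(&mut total, &mut gossip_only, 100, false, true);
    // Later seen on gossip for the first time: counts toward gossip-only.
    sum_stake(&mut total, &mut gossip_only, 100, true, false);
    assert_eq!((total, gossip_only), (100, 100));
}
```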
@ -611,6 +730,7 @@ mod tests {
|
||||
use solana_sdk::signature::Signature;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
#[test]
|
||||
fn test_max_vote_tx_fits() {
|
||||
@ -783,8 +903,11 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slots = vec![1, 2];
|
||||
let replay_vote_slots = vec![3, 4];
|
||||
validator_voting_keypairs.iter().for_each(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
@ -797,6 +920,15 @@ mod tests {
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx]).unwrap();
|
||||
for vote_slot in &replay_vote_slots {
|
||||
// Send twice, should only expect to be notified once later
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
@ -806,14 +938,42 @@ mod tests {
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
for vote_slot in vote_slots {
|
||||
|
||||
// Check that the received votes were pushed to other commponents
|
||||
// subscribing via `verified_vote_receiver`
|
||||
let all_expected_slots: BTreeSet<_> = vote_slots
|
||||
.into_iter()
|
||||
.chain(replay_vote_slots.into_iter())
|
||||
.collect();
|
||||
let mut pubkey_to_votes: HashMap<Pubkey, BTreeSet<Slot>> = HashMap::new();
|
||||
for (received_pubkey, new_votes) in verified_vote_receiver.try_iter() {
|
||||
let already_received_votes = pubkey_to_votes.entry(received_pubkey).or_default();
|
||||
for new_vote in new_votes {
|
||||
// `new_vote` should only be received once
|
||||
assert!(already_received_votes.insert(new_vote));
|
||||
}
|
||||
}
|
||||
assert_eq!(pubkey_to_votes.len(), validator_voting_keypairs.len());
|
||||
for keypairs in &validator_voting_keypairs {
|
||||
assert_eq!(
|
||||
*pubkey_to_votes
|
||||
.get(&keypairs.vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
all_expected_slots
|
||||
);
|
||||
}
|
||||
|
||||
// Check the vote trackers were updated correctly
|
||||
for vote_slot in all_expected_slots {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in &validator_voting_keypairs {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@ -828,14 +988,18 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
// Send some votes to process
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (votes_txs_sender, votes_txs_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (_replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let mut expected_votes = vec![];
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let validator_votes: Vec<_> = keyset
|
||||
.iter()
|
||||
.map(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
expected_votes.push((vote_keypair.pubkey(), vec![i as Slot + 1]));
|
||||
vote_transaction::new_vote_transaction(
|
||||
vec![i as u64 + 1],
|
||||
Hash::default(),
|
||||
@ -846,24 +1010,38 @@ mod tests {
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
votes_sender.send(validator_votes).unwrap();
|
||||
votes_txs_sender.send(validator_votes).unwrap();
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
// Read and process votes from channel `votes_receiver`
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&votes_txs_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check that the received votes were pushed to other commponents
|
||||
// subscribing via a channel
|
||||
let received_votes: Vec<_> = verified_vote_receiver.try_iter().collect();
|
||||
assert_eq!(received_votes.len(), validator_voting_keypairs.len());
|
||||
for (expected_pubkey_vote, received_pubkey_vote) in
|
||||
expected_votes.iter().zip(received_votes.iter())
|
||||
{
|
||||
assert_eq!(expected_pubkey_vote, received_pubkey_vote);
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in keyset {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@ -873,6 +1051,79 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_votes3() {
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slot = 1;
|
||||
|
||||
// Events:
|
||||
// 0: Send gossip vote
|
||||
// 1: Send replay vote
|
||||
// 2: Send both
|
||||
let ordered_events = vec![
|
||||
vec![0],
|
||||
vec![1],
|
||||
vec![0, 1],
|
||||
vec![1, 0],
|
||||
vec![2],
|
||||
vec![0, 1, 2],
|
||||
vec![1, 0, 2],
|
||||
];
|
||||
for events in ordered_events {
|
||||
let (vote_tracker, bank, validator_voting_keypairs, subscriptions) = setup();
|
||||
let node_keypair = &validator_voting_keypairs[0].node_keypair;
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
for &e in &events {
|
||||
if e == 0 || e == 2 {
|
||||
// Create vote transaction
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![vote_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
node_keypair,
|
||||
vote_keypair,
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx.clone()]).unwrap();
|
||||
}
|
||||
if e == 1 || e == 2 {
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
let _ = ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
Some(
|
||||
// Make sure `epoch_stakes` exists for this slot by unwrapping
|
||||
bank.epoch_stakes(bank.epoch_schedule().get_epoch(vote_slot))
|
||||
.unwrap(),
|
||||
),
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
);
|
||||
}
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
|
||||
if events == vec![1] {
|
||||
// Check `gossip_only_stake` is not incremented
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 0);
|
||||
} else {
|
||||
// Check that both the `gossip_only_stake` and `total_stake` both
|
||||
// increased
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_voters_by_epoch() {
|
||||
// Create some voters at genesis
|
||||
@ -936,19 +1187,19 @@ mod tests {
|
||||
let ref_count_per_vote = 2;
|
||||
|
||||
// Create some voters at genesis
|
||||
let validator_voting_keypairs: Vec<_> = (0..2)
|
||||
let validator_keypairs: Vec<_> = (0..2)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_voting_keypairs,
|
||||
&validator_keypairs,
|
||||
100,
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank_forks = BankForks::new(0, bank);
|
||||
let bank_forks = BankForks::new(bank);
|
||||
let bank = bank_forks.get(0).unwrap().clone();
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
@ -957,16 +1208,17 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
// Send a vote to process, should add a reference to the pubkey for that voter
|
||||
// in the tracker
|
||||
let validator0_keypairs = &validator_voting_keypairs[0];
|
||||
let validator0_keypairs = &validator_keypairs[0];
|
||||
let voted_slot = bank.slot() + 1;
|
||||
let vote_tx = vec![vote_transaction::new_vote_transaction(
|
||||
// Must vote > root to be processed
|
||||
vec![bank.slot() + 1],
|
||||
vec![voted_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&validator0_keypairs.node_keypair,
|
||||
@ -974,12 +1226,19 @@ mod tests {
|
||||
&validator0_keypairs.vote_keypair,
|
||||
)];
|
||||
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_tx,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
// Add vote for same slot, should not affect outcome
|
||||
&[Arc::new(vec![(
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
voted_slot,
|
||||
)])],
|
||||
);
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
@ -1014,8 +1273,9 @@ mod tests {
|
||||
// Test with votes across two epochs
|
||||
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
|
||||
|
||||
// Make 2 new votes in two different epochs, ref count should go up
|
||||
// by 2 * ref_count_per_vote
|
||||
// Make 2 new votes in two different epochs for the same pubkey,
|
||||
// the ref count should go up by 3 * ref_count_per_vote
|
||||
// Add 1 vote through the replay channel, ref count should
|
||||
let vote_txs: Vec<_> = [bank.slot() + 2, first_slot_in_new_epoch]
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
@ -1031,8 +1291,32 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_txs,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&[Arc::new(vec![(
|
||||
validator_keypairs[1].vote_keypair.pubkey(),
|
||||
first_slot_in_new_epoch,
|
||||
)])],
|
||||
);
|
||||
|
||||
// Check new replay vote pubkey first
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
.0
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&validator_keypairs[1].vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(ref_count, current_ref_count);
|
||||
|
||||
// Check the existing pubkey
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
@ -1064,7 +1348,7 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank_forks = BankForks::new(0, bank);
|
||||
let bank_forks = BankForks::new(bank);
|
||||
let bank = bank_forks.get(0).unwrap().clone();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
@ -1072,7 +1356,7 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
@ -1158,4 +1442,78 @@ mod tests {
|
||||
assert_eq!(vote_txs.len(), 2);
|
||||
verify_packets_len(&packets, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum_stake() {
|
||||
let (_, bank, validator_voting_keypairs, _) = setup();
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
let epoch_stakes = bank.epoch_stakes(bank.epoch()).unwrap();
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both true, both fields
|
||||
// should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both false, none should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new`, but not `is_new_from_gossip` then
|
||||
// `total_stake` will increase, but `gossip_only_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new_from_gossip`, but not `is_new` then
|
||||
// `gossip_only_stake` will increase, but `total_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
|
@ -1,26 +1,10 @@
|
||||
use crate::{consensus::VOTE_THRESHOLD_SIZE, rpc_subscriptions::RpcSubscriptions};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct CacheSlotInfo {
|
||||
pub current_slot: Slot,
|
||||
pub node_root: Slot,
|
||||
pub largest_confirmed_root: Slot,
|
||||
pub highest_confirmed_slot: Slot,
|
||||
}
|
||||
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
|
||||
|
||||
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
|
||||
|
||||
@ -59,9 +43,9 @@ pub struct BlockCommitmentCache {
|
||||
largest_confirmed_root: Slot,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
pub blockstore: Arc<Blockstore>,
|
||||
root: Slot,
|
||||
highest_confirmed_slot: Slot,
|
||||
pub highest_confirmed_slot: Slot,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for BlockCommitmentCache {
|
||||
@ -151,7 +135,7 @@ impl BlockCommitmentCache {
|
||||
self.root
|
||||
}
|
||||
|
||||
fn calculate_highest_confirmed_slot(&self) -> Slot {
|
||||
pub fn calculate_highest_confirmed_slot(&self) -> Slot {
|
||||
self.highest_slot_with_confirmation_count(1)
|
||||
}
|
||||
|
||||
@ -219,222 +203,11 @@ impl BlockCommitmentCache {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CommitmentAggregationData {
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
}
|
||||
|
||||
impl CommitmentAggregationData {
|
||||
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
|
||||
Self {
|
||||
bank,
|
||||
root,
|
||||
total_staked,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
|
||||
rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
|
||||
let mut stake_sum = 0;
|
||||
for (root, stake) in rooted_stake {
|
||||
stake_sum += stake;
|
||||
if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
|
||||
return root;
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
pub struct AggregateCommitmentService {
|
||||
t_commitment: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl AggregateCommitmentService {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
) -> (Sender<CommitmentAggregationData>, Self) {
|
||||
let (sender, receiver): (
|
||||
Sender<CommitmentAggregationData>,
|
||||
Receiver<CommitmentAggregationData>,
|
||||
) = channel();
|
||||
let exit_ = exit.clone();
|
||||
(
|
||||
sender,
|
||||
Self {
|
||||
t_commitment: Builder::new()
|
||||
.name("solana-aggregate-stake-lockouts".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit_.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
if let Err(RecvTimeoutError::Disconnected) =
|
||||
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
|
||||
{
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn run(
|
||||
receiver: &Receiver<CommitmentAggregationData>,
|
||||
block_commitment_cache: &RwLock<BlockCommitmentCache>,
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
while let Ok(new_data) = receiver.try_recv() {
|
||||
aggregation_data = new_data;
|
||||
}
|
||||
|
||||
let ancestors = aggregation_data.bank.status_cache_ancestors();
|
||||
if ancestors.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
aggregate_commitment_time.stop();
|
||||
datapoint_info!(
|
||||
"block-commitment-cache",
|
||||
(
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as i64,
|
||||
i64
|
||||
)
|
||||
);
|
||||
|
||||
subscriptions.notify_subscribers(CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn aggregate_commitment(
|
||||
ancestors: &[Slot],
|
||||
bank: &Bank,
|
||||
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
|
||||
assert!(!ancestors.is_empty());
|
||||
|
||||
// Check ancestors is sorted
|
||||
for a in ancestors.windows(2) {
|
||||
assert!(a[0] < a[1]);
|
||||
}
|
||||
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
|
||||
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let vote_state = vote_state.unwrap();
|
||||
Self::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
ancestors,
|
||||
lamports,
|
||||
);
|
||||
}
|
||||
|
||||
(commitment, rooted_stake)
|
||||
}
|
||||
|
||||
fn aggregate_commitment_for_vote_account(
|
||||
commitment: &mut HashMap<Slot, BlockCommitment>,
|
||||
rooted_stake: &mut Vec<(Slot, u64)>,
|
||||
vote_state: &VoteState,
|
||||
ancestors: &[Slot],
|
||||
lamports: u64,
|
||||
) {
|
||||
assert!(!ancestors.is_empty());
|
||||
let mut ancestors_index = 0;
|
||||
if let Some(root) = vote_state.root_slot {
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
commitment
|
||||
.entry(*a)
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_rooted_stake(lamports);
|
||||
} else {
|
||||
ancestors_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rooted_stake.push((root, lamports));
|
||||
}
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
while ancestors[ancestors_index] <= vote.slot {
|
||||
commitment
|
||||
.entry(ancestors[ancestors_index])
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_confirmation_stake(vote.confirmation_count as usize, lamports);
|
||||
ancestors_index += 1;
|
||||
|
||||
if ancestors_index == ancestors.len() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_commitment.join()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_ledger::{
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_sdk::{genesis_config::GenesisConfig, pubkey::Pubkey};
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_block_commitment() {
|
||||
@ -512,21 +285,6 @@ mod tests {
|
||||
assert!(!block_commitment_cache.is_confirmed_rooted(3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_largest_confirmed_root() {
|
||||
assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((0, 5));
|
||||
rooted_stake.push((1, 5));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((1, 5));
|
||||
rooted_stake.push((0, 10));
|
||||
rooted_stake.push((2, 5));
|
||||
rooted_stake.push((1, 4));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_highest_confirmed_slot() {
|
||||
let bank = Arc::new(Bank::new(&GenesisConfig::default()));
|
||||
@ -634,211 +392,4 @@ mod tests {
|
||||
|
||||
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_1() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = *ancestors.last().unwrap();
|
||||
vote_state.root_slot = Some(root);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_2() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_3() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
assert!(ancestors[4] + 2 >= ancestors[6]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[4]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[6]);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 4 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 6 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_validity() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config, ..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let rooted_stake_amount = 40;
|
||||
|
||||
let sk1 = Pubkey::new_rand();
|
||||
let pk1 = Pubkey::new_rand();
|
||||
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
|
||||
let stake_account1 =
|
||||
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
|
||||
let sk2 = Pubkey::new_rand();
|
||||
let pk2 = Pubkey::new_rand();
|
||||
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
|
||||
let stake_account2 =
|
||||
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
|
||||
let sk3 = Pubkey::new_rand();
|
||||
let pk3 = Pubkey::new_rand();
|
||||
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account3 = stake_state::create_account(
|
||||
&sk3,
|
||||
&pk3,
|
||||
&vote_account3,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
let sk4 = Pubkey::new_rand();
|
||||
let pk4 = Pubkey::new_rand();
|
||||
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account4 = stake_state::create_account(
|
||||
&sk4,
|
||||
&pk4,
|
||||
&vote_account4,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
|
||||
genesis_config.accounts.extend(vec![
|
||||
(pk1, vote_account1.clone()),
|
||||
(sk1, stake_account1),
|
||||
(pk2, vote_account2.clone()),
|
||||
(sk2, stake_account2),
|
||||
(pk3, vote_account3.clone()),
|
||||
(sk3, stake_account3),
|
||||
(pk4, vote_account4.clone()),
|
||||
(sk4, stake_account4),
|
||||
]);
|
||||
|
||||
// Create bank
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
|
||||
vote_state1.process_slot_vote_unchecked(3);
|
||||
vote_state1.process_slot_vote_unchecked(5);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state1));
|
||||
VoteState::to(&versioned, &mut vote_account1).unwrap();
|
||||
bank.store_account(&pk1, &vote_account1);
|
||||
|
||||
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
|
||||
vote_state2.process_slot_vote_unchecked(9);
|
||||
vote_state2.process_slot_vote_unchecked(10);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state2));
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
let (commitment, rooted_stake) =
|
||||
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= 3 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 150);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 5 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 100);
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 9 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 10 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
assert!(commitment.get(&a).is_none());
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
}
|
||||
|
core/src/commitment_service.rs (new file, 454 lines)
@@ -0,0 +1,454 @@
|
||||
use crate::{
|
||||
commitment::{BlockCommitment, BlockCommitmentCache, VOTE_THRESHOLD_SIZE},
|
||||
rpc_subscriptions::{CacheSlotInfo, RpcSubscriptions},
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::vote_state::VoteState;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub struct CommitmentAggregationData {
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
}
|
||||
|
||||
impl CommitmentAggregationData {
|
||||
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
|
||||
Self {
|
||||
bank,
|
||||
root,
|
||||
total_staked,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
|
||||
rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
|
||||
let mut stake_sum = 0;
|
||||
for (root, stake) in rooted_stake {
|
||||
stake_sum += stake;
|
||||
if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
|
||||
return root;
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
pub struct AggregateCommitmentService {
|
||||
t_commitment: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl AggregateCommitmentService {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
) -> (Sender<CommitmentAggregationData>, Self) {
|
||||
let (sender, receiver): (
|
||||
Sender<CommitmentAggregationData>,
|
||||
Receiver<CommitmentAggregationData>,
|
||||
) = channel();
|
||||
let exit_ = exit.clone();
|
||||
(
|
||||
sender,
|
||||
Self {
|
||||
t_commitment: Builder::new()
|
||||
.name("solana-aggregate-stake-lockouts".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit_.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
if let Err(RecvTimeoutError::Disconnected) =
|
||||
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
|
||||
{
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn run(
|
||||
receiver: &Receiver<CommitmentAggregationData>,
|
||||
block_commitment_cache: &RwLock<BlockCommitmentCache>,
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
while let Ok(new_data) = receiver.try_recv() {
|
||||
aggregation_data = new_data;
|
||||
}
|
||||
|
||||
let ancestors = aggregation_data.bank.status_cache_ancestors();
|
||||
if ancestors.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
aggregate_commitment_time.stop();
|
||||
datapoint_info!(
|
||||
"block-commitment-cache",
|
||||
(
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as i64,
|
||||
i64
|
||||
)
|
||||
);
|
||||
|
||||
subscriptions.notify_subscribers(CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn aggregate_commitment(
|
||||
ancestors: &[Slot],
|
||||
bank: &Bank,
|
||||
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
|
||||
assert!(!ancestors.is_empty());
|
||||
|
||||
// Check ancestors is sorted
|
||||
for a in ancestors.windows(2) {
|
||||
assert!(a[0] < a[1]);
|
||||
}
|
||||
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
|
||||
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let vote_state = vote_state.unwrap();
|
||||
Self::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
ancestors,
|
||||
lamports,
|
||||
);
|
||||
}
|
||||
|
||||
(commitment, rooted_stake)
|
||||
}
|
||||
|
||||
fn aggregate_commitment_for_vote_account(
|
||||
commitment: &mut HashMap<Slot, BlockCommitment>,
|
||||
rooted_stake: &mut Vec<(Slot, u64)>,
|
||||
vote_state: &VoteState,
|
||||
ancestors: &[Slot],
|
||||
lamports: u64,
|
||||
) {
|
||||
assert!(!ancestors.is_empty());
|
||||
let mut ancestors_index = 0;
|
||||
if let Some(root) = vote_state.root_slot {
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
commitment
|
||||
.entry(*a)
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_rooted_stake(lamports);
|
||||
} else {
|
||||
ancestors_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rooted_stake.push((root, lamports));
|
||||
}
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
while ancestors[ancestors_index] <= vote.slot {
|
||||
commitment
|
||||
.entry(ancestors[ancestors_index])
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_confirmation_stake(vote.confirmation_count as usize, lamports);
|
||||
ancestors_index += 1;
|
||||
|
||||
if ancestors_index == ancestors.len() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
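Concretely, this is what the tests below exercise: with ancestors [3, 4, 5, 7, 9, 11], a vote account holding 5 lamports whose root slot is 5 and whose latest vote is on slot 11 adds rooted stake of 5 to slots 3, 4, and 5, adds confirmation stake (confirmation count 1) of 5 to slots 7, 9, and 11, and pushes (5, 5) onto rooted_stake; see test_aggregate_commitment_for_vote_account_2.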
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_commitment.join()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_get_largest_confirmed_root() {
|
||||
assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((0, 5));
|
||||
rooted_stake.push((1, 5));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((1, 5));
|
||||
rooted_stake.push((0, 10));
|
||||
rooted_stake.push((2, 5));
|
||||
rooted_stake.push((1, 4));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_1() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = *ancestors.last().unwrap();
|
||||
vote_state.root_slot = Some(root);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_2() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_3() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
assert!(ancestors[4] + 2 >= ancestors[6]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[4]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[6]);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 4 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 6 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_validity() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config, ..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let rooted_stake_amount = 40;
|
||||
|
||||
let sk1 = Pubkey::new_rand();
|
||||
let pk1 = Pubkey::new_rand();
|
||||
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
|
||||
let stake_account1 =
|
||||
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
|
||||
let sk2 = Pubkey::new_rand();
|
||||
let pk2 = Pubkey::new_rand();
|
||||
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
|
||||
let stake_account2 =
|
||||
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
|
||||
let sk3 = Pubkey::new_rand();
|
||||
let pk3 = Pubkey::new_rand();
|
||||
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account3 = stake_state::create_account(
|
||||
&sk3,
|
||||
&pk3,
|
||||
&vote_account3,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
let sk4 = Pubkey::new_rand();
|
||||
let pk4 = Pubkey::new_rand();
|
||||
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account4 = stake_state::create_account(
|
||||
&sk4,
|
||||
&pk4,
|
||||
&vote_account4,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
|
||||
genesis_config.accounts.extend(vec![
|
||||
(pk1, vote_account1.clone()),
|
||||
(sk1, stake_account1),
|
||||
(pk2, vote_account2.clone()),
|
||||
(sk2, stake_account2),
|
||||
(pk3, vote_account3.clone()),
|
||||
(sk3, stake_account3),
|
||||
(pk4, vote_account4.clone()),
|
||||
(sk4, stake_account4),
|
||||
]);
|
||||
|
||||
// Create bank
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
|
||||
vote_state1.process_slot_vote_unchecked(3);
|
||||
vote_state1.process_slot_vote_unchecked(5);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state1));
|
||||
VoteState::to(&versioned, &mut vote_account1).unwrap();
|
||||
bank.store_account(&pk1, &vote_account1);
|
||||
|
||||
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
|
||||
vote_state2.process_slot_vote_unchecked(9);
|
||||
vote_state2.process_slot_vote_unchecked(10);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state2));
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
let (commitment, rooted_stake) =
|
||||
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= 3 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 150);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 5 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 100);
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 9 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 10 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
assert!(commitment.get(&a).is_none());
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
}
|
@ -1,4 +1,5 @@
|
||||
use crate::{
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
progress_map::{LockoutIntervals, ProgressMap},
|
||||
pubkey_references::PubkeyReferences,
|
||||
};
|
||||
@ -9,21 +10,59 @@ use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp},
|
||||
hash::Hash,
|
||||
instruction::Instruction,
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
use solana_vote_program::vote_state::{
|
||||
BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY, TIMESTAMP_SLOT_INTERVAL,
|
||||
use solana_vote_program::{
|
||||
vote_instruction,
|
||||
vote_state::{
|
||||
BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY, TIMESTAMP_SLOT_INTERVAL,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
ops::Bound::{Included, Unbounded},
|
||||
sync::Arc,
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Clone, Debug)]
|
||||
pub enum SwitchForkDecision {
|
||||
SwitchProof(Hash),
|
||||
NoSwitch,
|
||||
FailedSwitchThreshold,
|
||||
}
|
||||
|
||||
impl SwitchForkDecision {
|
||||
pub fn to_vote_instruction(
|
||||
&self,
|
||||
vote: Vote,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
authorized_voter_pubkey: &Pubkey,
|
||||
) -> Option<Instruction> {
|
||||
match self {
|
||||
SwitchForkDecision::FailedSwitchThreshold => None,
|
||||
SwitchForkDecision::NoSwitch => Some(vote_instruction::vote(
|
||||
vote_account_pubkey,
|
||||
authorized_voter_pubkey,
|
||||
vote,
|
||||
)),
|
||||
SwitchForkDecision::SwitchProof(switch_proof_hash) => {
|
||||
Some(vote_instruction::vote_switch(
|
||||
vote_account_pubkey,
|
||||
authorized_voter_pubkey,
|
||||
vote,
|
||||
*switch_proof_hash,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
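The 0.38 threshold is what check_switch_threshold (further down in this diff) compares against: switching forks is only allowed once the stake locked out on other forks, with lockout intervals covering the last vote, exceeds that fraction of total stake. A minimal sketch of that final comparison; the free-standing function and its name are illustrative only:

// Illustrative: the real check_switch_threshold also walks descendants and lockout_intervals
// before arriving at this comparison.
fn switch_decision_sketch(
    locked_out_stake: u64,
    total_stake: u64,
    switch_proof: Hash,
) -> SwitchForkDecision {
    if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
        SwitchForkDecision::SwitchProof(switch_proof)
    } else {
        SwitchForkDecision::FailedSwitchThreshold
    }
}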
|
||||
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct StakeLockout {
|
||||
lockout: u64,
|
||||
@ -42,6 +81,14 @@ impl StakeLockout {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ComputedBankState {
|
||||
pub stake_lockouts: HashMap<Slot, StakeLockout>,
|
||||
pub total_staked: u64,
|
||||
pub bank_weight: u128,
|
||||
pub lockout_intervals: LockoutIntervals,
|
||||
pub pubkey_votes: Arc<PubkeyVotes>,
|
||||
}
|
||||
|
||||
pub struct Tower {
|
||||
node_pubkey: Pubkey,
|
||||
threshold_depth: usize,
|
||||
@ -65,10 +112,14 @@ impl Default for Tower {
|
||||
}
|
||||
|
||||
impl Tower {
|
||||
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
|
||||
pub fn new(
|
||||
node_pubkey: &Pubkey,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
root: Slot,
|
||||
heaviest_bank: &Bank,
|
||||
) -> Self {
|
||||
let mut tower = Self::new_with_key(node_pubkey);
|
||||
|
||||
tower.initialize_lockouts_from_bank_forks(&bank_forks, vote_account_pubkey);
|
||||
tower.initialize_lockouts_from_bank_forks(vote_account_pubkey, root, heaviest_bank);
|
||||
|
||||
tower
|
||||
}
|
||||
@ -89,27 +140,28 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn collect_vote_lockouts<F>(
|
||||
&self,
|
||||
pub(crate) fn collect_vote_lockouts<F>(
|
||||
node_pubkey: &Pubkey,
|
||||
bank_slot: u64,
|
||||
vote_accounts: F,
|
||||
ancestors: &HashMap<Slot, HashSet<u64>>,
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
) -> (HashMap<Slot, StakeLockout>, u64, u128, LockoutIntervals)
|
||||
) -> ComputedBankState
|
||||
where
|
||||
F: Iterator<Item = (Pubkey, (u64, Account))>,
|
||||
{
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let mut total_stake = 0;
|
||||
let mut total_weight = 0;
|
||||
let mut total_staked = 0;
|
||||
let mut bank_weight = 0;
|
||||
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
|
||||
// keyed by end of the range
|
||||
let mut lockout_intervals = BTreeMap::new();
|
||||
let mut pubkey_votes = vec![];
|
||||
for (key, (lamports, account)) in vote_accounts {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
trace!("{} {} with stake {}", self.node_pubkey, key, lamports);
|
||||
trace!("{} {} with stake {}", node_pubkey, key, lamports);
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
datapoint_warn!(
|
||||
@ -128,11 +180,11 @@ impl Tower {
|
||||
let key = all_pubkeys.get_or_insert(&key);
|
||||
lockout_intervals
|
||||
.entry(vote.expiration_slot())
|
||||
.or_insert_with(|| vec![])
|
||||
.or_insert_with(Vec::new)
|
||||
.push((vote.slot, key));
|
||||
}
|
||||
|
||||
if key == self.node_pubkey || vote_state.node_pubkey == self.node_pubkey {
|
||||
if key == *node_pubkey || vote_state.node_pubkey == *node_pubkey {
|
||||
debug!("vote state {:?}", vote_state);
|
||||
debug!(
|
||||
"observed slot {}",
|
||||
@ -151,10 +203,15 @@ impl Tower {
|
||||
}
|
||||
let start_root = vote_state.root_slot;
|
||||
|
||||
// Add the latest vote to update the `heaviest_subtree_fork_choice`
|
||||
if let Some(latest_vote) = vote_state.votes.back() {
|
||||
pubkey_votes.push((key, latest_vote.slot));
|
||||
}
|
||||
|
||||
vote_state.process_slot_vote_unchecked(bank_slot);
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
|
||||
@ -165,7 +222,7 @@ impl Tower {
|
||||
slot: root,
|
||||
};
|
||||
trace!("ROOT: {}", vote.slot);
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
}
|
||||
@ -174,7 +231,7 @@ impl Tower {
|
||||
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
|
||||
slot: root,
|
||||
};
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
|
||||
@ -195,9 +252,16 @@ impl Tower {
|
||||
// Update all the parents of this last vote with the stake of this vote account
|
||||
Self::update_ancestor_stakes(&mut stake_lockouts, vote.slot, lamports, ancestors);
|
||||
}
|
||||
total_stake += lamports;
|
||||
total_staked += lamports;
|
||||
}
|
||||
|
||||
ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
lockout_intervals,
|
||||
pubkey_votes: Arc::new(pubkey_votes),
|
||||
}
|
||||
(stake_lockouts, total_stake, total_weight, lockout_intervals)
|
||||
}
|
||||
|
||||
pub fn is_slot_confirmed(
|
||||
@ -345,7 +409,7 @@ impl Tower {
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) -> bool {
|
||||
) -> SwitchForkDecision {
|
||||
self.last_vote()
|
||||
.slots
|
||||
.last()
|
||||
@ -355,14 +419,18 @@ impl Tower {
|
||||
|
||||
if switch_slot == *last_vote || switch_slot_ancestors.contains(last_vote) {
|
||||
// If the `switch_slot is a descendant of the last vote,
|
||||
// no switching proof is neceessary
|
||||
return true;
|
||||
// no switching proof is necessary
|
||||
return SwitchForkDecision::NoSwitch;
|
||||
}
|
||||
|
||||
// Should never consider switching to an ancestor
|
||||
// of your last vote
|
||||
assert!(!last_vote_ancestors.contains(&switch_slot));
|
||||
|
||||
// By this point, we know the `switch_slot` is on a different fork
|
||||
// (is neither an ancestor nor descendant of `last_vote`), so a
|
||||
// switching proof is necessary
|
||||
let switch_proof = Hash::default();
|
||||
let mut locked_out_stake = 0;
|
||||
let mut locked_out_vote_accounts = HashSet::new();
|
||||
for (candidate_slot, descendants) in descendants.iter() {
|
||||
@ -423,9 +491,14 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
}
|
||||
(locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD
|
||||
|
||||
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
|
||||
SwitchForkDecision::SwitchProof(switch_proof)
|
||||
} else {
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
}
|
||||
})
|
||||
.unwrap_or(true)
|
||||
.unwrap_or(SwitchForkDecision::NoSwitch)
|
||||
}
|
||||
|
||||
pub fn check_vote_stake_threshold(
|
||||
@ -463,7 +536,7 @@ impl Tower {
|
||||
}
|
||||
|
||||
/// Update lockouts for all the ancestors
|
||||
fn update_ancestor_lockouts(
|
||||
pub(crate) fn update_ancestor_lockouts(
|
||||
stake_lockouts: &mut HashMap<Slot, StakeLockout>,
|
||||
vote: &Lockout,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
@ -483,6 +556,28 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn find_heaviest_bank(
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
node_pubkey: &Pubkey,
|
||||
) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
Self::bank_weight(node_pubkey, b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
/// Update stake for all the ancestors.
|
||||
/// Note, stake is the same for all the ancestor.
|
||||
fn update_ancestor_stakes(
|
||||
@ -491,9 +586,8 @@ impl Tower {
|
||||
lamports: u64,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) {
|
||||
// If there's no ancestors, that means this slot must be from before the current root,
|
||||
// in which case the lockouts won't be calculated in bank_weight anyways, so ignore
|
||||
// this slot
|
||||
// If there's no ancestors, that means this slot must be from
|
||||
// before the current root, so ignore this slot
|
||||
let vote_slot_ancestors = ancestors.get(&slot);
|
||||
if vote_slot_ancestors.is_none() {
|
||||
return;
|
||||
@ -506,8 +600,13 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
fn bank_weight(&self, bank: &Bank, ancestors: &HashMap<Slot, HashSet<Slot>>) -> u128 {
|
||||
let (_, _, bank_weight, _) = self.collect_vote_lockouts(
|
||||
fn bank_weight(
|
||||
node_pubkey: &Pubkey,
|
||||
bank: &Bank,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) -> u128 {
|
||||
let ComputedBankState { bank_weight, .. } = Self::collect_vote_lockouts(
|
||||
node_pubkey,
|
||||
bank.slot(),
|
||||
bank.vote_accounts().into_iter(),
|
||||
ancestors,
|
||||
@ -516,47 +615,28 @@ impl Tower {
|
||||
bank_weight
|
||||
}
|
||||
|
||||
fn find_heaviest_bank(&self, bank_forks: &BankForks) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
self.bank_weight(b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
fn initialize_lockouts_from_bank_forks(
|
||||
&mut self,
|
||||
bank_forks: &BankForks,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
root: Slot,
|
||||
heaviest_bank: &Bank,
|
||||
) {
|
||||
if let Some(bank) = self.find_heaviest_bank(bank_forks) {
|
||||
let root = bank_forks.root();
|
||||
if let Some((_stake, vote_account)) = bank.vote_accounts().get(vote_account_pubkey) {
|
||||
let mut vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.votes.retain(|v| v.slot > root);
|
||||
trace!(
|
||||
"{} lockouts initialized to {:?}",
|
||||
self.node_pubkey,
|
||||
vote_state
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
vote_state.node_pubkey, self.node_pubkey,
|
||||
"vote account's node_pubkey doesn't match",
|
||||
);
|
||||
self.lockouts = vote_state;
|
||||
}
|
||||
if let Some((_stake, vote_account)) = heaviest_bank.vote_accounts().get(vote_account_pubkey)
|
||||
{
|
||||
let mut vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.votes.retain(|v| v.slot > root);
|
||||
trace!(
|
||||
"{} lockouts initialized to {:?}",
|
||||
self.node_pubkey,
|
||||
vote_state
|
||||
);
|
||||
assert_eq!(
|
||||
vote_state.node_pubkey, self.node_pubkey,
|
||||
"vote account's node_pubkey doesn't match",
|
||||
);
|
||||
self.lockouts = vote_state;
|
||||
}
|
||||
}
|
||||
|
||||
@ -580,11 +660,15 @@ impl Tower {
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
bank_weight_fork_choice::BankWeightForkChoice,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
fork_choice::SelectVoteAndResetForkResult,
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
@ -599,7 +683,7 @@ pub mod test {
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_state::{Vote, VoteStateVersions},
|
||||
vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY},
|
||||
vote_transaction,
|
||||
};
|
||||
use std::{
|
||||
@ -616,18 +700,26 @@ pub mod test {
|
||||
pub vote_pubkeys: Vec<Pubkey>,
|
||||
pub bank_forks: RwLock<BankForks>,
|
||||
pub progress: ProgressMap,
|
||||
pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
|
||||
}
|
||||
|
||||
impl VoteSimulator {
|
||||
pub(crate) fn new(num_keypairs: usize) -> Self {
|
||||
let (validator_keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress) =
|
||||
Self::init_state(num_keypairs);
|
||||
let (
|
||||
validator_keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
) = Self::init_state(num_keypairs);
|
||||
Self {
|
||||
validator_keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks: RwLock::new(bank_forks),
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
}
|
||||
}
|
||||
pub(crate) fn fill_bank_forks(
|
||||
@ -670,6 +762,8 @@ pub mod test {
|
||||
}
|
||||
}
|
||||
new_bank.freeze();
|
||||
self.heaviest_subtree_fork_choice
|
||||
.add_new_leaf_slot(new_bank.slot(), Some(new_bank.parent_slot()));
|
||||
self.bank_forks.write().unwrap().insert(new_bank);
|
||||
walk.forward();
|
||||
}
|
||||
@ -694,6 +788,7 @@ pub mod test {
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let (replay_slot_sender, _replay_slot_receiver) = unbounded();
|
||||
let _ = ReplayStage::compute_bank_stats(
|
||||
&my_pubkey,
|
||||
&ancestors,
|
||||
@ -704,6 +799,9 @@ pub mod test {
|
||||
&ClusterSlots::default(),
|
||||
&self.bank_forks,
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_slot_sender,
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@ -716,8 +814,11 @@ pub mod test {
|
||||
|
||||
// Try to vote on the given slot
|
||||
let descendants = self.bank_forks.read().unwrap().descendants();
|
||||
let (_, _, failure_reasons) = ReplayStage::select_vote_and_reset_forks(
|
||||
&Some(vote_bank.clone()),
|
||||
let SelectVoteAndResetForkResult {
|
||||
heaviest_fork_failures,
|
||||
..
|
||||
} = ReplayStage::select_vote_and_reset_forks(
|
||||
&vote_bank,
|
||||
&None,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
@ -727,8 +828,8 @@ pub mod test {
|
||||
|
||||
// Make sure this slot isn't locked out or failing threshold
|
||||
info!("Checking vote: {}", vote_bank.slot());
|
||||
if !failure_reasons.is_empty() {
|
||||
return failure_reasons;
|
||||
if !heaviest_fork_failures.is_empty() {
|
||||
return heaviest_fork_failures;
|
||||
}
|
||||
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
|
||||
if let Some(new_root) = tower.record_bank_vote(vote) {
|
||||
@ -746,6 +847,7 @@ pub mod test {
|
||||
&None,
|
||||
&mut PubkeyReferences::default(),
|
||||
None,
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
)
|
||||
}
|
||||
|
||||
@ -831,6 +933,7 @@ pub mod test {
|
||||
Vec<Pubkey>,
|
||||
BankForks,
|
||||
ProgressMap,
|
||||
HeaviestSubtreeForkChoice,
|
||||
) {
|
||||
let keypairs: HashMap<_, _> = std::iter::repeat_with(|| {
|
||||
let node_keypair = Keypair::new();
|
||||
@ -853,8 +956,16 @@ pub mod test {
|
||||
.map(|keys| keys.vote_keypair.pubkey())
|
||||
.collect();
|
||||
|
||||
let (bank_forks, progress) = initialize_state(&keypairs, 10_000);
|
||||
(keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress)
|
||||
let (bank_forks, progress, heaviest_subtree_fork_choice) =
|
||||
initialize_state(&keypairs, 10_000);
|
||||
(
|
||||
keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -862,7 +973,7 @@ pub mod test {
|
||||
pub(crate) fn initialize_state(
|
||||
validator_keypairs_map: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
stake: u64,
|
||||
) -> (BankForks, ProgressMap) {
|
||||
) -> (BankForks, ProgressMap, HeaviestSubtreeForkChoice) {
|
||||
let validator_keypairs: Vec<_> = validator_keypairs_map.values().collect();
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -882,7 +993,10 @@ pub mod test {
|
||||
0,
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
);
|
||||
(BankForks::new(0, bank0), progress)
|
||||
let bank_forks = BankForks::new(bank0);
|
||||
let heaviest_subtree_fork_choice =
|
||||
HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks);
|
||||
(bank_forks, progress, heaviest_subtree_fork_choice)
|
||||
}
|
||||
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, Account))> {
|
||||
@ -905,6 +1019,34 @@ pub mod test {
|
||||
stakes
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_vote_instruction() {
|
||||
let vote = Vote::default();
|
||||
let mut decision = SwitchForkDecision::FailedSwitchThreshold;
|
||||
assert!(decision
|
||||
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
|
||||
.is_none());
|
||||
decision = SwitchForkDecision::NoSwitch;
|
||||
assert_eq!(
|
||||
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
|
||||
Some(vote_instruction::vote(
|
||||
&Pubkey::default(),
|
||||
&Pubkey::default(),
|
||||
vote.clone(),
|
||||
))
|
||||
);
|
||||
decision = SwitchForkDecision::SwitchProof(Hash::default());
|
||||
assert_eq!(
|
||||
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
|
||||
Some(vote_instruction::vote_switch(
|
||||
&Pubkey::default(),
|
||||
&Pubkey::default(),
|
||||
vote,
|
||||
Hash::default()
|
||||
))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_votes() {
|
||||
// Init state
|
||||
@ -975,85 +1117,106 @@ pub mod test {
|
||||
tower.record_vote(47, Hash::default());
|
||||
|
||||
// Trying to switch to a descendant of last vote should always work
|
||||
assert!(tower.check_switch_threshold(
|
||||
48,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
48,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::NoSwitch
|
||||
);
|
||||
|
||||
// Trying to switch to another fork at 110 should fail
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a descendant of last vote should
|
||||
// not count toward the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on an ancestor of last vote should
|
||||
// not count toward the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a different fork, but the lockout
|
||||
// doesn't cover the last vote, should not satisfy the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a different fork, and the lockout
|
||||
// covers the last vote, should satisfy the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
|
||||
assert!(tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
||||
// If we set a root, then any lockout intervals below the root shouldn't
|
||||
// count toward the switch threshold. This means the other validator's
|
||||
// vote lockout no longer counts
|
||||
vote_simulator.set_root(43);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
&vote_simulator.bank_forks.read().unwrap().descendants(),
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
&vote_simulator.bank_forks.read().unwrap().descendants(),
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1193,20 +1356,33 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_collect_vote_lockouts_sums() {
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
let tower = Tower::new_for_tests(0, 0.67);
|
||||
let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: PubkeyVotes =
|
||||
accounts.iter().map(|(pubkey, _)| (*pubkey, 0)).collect();
|
||||
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let (staked_lockouts, total_staked, bank_weight, _) = tower.collect_vote_lockouts(
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
accounts.into_iter(),
|
||||
&ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert_eq!(staked_lockouts[&0].stake, 2);
|
||||
assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(stake_lockouts[&0].stake, 2);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(total_staked, 2);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
|
||||
// Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts,
|
||||
// the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
|
||||
@ -1218,7 +1394,12 @@ pub mod test {
|
||||
fn test_collect_vote_lockouts_root() {
|
||||
let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
|
||||
//two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
|
||||
let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: PubkeyVotes = accounts
|
||||
.iter()
|
||||
.map(|(pubkey, _)| (*pubkey, (MAX_LOCKOUT_HISTORY - 1) as Slot))
|
||||
.collect();
|
||||
let mut tower = Tower::new_for_tests(0, 0.67);
|
||||
let mut ancestors = HashMap::new();
|
||||
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
|
||||
@ -1239,18 +1420,28 @@ pub mod test {
|
||||
+ root_weight;
|
||||
let expected_bank_weight = 2 * vote_account_expected_weight;
|
||||
assert_eq!(tower.lockouts.root_slot, Some(0));
|
||||
let (staked_lockouts, _total_staked, bank_weight, _) = tower.collect_vote_lockouts(
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
bank_weight,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
MAX_LOCKOUT_HISTORY as u64,
|
||||
accounts.into_iter(),
|
||||
&ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
for i in 0..MAX_LOCKOUT_HISTORY {
|
||||
assert_eq!(staked_lockouts[&(i as u64)].stake, 2);
|
||||
assert_eq!(stake_lockouts[&(i as u64)].stake, 2);
|
||||
}
|
||||
|
||||
// should be the sum of all the weights for root
|
||||
assert!(staked_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
|
||||
assert!(stake_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
|
||||
assert_eq!(bank_weight, expected_bank_weight);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1283,7 +1474,7 @@ pub mod test {
|
||||
);
|
||||
tower.record_vote(i, Hash::default());
|
||||
}
|
||||
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
|
||||
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1511,48 +1702,7 @@ pub mod test {
|
||||
tower.record_vote(0, Hash::default());
|
||||
tower.record_vote(1, Hash::default());
|
||||
tower.record_vote(2, Hash::default());
|
||||
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lockout_is_updated_for_entire_branch() {
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let vote = Lockout {
|
||||
slot: 2,
|
||||
confirmation_count: 1,
|
||||
};
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let mut ancestors = HashMap::new();
|
||||
ancestors.insert(2, set);
|
||||
let set: HashSet<u64> = vec![0u64].into_iter().collect();
|
||||
ancestors.insert(1, set);
|
||||
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2);
|
||||
assert_eq!(stake_lockouts[&1].lockout, 2);
|
||||
assert_eq!(stake_lockouts[&2].lockout, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lockout_is_updated_for_slot_or_lower() {
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let mut ancestors = HashMap::new();
|
||||
ancestors.insert(2, set);
|
||||
let set: HashSet<u64> = vec![0u64].into_iter().collect();
|
||||
ancestors.insert(1, set);
|
||||
let vote = Lockout {
|
||||
slot: 2,
|
||||
confirmation_count: 1,
|
||||
};
|
||||
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
let vote = Lockout {
|
||||
slot: 1,
|
||||
confirmation_count: 2,
|
||||
};
|
||||
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
|
||||
assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
|
||||
assert_eq!(stake_lockouts[&2].lockout, 2);
|
||||
assert!(tower.check_vote_stake_threshold(6, &stakes, 2,));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1631,7 +1781,7 @@ pub mod test {
|
||||
let total_stake = 4;
|
||||
let threshold_size = 0.67;
|
||||
let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
|
||||
let tower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
|
||||
let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
|
||||
let accounts = gen_stakes(&[
|
||||
(threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
|
||||
(total_stake - threshold_stake, &tower_votes[..]),
|
||||
@ -1648,29 +1798,35 @@ pub mod test {
|
||||
for vote in &tower_votes {
|
||||
tower.record_vote(*vote, Hash::default());
|
||||
}
|
||||
let (staked_lockouts, total_staked, _, _) = tower.collect_vote_lockouts(
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
vote_to_evaluate,
|
||||
accounts.clone().into_iter(),
|
||||
&ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts, total_staked));
|
||||
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &stake_lockouts, total_staked,));
|
||||
|
||||
// CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
|
||||
// will expire the vote in one of the vote accounts, so we should have insufficient
|
||||
// stake to pass the threshold
|
||||
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
|
||||
let (staked_lockouts, total_staked, _, _) = tower.collect_vote_lockouts(
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
vote_to_evaluate,
|
||||
accounts.into_iter(),
|
||||
&ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(!tower.check_vote_stake_threshold(
|
||||
vote_to_evaluate,
|
||||
&staked_lockouts,
|
||||
total_staked
|
||||
));
|
||||
assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &stake_lockouts, total_staked,));
|
||||
}
|
||||
|
||||
fn vote_and_check_recent(num_votes: usize) {
|
||||
|
@ -36,6 +36,7 @@ use std::collections::HashMap;
|
||||
pub struct Crds {
|
||||
/// Stores the map of labels and values
|
||||
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
pub num_inserts: usize,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
@ -84,6 +85,7 @@ impl Default for Crds {
|
||||
fn default() -> Self {
|
||||
Crds {
|
||||
table: IndexMap::new(),
|
||||
num_inserts: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -93,6 +95,24 @@ impl Crds {
|
||||
pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
|
||||
VersionedCrdsValue::new(local_timestamp, value)
|
||||
}
|
||||
pub fn would_insert(
    &self,
    value: CrdsValue,
    local_timestamp: u64,
) -> Option<VersionedCrdsValue> {
    let new_value = self.new_versioned(local_timestamp, value);
    let label = new_value.value.label();
    let would_insert = self
        .table
        .get(&label)
        .map(|current| new_value > *current)
        .unwrap_or(true);
    if would_insert {
        Some(new_value)
    } else {
        None
    }
}
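would_insert applies the same ordering test as insert_versioned below but leaves the table untouched, which lets callers decide about a value before taking a write lock (crds_gossip_pull uses this when filtering pull responses). A rough usage sketch; crds, value, and now are assumed to be in scope with the types used above:

// Sketch: probe whether `value` would supersede the stored entry without mutating `crds`.
if let Some(versioned) = crds.would_insert(value, now) {
    // `versioned` can later be written back via crds.insert_versioned(versioned).
} else {
    // An equal-or-newer entry is already present; the value would be dropped.
}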
/// insert the new value, returns the old value if insert succeeds
|
||||
pub fn insert_versioned(
|
||||
&mut self,
|
||||
@ -107,6 +127,7 @@ impl Crds {
|
||||
.unwrap_or(true);
|
||||
if do_insert {
|
||||
let old = self.table.insert(label, new_value);
|
||||
self.num_inserts += 1;
|
||||
Ok(old)
|
||||
} else {
|
||||
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);
|
||||
|
@ -6,7 +6,7 @@
|
||||
use crate::{
|
||||
crds::{Crds, VersionedCrdsValue},
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
|
||||
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
|
||||
crds_value::{CrdsValue, CrdsValueLabel},
|
||||
};
|
||||
@ -76,17 +76,10 @@ impl CrdsGossip {
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> HashMap<Pubkey, HashSet<Pubkey>> {
|
||||
let id = &self.id;
|
||||
let crds = &self.crds;
|
||||
let push = &mut self.push;
|
||||
let versioned = labels
|
||||
.into_iter()
|
||||
.filter_map(|label| crds.lookup_versioned(&label));
|
||||
|
||||
let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
|
||||
for val in versioned {
|
||||
let origin = val.value.pubkey();
|
||||
let hash = val.value_hash;
|
||||
let peers = push.prune_received_cache(id, &origin, hash, stakes);
|
||||
for origin in labels.iter().map(|k| k.pubkey()) {
|
||||
let peers = push.prune_received_cache(id, &origin, stakes);
|
||||
for from in peers {
|
||||
prune_map.entry(from).or_default().insert(origin);
|
||||
}
|
||||
@ -113,7 +106,7 @@ impl CrdsGossip {
|
||||
return Err(CrdsGossipError::PruneMessageTimeout);
|
||||
}
|
||||
if self.id == *destination {
|
||||
self.push.process_prune_msg(peer, origin);
|
||||
self.push.process_prune_msg(&self.id, peer, origin);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(CrdsGossipError::BadPruneDestination)
|
||||
@ -158,24 +151,47 @@ impl CrdsGossip {
|
||||
self.pull.mark_pull_request_creation_time(from, now)
|
||||
}
|
||||
/// process a pull request and create a response
|
||||
pub fn process_pull_requests(
|
||||
&mut self,
|
||||
filters: Vec<(CrdsValue, CrdsFilter)>,
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
|
||||
self.pull
|
||||
.process_pull_requests(&mut self.crds, filters, now)
|
||||
.process_pull_requests(&mut self.crds, filters, now);
|
||||
}
|
||||
/// process a pull response
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
from: &Pubkey,
|
||||
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.pull.generate_pull_responses(&self.crds, filters)
|
||||
}
|
||||
|
||||
pub fn filter_pull_responses(
|
||||
&self,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> usize {
|
||||
process_pull_stats: &mut ProcessPullStats,
|
||||
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
|
||||
self.pull
|
||||
.process_pull_response(&mut self.crds, from, timeouts, response, now)
|
||||
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
|
||||
}
|
||||
|
||||
/// process a pull response
|
||||
pub fn process_pull_responses(
|
||||
&mut self,
|
||||
from: &Pubkey,
|
||||
responses: Vec<VersionedCrdsValue>,
|
||||
responses_expired_timeout: Vec<VersionedCrdsValue>,
|
||||
now: u64,
|
||||
process_pull_stats: &mut ProcessPullStats,
|
||||
) {
|
||||
let success = self.pull.process_pull_responses(
|
||||
&mut self.crds,
|
||||
from,
|
||||
responses,
|
||||
responses_expired_timeout,
|
||||
now,
|
||||
process_pull_stats,
|
||||
);
|
||||
self.push.push_pull_responses(success, now);
|
||||
}
|
||||
|
||||
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {
|
||||
|
@ -2,7 +2,6 @@
|
||||
pub enum CrdsGossipError {
|
||||
NoPeers,
|
||||
PushMessageTimeout,
|
||||
PushMessageAlreadyReceived,
|
||||
PushMessageOldVersion,
|
||||
BadPruneDestination,
|
||||
PruneMessageTimeout,
|
||||
|
@ -10,7 +10,7 @@
|
||||
//! of false positives.
|
||||
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds::Crds;
|
||||
use crate::crds::{Crds, VersionedCrdsValue};
|
||||
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
|
||||
use crate::crds_gossip_error::CrdsGossipError;
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
|
||||
// The maximum age of a value received over pull responses
|
||||
@ -118,6 +118,14 @@ impl CrdsFilter {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
pub struct ProcessPullStats {
    pub success: usize,
    pub failed_insert: usize,
    pub failed_timeout: usize,
    pub timeout_count: usize,
}
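These counters are filled in by the split pull-response path introduced in this diff: generate_pull_responses builds responses for incoming requests, filter_pull_responses decides what would be inserted without touching the table, and process_pull_responses performs the writes. A condensed sketch of that flow, mirroring the #[cfg(test)] helper at the bottom of this file; pull, crds, timeouts, response, from, and now are assumed to be in scope:

// Sketch of the three-step flow; the legacy process_pull_response wrapper below does the same.
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
    pull.filter_pull_responses(&crds, &timeouts, response, now, &mut stats);
pull.process_pull_responses(
    &mut crds,
    &from,
    versioned,
    versioned_expired_timeout,
    now,
    &mut stats,
);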
|
||||
#[derive(Clone)]
|
||||
pub struct CrdsGossipPull {
|
||||
/// timestamp of last request
|
||||
@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
|
||||
purged_values: VecDeque<(Hash, u64)>,
|
||||
pub crds_timeout: u64,
|
||||
pub msg_timeout: u64,
|
||||
pub num_pulls: usize,
|
||||
}
|
||||
|
||||
impl Default for CrdsGossipPull {
|
||||
@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
|
||||
pull_request_time: HashMap::new(),
|
||||
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
num_pulls: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -204,14 +214,13 @@ impl CrdsGossipPull {
|
||||
self.purged_values.push_back((hash, timestamp))
|
||||
}
|
||||
|
||||
/// process a pull request and create a response
|
||||
/// process a pull request
|
||||
pub fn process_pull_requests(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
requests: Vec<(CrdsValue, CrdsFilter)>,
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
let rv = self.filter_crds_values(crds, &requests);
|
||||
) {
|
||||
requests.into_iter().for_each(|(caller, _)| {
|
||||
let key = caller.label().pubkey();
|
||||
let old = crds.insert(caller, now);
|
||||
@ -221,19 +230,33 @@ impl CrdsGossipPull {
|
||||
}
|
||||
crds.update_record_timestamp(&key, now);
|
||||
});
|
||||
rv
|
||||
}
|
||||
/// process a pull response
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
from: &Pubkey,
|
||||
|
||||
/// Create gossip responses to pull requests
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
requests: &[(CrdsValue, CrdsFilter)],
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.filter_crds_values(crds, requests)
|
||||
}
|
||||
|
||||
// Checks if responses should be inserted and
|
||||
// returns those responses converted to VersionedCrdsValue
|
||||
// Separated in two vecs as:
|
||||
// .0 => responses that update the owner timestamp
|
||||
// .1 => responses that do not update the owner timestamp
|
||||
pub fn filter_pull_responses(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
responses: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> usize {
|
||||
let mut failed = 0;
|
||||
for r in response {
|
||||
stats: &mut ProcessPullStats,
|
||||
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
|
||||
let mut versioned = vec![];
|
||||
let mut versioned_expired_timestamp = vec![];
|
||||
for r in responses {
|
||||
let owner = r.label().pubkey();
|
||||
// Check if the crds value is older than the msg_timeout
|
||||
if now
|
||||
@ -252,11 +275,8 @@ impl CrdsGossipPull {
|
||||
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|
||||
|| now + timeout < r.wallclock()
|
||||
{
|
||||
inc_new_counter_warn!(
|
||||
"cluster_info-gossip_pull_response_value_timeout",
|
||||
1
|
||||
);
|
||||
failed += 1;
|
||||
stats.timeout_count += 1;
|
||||
stats.failed_timeout += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -264,32 +284,69 @@ impl CrdsGossipPull {
|
||||
// Before discarding this value, check if a ContactInfo for the owner
|
||||
// exists in the table. If it doesn't, that implies that this value can be discarded
|
||||
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
|
||||
inc_new_counter_warn!(
|
||||
"cluster_info-gossip_pull_response_value_timeout",
|
||||
1
|
||||
);
|
||||
failed += 1;
|
||||
stats.timeout_count += 1;
|
||||
stats.failed_timeout += 1;
|
||||
continue;
|
||||
} else {
|
||||
// Silently insert this old value without bumping record timestamps
|
||||
failed += crds.insert(r, now).is_err() as usize;
|
||||
match crds.would_insert(r, now) {
|
||||
Some(resp) => versioned_expired_timestamp.push(resp),
|
||||
None => stats.failed_insert += 1,
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let old = crds.insert(r, now);
|
||||
failed += old.is_err() as usize;
|
||||
match crds.would_insert(r, now) {
|
||||
Some(resp) => versioned.push(resp),
|
||||
None => stats.failed_insert += 1,
|
||||
}
|
||||
}
|
||||
(versioned, versioned_expired_timestamp)
|
||||
}
|

/// process a vec of pull responses
pub fn process_pull_responses(
&mut self,
crds: &mut Crds,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
stats: &mut ProcessPullStats,
) -> Vec<(CrdsValueLabel, Hash, u64)> {
let mut success = vec![];
let mut owners = HashSet::new();
for r in responses_expired_timeout {
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
}
for r in responses {
let owner = r.value.label().pubkey();
let label = r.value.label();
let wc = r.value.wallclock();
let hash = r.value_hash;
let old = crds.insert_versioned(r);
if old.is_err() {
stats.failed_insert += 1;
} else {
stats.success += 1;
self.num_pulls += 1;
success.push((label, hash, wc));
}
old.ok().map(|opt| {
crds.update_record_timestamp(&owner, now);
owners.insert(owner);
opt.map(|val| {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
})
});
}
crds.update_record_timestamp(from, now);
failed
owners.insert(*from);
for owner in owners {
crds.update_record_timestamp(&owner, now);
}
success
}
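process_pull_responses now collects the owners of successfully inserted values (plus the sender) into a set and bumps each record timestamp once, instead of updating per value. A small sketch of that deduplication pattern, with Strings standing in for Pubkey and a hypothetical timestamp map:

use std::collections::{HashMap, HashSet};

fn update_record_timestamps(
    timestamps: &mut HashMap<String, u64>,
    response_owners: &[String],
    from: &str,
    now: u64,
) {
    let mut owners: HashSet<String> = response_owners.iter().cloned().collect();
    owners.insert(from.to_string());
    for owner in owners {
        timestamps.insert(owner, now); // one update per unique owner
    }
}

fn main() {
    let mut timestamps = HashMap::new();
    let owners = vec!["a".to_string(), "a".to_string(), "b".to_string()];
    update_record_timestamps(&mut timestamps, &owners, "sender", 42);
    assert_eq!(timestamps.len(), 3); // "a", "b", "sender" each updated once
}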
// build a set of filters of the current crds table
// num_filters - used to increase the likelyhood of a value in crds being added to some filter
@@ -379,6 +436,34 @@ impl CrdsGossipPull {
.count();
self.purged_values.drain(..cnt);
}

/// For legacy tests
#[cfg(test)]
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> (usize, usize, usize) {
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
self.process_pull_responses(
crds,
from,
versioned,
versioned_expired_timeout,
now,
&mut stats,
);
(
stats.failed_timeout + stats.failed_insert,
stats.timeout_count,
stats.success,
)
}
}
#[cfg(test)]
mod test {
@@ -578,8 +663,9 @@ mod test {
let mut dest_crds = Crds::default();
let mut dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 1);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@@ -648,8 +734,9 @@ mod test {
PACKET_DATA_SIZE,
);
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 0);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
if rsp.is_empty() {
@@ -660,13 +747,15 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
let failed = node
.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
)
.0;
assert_eq!(failed, 0);
assert_eq!(
node_crds
@@ -827,7 +916,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
1,
),
)
.0,
0
);

@@ -843,7 +933,8 @@ mod test {
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
)
.0,
2
);

@@ -856,7 +947,8 @@ mod test {
&timeouts,
vec![peer_entry],
node.msg_timeout + 1,
),
)
.0,
0
);

@@ -872,7 +964,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);

@@ -885,7 +978,8 @@ mod test {
&timeouts,
vec![peer_vote],
node.msg_timeout + 1,
),
)
.0,
1
);
}

@@ -5,7 +5,7 @@
//!
//! Main differences are:
//! 1. There is no `max hop`. Messages are signed with a local wallclock. If they are outside of
//! the local nodes wallclock window they are drooped silently.
//! the local nodes wallclock window they are dropped silently.
//! 2. The prune set is stored in a Bloom filter.

use crate::{
@@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;

#[derive(Clone)]
pub struct CrdsGossipPush {
@@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
/// Cache that tracks which validators a message was received from
/// bool indicates it has been pruned.
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
pub prune_timeout: u64,
pub num_total: usize,
pub num_old: usize,
pub num_pushes: usize,
}

impl Default for CrdsGossipPush {
@@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
num_total: 0,
num_old: 0,
num_pushes: 0,
}
}
}
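The reshaped received_cache is keyed by message origin; the inner map records, per relaying peer, a pruned flag and the last time that peer delivered a message from that origin. A sketch with Strings standing in for Pubkey (record_receipt is a hypothetical helper):

use std::collections::HashMap;

type ReceivedCache = HashMap<String, HashMap<String, (bool, u64)>>;

fn record_receipt(cache: &mut ReceivedCache, origin: &str, from: &str, now: u64) {
    let senders = cache.entry(origin.to_string()).or_insert_with(HashMap::new);
    // Keep the pruned flag if the sender is already known, but refresh the timestamp.
    senders.entry(from.to_string()).or_insert((false, 0)).1 = now;
}

fn main() {
    let mut cache = ReceivedCache::new();
    record_receipt(&mut cache, "origin", "peer1", 10);
    record_receipt(&mut cache, "origin", "peer1", 20);
    record_receipt(&mut cache, "origin", "peer2", 20);
    assert_eq!(cache["origin"].len(), 2);
    assert_eq!(cache["origin"]["peer1"], (false, 20));
}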
@@ -81,18 +91,21 @@ impl CrdsGossipPush {
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
let cache = self.received_cache.get(origin);
if cache.is_none() {
return Vec::new();
}
let peers = cache.unwrap();

let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let peer_stake_total: u64 = peers
.iter()
.filter(|v| !(v.1).0)
.map(|v| stakes.get(v.0).unwrap_or(&0))
.sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
@@ -100,7 +113,8 @@ impl CrdsGossipPush {

let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|v| !(v.1).0)
.filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
.filter(|(_, s)| *s > 0)
.collect();

@@ -117,16 +131,27 @@ impl CrdsGossipPush {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
if peer_stake_sum >= prune_stake_threshold
&& keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
{
break;
}
}

peers
.iter()
let pruned_peers: Vec<Pubkey> = peers
.keys()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
.collect();
pruned_peers.iter().for_each(|p| {
self.received_cache
.get_mut(origin)
.unwrap()
.get_mut(p)
.unwrap()
.0 = true;
});
pruned_peers
}
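The pruning rule above keeps relayers until both conditions hold: their combined stake reaches the prune threshold and at least CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES peers are kept; everyone else is pruned and marked in the cache. A simplified sketch of that decision (peers are walked in descending stake order here rather than the weighted shuffle the real code uses; the names are illustrative assumptions):

use std::collections::HashSet;

const MIN_INGRESS_NODES: usize = 2;

fn prune_peers(staked_peers: &[(String, u64)], prune_stake_threshold: u64) -> Vec<String> {
    let mut ordered: Vec<_> = staked_peers.to_vec();
    ordered.sort_by(|a, b| b.1.cmp(&a.1));

    let mut keep = HashSet::new();
    let mut stake_sum = 0;
    for (peer, stake) in &ordered {
        keep.insert(peer.clone());
        stake_sum += stake;
        if stake_sum >= prune_stake_threshold && keep.len() >= MIN_INGRESS_NODES {
            break;
        }
    }
    // Everyone not kept gets pruned.
    staked_peers
        .iter()
        .filter(|(p, _)| !keep.contains(p))
        .map(|(p, _)| p.clone())
        .collect()
}

fn main() {
    let peers = vec![("a".into(), 100), ("b".into(), 50), ("c".into(), 1)];
    // Even though "a" alone covers the threshold, at least two peers are kept.
    assert_eq!(prune_peers(&peers, 80), vec!["c".to_string()]);
}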

/// process a push message to the network
@@ -137,6 +162,7 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
self.num_total += 1;
if now
> value
.wallclock()
@@ -149,21 +175,32 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();
let origin = label.pubkey();
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let received_set = self
.received_cache
.entry(origin)
.or_insert_with(HashMap::new);
received_set.entry(*from).or_insert((false, 0)).1 = now;

let old = crds.insert_versioned(new_value);
if old.is_err() {
self.num_old += 1;
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))
Ok(old.unwrap())
}

/// push pull responses
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
for (label, value_hash, wc) in values {
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
continue;
}
self.push_messages.insert(label, value_hash);
}
}

/// New push message to broadcast to peers.
@@ -172,18 +209,10 @@ impl CrdsGossipPush {
/// The list of push messages is created such that all the randomly selected peers have not
/// pruned the source addresses.
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
let max = self.active_set.len();
let mut nodes: Vec<_> = (0..max).collect();
nodes.shuffle(&mut rand::thread_rng());
let peers: Vec<Pubkey> = nodes
.into_iter()
.filter_map(|n| self.active_set.get_index(n))
.take(self.push_fanout)
.map(|n| *n.0)
.collect();
let mut total_bytes: usize = 0;
let mut values = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
trace!("new_push_messages {}", self.push_messages.len());
for (label, hash) in &self.push_messages {
let res = crds.lookup_versioned(label);
if res.is_none() {
@@ -203,21 +232,37 @@ impl CrdsGossipPush {
}
values.push(value.clone());
}
trace!(
"new_push_messages {} {}",
values.len(),
self.active_set.len()
);
for v in values {
for p in peers.iter() {
let filter = self.active_set.get_mut(p);
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
push_messages.entry(*p).or_default().push(v.clone());
//use a consistent index for the same origin so
//the active set learns the MST for that origin
let start = v.label().pubkey().as_ref()[0] as usize;
let max = self.push_fanout.min(self.active_set.len());
for i in start..(start + max) {
let ix = i % self.active_set.len();
if let Some((p, filter)) = self.active_set.get_index(ix) {
if !filter.contains(&v.label().pubkey()) {
trace!("new_push_messages insert {} {:?}", *p, v);
push_messages.entry(*p).or_default().push(v.clone());
self.num_pushes += 1;
}
}
self.push_messages.remove(&v.label());
}
self.push_messages.remove(&v.label());
}
push_messages
}
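Instead of pushing to a randomly shuffled fanout, new_push_messages now derives the fanout window deterministically from the first byte of the origin's key, so the same origin always maps to the same slots of the active set. A sketch of that index selection with &str stand-ins for peers (fanout_peers is a hypothetical helper):

fn fanout_peers(origin_first_byte: u8, active_set: &[&str], fanout: usize) -> Vec<String> {
    if active_set.is_empty() {
        return Vec::new();
    }
    // Consistent start index for a given origin, then `fanout` consecutive
    // slots of the active set, wrapping around.
    let start = origin_first_byte as usize;
    let max = fanout.min(active_set.len());
    (start..start + max)
        .map(|i| active_set[i % active_set.len()].to_string())
        .collect()
}

fn main() {
    let active_set = ["p0", "p1", "p2", "p3"];
    // Origin whose key starts with byte 6: indices 6..8 wrap to slots 2 and 3.
    assert_eq!(fanout_peers(6, &active_set, 2), vec!["p2", "p3"]);
    // Same origin, same peers every time.
    assert_eq!(fanout_peers(6, &active_set, 2), fanout_peers(6, &active_set, 2));
}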

/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
}
@@ -339,15 +384,11 @@ impl CrdsGossipPush {

/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.received_cache
.iter()
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.received_cache.remove(&k);
}
self.received_cache
.iter_mut()
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));

self.received_cache.retain(|_, v| !v.is_empty());
}
}
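purge_old_received_cache now expires entries at two levels: stale senders are dropped inside each origin's map, then origins with no senders left are dropped entirely. A sketch of the same retain pattern on plain maps (purge is a hypothetical helper):

use std::collections::HashMap;

fn purge(cache: &mut HashMap<String, HashMap<String, (bool, u64)>>, min_time: u64) {
    // Drop per-sender entries that are too old, then drop empty origins.
    cache
        .iter_mut()
        .for_each(|(_, senders)| senders.retain(|_, v| v.1 > min_time));
    cache.retain(|_, senders| !senders.is_empty());
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(
        "origin".to_string(),
        vec![("old".to_string(), (false, 5)), ("new".to_string(), (false, 50))]
            .into_iter()
            .collect::<HashMap<_, _>>(),
    );
    purge(&mut cache, 10);
    assert_eq!(cache["origin"].len(), 1); // only the fresh sender remains
    purge(&mut cache, 100);
    assert!(cache.is_empty()); // empty origins are dropped entirely
}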

@@ -371,7 +412,6 @@ mod test {
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
@@ -380,11 +420,7 @@ mod test {
stakes.insert(p, 1);
});

let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
@@ -395,7 +431,7 @@ mod test {
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0);

let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
@@ -409,7 +445,7 @@ mod test {
}

#[test]
fn test_process_push() {
fn test_process_push_one() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -425,9 +461,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);
}
#[test]
@@ -690,6 +726,7 @@ mod test {
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -707,7 +744,11 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
push.process_prune_msg(
&self_id,
&peer.label().pubkey(),
&[new_msg.label().pubkey()],
);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
#[test]
@@ -749,9 +790,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);

// purge the old pushed
core/src/fork_choice.rs (new file, 40 lines)
@@ -0,0 +1,40 @@
use crate::{
consensus::{ComputedBankState, SwitchForkDecision, Tower},
progress_map::ProgressMap,
replay_stage::HeaviestForkFailures,
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
};

pub(crate) struct SelectVoteAndResetForkResult {
pub vote_bank: Option<(Arc<Bank>, SwitchForkDecision)>,
pub reset_bank: Option<Arc<Bank>>,
pub heaviest_fork_failures: Vec<HeaviestForkFailures>,
}

pub(crate) trait ForkChoice {
fn compute_bank_stats(
&mut self,
bank: &Bank,
tower: &Tower,
progress: &mut ProgressMap,
computed_bank_stats: &ComputedBankState,
);

// Returns:
// 1) The heaviest overall bank
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
// switching proof to vote for)
fn select_forks(
&self,
frozen_banks: &[Arc<Bank>],
tower: &Tower,
progress: &ProgressMap,
ancestors: &HashMap<u64, HashSet<u64>>,
bank_forks: &RwLock<BankForks>,
) -> (Arc<Bank>, Option<Arc<Bank>>);
}
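select_forks is documented to return the heaviest bank overall plus the heaviest bank descending from the last vote (which can be voted on without a switching proof). A much-simplified sketch of that contract, using a hypothetical Fork stand-in instead of Arc<Bank> and a plain weight field instead of the real stake-weighted stats:

#[derive(Clone, Debug)]
struct Fork {
    slot: u64,
    ancestors: Vec<u64>,
    weight: u64,
}

fn select_forks(frozen: &[Fork], last_vote_slot: u64) -> (Fork, Option<Fork>) {
    // Heaviest fork overall.
    let heaviest_overall = frozen
        .iter()
        .max_by_key(|f| f.weight)
        .cloned()
        .expect("at least one frozen fork");
    // Heaviest fork that descends from (or is) the last vote.
    let heaviest_on_vote_fork = frozen
        .iter()
        .filter(|f| f.slot == last_vote_slot || f.ancestors.contains(&last_vote_slot))
        .max_by_key(|f| f.weight)
        .cloned();
    (heaviest_overall, heaviest_on_vote_fork)
}

fn main() {
    let forks = vec![
        Fork { slot: 10, ancestors: vec![5, 0], weight: 70 },
        Fork { slot: 11, ancestors: vec![6, 0], weight: 90 },
    ];
    let (overall, same_fork) = select_forks(&forks, 5);
    assert_eq!(overall.slot, 11); // heaviest overall
    assert_eq!(same_fork.unwrap().slot, 10); // heaviest descending from the last vote
}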
core/src/heaviest_subtree_fork_choice.rs (new file, 1591 lines)
File diff suppressed because it is too large
@@ -1,9 +1,9 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage

use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore::{Blockstore, PurgeType};
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Slot, DEFAULT_TICKS_PER_SLOT, TICKS_PER_DAY};
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
@@ -29,8 +29,9 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;

// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
// Compacting at a slower interval than purging helps keep IOPS down.
// Once a day should be ample
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;

pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
@@ -49,6 +50,8 @@ impl LedgerCleanupService {
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;

let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@@ -61,7 +64,8 @@ impl LedgerCleanupService {
max_ledger_shreds,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
&mut last_compaction_slot,
DEFAULT_COMPACTION_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -116,7 +120,7 @@ impl LedgerCleanupService {
}
}

(true, lowest_cleanup_slot, first_slot, total_shreds)
(true, first_slot, lowest_cleanup_slot, total_shreds)
}

fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
@@ -134,7 +138,8 @@ impl LedgerCleanupService {
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
last_compaction_slot: &mut u64,
compaction_interval: u64,
) -> Result<(), RecvTimeoutError> {
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
@@ -143,19 +148,21 @@ impl LedgerCleanupService {

let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purge: last_root={}, last_purge_slot={}, purge_interval={}, last_compaction_slot={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, last_compaction_slot, disk_utilization_pre
);

*last_purge_slot = root;

let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);

if slots_to_clean {
info!(
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
let mut compact_first_slot = std::u64::MAX;
if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
compact_first_slot = *last_compaction_slot;
*last_compaction_slot = lowest_cleanup_slot;
}

let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
@@ -167,14 +174,36 @@ impl LedgerCleanupService {
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();

let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
info!(
"purging data from slots {} to {}",
purge_first_slot, lowest_cleanup_slot
);

let mut purge_time = Measure::start("purge_slots");
blockstore.purge_slots(
purge_first_slot,
lowest_cleanup_slot,
delay_between_purges,
PurgeType::PrimaryIndex,
);
purge_time.stop();
info!("{}", purge_time);

if compact_first_slot < lowest_cleanup_slot {
info!(
"compacting data from slots {} to {}",
compact_first_slot, lowest_cleanup_slot
);
if let Err(err) =
blockstore.compact_storage(compact_first_slot, lowest_cleanup_slot)
{
// This error is not fatal and indicates an internal error?
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
err, compact_first_slot, lowest_cleanup_slot
);
}
}

purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
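Purging still happens on every cleanup pass, but compaction is only scheduled once the cleanup slot has advanced more than compaction_interval slots past the last compaction (roughly once a day with TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT). A sketch of just that scheduling arithmetic (compaction_range is a hypothetical helper):

fn compaction_range(
    lowest_cleanup_slot: u64,
    last_compaction_slot: &mut u64,
    compaction_interval: u64,
) -> Option<(u64, u64)> {
    // Only compact when the cleanup slot has moved far enough past the last compaction.
    if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
        let first = *last_compaction_slot;
        *last_compaction_slot = lowest_cleanup_slot;
        Some((first, lowest_cleanup_slot))
    } else {
        None
    }
}

fn main() {
    let mut last_compaction_slot = 0;
    assert_eq!(compaction_range(100, &mut last_compaction_slot, 1000), None);
    assert_eq!(
        compaction_range(1500, &mut last_compaction_slot, 1000),
        Some((0, 1500))
    );
    assert_eq!(last_compaction_slot, 1500);
}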
@@ -233,6 +262,7 @@ mod tests {

//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
@@ -240,7 +270,8 @@ mod tests {
5,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();

@@ -272,6 +303,7 @@ mod tests {
info!("{}", first_insert);

let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
@@ -295,7 +327,8 @@ mod tests {
initial_slots,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
time.stop();
@@ -11,10 +11,12 @@ pub mod banking_stage;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;
pub mod commitment;
pub mod commitment_service;
mod deprecated;
pub mod shred_fetch_stage;
#[macro_use]
pub mod contact_info;
pub mod bank_weight_fork_choice;
pub mod cluster_info;
pub mod cluster_slots;
pub mod consensus;
@@ -26,8 +28,10 @@ pub mod crds_gossip_push;
pub mod crds_value;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod fork_choice;
pub mod gen_keys;
pub mod gossip_service;
pub mod heaviest_subtree_fork_choice;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
@@ -37,16 +41,20 @@ pub mod progress_map;
pub mod pubkey_references;
pub mod repair_response;
pub mod repair_service;
pub mod repair_weight;
pub mod repair_weighted_traversal;
pub mod replay_stage;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;
@@ -55,6 +63,7 @@ pub mod sigverify_stage;
pub mod snapshot_packager_service;
pub mod tpu;
pub mod transaction_status_service;
pub mod tree_diff;
pub mod tvu;
pub mod validator;
pub mod verified_vote_packets;
@@ -23,14 +23,14 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
let stake_account = StakeState::from(&account).unwrap_or_default();
match stake_account {
StakeState::Initialized(meta) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
if meta.lockup.is_in_force(&clock, None)
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
StakeState::Stake(meta, _stake) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
if meta.lockup.is_in_force(&clock, None)
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
@@ -57,8 +57,8 @@ solana_sdk::pubkeys!(
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
@@ -76,6 +76,8 @@ solana_sdk::pubkeys!(
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
]
);

@@ -85,6 +87,8 @@ solana_sdk::pubkeys!(
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
"FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
"4e6KwQpyzGQPfgVr5Jn3g5jLjbXB4pKPa2jRLohEb1QA",
]
);

@@ -354,7 +354,7 @@ impl PohRecorder {
pub fn tick(&mut self) {
let now = Instant::now();
let poh_entry = self.poh.lock().unwrap().tick();
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_lock_contention",
timing::duration_as_us(&now.elapsed()) as usize
);
@@ -364,7 +364,7 @@ impl PohRecorder {
trace!("tick_height {}", self.tick_height);

if self.leader_first_tick_height.is_none() {
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_overhead",
timing::duration_as_us(&now.elapsed()) as usize
);
@@ -380,7 +380,7 @@ impl PohRecorder {
self.tick_cache.push((entry, self.tick_height));
let _ = self.flush_cache(true);
}
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_overhead",
timing::duration_as_us(&now.elapsed()) as usize
);

|
||||
let poh = poh_recorder.lock().unwrap().poh.clone();
|
||||
let mut now = Instant::now();
|
||||
let mut num_ticks = 0;
|
||||
let mut num_hashes = 0;
|
||||
loop {
|
||||
num_hashes += NUM_HASHES_PER_BATCH;
|
||||
if poh.lock().unwrap().hash(NUM_HASHES_PER_BATCH) {
|
||||
// Lock PohRecorder only for the final hash...
|
||||
poh_recorder.lock().unwrap().tick();
|
||||
num_ticks += 1;
|
||||
if num_ticks >= DEFAULT_TICKS_PER_SLOT * 2 {
|
||||
datapoint_debug!(
|
||||
datapoint_info!(
|
||||
"poh-service",
|
||||
("ticks", num_ticks as i64, i64),
|
||||
("hashes", num_hashes as i64, i64),
|
||||
("elapsed_ms", now.elapsed().as_millis() as i64, i64),
|
||||
);
|
||||
num_ticks = 0;
|
||||
num_hashes = 0;
|
||||
now = Instant::now();
|
||||
}
|
||||
if poh_exit.load(Ordering::Relaxed) {
|
||||
|
@@ -42,7 +42,16 @@ impl ReplaySlotStats {
self.fetch_fail_elapsed as i64,
i64
),
("entry_verification_time", self.verify_elapsed as i64, i64),
(
"entry_poh_verification_time",
self.poh_verify_elapsed as i64,
i64
),
(
"entry_transaction_verification_time",
self.transaction_verify_elapsed as i64,
i64
),
("replay_time", self.replay_elapsed as i64, i64),
(
"replay_total_elapsed",
Some files were not shown because too many files have changed in this diff