Compare commits
406 Commits
Author | SHA1 | Date | |
---|---|---|---|
c66d528e85 | |||
8ba8deb933 | |||
587342d5e3 | |||
f31d2d9cc4 | |||
bc761c2c02 | |||
6f4bc3aaff | |||
070664ff94 | |||
61c2883de6 | |||
e32f7dbe49 | |||
c0b178db45 | |||
1027b0681b | |||
3ae6e0b8ab | |||
4b7da6e60d | |||
2863f8ec65 | |||
e2491c6322 | |||
4a8b1d9b2c | |||
74aed5cb58 | |||
b130c298df | |||
e5a6f8c2de | |||
87e5f8acbf | |||
c1a3b6ecc2 | |||
c242d66130 | |||
864d212c64 | |||
a9564d207b | |||
b82a9c832b | |||
5d9298543f | |||
4e9ae61044 | |||
d47262d233 | |||
8fdcf9f968 | |||
c82d37f6c3 | |||
5a8658283a | |||
4b97e58cba | |||
48031651a0 | |||
f3d556e3f9 | |||
8d4cecdb77 | |||
39a622f66e | |||
dae28b9cfe | |||
b7b4aa5d4d | |||
ed036b978d | |||
284920433f | |||
30bed18b77 | |||
6678dd10a5 | |||
296d740f83 | |||
b8fda9d730 | |||
2623c71ed3 | |||
e4472db33f | |||
076fef5e57 | |||
40eba48109 | |||
095c79e863 | |||
959c1ea857 | |||
ef3af104ae | |||
9dc69d9843 | |||
45348b2c83 | |||
c558db2a48 | |||
f987c18a7e | |||
5d3f43c10b | |||
216b01b224 | |||
35dd52e9ba | |||
b0c83921be | |||
e744b15ad2 | |||
1fd695d337 | |||
8f38bc7dc0 | |||
7d6ea6c17e | |||
56dc958116 | |||
19dfb87b1f | |||
a5287f56fc | |||
eed8087d87 | |||
4115d73b9a | |||
064b95c16a | |||
70c167182a | |||
fee002382e | |||
d75a470ffa | |||
c530fbd22b | |||
1b8f9e75dd | |||
1a5b01676d | |||
4b397d15b3 | |||
4d2b83d01f | |||
87096f13d2 | |||
a0ffcc61ae | |||
4b4819cd07 | |||
ca791a0378 | |||
b08f8d3103 | |||
88ba8439fc | |||
4dd0367136 | |||
ff2c183ac1 | |||
aa24181a53 | |||
1f83c56e05 | |||
2592894958 | |||
85027caf42 | |||
3ea556bc24 | |||
ca4a22d4ba | |||
18c1f0dfe9 | |||
734afee5e0 | |||
271e17547a | |||
e28368ff1b | |||
1aab959d4e | |||
bca769111f | |||
909321928c | |||
8b0a7f6838 | |||
5fa36bbab3 | |||
d65a7a3c30 | |||
453f5ce8f2 | |||
dc1db33ec9 | |||
c68e80c93b | |||
6b9a0935c1 | |||
b84468ecd3 | |||
ff4ba54553 | |||
f78a90bce2 | |||
24d871b529 | |||
e547f38589 | |||
6fb16f9879 | |||
2dc50cff5b | |||
98228c392e | |||
aeb7278b00 | |||
42d7609d54 | |||
a70008cc5c | |||
306a5c849e | |||
bb92184085 | |||
90c9462dd4 | |||
21b287ef0b | |||
b0c524765e | |||
6d0318cbe6 | |||
8f5ee6832f | |||
38fe766fa7 | |||
74866882f2 | |||
c638e83bf5 | |||
de6ef68571 | |||
c51049a59b | |||
9cedeb0a8d | |||
e37a4823f1 | |||
bf60345b7a | |||
cb29b8dd2a | |||
3a501ad69e | |||
e6e43d236f | |||
142601d4b6 | |||
f192e4f08f | |||
f020370ae7 | |||
24935af867 | |||
6a213bc8f5 | |||
f0414711b7 | |||
d087ed5bf6 | |||
d14dea4660 | |||
29abfebb68 | |||
668dfc40c7 | |||
61514e3b0e | |||
46fcab14dd | |||
2435c3ce0c | |||
55907b2167 | |||
a03eff51af | |||
10175618d2 | |||
4ff033852d | |||
2237f47b90 | |||
bfca226964 | |||
6077458ad8 | |||
7079559c2d | |||
0641244378 | |||
563da2bb18 | |||
dc347dd3d7 | |||
eab4fe50a3 | |||
ead6dc553a | |||
009c124fac | |||
7029c88305 | |||
9411fc00b8 | |||
5a93a4c466 | |||
9afc5da2e1 | |||
49706172f3 | |||
b2a0cdaa38 | |||
5481d1a039 | |||
dd5e320aa1 | |||
3c2aff2b5b | |||
c3c4c9326b | |||
ae70f4ea92 | |||
29fb79382c | |||
5c2cf04e10 | |||
9e0a26628b | |||
ce88602ced | |||
53b8d0d528 | |||
96a61cc4e4 | |||
b7b36bb0a4 | |||
52b254071c | |||
fbf2dd1672 | |||
4bbf09f582 | |||
952cd38b7b | |||
9a79be5ca0 | |||
2182521a8b | |||
fe65c2ae02 | |||
554d36c74b | |||
29ef0916db | |||
f93c8290f4 | |||
a69293df24 | |||
48ac038f7a | |||
5a7d2560c9 | |||
d91027f771 | |||
deaf3cb416 | |||
f95e1ea40f | |||
f64ab49307 | |||
fe1c99c0cf | |||
bdb7b73b8a | |||
293fff90d3 | |||
6eb4973780 | |||
5f5824d78d | |||
0ef9d79056 | |||
215650f6e7 | |||
a0d0d4c0e9 | |||
0422af2aae | |||
cef8e42938 | |||
0eeeec38fa | |||
75a84ecdae | |||
87c507fdbe | |||
3783ae823d | |||
f3ed00e28e | |||
307d023b2e | |||
775ce3a03f | |||
f655372b08 | |||
2c4079f4c8 | |||
ac1f90f1a9 | |||
4bb55b1622 | |||
23c5bb17c7 | |||
a0ed3261c9 | |||
261732f140 | |||
595c96b262 | |||
496999beba | |||
bb50881346 | |||
948902eae0 | |||
e41ff2df66 | |||
f88b79d42b | |||
1a0dd53450 | |||
9872430bd2 | |||
ae8badb141 | |||
36fa3a1a0a | |||
df8a69d15f | |||
fad08a19cc | |||
6527d05d77 | |||
d303e6b94e | |||
5fa397ceed | |||
c0fd017906 | |||
74e7da214a | |||
756ba07b16 | |||
5c236fd06c | |||
f671be814e | |||
e277437bd2 | |||
beead7e54d | |||
ea010be5cb | |||
97b6c41d42 | |||
6d0f3762b2 | |||
132a2a73af | |||
eab80d0aea | |||
88b1383eed | |||
ff74452ef3 | |||
bf8e9b3d71 | |||
de34187db0 | |||
acb23e8ef0 | |||
f992ee3140 | |||
97986a5241 | |||
a7d1346d51 | |||
983ec5debc | |||
cb28ac3aed | |||
a817a7c889 | |||
a5f2444ad2 | |||
cea8067219 | |||
4db074a5aa | |||
3eb00ef60f | |||
ca8bf8f964 | |||
39b3ce9bd3 | |||
4caa313aef | |||
a78a339407 | |||
0919b13c87 | |||
f2b0e2f418 | |||
cb6848aa80 | |||
542691c4e4 | |||
8ad6a8767f | |||
2242b1b4a5 | |||
8df4d8b905 | |||
7fad53b112 | |||
9d667db634 | |||
f47a789b15 | |||
5e3ce30d02 | |||
97c5fb8141 | |||
0e3a8fa6d9 | |||
5eae76c66e | |||
849f79e4ed | |||
ff7cf839d8 | |||
f3cbd243cc | |||
f146c92e88 | |||
fb2620b3a5 | |||
fd00e5cb35 | |||
44fde2d964 | |||
448b957a13 | |||
01607b9860 | |||
23d8c7ff0e | |||
b321da00b4 | |||
dec3da8f9d | |||
80aae18794 | |||
1f2aaf3f98 | |||
2534a028c0 | |||
fc409d9262 | |||
b70d195473 | |||
7eedff2714 | |||
6d9185d121 | |||
f89c22b5ee | |||
f23dc11a86 | |||
09a0325534 | |||
408d5da50f | |||
561808cf90 | |||
25df95be6f | |||
b85d7c1f70 | |||
642720a2fe | |||
1cc7131bb7 | |||
8f60f1093a | |||
d3b458dd9b | |||
a08e2cc434 | |||
b83a0434a4 | |||
b68b74ac32 | |||
b084c1d437 | |||
63ed892502 | |||
1cb6101c6a | |||
be0cc0273f | |||
abf33b3b3b | |||
d9b0490f72 | |||
caa70d2bca | |||
4f05f08f5d | |||
0c76b89e55 | |||
08ab4b93ea | |||
f0028b6972 | |||
b6553357f9 | |||
d86103383a | |||
1265afebbb | |||
306783c661 | |||
8ec8204a30 | |||
8cf3ef895d | |||
e4498adb1f | |||
42c5c59800 | |||
8ef8c9094a | |||
8dc4724340 | |||
13551885c2 | |||
d677e83ed4 | |||
5d9130a3c4 | |||
1ca4913328 | |||
b7614abb9e | |||
862a4a243f | |||
db291234ed | |||
2a5605db24 | |||
b4362cc18b | |||
6a5a6387e2 | |||
0f31adeafb | |||
ae817722d8 | |||
90bedd7e06 | |||
7d27be2a73 | |||
74da2de3b7 | |||
35db70a56c | |||
7dac8e2dde | |||
82c6992d6f | |||
4831c7b9af | |||
113db8d656 | |||
de6679ea95 | |||
0b66ae5c53 | |||
61a20febb9 | |||
29f81577e9 | |||
3acf956f6f | |||
87b13bef8e | |||
0d4cb252c4 | |||
fcabc6f799 | |||
848c43a9ab | |||
5f766cd20b | |||
8c07ba635e | |||
bb07aecfec | |||
27c5ec0149 | |||
4f01db0482 | |||
f2f8a7a90e | |||
e743414908 | |||
f6f0f94e17 | |||
d47a47924a | |||
7a2bf7e7eb | |||
d5a7867087 | |||
fbf78b83c4 | |||
2c63cf3cbd | |||
3b648e71e6 | |||
021d0a46f8 | |||
8839dbfe5b | |||
407d058611 | |||
c6a7f499ce | |||
d821fd29d6 | |||
6b99ab3a57 | |||
004f1d5aed | |||
1caeea8bc2 | |||
6ce4a1a18d | |||
0b48c8eb35 | |||
fef913085e | |||
2059af822d | |||
0fe74e95fe | |||
b7755123c1 | |||
39282be486 | |||
b18e4057bb | |||
12a9b5f35e | |||
89baa94002 | |||
1ef3478709 | |||
73063544bd | |||
90240bf11d | |||
5c5a06198c | |||
394933e53c | |||
b106d3ba60 | |||
947a339714 | |||
edb18349c9 | |||
9dcb965959 | |||
72ae82fe47 | |||
2d9d2f1e99 |
@ -1,42 +0,0 @@
|
||||
version: '{build}'
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /^v[0-9.]+\.[0-9.]+/
|
||||
|
||||
cache:
|
||||
- '%USERPROFILE%\.cargo'
|
||||
- '%APPVEYOR_BUILD_FOLDER%\target'
|
||||
|
||||
clone_folder: d:\projects\solana
|
||||
|
||||
build_script:
|
||||
- bash ci/publish-tarball.sh
|
||||
|
||||
notifications:
|
||||
- provider: Slack
|
||||
incoming_webhook:
|
||||
secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
|
||||
channel: ci-status
|
||||
on_build_success: false
|
||||
on_build_failure: true
|
||||
on_build_status_changed: true
|
||||
|
||||
deploy:
|
||||
- provider: S3
|
||||
access_key_id:
|
||||
secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
|
||||
secret_access_key:
|
||||
secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
|
||||
bucket: release.solana.com
|
||||
region: us-west-1
|
||||
set_public: true
|
||||
|
||||
- provider: GitHub
|
||||
auth_token:
|
||||
secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
5
.buildkite/env/secrets.ejson
vendored
5
.buildkite/env/secrets.ejson
vendored
@ -7,9 +7,6 @@
|
||||
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
|
||||
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
|
||||
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
|
||||
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]",
|
||||
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Egc2dMrHDU0NcZ71LwGv/V66shUhwYUE:04VoIb8CKy7KYhQ5W4cEW9SDKZltxWBL5Hob106lMBbUOD/yUvKYcG3Ep8JfTMwO3K8zowW5HpU/IdGoilX0XWLiJJ6t+p05WWK0TA16nOEtwrEG+UK8wm3sN+xCO20i4jDhpNpgg3FYFHT5rKTHW8+zaBTNUX/SFxkN67Lm+92IM28CXYE43SU1WV6H99hGFFVpTK5JVM3JuYU1ex/dHRE+xCzTr4MYUB/F+nGoNFW8HUDV/y0e1jxT9to3x0SmnytEEuk+5RUzFuEt9cKNFeNml3fOCi4qL+sfj/Y5pjH9xDiUxsvH/8NL35jbLP244aFHgWcp]",
|
||||
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:NeOxSoWCvXB9AL4H6OK26l/7bmsKd/oz:Ijfoxtvk2CHlN1ZXHup3Gg/914kbbAkEGWJfvozA8UIe+aUzUObMyTrKkVOeNAH8Q8YH9tNzk7RRnrTcpnzeCCBLlWcVEeruMxHox3mPRzmSeDLxtbzCl9VePlRO3T7jg90K5hW+ZAkd5J/WJNzpAcmr93ts/of3MbvGHSujId/efCTzJEcP6JInnBb8Vrj7TlgKbzUlnqpq1+NjYPSXN3maKa9pKeo2JWxZlGBMoy6QWUUY5GbYEylw9smwh1LJcHZjlaZNMuOl4gNKtaSr38IXQkAXaRUJDPAmPras00YObKzXU8RkTrP4EoP/jx5LPR7f]",
|
||||
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:7t+56twjW+jR7fpFNNeRFLPd7E4lbmyN:JuviDpkQrfVcNUGRGsa2e/UhvH6tTYyk1s4cHHE5xZH1NByL7Kpqx36VG/+o1AUGEeSQdsBnKgzYdMoFYbO8o50DoRPc86QIEVXCupD6J9avxLFtQgOWgJp+/mCdUVXlqXiFs/vQgS/L4psrcKdF6WHd77BeUr6ll8DjH+9m5FC9Rcai2pXno6VbPpunHQ0oUdYzhFR64+LiRacBaefQ9igZ+nSEWDLqbaZSyfm9viWkijoVFTq8gAgdXXEh7g0QdxVE5T6bPristJhT6jWBhWunPUCDNFFErWIsbRGctepl4pbCWqh2hNTw9btSgVfeY6uGCOsdy9E=]"
|
||||
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
|
||||
}
|
||||
}
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -1,5 +1,6 @@
|
||||
/docs/html/
|
||||
/docs/src/tests.ok
|
||||
/docs/src/cli/usage.md
|
||||
/docs/src/.gitbook/assets/*.svg
|
||||
/farf/
|
||||
/solana-release/
|
||||
|
11
.mergify.yml
11
.mergify.yml
@ -19,20 +19,13 @@ pull_request_rules:
|
||||
label:
|
||||
add:
|
||||
- automerge
|
||||
- name: v0.23 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v0.23
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.23
|
||||
- name: v1.0 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v1.0
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.0
|
||||
- name: v1.1 backport
|
||||
@ -41,6 +34,7 @@ pull_request_rules:
|
||||
- label=v1.1
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.1
|
||||
- name: v1.2 backport
|
||||
@ -49,5 +43,6 @@ pull_request_rules:
|
||||
- label=v1.2
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.2
|
||||
|
@ -1,5 +1,6 @@
|
||||
os:
|
||||
- osx
|
||||
- windows
|
||||
|
||||
language: rust
|
||||
rust:
|
||||
|
@ -45,7 +45,7 @@ $ git pull --rebase upstream master
|
||||
|
||||
If there are no functional changes, PRs can be very large and that's no
|
||||
problem. If, however, your changes are making meaningful changes or additions,
|
||||
then about 1.0.4 lines of changes is about the most you should ask a Solana
|
||||
then about 1,000 lines of changes is about the most you should ask a Solana
|
||||
maintainer to review.
|
||||
|
||||
### Should I send small PRs as I develop large, new components?
|
||||
|
1975
Cargo.lock
generated
1975
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@ -3,6 +3,7 @@ members = [
|
||||
"bench-exchange",
|
||||
"bench-streamer",
|
||||
"bench-tps",
|
||||
"accounts-bench",
|
||||
"banking-bench",
|
||||
"chacha",
|
||||
"chacha-cuda",
|
||||
@ -10,6 +11,8 @@ members = [
|
||||
"cli-config",
|
||||
"client",
|
||||
"core",
|
||||
"dos",
|
||||
"download-utils",
|
||||
"faucet",
|
||||
"perf",
|
||||
"validator",
|
||||
@ -24,6 +27,7 @@ members = [
|
||||
"logger",
|
||||
"log-analyzer",
|
||||
"merkle-tree",
|
||||
"streamer",
|
||||
"measure",
|
||||
"metrics",
|
||||
"net-shaper",
|
||||
@ -48,7 +52,10 @@ members = [
|
||||
"sdk",
|
||||
"sdk-c",
|
||||
"scripts",
|
||||
"stake-accounts",
|
||||
"stake-monitor",
|
||||
"sys-tuner",
|
||||
"transaction-status",
|
||||
"upload-perf",
|
||||
"net-utils",
|
||||
"vote-signer",
|
||||
|
60
README.md
60
README.md
@ -9,60 +9,7 @@ Blockchain Rebuilt for Scale
|
||||
Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
|
||||
up to 710 thousand transactions per second on a gigabit network.
|
||||
|
||||
Disclaimer
|
||||
===
|
||||
|
||||
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
|
||||
|
||||
Introduction
|
||||
===
|
||||
|
||||
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
|
||||
|
||||
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
|
||||
|
||||
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
|
||||
|
||||
Documentation
|
||||
===
|
||||
|
||||
Before you jump into the code, review the documentation [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com).
|
||||
|
||||
(The _latest_ development version of the docs is [available here](https://docs.solana.com/v/master).)
|
||||
|
||||
Release Binaries
|
||||
===
|
||||
Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
|
||||
|
||||
Additionally we provide pre-release binaries for the latest code on the edge and
|
||||
beta channels. Note that these pre-release binaries may be less stable than an
|
||||
official release.
|
||||
|
||||
### Edge channel
|
||||
#### Linux (x86_64-unknown-linux-gnu)
|
||||
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
|
||||
* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
|
||||
#### mac OS (x86_64-apple-darwin)
|
||||
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
|
||||
* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
|
||||
#### Windows (x86_64-pc-windows-msvc)
|
||||
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
|
||||
* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
|
||||
#### All platforms
|
||||
* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
|
||||
|
||||
### Beta channel
|
||||
#### Linux (x86_64-unknown-linux-gnu)
|
||||
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
|
||||
* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
|
||||
#### mac OS (x86_64-apple-darwin)
|
||||
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
|
||||
* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
|
||||
#### Windows (x86_64-pc-windows-msvc)
|
||||
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
|
||||
* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
|
||||
#### All platforms
|
||||
* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
|
||||
Read all about it at [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com/v/master).
|
||||
|
||||
Developing
|
||||
===
|
||||
@ -238,3 +185,8 @@ problem is solved by this code?" On the other hand, if a test does fail and you
|
||||
better way to solve the same problem, a Pull Request with your solution would most certainly be
|
||||
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
|
||||
send us that patch!
|
||||
|
||||
Disclaimer
|
||||
===
|
||||
|
||||
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
|
||||
|
19
accounts-bench/Cargo.toml
Normal file
19
accounts-bench/Cargo.toml
Normal file
@ -0,0 +1,19 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.6"
|
||||
rayon = "1.3.0"
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-measure = { path = "../measure", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
rand = "0.6.5"
|
||||
clap = "2.33.0"
|
||||
crossbeam-channel = "0.4"
|
103
accounts-bench/src/main.rs
Normal file
103
accounts-bench/src/main.rs
Normal file
@ -0,0 +1,103 @@
|
||||
use clap::{value_t, App, Arg};
|
||||
use rayon::prelude::*;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::accounts::{create_test_accounts, update_accounts, Accounts};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
|
||||
let matches = App::new("crate")
|
||||
.about("about")
|
||||
.version("version")
|
||||
.arg(
|
||||
Arg::with_name("num_slots")
|
||||
.long("num_slots")
|
||||
.takes_value(true)
|
||||
.value_name("SLOTS")
|
||||
.help("Number of slots to store to."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num_accounts")
|
||||
.long("num_accounts")
|
||||
.takes_value(true)
|
||||
.value_name("NUM_ACCOUNTS")
|
||||
.help("Total number of accounts"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("iterations")
|
||||
.long("iterations")
|
||||
.takes_value(true)
|
||||
.value_name("ITERATIONS")
|
||||
.help("Number of bench iterations"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("clean")
|
||||
.long("clean")
|
||||
.takes_value(false)
|
||||
.help("Run clean"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4);
|
||||
let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000);
|
||||
let iterations = value_t!(matches, "iterations", usize).unwrap_or(20);
|
||||
let clean = matches.is_present("clean");
|
||||
println!("clean: {:?}", clean);
|
||||
|
||||
let path = PathBuf::from("farf/accounts-bench");
|
||||
if fs::remove_dir_all(path.clone()).is_err() {
|
||||
println!("Warning: Couldn't remove {:?}", path);
|
||||
}
|
||||
let accounts = Accounts::new(vec![path]);
|
||||
println!("Creating {} accounts", num_accounts);
|
||||
let mut create_time = Measure::start("create accounts");
|
||||
let pubkeys: Vec<_> = (0..num_slots)
|
||||
.into_par_iter()
|
||||
.map(|slot| {
|
||||
let mut pubkeys: Vec<Pubkey> = vec![];
|
||||
create_test_accounts(
|
||||
&accounts,
|
||||
&mut pubkeys,
|
||||
num_accounts / num_slots,
|
||||
slot as u64,
|
||||
);
|
||||
pubkeys
|
||||
})
|
||||
.collect();
|
||||
let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect();
|
||||
create_time.stop();
|
||||
println!(
|
||||
"created {} accounts in {} slots {}",
|
||||
(num_accounts / num_slots) * num_slots,
|
||||
num_slots,
|
||||
create_time
|
||||
);
|
||||
let mut ancestors: HashMap<u64, usize> = vec![(0, 0)].into_iter().collect();
|
||||
for i in 1..num_slots {
|
||||
ancestors.insert(i as u64, i - 1);
|
||||
accounts.add_root(i as u64);
|
||||
}
|
||||
for x in 0..iterations {
|
||||
if clean {
|
||||
let mut time = Measure::start("clean");
|
||||
accounts.accounts_db.clean_accounts();
|
||||
time.stop();
|
||||
println!("{}", time);
|
||||
for slot in 0..num_slots {
|
||||
update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
|
||||
accounts.add_root((x * num_slots + slot) as u64);
|
||||
}
|
||||
} else {
|
||||
let mut pubkeys: Vec<Pubkey> = vec![];
|
||||
let mut time = Measure::start("hash");
|
||||
let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors);
|
||||
time.stop();
|
||||
println!("hash: {} {}", hash, time);
|
||||
create_test_accounts(&accounts, &mut pubkeys, 1, 0);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-archiver-lib"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana Archiver Library"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,30 +10,31 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.2.1"
|
||||
crossbeam-channel = "0.3"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
solana-client = { path = "../client", version = "1.0.4" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
|
||||
thiserror = "1.0"
|
||||
serde = "1.0.104"
|
||||
serde_json = "1.0.46"
|
||||
serde = "1.0.105"
|
||||
serde_json = "1.0.48"
|
||||
serde_derive = "1.0.103"
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-chacha = { path = "../chacha", version = "1.0.4" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-perf = { path = "../perf", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.4" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.0" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.0" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.0"
|
||||
hex = "0.4.2"
|
||||
|
||||
[lib]
|
||||
name = "solana_archiver_lib"
|
||||
|
@ -10,16 +10,15 @@ use solana_client::{
|
||||
};
|
||||
use solana_core::{
|
||||
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
|
||||
cluster_slots::ClusterSlots,
|
||||
contact_info::ContactInfo,
|
||||
gossip_service::GossipService,
|
||||
packet::{limited_deserialize, PACKET_DATA_SIZE},
|
||||
repair_service,
|
||||
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
|
||||
repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
|
||||
serve_repair::ServeRepair,
|
||||
shred_fetch_stage::ShredFetchStage,
|
||||
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
|
||||
storage_stage::NUM_STORAGE_SAMPLES,
|
||||
streamer::{receiver, responder, PacketReceiver},
|
||||
window_service::WindowService,
|
||||
};
|
||||
use solana_ledger::{
|
||||
@ -27,6 +26,7 @@ use solana_ledger::{
|
||||
};
|
||||
use solana_net_utils::bind_in_range;
|
||||
use solana_perf::packet::Packets;
|
||||
use solana_perf::packet::{limited_deserialize, PACKET_DATA_SIZE};
|
||||
use solana_perf::recycler::Recycler;
|
||||
use solana_sdk::packet::Packet;
|
||||
use solana_sdk::{
|
||||
@ -45,6 +45,7 @@ use solana_storage_program::{
|
||||
storage_contract::StorageContract,
|
||||
storage_instruction::{self, StorageAccountType},
|
||||
};
|
||||
use solana_streamer::streamer::{receiver, responder, PacketReceiver};
|
||||
use std::{
|
||||
io::{self, ErrorKind},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
|
||||
@ -187,7 +188,7 @@ impl Archiver {
|
||||
let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
|
||||
cluster_info.set_entrypoint(cluster_entrypoint.clone());
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
|
||||
let cluster_slots = Arc::new(ClusterSlots::default());
|
||||
// Note for now, this ledger will not contain any of the existing entries
|
||||
// in the ledger located at ledger_path, and will only append on newly received
|
||||
// entries after being passed to window_service
|
||||
@ -235,6 +236,7 @@ impl Archiver {
|
||||
shred_forward_sockets,
|
||||
repair_socket.clone(),
|
||||
&shred_fetch_sender,
|
||||
None,
|
||||
&exit,
|
||||
);
|
||||
let (slot_sender, slot_receiver) = channel();
|
||||
@ -261,6 +263,7 @@ impl Archiver {
|
||||
repair_socket,
|
||||
shred_fetch_receiver,
|
||||
slot_sender,
|
||||
cluster_slots,
|
||||
) {
|
||||
Ok(window_service) => window_service,
|
||||
Err(e) => {
|
||||
@ -379,8 +382,7 @@ impl Archiver {
|
||||
&archiver_keypair.pubkey(),
|
||||
&storage_keypair.pubkey(),
|
||||
);
|
||||
let message =
|
||||
Message::new_with_payer(vec![ix], Some(&archiver_keypair.pubkey()));
|
||||
let message = Message::new_with_payer(&[ix], Some(&archiver_keypair.pubkey()));
|
||||
if let Err(e) = client.send_message(&[archiver_keypair.as_ref()], message) {
|
||||
error!("unable to redeem reward, tx failed: {:?}", e);
|
||||
} else {
|
||||
@ -400,6 +402,7 @@ impl Archiver {
|
||||
}
|
||||
|
||||
// Find a segment to replicate and download it.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn setup(
|
||||
meta: &mut ArchiverMeta,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
@ -410,6 +413,7 @@ impl Archiver {
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
shred_fetch_receiver: PacketReceiver,
|
||||
slot_sender: Sender<u64>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
) -> Result<WindowService> {
|
||||
let slots_per_segment =
|
||||
match Self::get_segment_config(&cluster_info, meta.client_commitment) {
|
||||
@ -467,6 +471,7 @@ impl Archiver {
|
||||
RepairStrategy::RepairRange(repair_slot_range),
|
||||
&Arc::new(LeaderScheduleCache::default()),
|
||||
|_, _, _, _| true,
|
||||
cluster_slots,
|
||||
);
|
||||
info!("waiting for ledger download");
|
||||
Self::wait_for_segment_download(
|
||||
@ -613,6 +618,7 @@ impl Archiver {
|
||||
ErrorKind::Other,
|
||||
"setup_mining_account: signature not found",
|
||||
),
|
||||
TransportError::Custom(e) => io::Error::new(ErrorKind::Other, e),
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
@ -655,7 +661,7 @@ impl Archiver {
|
||||
Signature::new(&meta.signature.as_ref()),
|
||||
meta.blockhash,
|
||||
);
|
||||
let message = Message::new_with_payer(vec![instruction], Some(&archiver_keypair.pubkey()));
|
||||
let message = Message::new_with_payer(&[instruction], Some(&archiver_keypair.pubkey()));
|
||||
let mut transaction = Transaction::new(
|
||||
&[archiver_keypair.as_ref(), storage_keypair.as_ref()],
|
||||
message,
|
||||
@ -838,13 +844,14 @@ impl Archiver {
|
||||
repair_service::MAX_REPAIR_LENGTH,
|
||||
&repair_slot_range,
|
||||
);
|
||||
let mut repair_stats = RepairStats::default();
|
||||
//iter over the repairs and send them
|
||||
if let Ok(repairs) = repairs {
|
||||
let reqs: Vec<_> = repairs
|
||||
.into_iter()
|
||||
.filter_map(|repair_request| {
|
||||
serve_repair
|
||||
.map_repair_request(&repair_request)
|
||||
.map_repair_request(&repair_request, &mut repair_stats)
|
||||
.map(|result| ((archiver_info.gossip, result), repair_request))
|
||||
.ok()
|
||||
})
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-archiver-utils"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana Archiver Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,15 +11,15 @@ edition = "2018"
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
solana-chacha = { path = "../chacha", version = "1.0.4" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-perf = { path = "../perf", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.0" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.0"
|
||||
hex = "0.4.2"
|
||||
|
||||
[lib]
|
||||
name = "solana_archiver_utils"
|
||||
|
@ -2,19 +2,19 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-archiver"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
console = "0.9.2"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.4" }
|
||||
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
console = "0.10.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
|
||||
|
@ -2,21 +2,20 @@ use clap::{crate_description, crate_name, App, Arg};
|
||||
use console::style;
|
||||
use solana_archiver_lib::archiver::Archiver;
|
||||
use solana_clap_utils::{
|
||||
input_validators::is_keypair,
|
||||
keypair::{
|
||||
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
|
||||
SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
},
|
||||
input_parsers::keypair_of, input_validators::is_keypair_or_ask_keyword,
|
||||
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
};
|
||||
use solana_core::{
|
||||
cluster_info::{Node, VALIDATOR_PORT_RANGE},
|
||||
contact_info::ContactInfo,
|
||||
};
|
||||
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signer};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr},
|
||||
path::PathBuf,
|
||||
process::exit,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
@ -29,10 +28,10 @@ fn main() {
|
||||
.arg(
|
||||
Arg::with_name("identity_keypair")
|
||||
.short("i")
|
||||
.long("identity-keypair")
|
||||
.long("identity")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.validator(is_keypair)
|
||||
.validator(is_keypair_or_ask_keyword)
|
||||
.help("File containing an identity (keypair)"),
|
||||
)
|
||||
.arg(
|
||||
@ -60,48 +59,27 @@ fn main() {
|
||||
.long("storage-keypair")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.validator(is_keypair)
|
||||
.validator(is_keypair_or_ask_keyword)
|
||||
.help("File containing the storage account keypair"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ASK_SEED_PHRASE_ARG.name)
|
||||
.long(ASK_SEED_PHRASE_ARG.long)
|
||||
.value_name("KEYPAIR NAME")
|
||||
.multiple(true)
|
||||
.takes_value(true)
|
||||
.possible_values(&["identity-keypair", "storage-keypair"])
|
||||
.help(ASK_SEED_PHRASE_ARG.help),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
|
||||
.requires(ASK_SEED_PHRASE_ARG.name)
|
||||
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());
|
||||
|
||||
let identity_keypair = keypair_input(&matches, "identity_keypair")
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Identity keypair input failed: {}", err);
|
||||
exit(1);
|
||||
})
|
||||
.keypair;
|
||||
let KeypairWithSource {
|
||||
keypair: storage_keypair,
|
||||
source: storage_keypair_source,
|
||||
} = keypair_input(&matches, "storage_keypair").unwrap_or_else(|err| {
|
||||
eprintln!("Storage keypair input failed: {}", err);
|
||||
exit(1);
|
||||
});
|
||||
if storage_keypair_source == keypair::Source::Generated {
|
||||
let identity_keypair = keypair_of(&matches, "identity_keypair").unwrap_or_else(Keypair::new);
|
||||
|
||||
let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(|| {
|
||||
clap::Error::with_description(
|
||||
"The `storage-keypair` argument was not found",
|
||||
clap::ErrorKind::ArgumentNotFound,
|
||||
)
|
||||
.exit();
|
||||
}
|
||||
});
|
||||
|
||||
let entrypoint_addr = matches
|
||||
.value_of("entrypoint")
|
||||
|
@ -2,19 +2,21 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.6"
|
||||
rayon = "1.2.0"
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.4" }
|
||||
solana-measure = { path = "../measure", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
rayon = "1.3.0"
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-measure = { path = "../measure", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
rand = "0.6.5"
|
||||
crossbeam-channel = "0.3"
|
||||
crossbeam-channel = "0.4"
|
||||
|
@ -2,29 +2,36 @@ use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana_core::banking_stage::{create_test_recorder, BankingStage};
|
||||
use solana_core::cluster_info::ClusterInfo;
|
||||
use solana_core::cluster_info::Node;
|
||||
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::poh_recorder::PohRecorder;
|
||||
use solana_core::poh_recorder::WorkingBankEntry;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_core::{
|
||||
banking_stage::{create_test_recorder, BankingStage},
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info::Node,
|
||||
poh_recorder::PohRecorder,
|
||||
poh_recorder::WorkingBankEntry,
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::signature::Signature;
|
||||
use solana_sdk::system_transaction;
|
||||
use solana_sdk::timing::{duration_as_us, timestamp};
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::mpsc::Receiver;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::{Duration, Instant};
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::Keypair,
|
||||
signature::Signature,
|
||||
system_transaction,
|
||||
timing::{duration_as_us, timestamp},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
fn check_txs(
|
||||
receiver: &Arc<Receiver<WorkingBankEntry>>,
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,25 +10,25 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.32.0"
|
||||
itertools = "0.8.2"
|
||||
itertools = "0.9.0"
|
||||
log = "0.4.8"
|
||||
num-derive = "0.3"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rayon = "1.2.0"
|
||||
serde_json = "1.0.46"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-genesis = { path = "../genesis", version = "1.0.4" }
|
||||
solana-client = { path = "../client", version = "1.0.4" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.4" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.0" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.0.4" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
|
||||
|
@ -2,14 +2,14 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
|
@ -1,6 +1,6 @@
|
||||
use clap::{crate_description, crate_name, App, Arg};
|
||||
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
|
||||
use solana_core::streamer::{receiver, PacketReceiver};
|
||||
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
|
||||
use solana_streamer::streamer::{receiver, PacketReceiver};
|
||||
use std::cmp::max;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,27 +11,27 @@ homepage = "https://solana.com/"
|
||||
bincode = "1.2.1"
|
||||
clap = "2.33.0"
|
||||
log = "0.4.8"
|
||||
rayon = "1.2.0"
|
||||
serde_json = "1.0.46"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-genesis = { path = "../genesis", version = "1.0.4" }
|
||||
solana-client = { path = "../client", version = "1.0.4" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.4" }
|
||||
solana-librapay = { path = "../programs/librapay", version = "1.0.4", optional = true }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.4" }
|
||||
solana-measure = { path = "../measure", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.4", optional = true }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.0" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.0" }
|
||||
solana-librapay = { path = "../programs/librapay", version = "1.1.0", optional = true }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
solana-measure = { path = "../measure", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.0", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.3.2"
|
||||
serial_test = "0.4.0"
|
||||
serial_test_derive = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.0.4" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
|
||||
|
||||
[features]
|
||||
move = ["solana-librapay", "solana-move-loader-program"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-cuda"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana Chacha Cuda APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,12 +10,12 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.4" }
|
||||
solana-chacha = { path = "../chacha", version = "1.0.4" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-perf = { path = "../perf", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-sys"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana chacha-sys"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana Chacha APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,11 +12,11 @@ edition = "2018"
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-perf = { path = "../perf", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
8
ci/_
8
ci/_
@ -5,7 +5,13 @@
|
||||
# |source| me
|
||||
#
|
||||
|
||||
base_dir=$(realpath --strip "$(dirname "$0")/..")
|
||||
|
||||
_() {
|
||||
echo "--- $*"
|
||||
if [[ $(pwd) = $base_dir ]]; then
|
||||
echo "--- $*"
|
||||
else
|
||||
echo "--- $* (wd: $(pwd))"
|
||||
fi
|
||||
"$@"
|
||||
}
|
||||
|
@ -22,16 +22,20 @@ steps:
|
||||
name: "stable"
|
||||
timeout_in_minutes: 60
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=rpc-test-capable"
|
||||
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
|
||||
name: "move"
|
||||
timeout_in_minutes: 20
|
||||
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
|
||||
name: "local-cluster"
|
||||
timeout_in_minutes: 30
|
||||
timeout_in_minutes: 45
|
||||
artifact_paths: "log-*.txt"
|
||||
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
|
||||
name: "coverage"
|
||||
timeout_in_minutes: 30
|
||||
agents:
|
||||
- "queue=rpc-test-capable"
|
||||
- wait
|
||||
- trigger: "solana-secondary"
|
||||
branches: "!pull/*"
|
||||
|
@ -67,6 +67,7 @@ ARGS+=(
|
||||
--env BUILDKITE_JOB_ID
|
||||
--env CI
|
||||
--env CI_BRANCH
|
||||
--env CI_TAG
|
||||
--env CI_BUILD_ID
|
||||
--env CI_COMMIT
|
||||
--env CI_JOB_ID
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM solanalabs/rust:1.41.1
|
||||
FROM solanalabs/rust:1.42.0
|
||||
ARG date
|
||||
|
||||
RUN set -x \
|
||||
|
@ -15,6 +15,8 @@ To update the pinned version:
|
||||
1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
|
||||
or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
|
||||
specific YYYY-MM-DD that is desired (default is today's build).
|
||||
Check https://rust-lang.github.io/rustup-components-history/ for build
|
||||
status
|
||||
1. Update `ci/rust-version.sh` to reflect the new nightly `YYY-MM-DD`
|
||||
1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
|
||||
to confirm the new nightly image builds. Fix any issues as needed
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/rust-version.sh to pick up the new image tag
|
||||
FROM rust:1.41.1
|
||||
FROM rust:1.42.0
|
||||
|
||||
# Add Google Protocol Buffers for Libra's metrics library.
|
||||
ENV PROTOC_VERSION 3.8.0
|
||||
@ -32,6 +32,7 @@ RUN set -x \
|
||||
&& cargo install cargo-audit \
|
||||
&& cargo install svgbob_cli \
|
||||
&& cargo install mdbook \
|
||||
&& cargo install mdbook-linkcheck \
|
||||
&& rustc --version \
|
||||
&& cargo --version \
|
||||
&& curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
|
||||
|
@ -178,7 +178,7 @@ startNodes() {
|
||||
|
||||
(
|
||||
set -x
|
||||
$solana_cli --keypair config/bootstrap-validator/identity-keypair.json \
|
||||
$solana_cli --keypair config/bootstrap-validator/identity.json \
|
||||
--url http://127.0.0.1:8899 genesis-hash
|
||||
) | tee genesis-hash.log
|
||||
maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"
|
||||
|
@ -13,8 +13,8 @@ if [[ -n $CI_BRANCH ]]; then
|
||||
. ci/rust-version.sh stable
|
||||
ci/docker-run.sh "$rust_stable_docker_image" make -C docs
|
||||
)
|
||||
# make a local commit for the svgs
|
||||
git add -A -f docs/src/.gitbook/assets/.
|
||||
# make a local commit for the svgs and generated/updated markdown
|
||||
git add -f docs/src
|
||||
if ! git diff-index --quiet HEAD; then
|
||||
git config user.email maintainers@solana.com
|
||||
git config user.name "$me"
|
||||
|
@ -45,7 +45,7 @@ linux)
|
||||
TARGET=x86_64-unknown-linux-gnu
|
||||
;;
|
||||
windows)
|
||||
TARGET=x86_64-pc-windows-msvc
|
||||
TARGET=x86_64-pc-windows-gnu
|
||||
;;
|
||||
*)
|
||||
echo CI_OS_NAME unset
|
||||
@ -73,15 +73,6 @@ echo --- Creating release tarball
|
||||
source ci/rust-version.sh stable
|
||||
scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
|
||||
|
||||
# Reduce the Windows archive size until
|
||||
# https://github.com/appveyor/ci/issues/2997 is fixed
|
||||
if [[ -n $APPVEYOR ]]; then
|
||||
rm -f \
|
||||
solana-release/bin/solana-validator.exe \
|
||||
solana-release/bin/solana-bench-exchange.exe \
|
||||
|
||||
fi
|
||||
|
||||
tar cvf solana-release-$TARGET.tar solana-release
|
||||
bzip2 solana-release-$TARGET.tar
|
||||
cp solana-release/bin/solana-install-init solana-install-init-$TARGET
|
||||
|
@ -16,13 +16,13 @@
|
||||
if [[ -n $RUST_STABLE_VERSION ]]; then
|
||||
stable_version="$RUST_STABLE_VERSION"
|
||||
else
|
||||
stable_version=1.41.1
|
||||
stable_version=1.42.0
|
||||
fi
|
||||
|
||||
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
|
||||
nightly_version="$RUST_NIGHTLY_VERSION"
|
||||
else
|
||||
nightly_version=2020-02-27
|
||||
nightly_version=2020-03-12
|
||||
fi
|
||||
|
||||
|
||||
|
@ -67,8 +67,9 @@ _ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
|
||||
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run banking bench. Doesn't require nightly, but use since it is already built.
|
||||
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
|
||||
_ cargo +$rust_nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
|
||||
_ cargo +$rust_nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
|
||||
|
||||
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
|
||||
# reason
|
||||
|
@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
|
||||
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
|
||||
|
||||
_ cargo +"$rust_stable" audit --version
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0006
|
||||
_ ci/nits.sh
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ docs/build.sh
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,8 +11,9 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
thiserror = "1.0.11"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.0"
|
||||
chrono = "0.4"
|
||||
|
@ -1,9 +1,10 @@
|
||||
use crate::keypair::{
|
||||
keypair_from_seed_phrase, signer_from_path, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
|
||||
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
};
|
||||
use chrono::DateTime;
|
||||
use clap::ArgMatches;
|
||||
use solana_remote_wallet::remote_wallet::{DerivationPath, RemoteWalletManager};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
clock::UnixTimestamp,
|
||||
native_token::sol_to_lamports,
|
||||
@ -111,18 +112,54 @@ pub fn signer_of(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
|
||||
value_of(matches, name).map(sol_to_lamports)
|
||||
pub fn pubkey_of_signer(
|
||||
matches: &ArgMatches<'_>,
|
||||
name: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
|
||||
if let Some(location) = matches.value_of(name) {
|
||||
Ok(Some(pubkey_from_path(
|
||||
matches,
|
||||
location,
|
||||
name,
|
||||
wallet_manager,
|
||||
)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn derivation_of(matches: &ArgMatches<'_>, name: &str) -> Option<DerivationPath> {
|
||||
matches.value_of(name).map(|derivation_str| {
|
||||
let derivation_str = derivation_str.replace("'", "");
|
||||
let mut parts = derivation_str.split('/');
|
||||
let account = parts.next().map(|account| account.parse::<u32>().unwrap());
|
||||
let change = parts.next().map(|change| change.parse::<u32>().unwrap());
|
||||
DerivationPath { account, change }
|
||||
})
|
||||
pub fn pubkeys_of_multiple_signers(
|
||||
matches: &ArgMatches<'_>,
|
||||
name: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
|
||||
if let Some(pubkey_matches) = matches.values_of(name) {
|
||||
let mut pubkeys: Vec<Pubkey> = vec![];
|
||||
for signer in pubkey_matches {
|
||||
pubkeys.push(pubkey_from_path(matches, signer, name, wallet_manager)?);
|
||||
}
|
||||
Ok(Some(pubkeys))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resolve_signer(
|
||||
matches: &ArgMatches<'_>,
|
||||
name: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<Option<String>, Box<dyn std::error::Error>> {
|
||||
Ok(resolve_signer_from_path(
|
||||
matches,
|
||||
matches.value_of(name).unwrap(),
|
||||
name,
|
||||
wallet_manager,
|
||||
)?)
|
||||
}
|
||||
|
||||
pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
|
||||
value_of(matches, name).map(sol_to_lamports)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -299,40 +336,4 @@ mod tests {
|
||||
.get_matches_from(vec!["test", "--single", "0.03"]);
|
||||
assert_eq!(lamports_of_sol(&matches, "single"), Some(30000000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_derivation_of() {
|
||||
let matches = app()
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "--single", "2/3"]);
|
||||
assert_eq!(
|
||||
derivation_of(&matches, "single"),
|
||||
Some(DerivationPath {
|
||||
account: Some(2),
|
||||
change: Some(3)
|
||||
})
|
||||
);
|
||||
assert_eq!(derivation_of(&matches, "another"), None);
|
||||
let matches = app()
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "--single", "2"]);
|
||||
assert_eq!(
|
||||
derivation_of(&matches, "single"),
|
||||
Some(DerivationPath {
|
||||
account: Some(2),
|
||||
change: None
|
||||
})
|
||||
);
|
||||
assert_eq!(derivation_of(&matches, "another"), None);
|
||||
let matches = app()
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "--single", "2'/3'"]);
|
||||
assert_eq!(
|
||||
derivation_of(&matches, "single"),
|
||||
Some(DerivationPath {
|
||||
account: Some(2),
|
||||
change: Some(3)
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -12,7 +12,7 @@ use std::str::FromStr;
|
||||
pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
match string.parse::<Pubkey>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
pub fn is_hash(string: String) -> Result<(), String> {
|
||||
match string.parse::<Hash>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,7 +28,7 @@ pub fn is_hash(string: String) -> Result<(), String> {
|
||||
pub fn is_keypair(string: String) -> Result<(), String> {
|
||||
read_keypair_file(&string)
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{:?}", err))
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed
|
||||
@ -38,7 +38,7 @@ pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
|
||||
}
|
||||
read_keypair_file(&string)
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{:?}", err))
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey string or keypair file location
|
||||
@ -46,18 +46,27 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
|
||||
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey or keypair file or keypair ask keyword
|
||||
pub fn is_pubkey_or_keypair_or_ask_keyword(string: String) -> Result<(), String> {
|
||||
is_pubkey(string.clone()).or_else(|_| is_keypair_or_ask_keyword(string))
|
||||
}
|
||||
|
||||
pub fn is_valid_signer(string: String) -> Result<(), String> {
|
||||
// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
|
||||
// produce a pubkey()
|
||||
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
|
||||
match parse_keypair_path(&string) {
|
||||
KeypairUrl::Filepath(path) => is_keypair(path),
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as a valid Signer. This is an alias of
|
||||
// `is_valid_pubkey`, and does accept pubkey strings, even though a Pubkey is not by itself
|
||||
// sufficient to sign a transaction.
|
||||
//
|
||||
// In the current offline-signing implementation, a pubkey is the valid input for a signer field
|
||||
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
|
||||
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
|
||||
// also provided and correct happens in parsing, not in validation.
|
||||
pub fn is_valid_signer(string: String) -> Result<(), String> {
|
||||
is_valid_pubkey(string)
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey=signature string
|
||||
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
let mut signer = string.split('=');
|
||||
@ -73,10 +82,10 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
.ok_or_else(|| "Malformed signer string".to_string())?,
|
||||
) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
}
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -90,20 +99,20 @@ pub fn is_url(string: String) -> Result<(), String> {
|
||||
Err("no host provided".to_string())
|
||||
}
|
||||
}
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_slot(slot: String) -> Result<(), String> {
|
||||
slot.parse::<Slot>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
.map_err(|e| format!("{}", e))
|
||||
}
|
||||
|
||||
pub fn is_port(port: String) -> Result<(), String> {
|
||||
port.parse::<u16>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
.map_err(|e| format!("{}", e))
|
||||
}
|
||||
|
||||
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
@ -111,7 +120,7 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
.parse::<u8>()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to parse input percentage, provided: {}, err: {:?}",
|
||||
"Unable to parse input percentage, provided: {}, err: {}",
|
||||
percentage, e
|
||||
)
|
||||
})
|
||||
@ -141,7 +150,7 @@ pub fn is_amount(amount: String) -> Result<(), String> {
|
||||
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
|
||||
DateTime::parse_from_rfc3339(&value)
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
.map_err(|e| format!("{}", e))
|
||||
}
|
||||
|
||||
pub fn is_derivation(value: String) -> Result<(), String> {
|
||||
@ -152,7 +161,7 @@ pub fn is_derivation(value: String) -> Result<(), String> {
|
||||
.parse::<u32>()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to parse derivation, provided: {}, err: {:?}",
|
||||
"Unable to parse derivation, provided: {}, err: {}",
|
||||
account, e
|
||||
)
|
||||
})
|
||||
@ -160,7 +169,7 @@ pub fn is_derivation(value: String) -> Result<(), String> {
|
||||
if let Some(change) = parts.next() {
|
||||
change.parse::<u32>().map_err(|e| {
|
||||
format!(
|
||||
"Unable to parse derivation, provided: {}, err: {:?}",
|
||||
"Unable to parse derivation, provided: {}, err: {}",
|
||||
change, e
|
||||
)
|
||||
})
|
||||
|
@ -1,10 +1,10 @@
|
||||
use crate::{
|
||||
input_parsers::{derivation_of, pubkeys_sigs_of},
|
||||
offline::SIGNER_ARG,
|
||||
input_parsers::pubkeys_sigs_of,
|
||||
offline::{SIGNER_ARG, SIGN_ONLY_ARG},
|
||||
ArgConstant,
|
||||
};
|
||||
use bip39::{Language, Mnemonic, Seed};
|
||||
use clap::{values_t, ArgMatches, Error, ErrorKind};
|
||||
use clap::ArgMatches;
|
||||
use rpassword::prompt_password_stderr;
|
||||
use solana_remote_wallet::{
|
||||
remote_keypair::generate_remote_keypair,
|
||||
@ -14,7 +14,7 @@ use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{
|
||||
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair,
|
||||
read_keypair_file, Keypair, Presigner, Signature, Signer,
|
||||
read_keypair_file, Keypair, NullSigner, Presigner, Signature, Signer,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
@ -75,7 +75,14 @@ pub fn signer_from_path(
|
||||
false,
|
||||
)?))
|
||||
}
|
||||
KeypairUrl::Filepath(path) => Ok(Box::new(read_keypair_file(&path)?)),
|
||||
KeypairUrl::Filepath(path) => match read_keypair_file(&path) {
|
||||
Err(e) => Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!("could not find keypair file: {} error: {}", path, e),
|
||||
)
|
||||
.into()),
|
||||
Ok(file) => Ok(Box::new(file)),
|
||||
},
|
||||
KeypairUrl::Stdin => {
|
||||
let mut stdin = std::io::stdin();
|
||||
Ok(Box::new(read_keypair(&mut stdin)?))
|
||||
@ -84,9 +91,9 @@ pub fn signer_from_path(
|
||||
if let Some(wallet_manager) = wallet_manager {
|
||||
Ok(Box::new(generate_remote_keypair(
|
||||
path,
|
||||
derivation_of(matches, "derivation_path"),
|
||||
wallet_manager,
|
||||
matches.is_present("confirm_key"),
|
||||
keypair_name,
|
||||
)?))
|
||||
} else {
|
||||
Err(RemoteWalletError::NoDeviceFound.into())
|
||||
@ -98,10 +105,12 @@ pub fn signer_from_path(
|
||||
.and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners));
|
||||
if let Some(presigner) = presigner {
|
||||
Ok(Box::new(presigner))
|
||||
} else if matches.is_present(SIGN_ONLY_ARG.name) {
|
||||
Ok(Box::new(NullSigner::new(&pubkey)))
|
||||
} else {
|
||||
Err(Error::with_description(
|
||||
"Missing signature for supplied pubkey",
|
||||
ErrorKind::MissingRequiredArgument,
|
||||
Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!("missing signature for supplied pubkey: {}", pubkey),
|
||||
)
|
||||
.into())
|
||||
}
|
||||
@ -109,39 +118,72 @@ pub fn signer_from_path(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pubkey_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
keypair_name: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<Pubkey, Box<dyn error::Error>> {
|
||||
match parse_keypair_path(path) {
|
||||
KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
|
||||
_ => Ok(signer_from_path(matches, path, keypair_name, wallet_manager)?.pubkey()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resolve_signer_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
keypair_name: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<Option<String>, Box<dyn error::Error>> {
|
||||
match parse_keypair_path(path) {
|
||||
KeypairUrl::Ask => {
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
// This method validates the seed phrase, but returns `None` because there is no path
|
||||
// on disk or to a device
|
||||
keypair_from_seed_phrase(keypair_name, skip_validation, false).map(|_| None)
|
||||
}
|
||||
KeypairUrl::Filepath(path) => match read_keypair_file(&path) {
|
||||
Err(e) => Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!("could not find keypair file: {} error: {}", path, e),
|
||||
)
|
||||
.into()),
|
||||
Ok(_) => Ok(Some(path.to_string())),
|
||||
},
|
||||
KeypairUrl::Stdin => {
|
||||
let mut stdin = std::io::stdin();
|
||||
// This method validates the keypair from stdin, but returns `None` because there is no
|
||||
// path on disk or to a device
|
||||
read_keypair(&mut stdin).map(|_| None)
|
||||
}
|
||||
KeypairUrl::Usb(path) => {
|
||||
if let Some(wallet_manager) = wallet_manager {
|
||||
let path = generate_remote_keypair(
|
||||
path,
|
||||
wallet_manager,
|
||||
matches.is_present("confirm_key"),
|
||||
keypair_name,
|
||||
)
|
||||
.map(|keypair| keypair.path)?;
|
||||
Ok(Some(path))
|
||||
} else {
|
||||
Err(RemoteWalletError::NoDeviceFound.into())
|
||||
}
|
||||
}
|
||||
_ => Ok(Some(path.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
// Keyword used to indicate that the user should be asked for a keypair seed phrase
|
||||
pub const ASK_KEYWORD: &str = "ASK";
|
||||
|
||||
pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant {
|
||||
long: "ask-seed-phrase",
|
||||
name: "ask_seed_phrase",
|
||||
help: "Recover a keypair using a seed phrase and optional passphrase",
|
||||
};
|
||||
|
||||
pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
|
||||
long: "skip-seed-phrase-validation",
|
||||
name: "skip_seed_phrase_validation",
|
||||
help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 official English word list",
|
||||
};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Source {
|
||||
Generated,
|
||||
Path,
|
||||
SeedPhrase,
|
||||
}
|
||||
|
||||
pub struct KeypairWithSource {
|
||||
pub keypair: Keypair,
|
||||
pub source: Source,
|
||||
}
|
||||
|
||||
impl KeypairWithSource {
|
||||
fn new(keypair: Keypair, source: Source) -> Self {
|
||||
Self { keypair, source }
|
||||
}
|
||||
}
|
||||
|
||||
/// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes
|
||||
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
|
||||
let passphrase = prompt_password_stderr(&prompt)?;
|
||||
@ -195,47 +237,6 @@ pub fn keypair_from_seed_phrase(
|
||||
Ok(keypair)
|
||||
}
|
||||
|
||||
/// Checks CLI arguments to determine whether a keypair should be:
|
||||
/// - inputted securely via stdin,
|
||||
/// - read in from a file,
|
||||
/// - or newly generated
|
||||
pub fn keypair_input(
|
||||
matches: &clap::ArgMatches,
|
||||
keypair_name: &str,
|
||||
) -> Result<KeypairWithSource, Box<dyn error::Error>> {
|
||||
let ask_seed_phrase_matches =
|
||||
values_t!(matches.values_of(ASK_SEED_PHRASE_ARG.name), String).unwrap_or_default();
|
||||
let keypair_match_name = keypair_name.replace('-', "_");
|
||||
if ask_seed_phrase_matches
|
||||
.iter()
|
||||
.any(|s| s.as_str() == keypair_name)
|
||||
{
|
||||
if matches.value_of(keypair_match_name).is_some() {
|
||||
clap::Error::with_description(
|
||||
&format!(
|
||||
"`--{} {}` cannot be used with `{} <PATH>`",
|
||||
ASK_SEED_PHRASE_ARG.long, keypair_name, keypair_name
|
||||
),
|
||||
clap::ErrorKind::ArgumentConflict,
|
||||
)
|
||||
.exit();
|
||||
}
|
||||
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase(keypair_name, skip_validation, true)
|
||||
.map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
|
||||
} else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
|
||||
if keypair_file.starts_with("usb://") {
|
||||
Ok(KeypairWithSource::new(Keypair::new(), Source::Path))
|
||||
} else {
|
||||
read_keypair_file(keypair_file)
|
||||
.map(|keypair| KeypairWithSource::new(keypair, Source::Path))
|
||||
}
|
||||
} else {
|
||||
Ok(KeypairWithSource::new(Keypair::new(), Source::Generated))
|
||||
}
|
||||
}
|
||||
|
||||
fn sanitize_seed_phrase(seed_phrase: &str) -> String {
|
||||
seed_phrase
|
||||
.split_whitespace()
|
||||
@ -246,14 +247,6 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::ArgMatches;
|
||||
|
||||
#[test]
|
||||
fn test_keypair_input() {
|
||||
let arg_matches = ArgMatches::default();
|
||||
let KeypairWithSource { source, .. } = keypair_input(&arg_matches, "").unwrap();
|
||||
assert_eq!(source, Source::Generated);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_seed_phrase() {
|
||||
|
@ -1,3 +1,5 @@
|
||||
use thiserror::Error;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! version {
|
||||
() => {
|
||||
@ -23,6 +25,23 @@ pub struct ArgConstant<'a> {
|
||||
pub help: &'a str,
|
||||
}
|
||||
|
||||
/// Error type for forwarding Errors out of `main()` of a `clap` app
|
||||
/// and still using the `Display` formatter
|
||||
#[derive(Error)]
|
||||
#[error("{0}")]
|
||||
pub struct DisplayError(Box<dyn std::error::Error>);
|
||||
impl DisplayError {
|
||||
pub fn new_as_boxed(inner: Box<dyn std::error::Error>) -> Box<Self> {
|
||||
DisplayError(inner).into()
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for DisplayError {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(fmt, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
pub mod input_parsers;
|
||||
pub mod input_validators;
|
||||
pub mod keypair;
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,6 +11,7 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
dirs = "2.0.2"
|
||||
lazy_static = "1.4.0"
|
||||
serde = "1.0.104"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.11"
|
||||
url = "2.1.1"
|
||||
|
@ -1,10 +1,7 @@
|
||||
// Wallet settings that can be configured for long-term use
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::{
|
||||
fs::{create_dir_all, File},
|
||||
io::{self, Write},
|
||||
path::Path,
|
||||
};
|
||||
use std::io;
|
||||
use url::Url;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref CONFIG_FILE: Option<String> = {
|
||||
@ -15,39 +12,65 @@ lazy_static! {
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq)]
|
||||
pub struct Config {
|
||||
pub url: String,
|
||||
pub json_rpc_url: String,
|
||||
pub websocket_url: String,
|
||||
pub keypair_path: String,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn new(url: &str, websocket_url: &str, keypair_path: &str) -> Self {
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
let keypair_path = {
|
||||
let mut keypair_path = dirs::home_dir().expect("home directory");
|
||||
keypair_path.extend(&[".config", "solana", "id.json"]);
|
||||
keypair_path.to_str().unwrap().to_string()
|
||||
};
|
||||
let json_rpc_url = "http://127.0.0.1:8899".to_string();
|
||||
|
||||
// Empty websocket_url string indicates the client should
|
||||
// `Config::compute_websocket_url(&json_rpc_url)`
|
||||
let websocket_url = "".to_string();
|
||||
|
||||
Self {
|
||||
url: url.to_string(),
|
||||
websocket_url: websocket_url.to_string(),
|
||||
keypair_path: keypair_path.to_string(),
|
||||
json_rpc_url,
|
||||
websocket_url,
|
||||
keypair_path,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn load(config_file: &str) -> Result<Self, io::Error> {
|
||||
let file = File::open(config_file.to_string())?;
|
||||
let config = serde_yaml::from_reader(file)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
Ok(config)
|
||||
crate::load_config_file(config_file)
|
||||
}
|
||||
|
||||
pub fn save(&self, config_file: &str) -> Result<(), io::Error> {
|
||||
let serialized = serde_yaml::to_string(self)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
crate::save_config_file(self, config_file)
|
||||
}
|
||||
|
||||
if let Some(outdir) = Path::new(&config_file).parent() {
|
||||
create_dir_all(outdir)?;
|
||||
pub fn compute_websocket_url(json_rpc_url: &str) -> String {
|
||||
let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
|
||||
if json_rpc_url.is_none() {
|
||||
return "".to_string();
|
||||
}
|
||||
let mut file = File::create(config_file)?;
|
||||
file.write_all(&serialized.into_bytes())?;
|
||||
|
||||
Ok(())
|
||||
let json_rpc_url = json_rpc_url.unwrap();
|
||||
let is_secure = json_rpc_url.scheme().to_ascii_lowercase() == "https";
|
||||
let mut ws_url = json_rpc_url.clone();
|
||||
ws_url
|
||||
.set_scheme(if is_secure { "wss" } else { "ws" })
|
||||
.expect("unable to set scheme");
|
||||
let ws_port = match json_rpc_url.port() {
|
||||
Some(port) => port + 1,
|
||||
None => {
|
||||
if is_secure {
|
||||
8901
|
||||
} else {
|
||||
8900
|
||||
}
|
||||
}
|
||||
};
|
||||
ws_url.set_port(Some(ws_port)).expect("unable to set port");
|
||||
ws_url.to_string()
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,39 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
pub mod config;
|
||||
mod config;
|
||||
pub use config::{Config, CONFIG_FILE};
|
||||
|
||||
use std::{
|
||||
fs::{create_dir_all, File},
|
||||
io::{self, Write},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
pub fn load_config_file<T, P>(config_file: P) -> Result<T, io::Error>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let file = File::open(config_file)?;
|
||||
let config = serde_yaml::from_reader(file)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn save_config_file<T, P>(config: &T, config_file: P) -> Result<(), io::Error>
|
||||
where
|
||||
T: serde::ser::Serialize,
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let serialized = serde_yaml::to_string(config)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
|
||||
if let Some(outdir) = config_file.as_ref().parent() {
|
||||
create_dir_all(outdir)?;
|
||||
}
|
||||
let mut file = File::create(config_file)?;
|
||||
file.write_all(&serialized.into_bytes())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,42 +11,43 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
bincode = "1.2.1"
|
||||
bs58 = "0.3.0"
|
||||
chrono = { version = "0.4.10", features = ["serde"] }
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
clap = "2.33.0"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.4", features = ["termination"] }
|
||||
console = "0.9.2"
|
||||
console = "0.10.0"
|
||||
dirs = "2.0.2"
|
||||
log = "0.4.8"
|
||||
indicatif = "0.14.0"
|
||||
humantime = "2.0.0"
|
||||
num-traits = "0.2"
|
||||
pretty-hex = "0.1.1"
|
||||
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
|
||||
serde = "1.0.104"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.0.4" }
|
||||
solana-client = { path = "../client", version = "1.0.4" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.0.4" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.4" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.4" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.4" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.4" }
|
||||
serde_json = "1.0.48"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.1.0" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.1.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
|
||||
titlecase = "1.1.0"
|
||||
thiserror = "1.0.13"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.0.4" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
|
||||
solana-core = { path = "../core", version = "1.1.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
681
cli/src/cli.rs
681
cli/src/cli.rs
File diff suppressed because it is too large
Load Diff
@ -5,6 +5,7 @@ use crate::{
|
||||
},
|
||||
display::println_name_value,
|
||||
};
|
||||
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
|
||||
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
@ -22,6 +23,7 @@ use solana_sdk::{
|
||||
epoch_schedule::Epoch,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_instruction,
|
||||
@ -55,8 +57,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
Arg::with_name("node_pubkey")
|
||||
.index(1)
|
||||
.takes_value(true)
|
||||
.value_name("PUBKEY")
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.value_name("VALIDATOR_PUBKEY")
|
||||
.validator(is_valid_pubkey)
|
||||
.required(true)
|
||||
.help("Identity pubkey of the validator"),
|
||||
)
|
||||
@ -117,6 +119,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("total-supply").about("Get total number of SOL")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return count at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-count").about("Get current transaction count")
|
||||
.alias("get-transaction-count")
|
||||
@ -208,10 +221,10 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkeys")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEYS")
|
||||
.value_name("VOTE_ACCOUNT_PUBKEYS")
|
||||
.takes_value(true)
|
||||
.multiple(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Only show stake accounts delegated to the provided vote accounts"),
|
||||
)
|
||||
.arg(
|
||||
@ -243,8 +256,11 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_catchup(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
|
||||
pub fn parse_catchup(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
|
||||
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Catchup {
|
||||
@ -322,6 +338,18 @@ pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliErr
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TotalSupply { commitment_config },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
@ -334,9 +362,13 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_show_stakes(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
pub fn parse_show_stakes(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let vote_account_pubkeys = pubkeys_of(matches, "vote_account_pubkeys");
|
||||
let vote_account_pubkeys =
|
||||
pubkeys_of_multiple_signers(matches, "vote_account_pubkeys", wallet_manager)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowStakes {
|
||||
@ -378,19 +410,34 @@ pub fn process_catchup(
|
||||
node_pubkey: &Pubkey,
|
||||
node_json_rpc_url: &Option<String>,
|
||||
) -> ProcessResult {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
let sleep_interval = 5;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
|
||||
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
|
||||
RpcClient::new(node_json_rpc_url.to_string())
|
||||
} else {
|
||||
RpcClient::new_socket(
|
||||
cluster_nodes
|
||||
let rpc_addr = loop {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
if let Some(contact_info) = cluster_nodes
|
||||
.iter()
|
||||
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
|
||||
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
|
||||
.rpc
|
||||
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?,
|
||||
)
|
||||
{
|
||||
if let Some(rpc_addr) = contact_info.rpc {
|
||||
break rpc_addr;
|
||||
}
|
||||
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
|
||||
} else {
|
||||
progress_bar.set_message(&format!(
|
||||
"Contact information not found for {}",
|
||||
node_pubkey
|
||||
));
|
||||
}
|
||||
sleep(Duration::from_secs(sleep_interval as u64));
|
||||
};
|
||||
|
||||
RpcClient::new_socket(rpc_addr)
|
||||
};
|
||||
|
||||
let reported_node_pubkey = node_client.get_identity()?;
|
||||
@ -406,12 +453,8 @@ pub fn process_catchup(
|
||||
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
|
||||
}
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
|
||||
let mut previous_rpc_slot = std::u64::MAX;
|
||||
let mut previous_slot_distance = 0;
|
||||
let sleep_interval = 5;
|
||||
loop {
|
||||
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
|
||||
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;
|
||||
@ -504,7 +547,13 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
|
||||
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
|
||||
let timestamp = rpc_client.get_block_time(slot)?;
|
||||
Ok(timestamp.to_string())
|
||||
let result = format!(
|
||||
"{} (UnixTimestamp: {})",
|
||||
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc)
|
||||
.to_rfc3339_opts(SecondsFormat::Secs, true),
|
||||
timestamp
|
||||
);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn slot_to_human_time(slot: Slot) -> String {
|
||||
@ -749,6 +798,14 @@ pub fn process_show_block_production(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_total_supply(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ProcessResult {
|
||||
let total_supply = rpc_client.total_supply_with_commitment(commitment_config.clone())?;
|
||||
Ok(format!("{} SOL", lamports_to_sol(total_supply)))
|
||||
}
|
||||
|
||||
pub fn process_get_transaction_count(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: CommitmentConfig,
|
||||
@ -789,7 +846,7 @@ pub fn process_ping(
|
||||
last_blockhash = recent_blockhash;
|
||||
|
||||
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
|
||||
let message = Message::new(vec![ix]);
|
||||
let message = Message::new(&[ix]);
|
||||
let mut transaction = Transaction::new_unsigned(message);
|
||||
transaction.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -972,7 +1029,7 @@ pub fn process_live_slots(url: &str) -> ProcessResult {
|
||||
current = Some(new_info);
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("disconnected: {:?}", err);
|
||||
eprintln!("disconnected: {}", err);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1273,6 +1330,19 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_total_supply = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "total-supply"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_total_supply, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::TotalSupply {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
},
|
||||
signers: vec![],
|
||||
}
|
||||
);
|
||||
|
||||
let test_transaction_count = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "transaction-count"]);
|
||||
|
@ -1,6 +1,6 @@
|
||||
use crate::cli::SettingType;
|
||||
use console::style;
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use solana_sdk::hash::Hash;
|
||||
|
||||
// Pretty print a "name value"
|
||||
pub fn println_name_value(name: &str, value: &str) {
|
||||
@ -27,13 +27,25 @@ pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType)
|
||||
);
|
||||
}
|
||||
|
||||
pub fn println_signers(tx: &Transaction) {
|
||||
pub fn println_signers(
|
||||
blockhash: &Hash,
|
||||
signers: &[String],
|
||||
absent: &[String],
|
||||
bad_sig: &[String],
|
||||
) {
|
||||
println!();
|
||||
println!("Blockhash: {}", tx.message.recent_blockhash);
|
||||
println!("Signers (Pubkey=Signature):");
|
||||
tx.signatures
|
||||
.iter()
|
||||
.zip(tx.message.account_keys.clone())
|
||||
.for_each(|(signature, pubkey)| println!(" {:?}={:?}", pubkey, signature));
|
||||
println!("Blockhash: {}", blockhash);
|
||||
if !signers.is_empty() {
|
||||
println!("Signers (Pubkey=Signature):");
|
||||
signers.iter().for_each(|signer| println!(" {}", signer))
|
||||
}
|
||||
if !absent.is_empty() {
|
||||
println!("Absent Signers (Pubkey):");
|
||||
absent.iter().for_each(|pubkey| println!(" {}", pubkey))
|
||||
}
|
||||
if !bad_sig.is_empty() {
|
||||
println!("Bad Signatures (Pubkey):");
|
||||
bad_sig.iter().for_each(|pubkey| println!(" {}", pubkey))
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
@ -2,15 +2,14 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
|
||||
use console::style;
|
||||
|
||||
use solana_clap_utils::{
|
||||
input_parsers::derivation_of,
|
||||
input_validators::{is_derivation, is_url},
|
||||
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
|
||||
DisplayError,
|
||||
};
|
||||
use solana_cli::{
|
||||
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
|
||||
display::{println_name_value, println_name_value_or},
|
||||
};
|
||||
use solana_cli_config::config::{Config, CONFIG_FILE};
|
||||
use solana_cli_config::{Config, CONFIG_FILE};
|
||||
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
|
||||
use std::{error, sync::Arc};
|
||||
|
||||
@ -22,12 +21,12 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
let config = Config::load(config_file).unwrap_or_default();
|
||||
|
||||
let (url_setting_type, json_rpc_url) =
|
||||
CliConfig::compute_json_rpc_url_setting("", &config.url);
|
||||
CliConfig::compute_json_rpc_url_setting("", &config.json_rpc_url);
|
||||
let (ws_setting_type, websocket_url) = CliConfig::compute_websocket_url_setting(
|
||||
"",
|
||||
&config.websocket_url,
|
||||
"",
|
||||
&config.url,
|
||||
&config.json_rpc_url,
|
||||
);
|
||||
let (keypair_setting_type, keypair_path) =
|
||||
CliConfig::compute_keypair_path_setting("", &config.keypair_path);
|
||||
@ -35,7 +34,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
if let Some(field) = subcommand_matches.value_of("specific_setting") {
|
||||
let (field_name, value, setting_type) = match field {
|
||||
"json_rpc_url" => ("RPC URL", json_rpc_url, url_setting_type),
|
||||
"websocket_url" => ("WS URL", websocket_url, ws_setting_type),
|
||||
"websocket_url" => ("WebSocket URL", websocket_url, ws_setting_type),
|
||||
"keypair" => ("Key Path", keypair_path, keypair_setting_type),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
@ -43,7 +42,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
} else {
|
||||
println_name_value("Config File:", config_file);
|
||||
println_name_value_or("RPC URL:", &json_rpc_url, url_setting_type);
|
||||
println_name_value_or("WS URL:", &websocket_url, ws_setting_type);
|
||||
println_name_value_or("WebSocket URL:", &websocket_url, ws_setting_type);
|
||||
println_name_value_or("Keypair Path:", &keypair_path, keypair_setting_type);
|
||||
}
|
||||
} else {
|
||||
@ -58,7 +57,10 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
if let Some(config_file) = matches.value_of("config_file") {
|
||||
let mut config = Config::load(config_file).unwrap_or_default();
|
||||
if let Some(url) = subcommand_matches.value_of("json_rpc_url") {
|
||||
config.url = url.to_string();
|
||||
config.json_rpc_url = url.to_string();
|
||||
// Revert to a computed `websocket_url` value when `json_rpc_url` is
|
||||
// changed
|
||||
config.websocket_url = "".to_string();
|
||||
}
|
||||
if let Some(url) = subcommand_matches.value_of("websocket_url") {
|
||||
config.websocket_url = url.to_string();
|
||||
@ -69,19 +71,19 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
config.save(config_file)?;
|
||||
|
||||
let (url_setting_type, json_rpc_url) =
|
||||
CliConfig::compute_json_rpc_url_setting("", &config.url);
|
||||
CliConfig::compute_json_rpc_url_setting("", &config.json_rpc_url);
|
||||
let (ws_setting_type, websocket_url) = CliConfig::compute_websocket_url_setting(
|
||||
"",
|
||||
&config.websocket_url,
|
||||
"",
|
||||
&config.url,
|
||||
&config.json_rpc_url,
|
||||
);
|
||||
let (keypair_setting_type, keypair_path) =
|
||||
CliConfig::compute_keypair_path_setting("", &config.keypair_path);
|
||||
|
||||
println_name_value("Config File:", config_file);
|
||||
println_name_value_or("RPC URL:", &json_rpc_url, url_setting_type);
|
||||
println_name_value_or("WS URL:", &websocket_url, ws_setting_type);
|
||||
println_name_value_or("WebSocket URL:", &websocket_url, ws_setting_type);
|
||||
println_name_value_or("Keypair Path:", &keypair_path, keypair_setting_type);
|
||||
} else {
|
||||
println!(
|
||||
@ -109,13 +111,13 @@ pub fn parse_args<'a>(
|
||||
};
|
||||
let (_, json_rpc_url) = CliConfig::compute_json_rpc_url_setting(
|
||||
matches.value_of("json_rpc_url").unwrap_or(""),
|
||||
&config.url,
|
||||
&config.json_rpc_url,
|
||||
);
|
||||
let (_, websocket_url) = CliConfig::compute_websocket_url_setting(
|
||||
matches.value_of("websocket_url").unwrap_or(""),
|
||||
&config.websocket_url,
|
||||
matches.value_of("json_rpc_url").unwrap_or(""),
|
||||
&config.url,
|
||||
&config.json_rpc_url,
|
||||
);
|
||||
let (_, default_signer_path) = CliConfig::compute_keypair_path_setting(
|
||||
matches.value_of("keypair").unwrap_or(""),
|
||||
@ -132,7 +134,6 @@ pub fn parse_args<'a>(
|
||||
websocket_url,
|
||||
signers: vec![],
|
||||
keypair_path: default_signer_path,
|
||||
derivation_path: derivation_of(matches, "derivation_path"),
|
||||
rpc_client: None,
|
||||
verbose: matches.is_present("verbose"),
|
||||
},
|
||||
@ -151,7 +152,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
let arg = Arg::with_name("config_file")
|
||||
.short("C")
|
||||
.long("config")
|
||||
.value_name("PATH")
|
||||
.value_name("FILEPATH")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.help("Configuration file to use");
|
||||
@ -184,26 +185,17 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
Arg::with_name("keypair")
|
||||
.short("k")
|
||||
.long("keypair")
|
||||
.value_name("PATH")
|
||||
.value_name("KEYPAIR")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.help("/path/to/id.json or usb://remote/wallet/path"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("derivation_path")
|
||||
.long("derivation-path")
|
||||
.value_name("ACCOUNT or ACCOUNT/CHANGE")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.validator(is_derivation)
|
||||
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
|
||||
.help("Filepath or URL to a keypair"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("verbose")
|
||||
.long("verbose")
|
||||
.short("v")
|
||||
.global(true)
|
||||
.help("Show extra information header"),
|
||||
.help("Show additional information"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
@ -241,13 +233,23 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into())
|
||||
}
|
||||
|
||||
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
|
||||
if parse_settings(&matches)? {
|
||||
let wallet_manager = maybe_wallet_manager()?;
|
||||
|
||||
let (mut config, signers) = parse_args(&matches, wallet_manager)?;
|
||||
config.signers = signers.iter().map(|s| s.as_ref()).collect();
|
||||
let result = process_command(&config)?;
|
||||
println!("{}", result);
|
||||
}
|
||||
let (_, submatches) = matches.subcommand();
|
||||
let sign_only = submatches
|
||||
.map(|m| m.is_present(SIGN_ONLY_ARG.name))
|
||||
.unwrap_or(false);
|
||||
if !sign_only {
|
||||
println!("{}", result);
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
358
cli/src/nonce.rs
358
cli/src/nonce.rs
@ -14,25 +14,38 @@ use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
nonce::{self, state::Versions, State},
|
||||
nonce::{
|
||||
self,
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
},
|
||||
pubkey::Pubkey,
|
||||
system_instruction::{
|
||||
advance_nonce_account, authorize_nonce_account, create_address_with_seed,
|
||||
create_nonce_account, create_nonce_account_with_seed, withdraw_nonce_account, NonceError,
|
||||
SystemError,
|
||||
advance_nonce_account, authorize_nonce_account, create_nonce_account,
|
||||
create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError,
|
||||
},
|
||||
system_program,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Error, PartialEq)]
|
||||
pub enum CliNonceError {
|
||||
#[error("invalid account owner")]
|
||||
InvalidAccountOwner,
|
||||
#[error("invalid account data")]
|
||||
InvalidAccountData,
|
||||
#[error("unexpected account data size")]
|
||||
UnexpectedDataSize,
|
||||
#[error("query hash does not match stored hash")]
|
||||
InvalidHash,
|
||||
#[error("query authority does not match account authority")]
|
||||
InvalidAuthority,
|
||||
InvalidState,
|
||||
#[error("invalid state for requested operation")]
|
||||
InvalidStateForOperation,
|
||||
#[error("client error: {0}")]
|
||||
Client(String),
|
||||
}
|
||||
|
||||
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
|
||||
@ -60,7 +73,7 @@ pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
.takes_value(true)
|
||||
.value_name("PUBKEY")
|
||||
.requires(BLOCKHASH_ARG.name)
|
||||
.validator(is_pubkey)
|
||||
.validator(is_valid_pubkey)
|
||||
.help(NONCE_ARG.help)
|
||||
}
|
||||
|
||||
@ -68,7 +81,7 @@ pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(NONCE_AUTHORITY_ARG.name)
|
||||
.long(NONCE_AUTHORITY_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("KEYPAIR or PUBKEY or REMOTE WALLET PATH")
|
||||
.value_name("KEYPAIR")
|
||||
.validator(is_valid_signer)
|
||||
.help(NONCE_AUTHORITY_ARG.help)
|
||||
}
|
||||
@ -79,30 +92,23 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("authorize-nonce-account")
|
||||
.about("Assign account authority to a new entity")
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_keypair")
|
||||
Arg::with_name("nonce_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NONCE_ACCOUNT")
|
||||
.value_name("NONCE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Address of the nonce account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authority")
|
||||
.index(2)
|
||||
.value_name("NEW_AUTHORITY_PUBKEY")
|
||||
.value_name("AUTHORITY_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Account to be granted authority of the nonce account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("seed")
|
||||
.long("seed")
|
||||
.value_name("SEED STRING")
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
@ -111,10 +117,10 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_keypair")
|
||||
.index(1)
|
||||
.value_name("NONCE ACCOUNT")
|
||||
.value_name("ACCOUNT_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_signer)
|
||||
.help("Keypair of the nonce account to fund"),
|
||||
)
|
||||
.arg(
|
||||
@ -130,9 +136,16 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
Arg::with_name(NONCE_AUTHORITY_ARG.name)
|
||||
.long(NONCE_AUTHORITY_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("BASE58_PUBKEY")
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.value_name("PUBKEY")
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Assign noncing authority to another entity"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("seed")
|
||||
.long("seed")
|
||||
.value_name("STRING")
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -142,10 +155,10 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NONCE ACCOUNT")
|
||||
.value_name("NONCE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Address of the nonce account to display"),
|
||||
),
|
||||
)
|
||||
@ -153,12 +166,12 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("new-nonce")
|
||||
.about("Generate a new nonce, rendering the existing nonce useless")
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_keypair")
|
||||
Arg::with_name("nonce_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NONCE ACCOUNT")
|
||||
.value_name("NONCE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Address of the nonce account"),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
@ -170,10 +183,10 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NONCE ACCOUNT")
|
||||
.value_name("NONCE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Address of the nonce account to display"),
|
||||
)
|
||||
.arg(
|
||||
@ -185,24 +198,24 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-from-nonce-account")
|
||||
.about("Withdraw lamports from the nonce account")
|
||||
.about("Withdraw SOL from the nonce account")
|
||||
.arg(
|
||||
Arg::with_name("nonce_account_keypair")
|
||||
Arg::with_name("nonce_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NONCE ACCOUNT")
|
||||
.value_name("NONCE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair_or_ask_keyword)
|
||||
.help("Nonce account from to withdraw from"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Nonce account to withdraw from"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("destination_account_pubkey")
|
||||
.index(2)
|
||||
.value_name("DESTINATION ACCOUNT")
|
||||
.value_name("RECIPIENT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("The account to which the lamports should be transferred"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("The account to which the SOL should be transferred"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("amount")
|
||||
@ -218,13 +231,55 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_account(
|
||||
rpc_client: &RpcClient,
|
||||
nonce_pubkey: &Pubkey,
|
||||
) -> Result<Account, CliNonceError> {
|
||||
rpc_client
|
||||
.get_account(nonce_pubkey)
|
||||
.map_err(|e| CliNonceError::Client(format!("{}", e)))
|
||||
.and_then(|a| match account_identity_ok(&a) {
|
||||
Ok(()) => Ok(a),
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn account_identity_ok(account: &Account) -> Result<(), CliNonceError> {
|
||||
if account.owner != system_program::id() {
|
||||
Err(CliNonceError::InvalidAccountOwner)
|
||||
} else if account.data.is_empty() {
|
||||
Err(CliNonceError::UnexpectedDataSize)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn state_from_account(account: &Account) -> Result<State, CliNonceError> {
|
||||
account_identity_ok(account)?;
|
||||
StateMut::<Versions>::state(account)
|
||||
.map_err(|_| CliNonceError::InvalidAccountData)
|
||||
.map(|v| v.convert_to_current())
|
||||
}
|
||||
|
||||
pub fn data_from_account(account: &Account) -> Result<Data, CliNonceError> {
|
||||
account_identity_ok(account)?;
|
||||
state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone()))
|
||||
}
|
||||
|
||||
pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
|
||||
match state {
|
||||
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation),
|
||||
State::Initialized(data) => Ok(data),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_authorize_nonce_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
|
||||
let new_authority = pubkey_of(matches, "new_authority").unwrap();
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
|
||||
@ -255,7 +310,7 @@ pub fn parse_nonce_create_account(
|
||||
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
|
||||
let nonce_authority = pubkey_of_signer(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
@ -276,8 +331,12 @@ pub fn parse_nonce_create_account(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey = pubkey_of(matches, "nonce_account_pubkey").unwrap();
|
||||
pub fn parse_get_nonce(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey =
|
||||
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetNonce(nonce_account_pubkey),
|
||||
@ -290,7 +349,7 @@ pub fn parse_new_nonce(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
|
||||
@ -311,8 +370,12 @@ pub fn parse_new_nonce(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_show_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey = pubkey_of(matches, "nonce_account_pubkey").unwrap();
|
||||
pub fn parse_show_nonce_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey =
|
||||
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
@ -329,8 +392,9 @@ pub fn parse_withdraw_from_nonce_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
|
||||
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let destination_account_pubkey =
|
||||
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
@ -359,24 +423,18 @@ pub fn check_nonce_account(
|
||||
nonce_account: &Account,
|
||||
nonce_authority: &Pubkey,
|
||||
nonce_hash: &Hash,
|
||||
) -> Result<(), Box<CliError>> {
|
||||
if nonce_account.owner != system_program::ID {
|
||||
return Err(CliError::InvalidNonce(CliNonceError::InvalidAccountOwner).into());
|
||||
}
|
||||
let nonce_state = StateMut::<Versions>::state(nonce_account)
|
||||
.map(|v| v.convert_to_current())
|
||||
.map_err(|_| Box::new(CliError::InvalidNonce(CliNonceError::InvalidAccountData)))?;
|
||||
match nonce_state {
|
||||
) -> Result<(), CliError> {
|
||||
match state_from_account(nonce_account)? {
|
||||
State::Initialized(ref data) => {
|
||||
if &data.blockhash != nonce_hash {
|
||||
Err(CliError::InvalidNonce(CliNonceError::InvalidHash).into())
|
||||
Err(CliNonceError::InvalidHash.into())
|
||||
} else if nonce_authority != &data.authority {
|
||||
Err(CliError::InvalidNonce(CliNonceError::InvalidAuthority).into())
|
||||
Err(CliNonceError::InvalidAuthority.into())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
State::Uninitialized => Err(CliError::InvalidNonce(CliNonceError::InvalidState).into()),
|
||||
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation.into()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -391,7 +449,7 @@ pub fn process_authorize_nonce_account(
|
||||
|
||||
let nonce_authority = config.signers[nonce_authority];
|
||||
let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
|
||||
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
|
||||
@ -401,7 +459,7 @@ pub fn process_authorize_nonce_account(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<NonceError>(result)
|
||||
}
|
||||
|
||||
@ -415,7 +473,7 @@ pub fn process_create_nonce_account(
|
||||
) -> ProcessResult {
|
||||
let nonce_account_pubkey = config.signers[nonce_account].pubkey();
|
||||
let nonce_account_address = if let Some(seed) = seed.clone() {
|
||||
create_address_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
|
||||
Pubkey::create_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
|
||||
} else {
|
||||
nonce_account_pubkey
|
||||
};
|
||||
@ -425,10 +483,8 @@ pub fn process_create_nonce_account(
|
||||
(&nonce_account_address, "nonce_account".to_string()),
|
||||
)?;
|
||||
|
||||
if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_address) {
|
||||
let err_msg = if nonce_account.owner == system_program::id()
|
||||
&& StateMut::<Versions>::state(&nonce_account).is_ok()
|
||||
{
|
||||
if let Ok(nonce_account) = get_account(rpc_client, &nonce_account_address) {
|
||||
let err_msg = if state_from_account(&nonce_account).is_ok() {
|
||||
format!("Nonce account {} already exists", nonce_account_address)
|
||||
} else {
|
||||
format!(
|
||||
@ -470,7 +526,7 @@ pub fn process_create_nonce_account(
|
||||
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
|
||||
@ -480,28 +536,14 @@ pub fn process_create_nonce_account(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
|
||||
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
|
||||
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
|
||||
if nonce_account.owner != system_program::id() {
|
||||
return Err(CliError::RpcRequestError(format!(
|
||||
"{:?} is not a nonce account",
|
||||
nonce_account_pubkey
|
||||
))
|
||||
.into());
|
||||
}
|
||||
let nonce_state = StateMut::<Versions>::state(&nonce_account).map(|v| v.convert_to_current());
|
||||
match nonce_state {
|
||||
Ok(State::Uninitialized) => Ok("Nonce account is uninitialized".to_string()),
|
||||
Ok(State::Initialized(ref data)) => Ok(format!("{:?}", data.blockhash)),
|
||||
Err(err) => Err(CliError::RpcRequestError(format!(
|
||||
"Account data could not be deserialized to nonce state: {:?}",
|
||||
err
|
||||
))
|
||||
.into()),
|
||||
match get_account(rpc_client, nonce_account_pubkey).and_then(|ref a| state_from_account(a))? {
|
||||
State::Uninitialized => Ok("Nonce account is uninitialized".to_string()),
|
||||
State::Initialized(ref data) => Ok(format!("{:?}", data.blockhash)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -526,7 +568,7 @@ pub fn process_new_nonce(
|
||||
let nonce_authority = config.signers[nonce_authority];
|
||||
let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -535,8 +577,8 @@ pub fn process_new_nonce(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction(&mut tx, &[config.signers[0], nonce_authority]);
|
||||
let result = rpc_client
|
||||
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
|
||||
@ -545,14 +587,7 @@ pub fn process_show_nonce_account(
|
||||
nonce_account_pubkey: &Pubkey,
|
||||
use_lamports_unit: bool,
|
||||
) -> ProcessResult {
|
||||
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
|
||||
if nonce_account.owner != system_program::id() {
|
||||
return Err(CliError::RpcRequestError(format!(
|
||||
"{:?} is not a nonce account",
|
||||
nonce_account_pubkey
|
||||
))
|
||||
.into());
|
||||
}
|
||||
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
|
||||
let print_account = |data: Option<&nonce::state::Data>| {
|
||||
println!(
|
||||
"Balance: {}",
|
||||
@ -583,15 +618,9 @@ pub fn process_show_nonce_account(
|
||||
}
|
||||
Ok("".to_string())
|
||||
};
|
||||
let nonce_state = StateMut::<Versions>::state(&nonce_account).map(|v| v.convert_to_current());
|
||||
match nonce_state {
|
||||
Ok(State::Uninitialized) => print_account(None),
|
||||
Ok(State::Initialized(ref data)) => print_account(Some(data)),
|
||||
Err(err) => Err(CliError::RpcRequestError(format!(
|
||||
"Account data could not be deserialized to nonce state: {:?}",
|
||||
err
|
||||
))
|
||||
.into()),
|
||||
match state_from_account(&nonce_account)? {
|
||||
State::Uninitialized => print_account(None),
|
||||
State::Initialized(ref data) => print_account(Some(data)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -612,7 +641,7 @@ pub fn process_withdraw_from_nonce_account(
|
||||
destination_account_pubkey,
|
||||
lamports,
|
||||
);
|
||||
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -621,7 +650,7 @@ pub fn process_withdraw_from_nonce_account(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<NonceError>(result)
|
||||
}
|
||||
|
||||
@ -848,31 +877,6 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-from-nonce-account",
|
||||
&keypair_file,
|
||||
&nonce_account_string,
|
||||
"42",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(
|
||||
&test_withdraw_from_nonce_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawFromNonceAccount {
|
||||
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
nonce_authority: 0,
|
||||
destination_account_pubkey: nonce_account_pubkey,
|
||||
lamports: 42000000000
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromNonceAccount Subcommand with authority
|
||||
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
@ -920,17 +924,13 @@ mod tests {
|
||||
let invalid_owner = Account::new_data(1, &data, &Pubkey::new(&[1u8; 32]));
|
||||
assert_eq!(
|
||||
check_nonce_account(&invalid_owner.unwrap(), &nonce_pubkey, &blockhash),
|
||||
Err(Box::new(CliError::InvalidNonce(
|
||||
CliNonceError::InvalidAccountOwner
|
||||
))),
|
||||
Err(CliNonceError::InvalidAccountOwner.into()),
|
||||
);
|
||||
|
||||
let invalid_data = Account::new_data(1, &"invalid", &system_program::ID);
|
||||
assert_eq!(
|
||||
check_nonce_account(&invalid_data.unwrap(), &nonce_pubkey, &blockhash),
|
||||
Err(Box::new(CliError::InvalidNonce(
|
||||
CliNonceError::InvalidAccountData
|
||||
))),
|
||||
Err(CliNonceError::InvalidAccountData.into()),
|
||||
);
|
||||
|
||||
let data = Versions::new_current(State::Initialized(nonce::state::Data {
|
||||
@ -941,7 +941,7 @@ mod tests {
|
||||
let invalid_hash = Account::new_data(1, &data, &system_program::ID);
|
||||
assert_eq!(
|
||||
check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash),
|
||||
Err(Box::new(CliError::InvalidNonce(CliNonceError::InvalidHash))),
|
||||
Err(CliNonceError::InvalidHash.into()),
|
||||
);
|
||||
|
||||
let data = Versions::new_current(State::Initialized(nonce::state::Data {
|
||||
@ -952,18 +952,84 @@ mod tests {
|
||||
let invalid_authority = Account::new_data(1, &data, &system_program::ID);
|
||||
assert_eq!(
|
||||
check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash),
|
||||
Err(Box::new(CliError::InvalidNonce(
|
||||
CliNonceError::InvalidAuthority
|
||||
))),
|
||||
Err(CliNonceError::InvalidAuthority.into()),
|
||||
);
|
||||
|
||||
let data = Versions::new_current(State::Uninitialized);
|
||||
let invalid_state = Account::new_data(1, &data, &system_program::ID);
|
||||
assert_eq!(
|
||||
check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash),
|
||||
Err(Box::new(CliError::InvalidNonce(
|
||||
CliNonceError::InvalidState
|
||||
))),
|
||||
Err(CliNonceError::InvalidStateForOperation.into()),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_account_identity_ok() {
|
||||
let nonce_account = nonce::create_account(1).into_inner();
|
||||
assert_eq!(account_identity_ok(&nonce_account), Ok(()));
|
||||
|
||||
let system_account = Account::new(1, 0, &system_program::id());
|
||||
assert_eq!(
|
||||
account_identity_ok(&system_account),
|
||||
Err(CliNonceError::UnexpectedDataSize),
|
||||
);
|
||||
|
||||
let other_program = Pubkey::new(&[1u8; 32]);
|
||||
let other_account_no_data = Account::new(1, 0, &other_program);
|
||||
assert_eq!(
|
||||
account_identity_ok(&other_account_no_data),
|
||||
Err(CliNonceError::InvalidAccountOwner),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_state_from_account() {
|
||||
let mut nonce_account = nonce::create_account(1).into_inner();
|
||||
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
|
||||
|
||||
let data = nonce::state::Data {
|
||||
authority: Pubkey::new(&[1u8; 32]),
|
||||
blockhash: Hash::new(&[42u8; 32]),
|
||||
fee_calculator: FeeCalculator::new(42),
|
||||
};
|
||||
nonce_account
|
||||
.set_state(&Versions::new_current(State::Initialized(data.clone())))
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
state_from_account(&nonce_account),
|
||||
Ok(State::Initialized(data))
|
||||
);
|
||||
|
||||
let wrong_data_size_account = Account::new(1, 1, &system_program::id());
|
||||
assert_eq!(
|
||||
state_from_account(&wrong_data_size_account),
|
||||
Err(CliNonceError::InvalidAccountData),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_data_from_helpers() {
|
||||
let mut nonce_account = nonce::create_account(1).into_inner();
|
||||
let state = state_from_account(&nonce_account).unwrap();
|
||||
assert_eq!(
|
||||
data_from_state(&state),
|
||||
Err(CliNonceError::InvalidStateForOperation)
|
||||
);
|
||||
assert_eq!(
|
||||
data_from_account(&nonce_account),
|
||||
Err(CliNonceError::InvalidStateForOperation)
|
||||
);
|
||||
|
||||
let data = nonce::state::Data {
|
||||
authority: Pubkey::new(&[1u8; 32]),
|
||||
blockhash: Hash::new(&[42u8; 32]),
|
||||
fee_calculator: FeeCalculator::new(42),
|
||||
};
|
||||
nonce_account
|
||||
.set_state(&Versions::new_current(State::Initialized(data.clone())))
|
||||
.unwrap();
|
||||
let state = state_from_account(&nonce_account).unwrap();
|
||||
assert_eq!(data_from_state(&state), Ok(&data));
|
||||
assert_eq!(data_from_account(&nonce_account), Ok(data));
|
||||
}
|
||||
}
|
||||
|
@ -1,253 +0,0 @@
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use serde_json::Value;
|
||||
use solana_clap_utils::{
|
||||
input_parsers::value_of,
|
||||
input_validators::{is_hash, is_pubkey_sig},
|
||||
offline::{BLOCKHASH_ARG, SIGNER_ARG, SIGN_ONLY_ARG},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature};
|
||||
use std::str::FromStr;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum BlockhashQuery {
|
||||
None(Hash, FeeCalculator),
|
||||
FeeCalculator(Hash),
|
||||
All,
|
||||
}
|
||||
|
||||
impl BlockhashQuery {
|
||||
pub fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
|
||||
match blockhash {
|
||||
Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
|
||||
Some(hash) if !sign_only => Self::FeeCalculator(hash),
|
||||
None if !sign_only => Self::All,
|
||||
_ => panic!("Cannot resolve blockhash"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
|
||||
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
BlockhashQuery::new(blockhash, sign_only)
|
||||
}
|
||||
|
||||
pub fn get_blockhash_fee_calculator(
|
||||
&self,
|
||||
rpc_client: &RpcClient,
|
||||
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
|
||||
let (hash, fee_calc) = match self {
|
||||
BlockhashQuery::None(hash, fee_calc) => (Some(hash), Some(fee_calc)),
|
||||
BlockhashQuery::FeeCalculator(hash) => (Some(hash), None),
|
||||
BlockhashQuery::All => (None, None),
|
||||
};
|
||||
if None == fee_calc {
|
||||
let (cluster_hash, fee_calc) = rpc_client.get_recent_blockhash()?;
|
||||
Ok((*hash.unwrap_or(&cluster_hash), fee_calc))
|
||||
} else {
|
||||
Ok((*hash.unwrap(), fee_calc.unwrap().clone()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BlockhashQuery {
|
||||
fn default() -> Self {
|
||||
BlockhashQuery::All
|
||||
}
|
||||
}
|
||||
|
||||
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(BLOCKHASH_ARG.name)
|
||||
.long(BLOCKHASH_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("BLOCKHASH")
|
||||
.validator(is_hash)
|
||||
.help(BLOCKHASH_ARG.help)
|
||||
}
|
||||
|
||||
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(SIGN_ONLY_ARG.name)
|
||||
.long(SIGN_ONLY_ARG.long)
|
||||
.takes_value(false)
|
||||
.requires(BLOCKHASH_ARG.name)
|
||||
.help(SIGN_ONLY_ARG.help)
|
||||
}
|
||||
|
||||
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(SIGNER_ARG.name)
|
||||
.long(SIGNER_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("BASE58_PUBKEY=BASE58_SIG")
|
||||
.validator(is_pubkey_sig)
|
||||
.requires(BLOCKHASH_ARG.name)
|
||||
.multiple(true)
|
||||
.help(SIGNER_ARG.help)
|
||||
}
|
||||
|
||||
pub trait OfflineArgs {
|
||||
fn offline_args(self) -> Self;
|
||||
}
|
||||
|
||||
impl OfflineArgs for App<'_, '_> {
|
||||
fn offline_args(self) -> Self {
|
||||
self.arg(blockhash_arg())
|
||||
.arg(sign_only_arg())
|
||||
.arg(signer_arg())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
|
||||
let object: Value = serde_json::from_str(&reply).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let blockhash = blockhash_str.parse::<Hash>().unwrap();
|
||||
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
|
||||
let signers = signer_strings
|
||||
.iter()
|
||||
.map(|signer_string| {
|
||||
let mut signer = signer_string.as_str().unwrap().split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect();
|
||||
(blockhash, signers)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::App;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcResponseContext},
|
||||
};
|
||||
use solana_sdk::{fee_calculator::FeeCalculator, hash::hash};
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
|
||||
fn test_blockhashspec_new_ok() {
|
||||
let blockhash = hash(&[1u8]);
|
||||
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), true),
|
||||
BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), false),
|
||||
BlockhashQuery::FeeCalculator(blockhash),
|
||||
);
|
||||
assert_eq!(BlockhashQuery::new(None, false), BlockhashQuery::All,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhashspec_new_fail() {
|
||||
BlockhashQuery::new(None, true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockhashspec_new_from_matches_ok() {
|
||||
let test_commands = App::new("blockhashspec_test").offline_args();
|
||||
let blockhash = hash(&[1u8]);
|
||||
let blockhash_string = blockhash.to_string();
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhashspec_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
);
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhashspec_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::FeeCalculator(blockhash),
|
||||
);
|
||||
|
||||
let matches = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["blockhashspec_test"]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::All,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhashspec_new_from_matches_fail() {
|
||||
let test_commands = App::new("blockhashspec_test")
|
||||
.arg(blockhash_arg())
|
||||
// We can really only hit this case unless the arg requirements
|
||||
// are broken, so unset the requires() to recreate that condition
|
||||
.arg(sign_only_arg().requires(""));
|
||||
|
||||
let matches = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["blockhashspec_test", "--sign-only"]);
|
||||
BlockhashQuery::new_from_matches(&matches);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockhashspec_get_blockhash_fee_calc() {
|
||||
let test_blockhash = hash(&[0u8]);
|
||||
let rpc_blockhash = hash(&[1u8]);
|
||||
let rpc_fee_calc = FeeCalculator::new(42);
|
||||
let get_recent_blockhash_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!((
|
||||
Value::String(rpc_blockhash.to_string()),
|
||||
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
|
||||
)),
|
||||
});
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::All
|
||||
.get_blockhash_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(rpc_blockhash, rpc_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::FeeCalculator(test_blockhash)
|
||||
.get_blockhash_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(test_blockhash, rpc_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::None(test_blockhash, FeeCalculator::default())
|
||||
.get_blockhash_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(test_blockhash, FeeCalculator::default()),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
assert!(BlockhashQuery::All
|
||||
.get_blockhash_fee_calculator(&rpc_client)
|
||||
.is_err());
|
||||
}
|
||||
}
|
394
cli/src/offline/blockhash_query.rs
Normal file
394
cli/src/offline/blockhash_query.rs
Normal file
@ -0,0 +1,394 @@
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Source {
|
||||
Cluster,
|
||||
NonceAccount(Pubkey),
|
||||
}
|
||||
|
||||
impl Source {
|
||||
pub fn get_blockhash_and_fee_calculator(
|
||||
&self,
|
||||
rpc_client: &RpcClient,
|
||||
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
|
||||
match self {
|
||||
Self::Cluster => {
|
||||
let res = rpc_client.get_recent_blockhash()?;
|
||||
Ok(res)
|
||||
}
|
||||
Self::NonceAccount(ref pubkey) => {
|
||||
let data = nonce::get_account(rpc_client, pubkey)
|
||||
.and_then(|ref a| nonce::data_from_account(a))?;
|
||||
Ok((data.blockhash, data.fee_calculator))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_fee_calculator(
|
||||
&self,
|
||||
rpc_client: &RpcClient,
|
||||
blockhash: &Hash,
|
||||
) -> Result<Option<FeeCalculator>, Box<dyn std::error::Error>> {
|
||||
match self {
|
||||
Self::Cluster => {
|
||||
let res = rpc_client.get_fee_calculator_for_blockhash(blockhash)?;
|
||||
Ok(res)
|
||||
}
|
||||
Self::NonceAccount(ref pubkey) => {
|
||||
let res = nonce::get_account(rpc_client, pubkey)
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.and_then(|d| {
|
||||
if d.blockhash == *blockhash {
|
||||
Ok(Some(d.fee_calculator))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
})?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum BlockhashQuery {
|
||||
None(Hash),
|
||||
FeeCalculator(Source, Hash),
|
||||
All(Source),
|
||||
}
|
||||
|
||||
impl BlockhashQuery {
|
||||
pub fn new(blockhash: Option<Hash>, sign_only: bool, nonce_account: Option<Pubkey>) -> Self {
|
||||
let source = nonce_account
|
||||
.map(Source::NonceAccount)
|
||||
.unwrap_or(Source::Cluster);
|
||||
match blockhash {
|
||||
Some(hash) if sign_only => Self::None(hash),
|
||||
Some(hash) if !sign_only => Self::FeeCalculator(source, hash),
|
||||
None if !sign_only => Self::All(source),
|
||||
_ => panic!("Cannot resolve blockhash"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
|
||||
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
let nonce_account = pubkey_of(matches, nonce::NONCE_ARG.name);
|
||||
BlockhashQuery::new(blockhash, sign_only, nonce_account)
|
||||
}
|
||||
|
||||
pub fn get_blockhash_and_fee_calculator(
|
||||
&self,
|
||||
rpc_client: &RpcClient,
|
||||
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
|
||||
match self {
|
||||
BlockhashQuery::None(hash) => Ok((*hash, FeeCalculator::default())),
|
||||
BlockhashQuery::FeeCalculator(source, hash) => {
|
||||
let fee_calculator = source
|
||||
.get_fee_calculator(rpc_client, hash)?
|
||||
.ok_or(format!("Hash has expired {:?}", hash))?;
|
||||
Ok((*hash, fee_calculator))
|
||||
}
|
||||
BlockhashQuery::All(source) => source.get_blockhash_and_fee_calculator(rpc_client),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BlockhashQuery {
|
||||
fn default() -> Self {
|
||||
BlockhashQuery::All(Source::Cluster)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{nonce::nonce_arg, offline::blockhash_query::BlockhashQuery};
|
||||
use clap::App;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcAccount, RpcFeeCalculator, RpcResponseContext},
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account, fee_calculator::FeeCalculator, hash::hash, nonce, system_program,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
|
||||
fn test_blockhash_query_new_ok() {
|
||||
let blockhash = hash(&[1u8]);
|
||||
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), true, None),
|
||||
BlockhashQuery::None(blockhash),
|
||||
);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), false, None),
|
||||
BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(None, false, None),
|
||||
BlockhashQuery::All(blockhash_query::Source::Cluster)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), true, Some(nonce_pubkey)),
|
||||
BlockhashQuery::None(blockhash),
|
||||
);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(Some(blockhash), false, Some(nonce_pubkey)),
|
||||
BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
blockhash
|
||||
),
|
||||
);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new(None, false, Some(nonce_pubkey)),
|
||||
BlockhashQuery::All(blockhash_query::Source::NonceAccount(nonce_pubkey)),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhash_query_new_no_nonce_fail() {
|
||||
BlockhashQuery::new(None, true, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhash_query_new_nonce_fail() {
|
||||
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
BlockhashQuery::new(None, true, Some(nonce_pubkey));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockhash_query_new_from_matches_ok() {
|
||||
let test_commands = App::new("blockhash_query_test")
|
||||
.arg(nonce_arg())
|
||||
.offline_args();
|
||||
let blockhash = hash(&[1u8]);
|
||||
let blockhash_string = blockhash.to_string();
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhash_query_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::None(blockhash),
|
||||
);
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhash_query_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
);
|
||||
|
||||
let matches = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["blockhash_query_test"]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
);
|
||||
|
||||
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
let nonce_string = nonce_pubkey.to_string();
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhash_query_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--sign-only",
|
||||
"--nonce",
|
||||
&nonce_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::None(blockhash),
|
||||
);
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhash_query_test",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--nonce",
|
||||
&nonce_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
BlockhashQuery::new_from_matches(&matches),
|
||||
BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
blockhash
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhash_query_new_from_matches_without_nonce_fail() {
|
||||
let test_commands = App::new("blockhash_query_test")
|
||||
.arg(blockhash_arg())
|
||||
// We can really only hit this case if the arg requirements
|
||||
// are broken, so unset the requires() to recreate that condition
|
||||
.arg(sign_only_arg().requires(""));
|
||||
|
||||
let matches = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["blockhash_query_test", "--sign-only"]);
|
||||
BlockhashQuery::new_from_matches(&matches);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_blockhash_query_new_from_matches_with_nonce_fail() {
|
||||
let test_commands = App::new("blockhash_query_test")
|
||||
.arg(blockhash_arg())
|
||||
// We can really only hit this case if the arg requirements
|
||||
// are broken, so unset the requires() to recreate that condition
|
||||
.arg(sign_only_arg().requires(""));
|
||||
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
let nonce_string = nonce_pubkey.to_string();
|
||||
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
"blockhash_query_test",
|
||||
"--sign-only",
|
||||
"--nonce",
|
||||
&nonce_string,
|
||||
]);
|
||||
BlockhashQuery::new_from_matches(&matches);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockhash_query_get_blockhash_fee_calc() {
|
||||
let test_blockhash = hash(&[0u8]);
|
||||
let rpc_blockhash = hash(&[1u8]);
|
||||
let rpc_fee_calc = FeeCalculator::new(42);
|
||||
let get_recent_blockhash_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!((
|
||||
Value::String(rpc_blockhash.to_string()),
|
||||
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
|
||||
)),
|
||||
});
|
||||
let get_fee_calculator_for_blockhash_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!(RpcFeeCalculator {
|
||||
fee_calculator: rpc_fee_calc.clone()
|
||||
}),
|
||||
});
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::default()
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(rpc_blockhash, rpc_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
mocks.insert(
|
||||
RpcRequest::GetFeeCalculatorForBlockhash,
|
||||
get_fee_calculator_for_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::FeeCalculator(Source::Cluster, test_blockhash)
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(test_blockhash, rpc_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
get_recent_blockhash_response.clone(),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::None(test_blockhash)
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(test_blockhash, FeeCalculator::default()),
|
||||
);
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
assert!(BlockhashQuery::default()
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.is_err());
|
||||
|
||||
let nonce_blockhash = Hash::new(&[2u8; 32]);
|
||||
let nonce_fee_calc = FeeCalculator::new(4242);
|
||||
let data = nonce::state::Data {
|
||||
authority: Pubkey::new(&[3u8; 32]),
|
||||
blockhash: nonce_blockhash,
|
||||
fee_calculator: nonce_fee_calc.clone(),
|
||||
};
|
||||
let nonce_account = Account::new_data_with_space(
|
||||
42,
|
||||
&nonce::state::Versions::new_current(nonce::State::Initialized(data)),
|
||||
nonce::State::size(),
|
||||
&system_program::id(),
|
||||
)
|
||||
.unwrap();
|
||||
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
|
||||
let rpc_nonce_account = RpcAccount::encode(nonce_account);
|
||||
let get_account_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!(Some(rpc_nonce_account.clone())),
|
||||
});
|
||||
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::All(Source::NonceAccount(nonce_pubkey))
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(nonce_blockhash, nonce_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), nonce_blockhash)
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(nonce_blockhash, nonce_fee_calc.clone()),
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert!(
|
||||
BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), test_blockhash)
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.is_err()
|
||||
);
|
||||
let mut mocks = HashMap::new();
|
||||
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
assert_eq!(
|
||||
BlockhashQuery::None(nonce_blockhash)
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.unwrap(),
|
||||
(nonce_blockhash, FeeCalculator::default()),
|
||||
);
|
||||
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
assert!(BlockhashQuery::All(Source::NonceAccount(nonce_pubkey))
|
||||
.get_blockhash_and_fee_calculator(&rpc_client)
|
||||
.is_err());
|
||||
}
|
||||
}
|
114
cli/src/offline/mod.rs
Normal file
114
cli/src/offline/mod.rs
Normal file
@ -0,0 +1,114 @@
|
||||
pub mod blockhash_query;
|
||||
|
||||
use crate::nonce;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use serde_json::Value;
|
||||
use solana_clap_utils::{
|
||||
input_parsers::{pubkey_of, value_of},
|
||||
input_validators::{is_hash, is_pubkey_sig},
|
||||
keypair::presigner_from_pubkey_sigs,
|
||||
offline::{BLOCKHASH_ARG, SIGNER_ARG, SIGN_ONLY_ARG},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_sdk::{
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Presigner, Signature},
|
||||
};
|
||||
use std::str::FromStr;
|
||||
|
||||
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(BLOCKHASH_ARG.name)
|
||||
.long(BLOCKHASH_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("BLOCKHASH")
|
||||
.validator(is_hash)
|
||||
.help(BLOCKHASH_ARG.help)
|
||||
}
|
||||
|
||||
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(SIGN_ONLY_ARG.name)
|
||||
.long(SIGN_ONLY_ARG.long)
|
||||
.takes_value(false)
|
||||
.requires(BLOCKHASH_ARG.name)
|
||||
.help(SIGN_ONLY_ARG.help)
|
||||
}
|
||||
|
||||
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(SIGNER_ARG.name)
|
||||
.long(SIGNER_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("PUBKEY=SIGNATURE")
|
||||
.validator(is_pubkey_sig)
|
||||
.requires(BLOCKHASH_ARG.name)
|
||||
.multiple(true)
|
||||
.help(SIGNER_ARG.help)
|
||||
}
|
||||
|
||||
pub trait OfflineArgs {
|
||||
fn offline_args(self) -> Self;
|
||||
}
|
||||
|
||||
impl OfflineArgs for App<'_, '_> {
|
||||
fn offline_args(self) -> Self {
|
||||
self.arg(blockhash_arg())
|
||||
.arg(sign_only_arg())
|
||||
.arg(signer_arg())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SignOnly {
|
||||
pub blockhash: Hash,
|
||||
pub present_signers: Vec<(Pubkey, Signature)>,
|
||||
pub absent_signers: Vec<Pubkey>,
|
||||
pub bad_signers: Vec<Pubkey>,
|
||||
}
|
||||
|
||||
impl SignOnly {
|
||||
pub fn has_all_signers(&self) -> bool {
|
||||
self.absent_signers.is_empty() && self.bad_signers.is_empty()
|
||||
}
|
||||
|
||||
pub fn presigner_of(&self, pubkey: &Pubkey) -> Option<Presigner> {
|
||||
presigner_from_pubkey_sigs(pubkey, &self.present_signers)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
|
||||
let object: Value = serde_json::from_str(&reply).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let blockhash = blockhash_str.parse::<Hash>().unwrap();
|
||||
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
|
||||
let present_signers = signer_strings
|
||||
.iter()
|
||||
.map(|signer_string| {
|
||||
let mut signer = signer_string.as_str().unwrap().split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect();
|
||||
let signer_strings = object.get("absent").unwrap().as_array().unwrap();
|
||||
let absent_signers = signer_strings
|
||||
.iter()
|
||||
.map(|val| {
|
||||
let s = val.as_str().unwrap();
|
||||
Pubkey::from_str(s).unwrap()
|
||||
})
|
||||
.collect();
|
||||
let signer_strings = object.get("badSig").unwrap().as_array().unwrap();
|
||||
let bad_signers = signer_strings
|
||||
.iter()
|
||||
.map(|val| {
|
||||
let s = val.as_str().unwrap();
|
||||
Pubkey::from_str(s).unwrap()
|
||||
})
|
||||
.collect();
|
||||
SignOnly {
|
||||
blockhash,
|
||||
present_signers,
|
||||
absent_signers,
|
||||
bad_signers,
|
||||
}
|
||||
}
|
824
cli/src/stake.rs
824
cli/src/stake.rs
File diff suppressed because it is too large
Load Diff
@ -1,6 +1,7 @@
|
||||
use crate::cli::{
|
||||
check_account_for_fee, check_unique_pubkeys, log_instruction_custom_error, CliCommand,
|
||||
CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex,
|
||||
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
SignerIndex,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path};
|
||||
@ -25,18 +26,18 @@ impl StorageSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("storage_account_owner")
|
||||
.index(1)
|
||||
.value_name("STORAGE ACCOUNT OWNER PUBKEY")
|
||||
.value_name("AUTHORITY_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair),
|
||||
.validator(is_valid_pubkey),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("storage_account")
|
||||
.index(2)
|
||||
.value_name("STORAGE ACCOUNT")
|
||||
.value_name("ACCOUNT_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair_or_ask_keyword),
|
||||
.validator(is_valid_signer),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -45,18 +46,18 @@ impl StorageSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("storage_account_owner")
|
||||
.index(1)
|
||||
.value_name("STORAGE ACCOUNT OWNER PUBKEY")
|
||||
.value_name("AUTHORITY_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair),
|
||||
.validator(is_valid_pubkey),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("storage_account")
|
||||
.index(2)
|
||||
.value_name("STORAGE ACCOUNT")
|
||||
.value_name("ACCOUNT_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair_or_ask_keyword),
|
||||
.validator(is_valid_signer),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -65,19 +66,19 @@ impl StorageSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("node_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("NODE PUBKEY")
|
||||
.value_name("NODE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("The node account to credit the rewards to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("storage_account_pubkey")
|
||||
.index(2)
|
||||
.value_name("STORAGE ACCOUNT PUBKEY")
|
||||
.value_name("STORAGE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Storage account address to redeem credits for"),
|
||||
),
|
||||
)
|
||||
@ -88,11 +89,11 @@ impl StorageSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("storage_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("STORAGE ACCOUNT PUBKEY")
|
||||
.value_name("STORAGE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Storage account pubkey"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Storage account address"),
|
||||
),
|
||||
)
|
||||
}
|
||||
@ -103,18 +104,26 @@ pub fn parse_storage_create_archiver_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
|
||||
let storage_account = keypair_of(matches, "storage_account").unwrap();
|
||||
let account_owner =
|
||||
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
|
||||
let (storage_account, storage_account_pubkey) =
|
||||
signer_of(matches, "storage_account", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, storage_account],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateStorageAccount {
|
||||
account_owner,
|
||||
storage_account: 1,
|
||||
storage_account: signer_info.index_of(storage_account_pubkey).unwrap(),
|
||||
account_type: StorageAccountType::Archiver,
|
||||
},
|
||||
signers: vec![
|
||||
signer_from_path(matches, default_signer_path, "keypair", wallet_manager)?,
|
||||
storage_account.into(),
|
||||
],
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
@ -123,18 +132,26 @@ pub fn parse_storage_create_validator_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
|
||||
let storage_account = keypair_of(matches, "storage_account").unwrap();
|
||||
let account_owner =
|
||||
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
|
||||
let (storage_account, storage_account_pubkey) =
|
||||
signer_of(matches, "storage_account", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, storage_account],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateStorageAccount {
|
||||
account_owner,
|
||||
storage_account: 1,
|
||||
storage_account: signer_info.index_of(storage_account_pubkey).unwrap(),
|
||||
account_type: StorageAccountType::Validator,
|
||||
},
|
||||
signers: vec![
|
||||
signer_from_path(matches, default_signer_path, "keypair", wallet_manager)?,
|
||||
storage_account.into(),
|
||||
],
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
@ -143,8 +160,10 @@ pub fn parse_storage_claim_reward(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let node_account_pubkey = pubkey_of(matches, "node_account_pubkey").unwrap();
|
||||
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
|
||||
let node_account_pubkey =
|
||||
pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap();
|
||||
let storage_account_pubkey =
|
||||
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ClaimStorageReward {
|
||||
node_account_pubkey,
|
||||
@ -161,8 +180,10 @@ pub fn parse_storage_claim_reward(
|
||||
|
||||
pub fn parse_storage_get_account_command(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
|
||||
let storage_account_pubkey =
|
||||
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowStorageAccount(storage_account_pubkey),
|
||||
signers: vec![],
|
||||
@ -212,7 +233,7 @@ pub fn process_create_storage_account(
|
||||
);
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let message = Message::new(ixs);
|
||||
let message = Message::new(&ixs);
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -221,7 +242,7 @@ pub fn process_create_storage_account(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
|
||||
@ -236,7 +257,7 @@ pub fn process_claim_storage_reward(
|
||||
let instruction =
|
||||
storage_instruction::claim_reward(node_account_pubkey, storage_account_pubkey);
|
||||
let signers = [config.signers[0]];
|
||||
let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&[instruction], Some(&signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -245,8 +266,8 @@ pub fn process_claim_storage_reward(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
|
||||
Ok(signature_str)
|
||||
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
Ok(signature.to_string())
|
||||
}
|
||||
|
||||
pub fn process_show_storage_account(
|
||||
@ -266,7 +287,7 @@ pub fn process_show_storage_account(
|
||||
|
||||
use solana_storage_program::storage_contract::StorageContract;
|
||||
let storage_contract: StorageContract = account.state().map_err(|err| {
|
||||
CliError::RpcRequestError(format!("Unable to deserialize storage account: {:?}", err))
|
||||
CliError::RpcRequestError(format!("Unable to deserialize storage account: {}", err))
|
||||
})?;
|
||||
println!("{:#?}", storage_contract);
|
||||
println!("Account Lamports: {}", account.lamports);
|
||||
|
@ -274,7 +274,7 @@ pub fn process_set_validator_info(
|
||||
println!("--force supplied, ignoring: {:?}", result);
|
||||
} else {
|
||||
result.map_err(|err| {
|
||||
CliError::BadParameter(format!("Invalid validator keybase username: {:?}", err))
|
||||
CliError::BadParameter(format!("Invalid validator keybase username: {}", err))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
@ -339,7 +339,7 @@ pub fn process_set_validator_info(
|
||||
&validator_info,
|
||||
)]);
|
||||
let signers = vec![config.signers[0], &info_keypair];
|
||||
let message = Message::new(instructions);
|
||||
let message = Message::new(&instructions);
|
||||
(message, signers)
|
||||
} else {
|
||||
println!(
|
||||
@ -353,7 +353,7 @@ pub fn process_set_validator_info(
|
||||
keys,
|
||||
&validator_info,
|
||||
)];
|
||||
let message = Message::new_with_payer(instructions, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&instructions, Some(&config.signers[0].pubkey()));
|
||||
let signers = vec![config.signers[0]];
|
||||
(message, signers)
|
||||
};
|
||||
@ -368,7 +368,7 @@ pub fn process_set_validator_info(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
|
||||
println!("Success! Validator info published at: {:?}", info_pubkey);
|
||||
println!("{}", signature_str);
|
||||
|
411
cli/src/vote.rs
411
cli/src/vote.rs
@ -1,22 +1,18 @@
|
||||
use crate::cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, CliSignerInfo,
|
||||
ProcessResult,
|
||||
ProcessResult, SignerIndex,
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
system_instruction::{create_address_with_seed, SystemError},
|
||||
transaction::Transaction,
|
||||
account::Account, commitment_config::CommitmentConfig, message::Message, pubkey::Pubkey,
|
||||
system_instruction::SystemError, transaction::Transaction,
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_instruction::{self, VoteError},
|
||||
vote_instruction::{self, withdraw, VoteError},
|
||||
vote_state::{VoteAuthorize, VoteInit, VoteState},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
@ -33,25 +29,25 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("vote_account")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT KEYPAIR")
|
||||
.value_name("ACCOUNT_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair_or_ask_keyword)
|
||||
.help("Vote account keypair to fund"),
|
||||
.validator(is_valid_signer)
|
||||
.help("Vote account keypair to create"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("identity_pubkey")
|
||||
Arg::with_name("identity_account")
|
||||
.index(2)
|
||||
.value_name("VALIDATOR IDENTITY PUBKEY")
|
||||
.value_name("IDENTITY_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Validator that will vote with this account"),
|
||||
.validator(is_valid_signer)
|
||||
.help("Keypair of validator that will vote with this account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("commission")
|
||||
.long("commission")
|
||||
.value_name("NUM")
|
||||
.value_name("PERCENTAGE")
|
||||
.takes_value(true)
|
||||
.default_value("100")
|
||||
.help("The commission taken on reward redemption (0-100)"),
|
||||
@ -59,78 +55,47 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("authorized_voter")
|
||||
.long("authorized-voter")
|
||||
.value_name("PUBKEY")
|
||||
.value_name("VOTER_PUBKEY")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Public key of the authorized voter (defaults to vote account)"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Public key of the authorized voter [default: validator identity pubkey]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
.long("authorized-withdrawer")
|
||||
.value_name("PUBKEY")
|
||||
.value_name("WITHDRAWER_PUBKEY")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Public key of the authorized withdrawer (defaults to cli config pubkey)"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Public key of the authorized withdrawer [default: validator identity pubkey]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("seed")
|
||||
.long("seed")
|
||||
.value_name("SEED STRING")
|
||||
.value_name("STRING")
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey")
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-validator")
|
||||
.about("Update the vote account's validator identity")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Vote account to update"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_identity_pubkey")
|
||||
.index(2)
|
||||
.value_name("NEW VALIDATOR IDENTITY PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("New validator that will vote with this account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_voter")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED VOTER KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair)
|
||||
.help("Authorized voter keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-voter")
|
||||
.about("Authorize a new vote signing keypair for the given vote account")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEY")
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account in which to set the authorized voter"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
.index(2)
|
||||
.value_name("NEW VOTER PUBKEY")
|
||||
.value_name("AUTHORIZED_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("New vote signer to authorize"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("New authorized vote signer"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -139,22 +104,53 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEY")
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account in which to set the authorized withdrawer"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
.index(2)
|
||||
.value_name("NEW WITHDRAWER PUBKEY")
|
||||
.value_name("AUTHORIZED_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("New withdrawer to authorize"),
|
||||
.validator(is_valid_pubkey)
|
||||
.help("New authorized withdrawer"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-validator")
|
||||
.about("Update the vote account's validator identity")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account to update"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_identity_account")
|
||||
.index(2)
|
||||
.value_name("IDENTITY_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Keypair of new validator that will vote with this account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-account")
|
||||
.about("Show the contents of a vote account")
|
||||
@ -170,10 +166,10 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEY")
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account pubkey"),
|
||||
)
|
||||
.arg(
|
||||
@ -183,24 +179,64 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-from-vote-account")
|
||||
.about("Withdraw lamports from a vote account into a specified account")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account from which to withdraw"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("destination_account_pubkey")
|
||||
.index(2)
|
||||
.value_name("RECIPIENT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("The recipient of withdrawn SOL"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("amount")
|
||||
.index(3)
|
||||
.value_name("AMOUNT")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_amount)
|
||||
.help("The amount to withdraw, in SOL"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
.long("authorized-withdrawer")
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer [default: cli config keypair]"),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_vote_create_account(
|
||||
pub fn parse_create_vote_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
|
||||
let (identity_account, identity_pubkey) =
|
||||
signer_of(matches, "identity_account", wallet_manager)?;
|
||||
let commission = value_t_or_exit!(matches, "commission", u8);
|
||||
let authorized_voter = pubkey_of(matches, "authorized_voter");
|
||||
let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");
|
||||
let authorized_voter = pubkey_of_signer(matches, "authorized_voter", wallet_manager)?;
|
||||
let authorized_withdrawer = pubkey_of_signer(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let CliSignerInfo { signers } = generate_unique_signers(
|
||||
vec![payer_provided, vote_account],
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, vote_account, identity_account],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
@ -209,12 +245,12 @@ pub fn parse_vote_create_account(
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed,
|
||||
node_pubkey: identity_pubkey,
|
||||
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
|
||||
authorized_voter,
|
||||
authorized_withdrawer,
|
||||
commission,
|
||||
},
|
||||
signers,
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
@ -224,8 +260,10 @@ pub fn parse_vote_authorize(
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
vote_authorize: VoteAuthorize,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
let new_authorized_pubkey = pubkey_of(matches, "new_authorized_pubkey").unwrap();
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let new_authorized_pubkey =
|
||||
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
|
||||
|
||||
let authorized_voter_provided = None;
|
||||
let CliSignerInfo { signers } = generate_unique_signers(
|
||||
@ -250,13 +288,15 @@ pub fn parse_vote_update_validator(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
let new_identity_pubkey = pubkey_of(matches, "new_identity_pubkey").unwrap();
|
||||
let (authorized_voter, _) = signer_of(matches, "authorized_voter", wallet_manager)?;
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (new_identity_account, new_identity_pubkey) =
|
||||
signer_of(matches, "new_identity_account", wallet_manager)?;
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let CliSignerInfo { signers } = generate_unique_signers(
|
||||
vec![payer_provided, authorized_voter],
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, authorized_withdrawer, new_identity_account],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
@ -265,16 +305,18 @@ pub fn parse_vote_update_validator(
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_pubkey,
|
||||
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
|
||||
},
|
||||
signers,
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_vote_get_account_command(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
@ -291,11 +333,43 @@ pub fn parse_vote_get_account_command(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_withdraw_from_vote_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let destination_account_pubkey =
|
||||
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let (withdraw_authority, withdraw_authority_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, withdraw_authority],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
destination_account_pubkey,
|
||||
withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(),
|
||||
lamports,
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
seed: &Option<String>,
|
||||
identity_pubkey: &Pubkey,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
@ -303,7 +377,7 @@ pub fn process_create_vote_account(
|
||||
let vote_account = config.signers[1];
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
let vote_account_address = if let Some(seed) = seed {
|
||||
create_address_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
} else {
|
||||
vote_account_pubkey
|
||||
};
|
||||
@ -312,6 +386,8 @@ pub fn process_create_vote_account(
|
||||
(&vote_account_address, "vote_account".to_string()),
|
||||
)?;
|
||||
|
||||
let identity_account = config.signers[identity_account];
|
||||
let identity_pubkey = identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
(&vote_account_address, "vote_account".to_string()),
|
||||
(&identity_pubkey, "identity_pubkey".to_string()),
|
||||
@ -334,9 +410,9 @@ pub fn process_create_vote_account(
|
||||
.max(1);
|
||||
|
||||
let vote_init = VoteInit {
|
||||
node_pubkey: *identity_pubkey,
|
||||
authorized_voter: authorized_voter.unwrap_or(vote_account_pubkey),
|
||||
authorized_withdrawer: authorized_withdrawer.unwrap_or(vote_account_pubkey),
|
||||
node_pubkey: identity_pubkey,
|
||||
authorized_voter: authorized_voter.unwrap_or(identity_pubkey),
|
||||
authorized_withdrawer: authorized_withdrawer.unwrap_or(identity_pubkey),
|
||||
commission,
|
||||
};
|
||||
|
||||
@ -359,7 +435,7 @@ pub fn process_create_vote_account(
|
||||
};
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let message = Message::new(ixs);
|
||||
let message = Message::new(&ixs);
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -368,7 +444,7 @@ pub fn process_create_vote_account(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
|
||||
@ -391,7 +467,7 @@ pub fn process_vote_authorize(
|
||||
vote_authorize, // vote or withdraw
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -400,7 +476,8 @@ pub fn process_vote_authorize(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[config.signers[0]]);
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
|
||||
log_instruction_custom_error::<VoteError>(result)
|
||||
}
|
||||
|
||||
@ -408,21 +485,23 @@ pub fn process_vote_update_validator(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_pubkey: &Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_voter = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let new_identity_account = config.signers[new_identity_account];
|
||||
let new_identity_pubkey = new_identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
(vote_account_pubkey, "vote_account_pubkey".to_string()),
|
||||
(new_identity_pubkey, "new_identity_pubkey".to_string()),
|
||||
(&new_identity_pubkey, "new_identity_account".to_string()),
|
||||
)?;
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_node(
|
||||
vote_account_pubkey,
|
||||
&authorized_voter.pubkey(),
|
||||
new_identity_pubkey,
|
||||
&authorized_withdrawer.pubkey(),
|
||||
&new_identity_pubkey,
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
@ -431,7 +510,7 @@ pub fn process_vote_update_validator(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
log_instruction_custom_error::<VoteError>(result)
|
||||
}
|
||||
|
||||
@ -517,6 +596,38 @@ pub fn process_show_vote_account(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_withdraw_from_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
withdraw_authority: SignerIndex,
|
||||
lamports: u64,
|
||||
destination_account_pubkey: &Pubkey,
|
||||
) -> ProcessResult {
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let withdraw_authority = config.signers[withdraw_authority];
|
||||
|
||||
let ix = withdraw(
|
||||
vote_account_pubkey,
|
||||
&withdraw_authority.pubkey(),
|
||||
lamports,
|
||||
destination_account_pubkey,
|
||||
);
|
||||
|
||||
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
|
||||
let mut transaction = Transaction::new_unsigned(message);
|
||||
transaction.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.signers[0].pubkey(),
|
||||
&fee_calculator,
|
||||
&transaction.message,
|
||||
)?;
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction_with_spinner(&mut transaction, &config.signers);
|
||||
log_instruction_custom_error::<VoteError>(result)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -565,13 +676,14 @@ mod tests {
|
||||
let keypair = Keypair::new();
|
||||
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
|
||||
// Test CreateVoteAccount SubCommand
|
||||
let node_pubkey = Pubkey::new_rand();
|
||||
let node_pubkey_string = format!("{}", node_pubkey);
|
||||
let (identity_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let identity_keypair = Keypair::new();
|
||||
write_keypair(&identity_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
let test_create_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"create-vote-account",
|
||||
&keypair_file,
|
||||
&node_pubkey_string,
|
||||
&identity_keypair_file,
|
||||
"--commission",
|
||||
"10",
|
||||
]);
|
||||
@ -580,14 +692,15 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 10,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(keypair)
|
||||
Box::new(keypair),
|
||||
read_keypair_file(&identity_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
@ -600,21 +713,22 @@ mod tests {
|
||||
"test",
|
||||
"create-vote-account",
|
||||
&keypair_file,
|
||||
&node_pubkey_string,
|
||||
&identity_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 100,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(keypair)
|
||||
Box::new(keypair),
|
||||
read_keypair_file(&identity_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
@ -629,7 +743,7 @@ mod tests {
|
||||
"test",
|
||||
"create-vote-account",
|
||||
&keypair_file,
|
||||
&node_pubkey_string,
|
||||
&identity_keypair_file,
|
||||
"--authorized-voter",
|
||||
&authed.to_string(),
|
||||
]);
|
||||
@ -638,14 +752,15 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(authed),
|
||||
authorized_withdrawer: None,
|
||||
commission: 100
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(keypair)
|
||||
Box::new(keypair),
|
||||
read_keypair_file(&identity_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
@ -658,7 +773,7 @@ mod tests {
|
||||
"test",
|
||||
"create-vote-account",
|
||||
&keypair_file,
|
||||
&node_pubkey_string,
|
||||
&identity_keypair_file,
|
||||
"--authorized-withdrawer",
|
||||
&authed.to_string(),
|
||||
]);
|
||||
@ -667,14 +782,15 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: Some(authed),
|
||||
commission: 100
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(keypair)
|
||||
Box::new(keypair),
|
||||
read_keypair_file(&identity_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
@ -683,7 +799,7 @@ mod tests {
|
||||
"test",
|
||||
"vote-update-validator",
|
||||
&pubkey_string,
|
||||
&pubkey2_string,
|
||||
&identity_keypair_file,
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
@ -691,11 +807,72 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_pubkey: pubkey2,
|
||||
new_identity_account: 2,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(read_keypair_file(&keypair_file).unwrap())
|
||||
Box::new(read_keypair_file(&keypair_file).unwrap()),
|
||||
read_keypair_file(&identity_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromVoteAccount subcommand
|
||||
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-from-vote-account",
|
||||
&keypair_file,
|
||||
&pubkey_string,
|
||||
"42",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(
|
||||
&test_withdraw_from_vote_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 0,
|
||||
lamports: 42_000_000_000
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromVoteAccount subcommand with authority
|
||||
let withdraw_authority = Keypair::new();
|
||||
let (withdraw_authority_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&withdraw_authority, tmp_file.as_file_mut()).unwrap();
|
||||
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-from-vote-account",
|
||||
&keypair_file,
|
||||
&pubkey_string,
|
||||
"42",
|
||||
"--authorized-withdrawer",
|
||||
&withdraw_authority_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(
|
||||
&test_withdraw_from_vote_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 1,
|
||||
lamports: 42_000_000_000
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&withdraw_authority_file).unwrap().into()
|
||||
],
|
||||
}
|
||||
);
|
||||
|
@ -1,12 +1,18 @@
|
||||
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
nonce,
|
||||
offline::{
|
||||
blockhash_query::{self, BlockhashQuery},
|
||||
parse_sign_only_reply_string,
|
||||
},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::TestValidator;
|
||||
use solana_core::validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
system_instruction::create_address_with_seed,
|
||||
system_program,
|
||||
};
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
|
||||
@ -123,7 +129,7 @@ fn full_battery_tests(
|
||||
config_nonce.signers = vec![&nonce_keypair];
|
||||
|
||||
let nonce_account = if let Some(seed) = seed.as_ref() {
|
||||
create_address_with_seed(
|
||||
Pubkey::create_with_seed(
|
||||
&config_nonce.signers[0].pubkey(),
|
||||
seed,
|
||||
&system_program::id(),
|
||||
@ -247,3 +253,124 @@ fn full_battery_tests(
|
||||
check_balance(800, &rpc_client, &nonce_account);
|
||||
check_balance(200, &rpc_client, &payee_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_account_with_seed() {
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
});
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
|
||||
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
let to_address = Pubkey::new(&[3u8; 32]);
|
||||
|
||||
// Setup accounts
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
42,
|
||||
)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
4242,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_balance(4242, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_balance(0, &rpc_client, &to_address);
|
||||
|
||||
// Create nonce account
|
||||
let creator_pubkey = online_nonce_creator_signer.pubkey();
|
||||
let authority_pubkey = offline_nonce_authority_signer.pubkey();
|
||||
let seed = authority_pubkey.to_string()[0..32].to_string();
|
||||
let nonce_address =
|
||||
Pubkey::create_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
|
||||
check_balance(0, &rpc_client, &nonce_address);
|
||||
|
||||
let mut creator_config = CliConfig::default();
|
||||
creator_config.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
creator_config.signers = vec![&online_nonce_creator_signer];
|
||||
creator_config.command = CliCommand::CreateNonceAccount {
|
||||
nonce_account: 0,
|
||||
seed: Some(seed),
|
||||
nonce_authority: Some(authority_pubkey),
|
||||
lamports: 241,
|
||||
};
|
||||
process_command(&creator_config).unwrap();
|
||||
check_balance(241, &rpc_client, &nonce_address);
|
||||
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_balance(0, &rpc_client, &to_address);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_address)
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.blockhash;
|
||||
|
||||
// Test by creating transfer TX with nonce, fully offline
|
||||
let mut authority_config = CliConfig::default();
|
||||
authority_config.json_rpc_url = String::default();
|
||||
authority_config.signers = vec![&offline_nonce_authority_signer];
|
||||
// Verify we cannot contact the cluster
|
||||
authority_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&authority_config).unwrap_err();
|
||||
authority_config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_address),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_only_reply = process_command(&authority_config).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
|
||||
let authority_presigner = sign_only.presigner_of(&authority_pubkey).unwrap();
|
||||
assert_eq!(sign_only.blockhash, nonce_hash);
|
||||
|
||||
// And submit it
|
||||
let mut submit_config = CliConfig::default();
|
||||
submit_config.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
submit_config.signers = vec![&authority_presigner];
|
||||
submit_config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_address),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&submit_config).unwrap();
|
||||
check_balance(241, &rpc_client, &nonce_address);
|
||||
check_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_balance(10, &rpc_client, &to_address);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
@ -1,17 +1,18 @@
|
||||
use chrono::prelude::*;
|
||||
use serde_json::Value;
|
||||
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
|
||||
offline::{parse_sign_only_reply_string, BlockhashQuery},
|
||||
nonce,
|
||||
offline::{
|
||||
blockhash_query::{self, BlockhashQuery},
|
||||
parse_sign_only_reply_string,
|
||||
},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
fee_calculator::FeeCalculator,
|
||||
nonce,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
@ -323,7 +324,7 @@ fn test_offline_pay_tx() {
|
||||
config_offline.command = CliCommand::Pay(PayCommand {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
sign_only: true,
|
||||
..PayCommand::default()
|
||||
});
|
||||
@ -333,15 +334,17 @@ fn test_offline_pay_tx() {
|
||||
check_balance(50, &rpc_client, &config_online.signers[0].pubkey());
|
||||
check_balance(0, &rpc_client, &bob_pubkey);
|
||||
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner =
|
||||
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[0].pubkey())
|
||||
.unwrap();
|
||||
let online_pubkey = config_online.signers[0].pubkey();
|
||||
config_online.signers = vec![&offline_presigner];
|
||||
config_online.command = CliCommand::Pay(PayCommand {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
..PayCommand::default()
|
||||
});
|
||||
process_command(&config_online).unwrap();
|
||||
@ -377,7 +380,7 @@ fn test_nonced_pay_tx() {
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@ -408,21 +411,20 @@ fn test_nonced_pay_tx() {
|
||||
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Pay(PayCommand {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
..PayCommand::default()
|
||||
});
|
||||
@ -432,14 +434,11 @@ fn test_nonced_pay_tx() {
|
||||
check_balance(10, &rpc_client, &bob_pubkey);
|
||||
|
||||
// Verify that nonce has been used
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash2 = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
match nonce_state {
|
||||
nonce::State::Initialized(ref data) => assert_ne!(data.blockhash, nonce_hash),
|
||||
_ => assert!(false, "Nonce is not initialized"),
|
||||
}
|
||||
.blockhash;
|
||||
assert_ne!(nonce_hash, nonce_hash2);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
|
@ -1,18 +1,19 @@
|
||||
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
offline::{parse_sign_only_reply_string, BlockhashQuery},
|
||||
nonce,
|
||||
offline::{
|
||||
blockhash_query::{self, BlockhashQuery},
|
||||
parse_sign_only_reply_string,
|
||||
},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
fee_calculator::FeeCalculator,
|
||||
nonce,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
system_instruction::create_address_with_seed,
|
||||
};
|
||||
use solana_stake_program::{
|
||||
stake_instruction::LockupArgs,
|
||||
@ -66,7 +67,7 @@ fn test_stake_delegation_force() {
|
||||
config.signers = vec![&default_signer, &vote_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
node_pubkey: config.signers[0].pubkey(),
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 0,
|
||||
@ -84,7 +85,7 @@ fn test_stake_delegation_force() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -158,7 +159,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
.unwrap();
|
||||
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
let stake_address = create_address_with_seed(
|
||||
let stake_address = Pubkey::create_with_seed(
|
||||
&config_validator.signers[0].pubkey(),
|
||||
"hi there",
|
||||
&solana_stake_program::id(),
|
||||
@ -175,7 +176,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -258,7 +259,7 @@ fn test_stake_delegation_and_deactivation() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -363,7 +364,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -379,15 +380,17 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
stake_authority: 0,
|
||||
force: false,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner =
|
||||
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[0].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&offline_presigner];
|
||||
config_payer.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
@ -395,7 +398,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
stake_authority: 0,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -408,21 +411,23 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
stake_authority: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner =
|
||||
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[0].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&offline_presigner];
|
||||
config_payer.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -457,7 +462,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@ -479,7 +484,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -499,14 +504,10 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Delegate stake
|
||||
config.signers = vec![&config_keypair];
|
||||
@ -516,7 +517,10 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
stake_authority: 0,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -524,21 +528,20 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Deactivate stake
|
||||
config.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -608,7 +611,7 @@ fn test_stake_authorize() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -622,9 +625,7 @@ fn test_stake_authorize() {
|
||||
config.signers.pop();
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: online_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
@ -640,13 +641,40 @@ fn test_stake_authorize() {
|
||||
};
|
||||
assert_eq!(current_authority, online_authority_pubkey);
|
||||
|
||||
// Assign new offline stake authority
|
||||
// Assign new online stake and withdraw authorities
|
||||
let online_authority2 = Keypair::new();
|
||||
let online_authority2_pubkey = online_authority2.pubkey();
|
||||
let withdraw_authority = Keypair::new();
|
||||
let withdraw_authority_pubkey = withdraw_authority.pubkey();
|
||||
config.signers.push(&online_authority);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: offline_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 1,
|
||||
new_authorizations: vec![
|
||||
(StakeAuthorize::Staker, online_authority2_pubkey, 1),
|
||||
(StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0),
|
||||
],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
let stake_state: StakeState = stake_account.state().unwrap();
|
||||
let (current_staker, current_withdrawer) = match stake_state {
|
||||
StakeState::Initialized(meta) => (meta.authorized.staker, meta.authorized.withdrawer),
|
||||
_ => panic!("Unexpected stake state!"),
|
||||
};
|
||||
assert_eq!(current_staker, online_authority2_pubkey);
|
||||
assert_eq!(current_withdrawer, withdraw_authority_pubkey);
|
||||
|
||||
// Assign new offline stake authority
|
||||
config.signers.pop();
|
||||
config.signers.push(&online_authority2);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
@ -668,27 +696,23 @@ fn test_stake_authorize() {
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: nonced_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
let offline_presigner =
|
||||
presigner_from_pubkey_sigs(&offline_authority_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: nonced_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -704,7 +728,7 @@ fn test_stake_authorize() {
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
let nonce_account = Keypair::new();
|
||||
config.signers = vec![&default_signer, &nonce_account];
|
||||
@ -717,14 +741,10 @@ fn test_stake_authorize() {
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Nonced assignment of new online stake authority
|
||||
let online_authority = Keypair::new();
|
||||
@ -732,30 +752,28 @@ fn test_stake_authorize() {
|
||||
config_offline.signers.push(&nonced_authority);
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: online_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 1,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
assert_eq!(blockhash, nonce_hash);
|
||||
let offline_presigner =
|
||||
presigner_from_pubkey_sigs(&offline_authority_pubkey, &signers).unwrap();
|
||||
let nonced_authority_presigner =
|
||||
presigner_from_pubkey_sigs(&nonced_authority_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
assert_eq!(sign_only.blockhash, nonce_hash);
|
||||
let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap();
|
||||
let nonced_authority_presigner = sign_only.presigner_of(&nonced_authority_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner, &nonced_authority_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: online_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 1,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -768,14 +786,11 @@ fn test_stake_authorize() {
|
||||
_ => panic!("Unexpected stake state!"),
|
||||
};
|
||||
assert_eq!(current_authority, online_authority_pubkey);
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
|
||||
let new_nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let new_nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
assert_ne!(nonce_hash, new_nonce_hash);
|
||||
|
||||
server.close().unwrap();
|
||||
@ -819,6 +834,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
let mut config_offline = CliConfig::default();
|
||||
let offline_signer = Keypair::new();
|
||||
config_offline.signers = vec![&offline_signer];
|
||||
config_offline.json_rpc_url = String::new();
|
||||
let offline_pubkey = config_offline.signers[0].pubkey();
|
||||
// Verify we're offline
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
@ -845,7 +861,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -859,11 +875,9 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
config.signers = vec![&default_signer, &payer_keypair];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: offline_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 1,
|
||||
@ -879,26 +893,23 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: payer_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: payer_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: 0,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -975,7 +986,7 @@ fn test_stake_split() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 10 * minimum_stake_balance,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -990,7 +1001,7 @@ fn test_stake_split() {
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
|
||||
config.signers = vec![&default_signer, &nonce_account];
|
||||
@ -1004,14 +1015,10 @@ fn test_stake_split() {
|
||||
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Nonced offline split
|
||||
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
@ -1021,7 +1028,7 @@ fn test_stake_split() {
|
||||
stake_account_pubkey: stake_account_pubkey,
|
||||
stake_authority: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
split_stake_account: 1,
|
||||
@ -1030,14 +1037,18 @@ fn test_stake_split() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner, &split_account];
|
||||
config.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_account_pubkey,
|
||||
stake_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
split_stake_account: 1,
|
||||
@ -1127,7 +1138,7 @@ fn test_stake_set_lockup() {
|
||||
lockup,
|
||||
lamports: 10 * minimum_stake_balance,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -1242,7 +1253,7 @@ fn test_stake_set_lockup() {
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
|
||||
let nonce_account_pubkey = nonce_account.pubkey();
|
||||
@ -1257,14 +1268,10 @@ fn test_stake_set_lockup() {
|
||||
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account_pubkey).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Nonced offline set lockup
|
||||
let lockup = LockupArgs {
|
||||
@ -1277,21 +1284,25 @@ fn test_stake_set_lockup() {
|
||||
lockup,
|
||||
custodian: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
custodian: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account_pubkey),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_account_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -1359,7 +1370,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
let nonce_pubkey = nonce_account.pubkey();
|
||||
@ -1373,14 +1384,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Create stake account offline
|
||||
let stake_keypair = keypair_from_seed(&[4u8; 32]).unwrap();
|
||||
@ -1394,16 +1401,17 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
from: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let stake_presigner = presigner_from_pubkey_sigs(&stake_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner, &stake_presigner];
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
@ -1413,7 +1421,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -1423,14 +1434,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
check_balance(50_000, &rpc_client, &stake_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Offline, nonced stake-withdraw
|
||||
let recipient = keypair_from_seed(&[5u8; 32]).unwrap();
|
||||
@ -1442,14 +1449,14 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lamports: 42,
|
||||
withdraw_authority: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
@ -1457,7 +1464,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lamports: 42,
|
||||
withdraw_authority: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -1466,14 +1476,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
check_balance(42, &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Create another stake account. This time with seed
|
||||
let seed = "seedy";
|
||||
@ -1486,16 +1492,16 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
from: 0,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let stake_presigner = presigner_from_pubkey_sigs(&stake_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner, &stake_presigner];
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
@ -1505,7 +1511,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -1513,7 +1522,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let seed_address =
|
||||
create_address_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
|
||||
check_balance(50_000, &rpc_client, &seed_address);
|
||||
|
||||
server.close().unwrap();
|
||||
|
@ -1,17 +1,18 @@
|
||||
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
offline::{parse_sign_only_reply_string, BlockhashQuery},
|
||||
nonce,
|
||||
offline::{
|
||||
blockhash_query::{self, BlockhashQuery},
|
||||
parse_sign_only_reply_string,
|
||||
},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
fee_calculator::FeeCalculator,
|
||||
nonce,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
|
||||
};
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
|
||||
|
||||
@ -67,7 +68,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -94,21 +95,22 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_only_reply = process_command(&offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -120,7 +122,7 @@ fn test_transfer() {
|
||||
// Create nonce account
|
||||
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(nonce::State::size())
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
config.signers = vec![&default_signer, &nonce_account];
|
||||
config.command = CliCommand::CreateNonceAccount {
|
||||
@ -133,14 +135,10 @@ fn test_transfer() {
|
||||
check_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Nonced transfer
|
||||
config.signers = vec![&default_signer];
|
||||
@ -149,7 +147,10 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -157,14 +158,10 @@ fn test_transfer() {
|
||||
process_command(&config).unwrap();
|
||||
check_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance(30, &rpc_client, &recipient_pubkey);
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let new_nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let new_nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
assert_ne!(nonce_hash, new_nonce_hash);
|
||||
|
||||
// Assign nonce authority to offline
|
||||
@ -178,14 +175,10 @@ fn test_transfer() {
|
||||
check_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
|
||||
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
|
||||
.and_then(|ref a| nonce::data_from_account(a))
|
||||
.unwrap()
|
||||
.convert_to_current();
|
||||
let nonce_hash = match nonce_state {
|
||||
nonce::State::Initialized(ref data) => data.blockhash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
.blockhash;
|
||||
|
||||
// Offline, nonced transfer
|
||||
offline.signers = vec![&default_offline_signer];
|
||||
@ -194,21 +187,25 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_only_reply = process_command(&offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
|
||||
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
@ -220,3 +217,114 @@ fn test_transfer() {
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_multisession_signing() {
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
});
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let to_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
let offline_fee_payer_signer = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
let from_null_signer = NullSigner::new(&offline_from_signer.pubkey());
|
||||
|
||||
// Setup accounts
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_from_signer.pubkey(), 43)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
3,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance(43, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_balance(3, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_balance(0, &rpc_client, &to_pubkey);
|
||||
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
|
||||
// Offline fee-payer signs first
|
||||
let mut fee_payer_config = CliConfig::default();
|
||||
fee_payer_config.json_rpc_url = String::default();
|
||||
fee_payer_config.signers = vec![&offline_fee_payer_signer, &from_null_signer];
|
||||
// Verify we cannot contact the cluster
|
||||
fee_payer_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&fee_payer_config).unwrap_err();
|
||||
fee_payer_config.command = CliCommand::Transfer {
|
||||
lamports: 42,
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_only_reply = process_command(&fee_payer_config).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
|
||||
assert!(!sign_only.has_all_signers());
|
||||
let fee_payer_presigner = sign_only
|
||||
.presigner_of(&offline_fee_payer_signer.pubkey())
|
||||
.unwrap();
|
||||
|
||||
// Now the offline fund source
|
||||
let mut from_config = CliConfig::default();
|
||||
from_config.json_rpc_url = String::default();
|
||||
from_config.signers = vec![&fee_payer_presigner, &offline_from_signer];
|
||||
// Verify we cannot contact the cluster
|
||||
from_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&from_config).unwrap_err();
|
||||
from_config.command = CliCommand::Transfer {
|
||||
lamports: 42,
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let sign_only_reply = process_command(&from_config).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let from_presigner = sign_only
|
||||
.presigner_of(&offline_from_signer.pubkey())
|
||||
.unwrap();
|
||||
|
||||
// Finally submit to the cluster
|
||||
let mut config = CliConfig::default();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&fee_payer_presigner, &from_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 42,
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
check_balance(1, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_balance(42, &rpc_client, &to_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
118
cli/tests/vote.rs
Normal file
118
cli/tests/vote.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
|
||||
|
||||
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
|
||||
(0..5).for_each(|tries| {
|
||||
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
|
||||
if balance == expected_balance {
|
||||
return;
|
||||
}
|
||||
if tries == 4 {
|
||||
assert_eq!(balance, expected_balance);
|
||||
}
|
||||
sleep(Duration::from_millis(500));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vote_authorize_and_withdraw() {
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::default();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
&config.signers[0].pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Create vote account
|
||||
let vote_account_keypair = Keypair::new();
|
||||
let vote_account_pubkey = vote_account_keypair.pubkey();
|
||||
config.signers = vec![&default_signer, &vote_account_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: Some(config.signers[0].pubkey()),
|
||||
commission: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let vote_account = rpc_client
|
||||
.get_account(&vote_account_keypair.pubkey())
|
||||
.unwrap();
|
||||
let vote_state: VoteStateVersions = vote_account.state().unwrap();
|
||||
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
|
||||
assert_eq!(authorized_withdrawer, config.signers[0].pubkey());
|
||||
let expected_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
|
||||
.unwrap()
|
||||
.max(1);
|
||||
check_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer
|
||||
let withdraw_authority = Keypair::new();
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let vote_account = rpc_client
|
||||
.get_account(&vote_account_keypair.pubkey())
|
||||
.unwrap();
|
||||
let vote_state: VoteStateVersions = vote_account.state().unwrap();
|
||||
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
|
||||
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
|
||||
|
||||
// Withdraw from vote account
|
||||
let destination_account = Pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
|
||||
config.signers = vec![&default_signer, &withdraw_authority];
|
||||
config.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
lamports: 100,
|
||||
destination_account_pubkey: destination_account,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(expected_balance - 100, &rpc_client, &vote_account_pubkey);
|
||||
check_balance(100, &rpc_client, &destination_account);
|
||||
|
||||
// Re-assign validator identity
|
||||
let new_identity_keypair = Keypair::new();
|
||||
config.signers.push(&new_identity_keypair);
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,15 +11,18 @@ edition = "2018"
|
||||
[dependencies]
|
||||
bincode = "1.2.1"
|
||||
bs58 = "0.3.0"
|
||||
indicatif = "0.14.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
log = "0.4.8"
|
||||
rayon = "1.2.0"
|
||||
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
|
||||
serde = "1.0.104"
|
||||
rayon = "1.3.0"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
serde_json = "1.0.48"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@ -28,4 +31,4 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
|
@ -1,20 +1,161 @@
|
||||
use crate::rpc_request;
|
||||
use solana_sdk::{signature::SignerError, transaction::TransactionError};
|
||||
use std::{fmt, io};
|
||||
use solana_sdk::{
|
||||
signature::SignerError, transaction::TransactionError, transport::TransportError,
|
||||
};
|
||||
use std::io;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ClientError {
|
||||
pub enum ClientErrorKind {
|
||||
#[error(transparent)]
|
||||
Io(#[from] io::Error),
|
||||
#[error(transparent)]
|
||||
Reqwest(#[from] reqwest::Error),
|
||||
#[error(transparent)]
|
||||
RpcError(#[from] rpc_request::RpcError),
|
||||
#[error(transparent)]
|
||||
SerdeJson(#[from] serde_json::error::Error),
|
||||
#[error(transparent)]
|
||||
SigningError(#[from] SignerError),
|
||||
#[error(transparent)]
|
||||
TransactionError(#[from] TransactionError),
|
||||
#[error("Custom: {0}")]
|
||||
Custom(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for ClientError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "solana client error")
|
||||
impl From<TransportError> for ClientErrorKind {
|
||||
fn from(err: TransportError) -> Self {
|
||||
match err {
|
||||
TransportError::IoError(err) => Self::Io(err),
|
||||
TransportError::TransactionError(err) => Self::TransactionError(err),
|
||||
TransportError::Custom(err) => Self::Custom(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<TransportError> for ClientErrorKind {
|
||||
fn into(self) -> TransportError {
|
||||
match self {
|
||||
Self::Io(err) => TransportError::IoError(err),
|
||||
Self::TransactionError(err) => TransportError::TransactionError(err),
|
||||
Self::Reqwest(err) => TransportError::Custom(format!("{:?}", err)),
|
||||
Self::RpcError(err) => TransportError::Custom(format!("{:?}", err)),
|
||||
Self::SerdeJson(err) => TransportError::Custom(format!("{:?}", err)),
|
||||
Self::SigningError(err) => TransportError::Custom(format!("{:?}", err)),
|
||||
Self::Custom(err) => TransportError::Custom(format!("{:?}", err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
#[error("{kind}")]
|
||||
pub struct ClientError {
|
||||
command: Option<&'static str>,
|
||||
#[source]
|
||||
#[error(transparent)]
|
||||
kind: ClientErrorKind,
|
||||
}
|
||||
|
||||
impl ClientError {
|
||||
pub fn new_with_command(kind: ClientErrorKind, command: &'static str) -> Self {
|
||||
Self {
|
||||
command: Some(command),
|
||||
kind,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_with_command(self, command: &'static str) -> Self {
|
||||
Self {
|
||||
command: Some(command),
|
||||
..self
|
||||
}
|
||||
}
|
||||
|
||||
pub fn command(&self) -> Option<&'static str> {
|
||||
self.command
|
||||
}
|
||||
|
||||
pub fn kind(&self) -> &ClientErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ClientErrorKind> for ClientError {
|
||||
fn from(kind: ClientErrorKind) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TransportError> for ClientError {
|
||||
fn from(err: TransportError) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<TransportError> for ClientError {
|
||||
fn into(self) -> TransportError {
|
||||
self.kind.into()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for ClientError {
|
||||
fn from(err: std::io::Error) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<reqwest::Error> for ClientError {
|
||||
fn from(err: reqwest::Error) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<rpc_request::RpcError> for ClientError {
|
||||
fn from(err: rpc_request::RpcError) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::error::Error> for ClientError {
|
||||
fn from(err: serde_json::error::Error) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SignerError> for ClientError {
|
||||
fn from(err: SignerError) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TransactionError> for ClientError {
|
||||
fn from(err: TransactionError) -> Self {
|
||||
Self {
|
||||
command: None,
|
||||
kind: err.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, ClientError>;
|
||||
|
@ -1,4 +1,4 @@
|
||||
use crate::{client_error::ClientError, rpc_request::RpcRequest};
|
||||
use crate::{client_error::Result, rpc_request::RpcRequest};
|
||||
|
||||
pub(crate) trait GenericRpcClientRequest {
|
||||
fn send(
|
||||
@ -6,5 +6,5 @@ pub(crate) trait GenericRpcClientRequest {
|
||||
request: &RpcRequest,
|
||||
params: serde_json::Value,
|
||||
retries: usize,
|
||||
) -> Result<serde_json::Value, ClientError>;
|
||||
) -> Result<serde_json::Value>;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
client_error::ClientError,
|
||||
client_error::Result,
|
||||
generic_rpc_client_request::GenericRpcClientRequest,
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcResponseContext},
|
||||
@ -10,6 +10,7 @@ use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
transaction::{self, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::TransactionStatus;
|
||||
use std::{collections::HashMap, sync::RwLock};
|
||||
|
||||
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
|
||||
@ -41,7 +42,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
request: &RpcRequest,
|
||||
params: serde_json::Value,
|
||||
_retries: usize,
|
||||
) -> Result<serde_json::Value, ClientError> {
|
||||
) -> Result<serde_json::Value> {
|
||||
if let Some(value) = self.mocks.write().unwrap().remove(request) {
|
||||
return Ok(value);
|
||||
}
|
||||
@ -71,24 +72,45 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
serde_json::to_value(FeeCalculator::default()).unwrap(),
|
||||
),
|
||||
})?,
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => {
|
||||
let value = if self.url == "blockhash_expired" {
|
||||
Value::Null
|
||||
} else {
|
||||
serde_json::to_value(Some(FeeCalculator::default())).unwrap()
|
||||
};
|
||||
serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value,
|
||||
})?
|
||||
}
|
||||
RpcRequest::GetFeeRateGovernor => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
|
||||
})?,
|
||||
RpcRequest::GetSignatureStatus => {
|
||||
let response: Option<transaction::Result<()>> = if self.url == "account_in_use" {
|
||||
Some(Err(TransactionError::AccountInUse))
|
||||
let status: transaction::Result<()> = if self.url == "account_in_use" {
|
||||
Err(TransactionError::AccountInUse)
|
||||
} else if self.url == "instruction_error" {
|
||||
Some(Err(TransactionError::InstructionError(
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::UninitializedAccount,
|
||||
)))
|
||||
} else if self.url == "sig_not_found" {
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
};
|
||||
let status = if self.url == "sig_not_found" {
|
||||
None
|
||||
} else {
|
||||
Some(Ok(()))
|
||||
Some(TransactionStatus {
|
||||
status,
|
||||
slot: 1,
|
||||
confirmations: Some(0),
|
||||
})
|
||||
};
|
||||
serde_json::to_value(response).unwrap()
|
||||
serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: vec![status],
|
||||
})?
|
||||
}
|
||||
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
|
||||
RpcRequest::GetSlot => Value::Number(Number::from(0)),
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
client_error::ClientError,
|
||||
client_error::Result,
|
||||
generic_rpc_client_request::GenericRpcClientRequest,
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
};
|
||||
@ -34,7 +34,7 @@ impl GenericRpcClientRequest for RpcClientRequest {
|
||||
request: &RpcRequest,
|
||||
params: serde_json::Value,
|
||||
mut retries: usize,
|
||||
) -> Result<serde_json::Value, ClientError> {
|
||||
) -> Result<serde_json::Value> {
|
||||
// Concurrent requests are not supported so reuse the same request id for all requests
|
||||
let request_id = 1;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
use serde_json::{json, Value};
|
||||
use std::{error, fmt};
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub enum RpcRequest {
|
||||
@ -18,9 +18,9 @@ pub enum RpcRequest {
|
||||
GetIdentity,
|
||||
GetInflation,
|
||||
GetLeaderSchedule,
|
||||
GetNumBlocksSinceSignatureConfirmation,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetSignatureStatus,
|
||||
GetSlot,
|
||||
@ -29,6 +29,7 @@ pub enum RpcRequest {
|
||||
GetStorageTurnRate,
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetTotalSupply,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
GetVoteAccounts,
|
||||
@ -59,11 +60,9 @@ impl RpcRequest {
|
||||
RpcRequest::GetIdentity => "getIdentity",
|
||||
RpcRequest::GetInflation => "getInflation",
|
||||
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
|
||||
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
|
||||
"getNumBlocksSinceSignatureConfirmation"
|
||||
}
|
||||
RpcRequest::GetProgramAccounts => "getProgramAccounts",
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetSignatureStatus => "getSignatureStatus",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
@ -72,6 +71,7 @@ impl RpcRequest {
|
||||
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
RpcRequest::GetVoteAccounts => "getVoteAccounts",
|
||||
@ -91,26 +91,16 @@ impl RpcRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RpcError {
|
||||
#[error("rpc request error: {0}")]
|
||||
RpcRequestError(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for RpcError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "invalid")
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for RpcError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn error::Error> {
|
||||
// Generic error, underlying cause isn't tracked.
|
||||
None
|
||||
}
|
||||
#[error("parse error: expected {0}")]
|
||||
ParseError(String), /* "expected" */
|
||||
// Anything in a `ForUser` needs to die. The caller should be
|
||||
// deciding what to tell their user
|
||||
#[error("{0}")]
|
||||
ForUser(String), /* "direct-to-user message" */
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -127,7 +117,7 @@ mod tests {
|
||||
assert_eq!(request["params"], json!([addr]));
|
||||
|
||||
let test_request = RpcRequest::GetBalance;
|
||||
let request = test_request.build_request_json(1, json!([addr]));
|
||||
let request = test_request.build_request_json(1, json!([addr.clone()]));
|
||||
assert_eq!(request["method"], "getBalance");
|
||||
|
||||
let test_request = RpcRequest::GetEpochInfo;
|
||||
@ -142,6 +132,10 @@ mod tests {
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getRecentBlockhash");
|
||||
|
||||
let test_request = RpcRequest::GetFeeCalculatorForBlockhash;
|
||||
let request = test_request.build_request_json(1, json!([addr.clone()]));
|
||||
assert_eq!(request["method"], "getFeeCalculatorForBlockhash");
|
||||
|
||||
let test_request = RpcRequest::GetFeeRateGovernor;
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getFeeRateGovernor");
|
||||
|
@ -1,18 +1,14 @@
|
||||
use crate::rpc_request::RpcError;
|
||||
use bincode::serialize;
|
||||
use jsonrpc_core::Result as JsonResult;
|
||||
use crate::{client_error, rpc_request::RpcError};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
message::MessageHeader,
|
||||
pubkey::Pubkey,
|
||||
transaction::{Result, Transaction},
|
||||
transaction::Result,
|
||||
};
|
||||
use std::{collections::HashMap, io, net::SocketAddr, str::FromStr};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
|
||||
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
|
||||
pub type RpcResponse<T> = io::Result<Response<T>>;
|
||||
pub type RpcResult<T> = client_error::Result<Response<T>>;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RpcResponseContext {
|
||||
@ -32,119 +28,6 @@ pub struct RpcBlockCommitment<T> {
|
||||
pub total_stake: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RpcReward {
|
||||
pub pubkey: String,
|
||||
pub lamports: i64,
|
||||
}
|
||||
|
||||
pub type RpcRewards = Vec<RpcReward>;
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedBlock {
|
||||
pub previous_blockhash: String,
|
||||
pub blockhash: String,
|
||||
pub parent_slot: Slot,
|
||||
pub transactions: Vec<RpcTransactionWithStatusMeta>,
|
||||
pub rewards: RpcRewards,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTransactionWithStatusMeta {
|
||||
pub transaction: RpcEncodedTransaction,
|
||||
pub meta: Option<RpcTransactionStatus>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcTransactionEncoding {
|
||||
Binary,
|
||||
Json,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", untagged)]
|
||||
pub enum RpcEncodedTransaction {
|
||||
Binary(String),
|
||||
Json(RpcTransaction),
|
||||
}
|
||||
|
||||
impl RpcEncodedTransaction {
|
||||
pub fn encode(transaction: Transaction, encoding: RpcTransactionEncoding) -> Self {
|
||||
if encoding == RpcTransactionEncoding::Json {
|
||||
RpcEncodedTransaction::Json(RpcTransaction {
|
||||
signatures: transaction
|
||||
.signatures
|
||||
.iter()
|
||||
.map(|sig| sig.to_string())
|
||||
.collect(),
|
||||
message: RpcMessage {
|
||||
header: transaction.message.header,
|
||||
account_keys: transaction
|
||||
.message
|
||||
.account_keys
|
||||
.iter()
|
||||
.map(|pubkey| pubkey.to_string())
|
||||
.collect(),
|
||||
recent_blockhash: transaction.message.recent_blockhash.to_string(),
|
||||
instructions: transaction
|
||||
.message
|
||||
.instructions
|
||||
.iter()
|
||||
.map(|instruction| RpcCompiledInstruction {
|
||||
program_id_index: instruction.program_id_index,
|
||||
accounts: instruction.accounts.clone(),
|
||||
data: bs58::encode(instruction.data.clone()).into_string(),
|
||||
})
|
||||
.collect(),
|
||||
},
|
||||
})
|
||||
} else {
|
||||
RpcEncodedTransaction::Binary(
|
||||
bs58::encode(serialize(&transaction).unwrap()).into_string(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Transaction for pretty JSON serialization
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTransaction {
|
||||
pub signatures: Vec<String>,
|
||||
pub message: RpcMessage,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcMessage {
|
||||
pub header: MessageHeader,
|
||||
pub account_keys: Vec<String>,
|
||||
pub recent_blockhash: String,
|
||||
pub instructions: Vec<RpcCompiledInstruction>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcCompiledInstruction {
|
||||
pub program_id_index: u8,
|
||||
pub accounts: Vec<u8>,
|
||||
pub data: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTransactionStatus {
|
||||
pub status: Result<()>,
|
||||
pub fee: u64,
|
||||
pub pre_balances: Vec<u64>,
|
||||
pub post_balances: Vec<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcBlockhashFeeCalculator {
|
||||
@ -152,6 +35,12 @@ pub struct RpcBlockhashFeeCalculator {
|
||||
pub fee_calculator: FeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFeeCalculator {
|
||||
pub fee_calculator: FeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFeeRateGovernor {
|
||||
|
@ -188,7 +188,7 @@ impl ThinClient {
|
||||
transaction: &mut Transaction,
|
||||
tries: usize,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> io::Result<Signature> {
|
||||
) -> TransportResult<Signature> {
|
||||
self.send_and_confirm_transaction(&[keypair], transaction, tries, min_confirmed_blocks)
|
||||
}
|
||||
|
||||
@ -198,7 +198,7 @@ impl ThinClient {
|
||||
keypair: &Keypair,
|
||||
transaction: &mut Transaction,
|
||||
tries: usize,
|
||||
) -> io::Result<Signature> {
|
||||
) -> TransportResult<Signature> {
|
||||
self.send_and_confirm_transaction(&[keypair], transaction, tries, 0)
|
||||
}
|
||||
|
||||
@ -209,7 +209,7 @@ impl ThinClient {
|
||||
transaction: &mut Transaction,
|
||||
tries: usize,
|
||||
pending_confirmations: usize,
|
||||
) -> io::Result<Signature> {
|
||||
) -> TransportResult<Signature> {
|
||||
for x in 0..tries {
|
||||
let now = Instant::now();
|
||||
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
|
||||
@ -243,13 +243,14 @@ impl ThinClient {
|
||||
}
|
||||
}
|
||||
info!("{} tries failed transfer to {}", x, self.tpu_addr());
|
||||
let (blockhash, _fee_calculator) = self.rpc_client().get_recent_blockhash()?;
|
||||
let (blockhash, _fee_calculator) = self.get_recent_blockhash()?;
|
||||
transaction.sign(keypairs, blockhash);
|
||||
}
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("retry_transfer failed in {} retries", tries),
|
||||
))
|
||||
)
|
||||
.into())
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout_and_commitment(
|
||||
@ -258,13 +259,15 @@ impl ThinClient {
|
||||
polling_frequency: &Duration,
|
||||
timeout: &Duration,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> io::Result<u64> {
|
||||
self.rpc_client().poll_balance_with_timeout_and_commitment(
|
||||
pubkey,
|
||||
polling_frequency,
|
||||
timeout,
|
||||
commitment_config,
|
||||
)
|
||||
) -> TransportResult<u64> {
|
||||
self.rpc_client()
|
||||
.poll_balance_with_timeout_and_commitment(
|
||||
pubkey,
|
||||
polling_frequency,
|
||||
timeout,
|
||||
commitment_config,
|
||||
)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout(
|
||||
@ -272,8 +275,8 @@ impl ThinClient {
|
||||
pubkey: &Pubkey,
|
||||
polling_frequency: &Duration,
|
||||
timeout: &Duration,
|
||||
) -> io::Result<u64> {
|
||||
self.rpc_client().poll_balance_with_timeout_and_commitment(
|
||||
) -> TransportResult<u64> {
|
||||
self.poll_balance_with_timeout_and_commitment(
|
||||
pubkey,
|
||||
polling_frequency,
|
||||
timeout,
|
||||
@ -281,18 +284,18 @@ impl ThinClient {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
|
||||
self.rpc_client()
|
||||
.poll_get_balance_with_commitment(pubkey, CommitmentConfig::default())
|
||||
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
|
||||
self.poll_get_balance_with_commitment(pubkey, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn poll_get_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> io::Result<u64> {
|
||||
) -> TransportResult<u64> {
|
||||
self.rpc_client()
|
||||
.poll_get_balance_with_commitment(pubkey, commitment_config)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
|
||||
@ -321,9 +324,9 @@ impl ThinClient {
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> TransportResult<()> {
|
||||
Ok(self
|
||||
.rpc_client()
|
||||
.poll_for_signature_with_commitment(signature, commitment_config)?)
|
||||
self.rpc_client()
|
||||
.poll_for_signature_with_commitment(signature, commitment_config)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Check a signature in the bank. This method blocks
|
||||
@ -332,16 +335,17 @@ impl ThinClient {
|
||||
self.rpc_client().check_signature(signature)
|
||||
}
|
||||
|
||||
pub fn validator_exit(&self) -> io::Result<bool> {
|
||||
self.rpc_client().validator_exit()
|
||||
pub fn validator_exit(&self) -> TransportResult<bool> {
|
||||
self.rpc_client().validator_exit().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn get_num_blocks_since_signature_confirmation(
|
||||
&mut self,
|
||||
sig: &Signature,
|
||||
) -> io::Result<usize> {
|
||||
) -> TransportResult<usize> {
|
||||
self.rpc_client()
|
||||
.get_num_blocks_since_signature_confirmation(sig)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
@ -368,7 +372,7 @@ impl SyncClient for ThinClient {
|
||||
keypair: &Keypair,
|
||||
instruction: Instruction,
|
||||
) -> TransportResult<Signature> {
|
||||
let message = Message::new(vec![instruction]);
|
||||
let message = Message::new(&[instruction]);
|
||||
self.send_message(&[keypair], message)
|
||||
}
|
||||
|
||||
@ -400,14 +404,14 @@ impl SyncClient for ThinClient {
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> TransportResult<Option<Account>> {
|
||||
Ok(self
|
||||
.rpc_client()
|
||||
.get_account_with_commitment(pubkey, commitment_config)?
|
||||
.value)
|
||||
self.rpc_client()
|
||||
.get_account_with_commitment(pubkey, commitment_config)
|
||||
.map_err(|e| e.into())
|
||||
.map(|r| r.value)
|
||||
}
|
||||
|
||||
fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
|
||||
Ok(self.rpc_client().get_balance(pubkey)?)
|
||||
self.rpc_client().get_balance(pubkey).map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn get_balance_with_commitment(
|
||||
@ -415,10 +419,10 @@ impl SyncClient for ThinClient {
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> TransportResult<u64> {
|
||||
let balance = self
|
||||
.rpc_client()
|
||||
.get_balance_with_commitment(pubkey, commitment_config)?;
|
||||
Ok(balance.value)
|
||||
self.rpc_client()
|
||||
.get_balance_with_commitment(pubkey, commitment_config)
|
||||
.map_err(|e| e.into())
|
||||
.map(|r| r.value)
|
||||
}
|
||||
|
||||
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
|
||||
@ -445,9 +449,20 @@ impl SyncClient for ThinClient {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_fee_calculator_for_blockhash(
|
||||
&self,
|
||||
blockhash: &Hash,
|
||||
) -> TransportResult<Option<FeeCalculator>> {
|
||||
self.rpc_client()
|
||||
.get_fee_calculator_for_blockhash(blockhash)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn get_fee_rate_governor(&self) -> TransportResult<FeeRateGovernor> {
|
||||
let fee_rate_governor = self.rpc_client().get_fee_rate_governor()?;
|
||||
Ok(fee_rate_governor.value)
|
||||
self.rpc_client()
|
||||
.get_fee_rate_governor()
|
||||
.map_err(|e| e.into())
|
||||
.map(|r| r.value)
|
||||
}
|
||||
|
||||
fn get_signature_status(
|
||||
@ -456,7 +471,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status(&signature.to_string())
|
||||
.get_signature_status(&signature)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
@ -473,7 +488,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status_with_commitment(&signature.to_string(), commitment_config)
|
||||
.get_signature_status_with_commitment(&signature, commitment_config)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
@ -545,23 +560,26 @@ impl SyncClient for ThinClient {
|
||||
signature: &Signature,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> TransportResult<usize> {
|
||||
Ok(self
|
||||
.rpc_client()
|
||||
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
|
||||
self.rpc_client()
|
||||
.poll_for_signature_confirmation(signature, min_confirmed_blocks)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
|
||||
Ok(self.rpc_client().poll_for_signature(signature)?)
|
||||
self.rpc_client()
|
||||
.poll_for_signature(signature)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> {
|
||||
let new_blockhash = self.rpc_client().get_new_blockhash(blockhash)?;
|
||||
Ok(new_blockhash)
|
||||
self.rpc_client()
|
||||
.get_new_blockhash(blockhash)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncClient for ThinClient {
|
||||
fn async_send_transaction(&self, transaction: Transaction) -> io::Result<Signature> {
|
||||
fn async_send_transaction(&self, transaction: Transaction) -> TransportResult<Signature> {
|
||||
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
|
||||
let mut wr = std::io::Cursor::new(&mut buf[..]);
|
||||
serialize_into(&mut wr, &transaction)
|
||||
@ -576,7 +594,7 @@ impl AsyncClient for ThinClient {
|
||||
keypairs: &T,
|
||||
message: Message,
|
||||
recent_blockhash: Hash,
|
||||
) -> io::Result<Signature> {
|
||||
) -> TransportResult<Signature> {
|
||||
let transaction = Transaction::new(keypairs, message, recent_blockhash);
|
||||
self.async_send_transaction(transaction)
|
||||
}
|
||||
@ -585,8 +603,8 @@ impl AsyncClient for ThinClient {
|
||||
keypair: &Keypair,
|
||||
instruction: Instruction,
|
||||
recent_blockhash: Hash,
|
||||
) -> io::Result<Signature> {
|
||||
let message = Message::new(vec![instruction]);
|
||||
) -> TransportResult<Signature> {
|
||||
let message = Message::new(&[instruction]);
|
||||
self.async_send_message(&[keypair], message, recent_blockhash)
|
||||
}
|
||||
fn async_transfer(
|
||||
@ -595,7 +613,7 @@ impl AsyncClient for ThinClient {
|
||||
keypair: &Keypair,
|
||||
pubkey: &Pubkey,
|
||||
recent_blockhash: Hash,
|
||||
) -> io::Result<Signature> {
|
||||
) -> TransportResult<Signature> {
|
||||
let transfer_instruction =
|
||||
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
|
||||
self.async_send_instruction(keypair, transfer_instruction, recent_blockhash)
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.4"
|
||||
version = "1.1.0"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -15,66 +15,67 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.2.1"
|
||||
bv = { version = "0.11.1", features = ["serde"] }
|
||||
bs58 = "0.3.0"
|
||||
byteorder = "1.3.2"
|
||||
chrono = { version = "0.4.10", features = ["serde"] }
|
||||
compression = "0.1.5"
|
||||
byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.3"
|
||||
crossbeam-channel = "0.4"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
itertools = "0.8.2"
|
||||
itertools = "0.9.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-core-client = { version = "14.0.5", features = ["ws"] }
|
||||
jsonrpc-derive = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
jsonrpc-pubsub = "14.0.6"
|
||||
jsonrpc-ws-server = "14.0.6"
|
||||
libc = "0.2.66"
|
||||
log = "0.4.8"
|
||||
nix = "0.17.0"
|
||||
num_cpus = "1.0.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rayon = "1.2.0"
|
||||
regex = "1.3.4"
|
||||
serde = "1.0.104"
|
||||
rayon = "1.3.0"
|
||||
regex = "1.3.6"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
|
||||
solana-client = { path = "../client", version = "1.0.4" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.4" }
|
||||
serde_json = "1.0.48"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.0" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "1.0.4" }
|
||||
solana-logger = { path = "../logger", version = "1.0.4" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.4" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.4" }
|
||||
solana-measure = { path = "../measure", version = "1.0.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.4" }
|
||||
solana-perf = { path = "../perf", version = "1.0.4" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.4" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.4" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.4" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.4" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.4" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.4" }
|
||||
sys-info = "0.5.9"
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
solana-measure = { path = "../measure", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.0" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.4" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.0" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
matches = "0.1.6"
|
||||
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
|
||||
serial_test = "0.3.2"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serial_test = "0.4.0"
|
||||
serial_test_derive = "0.4.0"
|
||||
systemstat = "0.1.5"
|
||||
|
||||
|
@ -9,12 +9,12 @@ use rayon::prelude::*;
|
||||
use solana_core::banking_stage::{create_test_recorder, BankingStage};
|
||||
use solana_core::cluster_info::ClusterInfo;
|
||||
use solana_core::cluster_info::Node;
|
||||
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::poh_recorder::WorkingBankEntry;
|
||||
use solana_ledger::blockstore_processor::process_entries;
|
||||
use solana_ledger::entry::{next_hash, Entry};
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::genesis_config::GenesisConfig;
|
||||
|
@ -6,12 +6,12 @@ extern crate test;
|
||||
use log::*;
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::retransmit_stage::retransmitter;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
|
@ -6,9 +6,9 @@ extern crate test;
|
||||
use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::sigverify::TransactionSigVerifier;
|
||||
use solana_core::sigverify_stage::SigVerifyStage;
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
|
38
core/src/accounts_cleanup_service.rs
Normal file
38
core/src/accounts_cleanup_service.rs
Normal file
@ -0,0 +1,38 @@
|
||||
// Service to clean up dead slots in accounts_db
|
||||
//
|
||||
// This can be expensive since we have to walk the append vecs being cleaned up.
|
||||
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, RwLock,
|
||||
};
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct AccountsCleanupService {
|
||||
t_cleanup: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl AccountsCleanupService {
|
||||
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
|
||||
info!("AccountsCleanupService active");
|
||||
let exit = exit.clone();
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-accounts-cleanup".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
bank.clean_dead_slots();
|
||||
sleep(Duration::from_millis(100));
|
||||
})
|
||||
.unwrap();
|
||||
Self { t_cleanup }
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cleanup.join()
|
||||
}
|
||||
}
|
200
core/src/accounts_hash_verifier.rs
Normal file
200
core/src/accounts_hash_verifier.rs
Normal file
@ -0,0 +1,200 @@
|
||||
// Service to verify accounts hashes with other trusted validator nodes.
|
||||
//
|
||||
// Each interval, publish the snapshat hash which is the full accounts state
|
||||
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
|
||||
// set and halt the node if a mismatch is detected.
|
||||
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use solana_ledger::{
|
||||
snapshot_package::SnapshotPackage, snapshot_package::SnapshotPackageReceiver,
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
};
|
||||
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::RecvTimeoutError,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub struct AccountsHashVerifier {
|
||||
t_accounts_hash_verifier: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl AccountsHashVerifier {
|
||||
pub fn new(
|
||||
snapshot_package_receiver: SnapshotPackageReceiver,
|
||||
snapshot_package_sender: Option<SnapshotPackageSender>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
halt_on_trusted_validators_accounts_hash_mismatch: bool,
|
||||
fault_injection_rate_slots: u64,
|
||||
) -> Self {
|
||||
let exit = exit.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
let t_accounts_hash_verifier = Builder::new()
|
||||
.name("solana-accounts-hash".to_string())
|
||||
.spawn(move || {
|
||||
let mut hashes = vec![];
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
match snapshot_package_receiver.recv_timeout(Duration::from_secs(1)) {
|
||||
Ok(snapshot_package) => {
|
||||
Self::process_snapshot(
|
||||
snapshot_package,
|
||||
&cluster_info,
|
||||
&trusted_validators,
|
||||
halt_on_trusted_validators_accounts_hash_mismatch,
|
||||
&snapshot_package_sender,
|
||||
&mut hashes,
|
||||
&exit,
|
||||
fault_injection_rate_slots,
|
||||
);
|
||||
}
|
||||
Err(RecvTimeoutError::Disconnected) => break,
|
||||
Err(RecvTimeoutError::Timeout) => (),
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
Self {
|
||||
t_accounts_hash_verifier,
|
||||
}
|
||||
}
|
||||
|
||||
fn process_snapshot(
|
||||
snapshot_package: SnapshotPackage,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: &Option<HashSet<Pubkey>>,
|
||||
halt_on_trusted_validator_accounts_hash_mismatch: bool,
|
||||
snapshot_package_sender: &Option<SnapshotPackageSender>,
|
||||
hashes: &mut Vec<(Slot, Hash)>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
fault_injection_rate_slots: u64,
|
||||
) {
|
||||
if fault_injection_rate_slots != 0
|
||||
&& snapshot_package.root % fault_injection_rate_slots == 0
|
||||
{
|
||||
// For testing, publish an invalid hash to gossip.
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_sdk::hash::extend_and_hash;
|
||||
warn!("inserting fault at slot: {}", snapshot_package.root);
|
||||
let rand = thread_rng().gen_range(0, 10);
|
||||
let hash = extend_and_hash(&snapshot_package.hash, &[rand]);
|
||||
hashes.push((snapshot_package.root, hash));
|
||||
} else {
|
||||
hashes.push((snapshot_package.root, snapshot_package.hash));
|
||||
}
|
||||
|
||||
if halt_on_trusted_validator_accounts_hash_mismatch {
|
||||
let mut slot_to_hash = HashMap::new();
|
||||
for (slot, hash) in hashes.iter() {
|
||||
slot_to_hash.insert(*slot, *hash);
|
||||
}
|
||||
if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) {
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
if let Some(sender) = snapshot_package_sender.as_ref() {
|
||||
if sender.send(snapshot_package).is_err() {}
|
||||
}
|
||||
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_accounts_hashes(hashes.clone());
|
||||
}
|
||||
|
||||
fn should_halt(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: &Option<HashSet<Pubkey>>,
|
||||
slot_to_hash: &mut HashMap<Slot, Hash>,
|
||||
) -> bool {
|
||||
let mut verified_count = 0;
|
||||
if let Some(trusted_validators) = trusted_validators.as_ref() {
|
||||
for trusted_validator in trusted_validators {
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
if let Some(accounts_hashes) =
|
||||
cluster_info_r.get_accounts_hash_for_node(trusted_validator)
|
||||
{
|
||||
for (slot, hash) in accounts_hashes {
|
||||
if let Some(reference_hash) = slot_to_hash.get(slot) {
|
||||
if *hash != *reference_hash {
|
||||
error!("Trusted validator {} produced conflicting hashes for slot: {} ({} != {})",
|
||||
trusted_validator,
|
||||
slot,
|
||||
hash,
|
||||
reference_hash,
|
||||
);
|
||||
|
||||
return true;
|
||||
} else {
|
||||
verified_count += 1;
|
||||
}
|
||||
} else {
|
||||
slot_to_hash.insert(*slot, *hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
inc_new_counter_info!("accounts_hash_verifier-hashes_verified", verified_count);
|
||||
false
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_accounts_hash_verifier.join()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::cluster_info::make_accounts_hashes_message;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use solana_sdk::{
|
||||
hash::hash,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_should_halt() {
|
||||
let keypair = Keypair::new();
|
||||
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
|
||||
let mut trusted_validators = HashSet::new();
|
||||
let mut slot_to_hash = HashMap::new();
|
||||
assert!(!AccountsHashVerifier::should_halt(
|
||||
&cluster_info,
|
||||
&Some(trusted_validators.clone()),
|
||||
&mut slot_to_hash,
|
||||
));
|
||||
|
||||
let validator1 = Keypair::new();
|
||||
let hash1 = hash(&[1]);
|
||||
let hash2 = hash(&[2]);
|
||||
{
|
||||
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
|
||||
let mut cluster_info_w = cluster_info.write().unwrap();
|
||||
cluster_info_w.push_message(message);
|
||||
}
|
||||
slot_to_hash.insert(0, hash2);
|
||||
trusted_validators.insert(validator1.pubkey());
|
||||
assert!(AccountsHashVerifier::should_halt(
|
||||
&cluster_info,
|
||||
&Some(trusted_validators.clone()),
|
||||
&mut slot_to_hash,
|
||||
));
|
||||
}
|
||||
}
|
@ -3,7 +3,6 @@
|
||||
//! can do its processing in parallel with signature verification on the GPU.
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
|
||||
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
|
||||
poh_service::PohService,
|
||||
};
|
||||
@ -17,7 +16,11 @@ use solana_ledger::{
|
||||
};
|
||||
use solana_measure::{measure::Measure, thread_mem_usage};
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
|
||||
use solana_perf::{cuda_runtime::PinnedVec, perf_libs};
|
||||
use solana_perf::{
|
||||
cuda_runtime::PinnedVec,
|
||||
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
|
||||
perf_libs,
|
||||
};
|
||||
use solana_runtime::{
|
||||
accounts_db::ErrorCounters,
|
||||
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
|
||||
@ -1009,20 +1012,18 @@ pub fn create_test_recorder(
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
cluster_info::Node,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
packet::to_packets,
|
||||
poh_recorder::WorkingBank,
|
||||
cluster_info::Node, poh_recorder::WorkingBank,
|
||||
transaction_status_service::TransactionStatusService,
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use itertools::Itertools;
|
||||
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
|
||||
use solana_ledger::{
|
||||
blockstore::entries_to_test_shreds,
|
||||
entry::{next_entry, Entry, EntrySlice},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_perf::packet::to_packets;
|
||||
use solana_runtime::bank::HashAgeKind;
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
@ -1030,6 +1031,7 @@ mod tests {
|
||||
system_transaction,
|
||||
transaction::TransactionError,
|
||||
};
|
||||
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
|
||||
use std::{sync::atomic::Ordering, thread::sleep};
|
||||
|
||||
#[test]
|
||||
@ -1975,10 +1977,10 @@ mod tests {
|
||||
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
|
||||
assert_eq!(confirmed_block.transactions.len(), 3);
|
||||
|
||||
for RpcTransactionWithStatusMeta { transaction, meta } in
|
||||
for TransactionWithStatusMeta { transaction, meta } in
|
||||
confirmed_block.transactions.into_iter()
|
||||
{
|
||||
if let RpcEncodedTransaction::Json(transaction) = transaction {
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == success_signature.to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
} else if transaction.signatures[0] == ix_error_signature.to_string() {
|
||||
|
@ -1,283 +0,0 @@
|
||||
//! The `blockstream` module provides a method for streaming entries out via a
|
||||
//! local unix socket, to provide client services such as a block explorer with
|
||||
//! real-time access to entries.
|
||||
|
||||
use bincode::serialize;
|
||||
use chrono::{SecondsFormat, Utc};
|
||||
use serde_json::json;
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use std::cell::RefCell;
|
||||
use std::io::Result;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub trait EntryWriter: std::fmt::Debug {
|
||||
fn write(&self, payload: String) -> Result<()>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct EntryVec {
|
||||
values: RefCell<Vec<String>>,
|
||||
}
|
||||
|
||||
impl EntryWriter for EntryVec {
|
||||
fn write(&self, payload: String) -> Result<()> {
|
||||
self.values.borrow_mut().push(payload);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl EntryVec {
|
||||
pub fn new() -> Self {
|
||||
EntryVec {
|
||||
values: RefCell::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> Vec<String> {
|
||||
self.values.borrow().clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EntrySocket {
|
||||
unix_socket: PathBuf,
|
||||
}
|
||||
|
||||
impl EntryWriter for EntrySocket {
|
||||
#[cfg(not(windows))]
|
||||
fn write(&self, payload: String) -> Result<()> {
|
||||
use std::io::prelude::*;
|
||||
use std::net::Shutdown;
|
||||
use std::os::unix::net::UnixStream;
|
||||
|
||||
const MESSAGE_TERMINATOR: &str = "\n";
|
||||
|
||||
let mut socket = UnixStream::connect(&self.unix_socket)?;
|
||||
socket.write_all(payload.as_bytes())?;
|
||||
socket.write_all(MESSAGE_TERMINATOR.as_bytes())?;
|
||||
socket.shutdown(Shutdown::Write)?;
|
||||
Ok(())
|
||||
}
|
||||
#[cfg(windows)]
|
||||
fn write(&self, _payload: String) -> Result<()> {
|
||||
Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
"EntryWriter::write() not implemented for windows",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub trait BlockstreamEvents {
|
||||
fn emit_entry_event(
|
||||
&self,
|
||||
slot: Slot,
|
||||
tick_height: u64,
|
||||
leader_pubkey: &Pubkey,
|
||||
entries: &Entry,
|
||||
) -> Result<()>;
|
||||
fn emit_block_event(
|
||||
&self,
|
||||
slot: Slot,
|
||||
tick_height: u64,
|
||||
leader_pubkey: &Pubkey,
|
||||
blockhash: Hash,
|
||||
) -> Result<()>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Blockstream<T: EntryWriter> {
|
||||
pub output: T,
|
||||
}
|
||||
|
||||
impl<T> BlockstreamEvents for Blockstream<T>
|
||||
where
|
||||
T: EntryWriter,
|
||||
{
|
||||
fn emit_entry_event(
|
||||
&self,
|
||||
slot: Slot,
|
||||
tick_height: u64,
|
||||
leader_pubkey: &Pubkey,
|
||||
entry: &Entry,
|
||||
) -> Result<()> {
|
||||
let transactions: Vec<Vec<u8>> = serialize_transactions(entry);
|
||||
let stream_entry = json!({
|
||||
"num_hashes": entry.num_hashes,
|
||||
"hash": entry.hash,
|
||||
"transactions": transactions
|
||||
});
|
||||
let json_entry = serde_json::to_string(&stream_entry)?;
|
||||
let payload = format!(
|
||||
r#"{{"dt":"{}","t":"entry","s":{},"h":{},"l":"{:?}","entry":{}}}"#,
|
||||
Utc::now().to_rfc3339_opts(SecondsFormat::Nanos, true),
|
||||
slot,
|
||||
tick_height,
|
||||
leader_pubkey,
|
||||
json_entry,
|
||||
);
|
||||
self.output.write(payload)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn emit_block_event(
|
||||
&self,
|
||||
slot: Slot,
|
||||
tick_height: u64,
|
||||
leader_pubkey: &Pubkey,
|
||||
blockhash: Hash,
|
||||
) -> Result<()> {
|
||||
let payload = format!(
|
||||
r#"{{"dt":"{}","t":"block","s":{},"h":{},"l":"{:?}","hash":"{:?}"}}"#,
|
||||
Utc::now().to_rfc3339_opts(SecondsFormat::Nanos, true),
|
||||
slot,
|
||||
tick_height,
|
||||
leader_pubkey,
|
||||
blockhash,
|
||||
);
|
||||
self.output.write(payload)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub type SocketBlockstream = Blockstream<EntrySocket>;
|
||||
|
||||
impl SocketBlockstream {
|
||||
pub fn new(unix_socket: &Path) -> Self {
|
||||
Blockstream {
|
||||
output: EntrySocket {
|
||||
unix_socket: unix_socket.to_path_buf(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type MockBlockstream = Blockstream<EntryVec>;
|
||||
|
||||
impl MockBlockstream {
|
||||
pub fn new(_: &Path) -> Self {
|
||||
Blockstream {
|
||||
output: EntryVec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> Vec<String> {
|
||||
self.output.entries()
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_transactions(entry: &Entry) -> Vec<Vec<u8>> {
|
||||
entry
|
||||
.transactions
|
||||
.iter()
|
||||
.map(|tx| serialize(&tx).unwrap())
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use chrono::{DateTime, FixedOffset};
|
||||
use serde_json::Value;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_transaction;
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn test_serialize_transactions() {
|
||||
let entry = Entry::new(&Hash::default(), 1, vec![]);
|
||||
let empty_vec: Vec<Vec<u8>> = vec![];
|
||||
assert_eq!(serialize_transactions(&entry), empty_vec);
|
||||
|
||||
let keypair0 = Keypair::new();
|
||||
let keypair1 = Keypair::new();
|
||||
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
|
||||
let tx1 = system_transaction::transfer(&keypair1, &keypair0.pubkey(), 2, Hash::default());
|
||||
let serialized_tx0 = serialize(&tx0).unwrap();
|
||||
let serialized_tx1 = serialize(&tx1).unwrap();
|
||||
let entry = Entry::new(&Hash::default(), 1, vec![tx0, tx1]);
|
||||
assert_eq!(
|
||||
serialize_transactions(&entry),
|
||||
vec![serialized_tx0, serialized_tx1]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockstream() -> () {
|
||||
let blockstream = MockBlockstream::new(&PathBuf::from("test_stream"));
|
||||
let ticks_per_slot = 5;
|
||||
|
||||
let mut blockhash = Hash::default();
|
||||
let mut entries = Vec::new();
|
||||
let mut expected_entries = Vec::new();
|
||||
|
||||
let tick_height_initial = 1;
|
||||
let tick_height_final = tick_height_initial + ticks_per_slot + 2;
|
||||
let mut curr_slot = 0;
|
||||
let leader_pubkey = Pubkey::new_rand();
|
||||
|
||||
for tick_height in tick_height_initial..=tick_height_final {
|
||||
if tick_height == 5 {
|
||||
blockstream
|
||||
.emit_block_event(curr_slot, tick_height, &leader_pubkey, blockhash)
|
||||
.unwrap();
|
||||
curr_slot += 1;
|
||||
}
|
||||
let entry = Entry::new(&mut blockhash, 1, vec![]); // just ticks
|
||||
blockhash = entry.hash;
|
||||
blockstream
|
||||
.emit_entry_event(curr_slot, tick_height, &leader_pubkey, &entry)
|
||||
.unwrap();
|
||||
expected_entries.push(entry.clone());
|
||||
entries.push(entry);
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
blockstream.entries().len() as u64,
|
||||
// one entry per tick (1..=N+2) is +3, plus one block
|
||||
ticks_per_slot + 3 + 1
|
||||
);
|
||||
|
||||
let mut j = 0;
|
||||
let mut matched_entries = 0;
|
||||
let mut matched_slots = HashSet::new();
|
||||
let mut matched_blocks = HashSet::new();
|
||||
|
||||
for item in blockstream.entries() {
|
||||
let json: Value = serde_json::from_str(&item).unwrap();
|
||||
let dt_str = json["dt"].as_str().unwrap();
|
||||
|
||||
// Ensure `ts` field parses as valid DateTime
|
||||
let _dt: DateTime<FixedOffset> = DateTime::parse_from_rfc3339(dt_str).unwrap();
|
||||
|
||||
let item_type = json["t"].as_str().unwrap();
|
||||
match item_type {
|
||||
"block" => {
|
||||
let hash = json["hash"].to_string();
|
||||
matched_blocks.insert(hash);
|
||||
}
|
||||
|
||||
"entry" => {
|
||||
let slot = json["s"].as_u64().unwrap();
|
||||
matched_slots.insert(slot);
|
||||
let entry_obj = json["entry"].clone();
|
||||
let entry: Entry = serde_json::from_value(entry_obj).unwrap();
|
||||
|
||||
assert_eq!(entry, expected_entries[j]);
|
||||
matched_entries += 1;
|
||||
j += 1;
|
||||
}
|
||||
|
||||
_ => {
|
||||
assert!(false, "unknown item type {}", item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(matched_entries, expected_entries.len());
|
||||
assert_eq!(matched_slots.len(), 2);
|
||||
assert_eq!(matched_blocks.len(), 1);
|
||||
}
|
||||
}
|
@ -1,228 +0,0 @@
|
||||
//! The `blockstream_service` implements optional streaming of entries and block metadata
|
||||
//! using the `blockstream` module, providing client services such as a block explorer with
|
||||
//! real-time access to entries.
|
||||
|
||||
use crate::blockstream::BlockstreamEvents;
|
||||
#[cfg(test)]
|
||||
use crate::blockstream::MockBlockstream as Blockstream;
|
||||
#[cfg(not(test))]
|
||||
use crate::blockstream::SocketBlockstream as Blockstream;
|
||||
use crate::result::{Error, Result};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct BlockstreamService {
|
||||
t_blockstream: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl BlockstreamService {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(
|
||||
slot_full_receiver: Receiver<(u64, Pubkey)>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
unix_socket: &Path,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let mut blockstream = Blockstream::new(unix_socket);
|
||||
let exit = exit.clone();
|
||||
let t_blockstream = Builder::new()
|
||||
.name("solana-blockstream".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
if let Err(e) =
|
||||
Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
|
||||
{
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
_ => info!("Error from process_entries: {:?}", e),
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
Self { t_blockstream }
|
||||
}
|
||||
fn process_entries(
|
||||
slot_full_receiver: &Receiver<(u64, Pubkey)>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
blockstream: &mut Blockstream,
|
||||
) -> Result<()> {
|
||||
let timeout = Duration::new(1, 0);
|
||||
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
|
||||
|
||||
// Slot might not exist due to LedgerCleanupService, check first
|
||||
let blockstore_meta = blockstore.meta(slot).unwrap();
|
||||
if let Some(blockstore_meta) = blockstore_meta {
|
||||
// Return error to main loop. Thread won't exit, will just log the error
|
||||
let entries = blockstore.get_slot_entries(slot, 0, None)?;
|
||||
let _parent_slot = if slot == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(blockstore_meta.parent_slot)
|
||||
};
|
||||
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
|
||||
let mut tick_height = ticks_per_slot * slot;
|
||||
|
||||
for (i, entry) in entries.iter().enumerate() {
|
||||
if entry.is_tick() {
|
||||
tick_height += 1;
|
||||
}
|
||||
blockstream
|
||||
.emit_entry_event(slot, tick_height, &slot_leader, &entry)
|
||||
.unwrap_or_else(|e| {
|
||||
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
|
||||
});
|
||||
if i == entries.len() - 1 {
|
||||
blockstream
|
||||
.emit_block_event(slot, tick_height, &slot_leader, entry.hash)
|
||||
.unwrap_or_else(|e| {
|
||||
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_blockstream.join()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use bincode::{deserialize, serialize};
|
||||
use chrono::{DateTime, FixedOffset};
|
||||
use serde_json::Value;
|
||||
use solana_ledger::create_new_tmp_ledger;
|
||||
use solana_ledger::entry::{create_ticks, Entry};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_transaction;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[test]
|
||||
fn test_blockstream_service_process_entries() {
|
||||
let ticks_per_slot = 5;
|
||||
let leader_pubkey = Pubkey::new_rand();
|
||||
|
||||
// Set up genesis config and blockstore
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config, ..
|
||||
} = create_genesis_config(1000);
|
||||
genesis_config.ticks_per_slot = ticks_per_slot;
|
||||
|
||||
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
|
||||
// Set up blockstream
|
||||
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
|
||||
|
||||
// Set up dummy channel to receive a full-slot notification
|
||||
let (slot_full_sender, slot_full_receiver) = channel();
|
||||
|
||||
// Create entries - 4 ticks + 1 populated entry + 1 tick
|
||||
let mut entries = create_ticks(4, 0, Hash::default());
|
||||
|
||||
let keypair = Keypair::new();
|
||||
let mut blockhash = entries[3].hash;
|
||||
let tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default());
|
||||
let entry = Entry::new(&mut blockhash, 1, vec![tx]);
|
||||
blockhash = entry.hash;
|
||||
entries.push(entry);
|
||||
let final_tick = create_ticks(1, 0, blockhash);
|
||||
entries.extend_from_slice(&final_tick);
|
||||
|
||||
let expected_entries = entries.clone();
|
||||
let expected_tick_heights = [6, 7, 8, 9, 9, 10];
|
||||
|
||||
blockstore
|
||||
.write_entries(
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
ticks_per_slot,
|
||||
None,
|
||||
true,
|
||||
&Arc::new(Keypair::new()),
|
||||
entries,
|
||||
0,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
slot_full_sender.send((1, leader_pubkey)).unwrap();
|
||||
BlockstreamService::process_entries(
|
||||
&slot_full_receiver,
|
||||
&Arc::new(blockstore),
|
||||
&mut blockstream,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(blockstream.entries().len(), 7);
|
||||
|
||||
let (entry_events, block_events): (Vec<Value>, Vec<Value>) = blockstream
|
||||
.entries()
|
||||
.iter()
|
||||
.map(|item| {
|
||||
let json: Value = serde_json::from_str(&item).unwrap();
|
||||
let dt_str = json["dt"].as_str().unwrap();
|
||||
// Ensure `ts` field parses as valid DateTime
|
||||
let _dt: DateTime<FixedOffset> = DateTime::parse_from_rfc3339(dt_str).unwrap();
|
||||
json
|
||||
})
|
||||
.partition(|json| {
|
||||
let item_type = json["t"].as_str().unwrap();
|
||||
item_type == "entry"
|
||||
});
|
||||
for (i, json) in entry_events.iter().enumerate() {
|
||||
let height = json["h"].as_u64().unwrap();
|
||||
assert_eq!(height, expected_tick_heights[i]);
|
||||
let entry_obj = json["entry"].clone();
|
||||
let tx = entry_obj["transactions"].as_array().unwrap();
|
||||
let entry: Entry;
|
||||
if tx.len() == 0 {
|
||||
entry = serde_json::from_value(entry_obj).unwrap();
|
||||
} else {
|
||||
let entry_json = entry_obj.as_object().unwrap();
|
||||
entry = Entry {
|
||||
num_hashes: entry_json.get("num_hashes").unwrap().as_u64().unwrap(),
|
||||
hash: serde_json::from_value(entry_json.get("hash").unwrap().clone()).unwrap(),
|
||||
transactions: entry_json
|
||||
.get("transactions")
|
||||
.unwrap()
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(j, tx)| {
|
||||
let tx_vec: Vec<u8> = serde_json::from_value(tx.clone()).unwrap();
|
||||
// Check explicitly that transaction matches bincode-serialized format
|
||||
assert_eq!(
|
||||
tx_vec,
|
||||
serialize(&expected_entries[i].transactions[j]).unwrap()
|
||||
);
|
||||
deserialize(&tx_vec).unwrap()
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
}
|
||||
assert_eq!(entry, expected_entries[i]);
|
||||
}
|
||||
for json in block_events {
|
||||
let slot = json["s"].as_u64().unwrap();
|
||||
assert_eq!(1, slot);
|
||||
let height = json["h"].as_u64().unwrap();
|
||||
assert_eq!(2 * ticks_per_slot, height);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,30 +1,41 @@
|
||||
//! A stage to broadcast data from a leader node to validators
|
||||
use self::broadcast_fake_shreds_run::BroadcastFakeShredsRun;
|
||||
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
|
||||
use self::standard_broadcast_run::StandardBroadcastRun;
|
||||
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
|
||||
use crate::poh_recorder::WorkingBankEntry;
|
||||
use crate::result::{Error, Result};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_ledger::staking_utils;
|
||||
use self::{
|
||||
broadcast_fake_shreds_run::BroadcastFakeShredsRun,
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
standard_broadcast_run::StandardBroadcastRun,
|
||||
};
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
poh_recorder::WorkingBankEntry,
|
||||
result::{Error, Result},
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
|
||||
Sender as CrossbeamSender,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::collections::HashMap;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::Instant;
|
||||
|
||||
pub const NUM_INSERT_THREADS: usize = 2;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::UdpSocket,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender},
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
mod broadcast_fake_shreds_run;
|
||||
pub(crate) mod broadcast_utils;
|
||||
mod fail_entry_verification_broadcast_run;
|
||||
mod standard_broadcast_run;
|
||||
|
||||
pub const NUM_INSERT_THREADS: usize = 2;
|
||||
pub type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
|
||||
pub type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum BroadcastStageReturnType {
|
||||
ChannelDisconnected,
|
||||
@ -43,6 +54,7 @@ impl BroadcastStageType {
|
||||
sock: Vec<UdpSocket>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
exit_sender: &Arc<AtomicBool>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
shred_version: u16,
|
||||
@ -53,6 +65,7 @@ impl BroadcastStageType {
|
||||
sock,
|
||||
cluster_info,
|
||||
receiver,
|
||||
retransmit_slots_receiver,
|
||||
exit_sender,
|
||||
blockstore,
|
||||
StandardBroadcastRun::new(keypair, shred_version),
|
||||
@ -62,6 +75,7 @@ impl BroadcastStageType {
|
||||
sock,
|
||||
cluster_info,
|
||||
receiver,
|
||||
retransmit_slots_receiver,
|
||||
exit_sender,
|
||||
blockstore,
|
||||
FailEntryVerificationBroadcastRun::new(keypair, shred_version),
|
||||
@ -71,6 +85,7 @@ impl BroadcastStageType {
|
||||
sock,
|
||||
cluster_info,
|
||||
receiver,
|
||||
retransmit_slots_receiver,
|
||||
exit_sender,
|
||||
blockstore,
|
||||
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
|
||||
@ -79,7 +94,7 @@ impl BroadcastStageType {
|
||||
}
|
||||
}
|
||||
|
||||
type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
|
||||
pub type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
|
||||
trait BroadcastRun {
|
||||
fn run(
|
||||
&mut self,
|
||||
@ -135,25 +150,27 @@ impl BroadcastStage {
|
||||
loop {
|
||||
let res =
|
||||
broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
|
||||
let res = Self::handle_error(res);
|
||||
let res = Self::handle_error(res, "run");
|
||||
if let Some(res) = res {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
fn handle_error(r: Result<()>) -> Option<BroadcastStageReturnType> {
|
||||
fn handle_error(r: Result<()>, name: &str) -> Option<BroadcastStageReturnType> {
|
||||
if let Err(e) = r {
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected)
|
||||
| Error::SendError
|
||||
| Error::RecvError(RecvError) => {
|
||||
| Error::RecvError(RecvError)
|
||||
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Disconnected) => {
|
||||
return Some(BroadcastStageReturnType::ChannelDisconnected);
|
||||
}
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Timeout) => (),
|
||||
Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
||||
_ => {
|
||||
inc_new_counter_error!("streamer-broadcaster-error", 1, 1);
|
||||
error!("broadcaster error: {:?}", e);
|
||||
error!("{} broadcaster error: {:?}", name, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -180,6 +197,7 @@ impl BroadcastStage {
|
||||
socks: Vec<UdpSocket>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
exit_sender: &Arc<AtomicBool>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
|
||||
@ -189,6 +207,8 @@ impl BroadcastStage {
|
||||
let (socket_sender, socket_receiver) = channel();
|
||||
let (blockstore_sender, blockstore_receiver) = channel();
|
||||
let bs_run = broadcast_stage_run.clone();
|
||||
|
||||
let socket_sender_ = socket_sender.clone();
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-broadcaster".to_string())
|
||||
.spawn(move || {
|
||||
@ -196,7 +216,7 @@ impl BroadcastStage {
|
||||
Self::run(
|
||||
&btree,
|
||||
&receiver,
|
||||
&socket_sender,
|
||||
&socket_sender_,
|
||||
&blockstore_sender,
|
||||
bs_run,
|
||||
)
|
||||
@ -212,7 +232,7 @@ impl BroadcastStage {
|
||||
.name("solana-broadcaster-transmit".to_string())
|
||||
.spawn(move || loop {
|
||||
let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock);
|
||||
let res = Self::handle_error(res);
|
||||
let res = Self::handle_error(res, "solana-broadcaster-transmit");
|
||||
if let Some(res) = res {
|
||||
return res;
|
||||
}
|
||||
@ -229,7 +249,7 @@ impl BroadcastStage {
|
||||
.name("solana-broadcaster-record".to_string())
|
||||
.spawn(move || loop {
|
||||
let res = bs_record.record(&blockstore_receiver, &btree);
|
||||
let res = Self::handle_error(res);
|
||||
let res = Self::handle_error(res, "solana-broadcaster-record");
|
||||
if let Some(res) = res {
|
||||
return res;
|
||||
}
|
||||
@ -238,9 +258,68 @@ impl BroadcastStage {
|
||||
thread_hdls.push(t);
|
||||
}
|
||||
|
||||
let blockstore = blockstore.clone();
|
||||
let retransmit_thread = Builder::new()
|
||||
.name("solana-broadcaster-retransmit".to_string())
|
||||
.spawn(move || loop {
|
||||
if let Some(res) = Self::handle_error(
|
||||
Self::check_retransmit_signals(
|
||||
&blockstore,
|
||||
&retransmit_slots_receiver,
|
||||
&socket_sender,
|
||||
),
|
||||
"solana-broadcaster-retransmit-check_retransmit_signals",
|
||||
) {
|
||||
return res;
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
thread_hdls.push(retransmit_thread);
|
||||
Self { thread_hdls }
|
||||
}
|
||||
|
||||
fn check_retransmit_signals(
|
||||
blockstore: &Blockstore,
|
||||
retransmit_slots_receiver: &RetransmitSlotsReceiver,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(100);
|
||||
|
||||
// Check for a retransmit signal
|
||||
let mut retransmit_slots = retransmit_slots_receiver.recv_timeout(timer)?;
|
||||
while let Ok(new_retransmit_slots) = retransmit_slots_receiver.try_recv() {
|
||||
retransmit_slots.extend(new_retransmit_slots);
|
||||
}
|
||||
|
||||
for (_, bank) in retransmit_slots.iter() {
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
let data_shreds = Arc::new(
|
||||
blockstore
|
||||
.get_data_shreds_for_slot(bank.slot(), 0)
|
||||
.expect("My own shreds must be reconstructable"),
|
||||
);
|
||||
|
||||
if !data_shreds.is_empty() {
|
||||
socket_sender.send((stakes.clone(), data_shreds))?;
|
||||
}
|
||||
|
||||
let coding_shreds = Arc::new(
|
||||
blockstore
|
||||
.get_coding_shreds_for_slot(bank.slot(), 0)
|
||||
.expect("My own shreds must be reconstructable"),
|
||||
);
|
||||
|
||||
if !coding_shreds.is_empty() {
|
||||
socket_sender.send((stakes.clone(), coding_shreds))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<BroadcastStageReturnType> {
|
||||
for thread_hdl in self.thread_hdls.into_iter() {
|
||||
let _ = thread_hdl.join();
|
||||
@ -250,22 +329,137 @@ impl BroadcastStage {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::entry::create_ticks;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_ledger::{
|
||||
blockstore::{make_slot_entries, Blockstore},
|
||||
entry::create_ticks,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
shred::{max_ticks_per_n_shreds, Shredder, RECOMMENDED_FEC_RATE},
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
path::Path,
|
||||
sync::atomic::AtomicBool,
|
||||
sync::mpsc::channel,
|
||||
sync::{Arc, RwLock},
|
||||
thread::sleep,
|
||||
};
|
||||
|
||||
pub fn make_transmit_shreds(
|
||||
slot: Slot,
|
||||
num: u64,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
) -> (
|
||||
Vec<Shred>,
|
||||
Vec<Shred>,
|
||||
Vec<TransmitShreds>,
|
||||
Vec<TransmitShreds>,
|
||||
) {
|
||||
let num_entries = max_ticks_per_n_shreds(num);
|
||||
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..]);
|
||||
(
|
||||
data_shreds.clone(),
|
||||
coding_shreds.clone(),
|
||||
data_shreds
|
||||
.into_iter()
|
||||
.map(|s| (stakes.clone(), Arc::new(vec![s])))
|
||||
.collect(),
|
||||
coding_shreds
|
||||
.into_iter()
|
||||
.map(|s| (stakes.clone(), Arc::new(vec![s])))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn check_all_shreds_received(
|
||||
transmit_receiver: &Receiver<TransmitShreds>,
|
||||
mut data_index: u64,
|
||||
mut coding_index: u64,
|
||||
num_expected_data_shreds: u64,
|
||||
num_expected_coding_shreds: u64,
|
||||
) {
|
||||
while let Ok(new_retransmit_slots) = transmit_receiver.try_recv() {
|
||||
if new_retransmit_slots.1[0].is_data() {
|
||||
for data_shred in new_retransmit_slots.1.iter() {
|
||||
assert_eq!(data_shred.index() as u64, data_index);
|
||||
data_index += 1;
|
||||
}
|
||||
} else {
|
||||
assert_eq!(new_retransmit_slots.1[0].index() as u64, coding_index);
|
||||
for coding_shred in new_retransmit_slots.1.iter() {
|
||||
assert_eq!(coding_shred.index() as u64, coding_index);
|
||||
coding_index += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(num_expected_data_shreds, data_index);
|
||||
assert_eq!(num_expected_coding_shreds, coding_index);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_duplicate_retransmit_signal() {
|
||||
// Setup
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let (transmit_sender, transmit_receiver) = channel();
|
||||
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
|
||||
let bank0 = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
// Make some shreds
|
||||
let updated_slot = 0;
|
||||
let (all_data_shreds, all_coding_shreds, _, _all_coding_transmit_shreds) =
|
||||
make_transmit_shreds(updated_slot, 10, None);
|
||||
let num_data_shreds = all_data_shreds.len();
|
||||
let num_coding_shreds = all_coding_shreds.len();
|
||||
assert!(num_data_shreds >= 10);
|
||||
|
||||
// Insert all the shreds
|
||||
blockstore
|
||||
.insert_shreds(all_data_shreds, None, true)
|
||||
.unwrap();
|
||||
blockstore
|
||||
.insert_shreds(all_coding_shreds, None, true)
|
||||
.unwrap();
|
||||
|
||||
// Insert duplicate retransmit signal, blocks should
|
||||
// only be retransmitted once
|
||||
retransmit_slots_sender
|
||||
.send(vec![(updated_slot, bank0.clone())].into_iter().collect())
|
||||
.unwrap();
|
||||
retransmit_slots_sender
|
||||
.send(vec![(updated_slot, bank0.clone())].into_iter().collect())
|
||||
.unwrap();
|
||||
BroadcastStage::check_retransmit_signals(
|
||||
&blockstore,
|
||||
&retransmit_slots_receiver,
|
||||
&transmit_sender,
|
||||
)
|
||||
.unwrap();
|
||||
// Check all the data shreds were received only once
|
||||
check_all_shreds_received(
|
||||
&transmit_receiver,
|
||||
0,
|
||||
0,
|
||||
num_data_shreds as u64,
|
||||
num_coding_shreds as u64,
|
||||
);
|
||||
}
|
||||
|
||||
struct MockBroadcastStage {
|
||||
blockstore: Arc<Blockstore>,
|
||||
@ -277,6 +471,7 @@ mod test {
|
||||
leader_pubkey: &Pubkey,
|
||||
ledger_path: &Path,
|
||||
entry_receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
) -> MockBroadcastStage {
|
||||
// Make the database ledger
|
||||
let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
|
||||
@ -304,6 +499,7 @@ mod test {
|
||||
leader_info.sockets.broadcast,
|
||||
cluster_info,
|
||||
entry_receiver,
|
||||
retransmit_slots_receiver,
|
||||
&exit_sender,
|
||||
&blockstore,
|
||||
StandardBroadcastRun::new(leader_keypair, 0),
|
||||
@ -326,10 +522,12 @@ mod test {
|
||||
let leader_keypair = Keypair::new();
|
||||
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
||||
let broadcast_service = setup_dummy_broadcast_service(
|
||||
&leader_keypair.pubkey(),
|
||||
&ledger_path,
|
||||
entry_receiver,
|
||||
retransmit_slots_receiver,
|
||||
);
|
||||
let start_tick_height;
|
||||
let max_tick_height;
|
||||
@ -348,6 +546,7 @@ mod test {
|
||||
.expect("Expect successful send to broadcast service");
|
||||
}
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(2000));
|
||||
|
||||
trace!(
|
||||
@ -364,6 +563,7 @@ mod test {
|
||||
assert_eq!(entries.len(), max_tick_height as usize);
|
||||
|
||||
drop(entry_sender);
|
||||
drop(retransmit_slots_sender);
|
||||
broadcast_service
|
||||
.broadcast_service
|
||||
.join()
|
||||
|
@ -76,7 +76,7 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::genesis_config::GenesisConfig;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::system_transaction;
|
||||
|
@ -1,11 +1,11 @@
|
||||
use super::broadcast_utils::{self, ReceiveResults};
|
||||
use super::*;
|
||||
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::timing::duration_as_us;
|
||||
use solana_ledger::{
|
||||
entry::Entry,
|
||||
shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK},
|
||||
};
|
||||
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -212,7 +212,8 @@ impl StandardBroadcastRun {
|
||||
blockstore_sender.send(data_shreds.clone())?;
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
|
||||
let coding_shreds = Arc::new(coding_shreds);
|
||||
socket_sender.send((stakes, coding_shreds))?;
|
||||
socket_sender.send((stakes, coding_shreds.clone()))?;
|
||||
blockstore_sender.send(coding_shreds)?;
|
||||
self.update_broadcast_stats(BroadcastStats {
|
||||
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
|
||||
receive_elapsed: duration_as_us(&receive_elapsed),
|
||||
@ -353,14 +354,13 @@ impl BroadcastRun for StandardBroadcastRun {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::genesis_utils::create_genesis_config;
|
||||
use solana_ledger::genesis_utils::create_genesis_config;
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
|
||||
shred::max_ticks_per_n_shreds,
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
genesis_config::GenesisConfig,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
|
@ -12,23 +12,19 @@
|
||||
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
|
||||
//!
|
||||
//! Bank needs to provide an interface for us to query the stake weight
|
||||
use crate::crds_value::CompressionType::*;
|
||||
use crate::crds_value::EpochIncompleteSlots;
|
||||
use crate::packet::limited_deserialize;
|
||||
use crate::streamer::{PacketReceiver, PacketSender};
|
||||
use crate::{
|
||||
contact_info::ContactInfo,
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Vote},
|
||||
packet::{Packet, PACKET_DATA_SIZE},
|
||||
crds_value::{
|
||||
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, SnapshotHash, Vote,
|
||||
},
|
||||
epoch_slots::EpochSlots,
|
||||
result::{Error, Result},
|
||||
sendmmsg::{multicast, send_mmsg},
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
};
|
||||
use bincode::{serialize, serialized_size};
|
||||
use compression::prelude::*;
|
||||
use core::cmp;
|
||||
use itertools::Itertools;
|
||||
use rayon::iter::IntoParallelIterator;
|
||||
@ -41,21 +37,26 @@ use solana_net_utils::{
|
||||
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
|
||||
multi_bind_in_range, PortRange,
|
||||
};
|
||||
use solana_perf::packet::{to_packets_with_destination, Packets, PacketsRecycler};
|
||||
use solana_perf::packet::{
|
||||
limited_deserialize, to_packets_with_destination, Packet, Packets, PacketsRecycler,
|
||||
PACKET_DATA_SIZE,
|
||||
};
|
||||
use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::timing::duration_as_s;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT},
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH},
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signable, Signature},
|
||||
signature::{Keypair, Signable, Signature, Signer},
|
||||
timing::{duration_as_ms, timestamp},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_streamer::sendmmsg::{multicast, send_mmsg};
|
||||
use solana_streamer::streamer::{PacketReceiver, PacketSender};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
cmp::min,
|
||||
collections::{BTreeSet, HashMap, HashSet},
|
||||
collections::{HashMap, HashSet},
|
||||
fmt,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
@ -71,7 +72,8 @@ pub const DATA_PLANE_FANOUT: usize = 200;
|
||||
/// milliseconds we sleep for between gossip requests
|
||||
pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
|
||||
/// The maximum size of a bloom filter
|
||||
pub const MAX_BLOOM_SIZE: usize = 1018;
|
||||
pub const MAX_BLOOM_SIZE: usize = MAX_CRDS_OBJECT_SIZE;
|
||||
pub const MAX_CRDS_OBJECT_SIZE: usize = 928;
|
||||
/// The maximum size of a protocol payload
|
||||
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
|
||||
/// The largest protocol header size
|
||||
@ -81,9 +83,6 @@ const MAX_PROTOCOL_HEADER_SIZE: u64 = 214;
|
||||
/// 128MB/PACKET_DATA_SIZE
|
||||
const MAX_GOSSIP_TRAFFIC: usize = 128_000_000 / PACKET_DATA_SIZE;
|
||||
|
||||
const NUM_BITS_PER_BYTE: u64 = 8;
|
||||
const MIN_SIZE_TO_COMPRESS_GZIP: u64 = 64;
|
||||
|
||||
/// Keep the number of snapshot hashes a node publishes under MAX_PROTOCOL_PAYLOAD_SIZE
|
||||
pub const MAX_SNAPSHOT_HASHES: usize = 16;
|
||||
|
||||
@ -180,6 +179,14 @@ struct PullData {
|
||||
pub filter: CrdsFilter,
|
||||
}
|
||||
|
||||
pub fn make_accounts_hashes_message(
|
||||
keypair: &Keypair,
|
||||
accounts_hashes: Vec<(Slot, Hash)>,
|
||||
) -> Option<CrdsValue> {
|
||||
let message = CrdsData::AccountsHashes(SnapshotHash::new(keypair.pubkey(), accounts_hashes));
|
||||
Some(CrdsValue::new_signed(message, keypair))
|
||||
}
|
||||
|
||||
// TODO These messages should go through the gpu pipeline for spam filtering
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
@ -254,6 +261,16 @@ impl ClusterInfo {
|
||||
self.lookup(&self.id()).cloned().unwrap()
|
||||
}
|
||||
|
||||
pub fn lookup_epoch_slots(&self, ix: EpochSlotsIndex) -> EpochSlots {
|
||||
let entry = CrdsValueLabel::EpochSlots(ix, self.id());
|
||||
self.gossip
|
||||
.crds
|
||||
.lookup(&entry)
|
||||
.and_then(CrdsValue::epoch_slots)
|
||||
.cloned()
|
||||
.unwrap_or_else(|| EpochSlots::new(self.id(), timestamp()))
|
||||
}
|
||||
|
||||
pub fn contact_info_trace(&self) -> String {
|
||||
let now = timestamp();
|
||||
let mut spy_nodes = 0;
|
||||
@ -328,137 +345,112 @@ impl ClusterInfo {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn compress_incomplete_slots(incomplete_slots: &BTreeSet<Slot>) -> EpochIncompleteSlots {
|
||||
if !incomplete_slots.is_empty() {
|
||||
let first_slot = incomplete_slots
|
||||
.iter()
|
||||
.next()
|
||||
.expect("expected to find at least one slot");
|
||||
let last_slot = incomplete_slots
|
||||
.iter()
|
||||
.next_back()
|
||||
.expect("expected to find last slot");
|
||||
let num_uncompressed_bits = last_slot.saturating_sub(*first_slot) + 1;
|
||||
let num_uncompressed_bytes = if num_uncompressed_bits % NUM_BITS_PER_BYTE > 0 {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
} + num_uncompressed_bits / NUM_BITS_PER_BYTE;
|
||||
let mut uncompressed = vec![0u8; num_uncompressed_bytes as usize];
|
||||
incomplete_slots.iter().for_each(|slot| {
|
||||
let offset_from_first_slot = slot.saturating_sub(*first_slot);
|
||||
let index = offset_from_first_slot / NUM_BITS_PER_BYTE;
|
||||
let bit_index = offset_from_first_slot % NUM_BITS_PER_BYTE;
|
||||
uncompressed[index as usize] |= 1 << bit_index;
|
||||
});
|
||||
if num_uncompressed_bytes >= MIN_SIZE_TO_COMPRESS_GZIP {
|
||||
if let Ok(compressed) = uncompressed
|
||||
.iter()
|
||||
.cloned()
|
||||
.encode(&mut GZipEncoder::new(), Action::Finish)
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
return EpochIncompleteSlots {
|
||||
first: *first_slot,
|
||||
compression: GZip,
|
||||
compressed_list: compressed,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
return EpochIncompleteSlots {
|
||||
first: *first_slot,
|
||||
compression: Uncompressed,
|
||||
compressed_list: uncompressed,
|
||||
};
|
||||
}
|
||||
}
|
||||
EpochIncompleteSlots::default()
|
||||
}
|
||||
|
||||
fn bitmap_to_slot_list(first: Slot, bitmap: &[u8]) -> BTreeSet<Slot> {
|
||||
let mut old_incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
|
||||
bitmap.iter().enumerate().for_each(|(i, val)| {
|
||||
if *val != 0 {
|
||||
(0..8).for_each(|bit_index| {
|
||||
if (1 << bit_index & *val) != 0 {
|
||||
let slot = first + i as u64 * NUM_BITS_PER_BYTE + bit_index as u64;
|
||||
old_incomplete_slots.insert(slot);
|
||||
}
|
||||
})
|
||||
}
|
||||
});
|
||||
old_incomplete_slots
|
||||
}
|
||||
|
||||
pub fn decompress_incomplete_slots(slots: &EpochIncompleteSlots) -> BTreeSet<Slot> {
|
||||
match slots.compression {
|
||||
Uncompressed => Self::bitmap_to_slot_list(slots.first, &slots.compressed_list),
|
||||
GZip => {
|
||||
if let Ok(decompressed) = slots
|
||||
.compressed_list
|
||||
.iter()
|
||||
.cloned()
|
||||
.decode(&mut GZipDecoder::new())
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
Self::bitmap_to_slot_list(slots.first, &decompressed)
|
||||
} else {
|
||||
BTreeSet::new()
|
||||
}
|
||||
}
|
||||
BZip2 => {
|
||||
if let Ok(decompressed) = slots
|
||||
.compressed_list
|
||||
.iter()
|
||||
.cloned()
|
||||
.decode(&mut BZip2Decoder::new())
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
Self::bitmap_to_slot_list(slots.first, &decompressed)
|
||||
} else {
|
||||
BTreeSet::new()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_epoch_slots(
|
||||
&mut self,
|
||||
id: Pubkey,
|
||||
root: Slot,
|
||||
min: Slot,
|
||||
slots: BTreeSet<Slot>,
|
||||
incomplete_slots: &BTreeSet<Slot>,
|
||||
) {
|
||||
let compressed = Self::compress_incomplete_slots(incomplete_slots);
|
||||
pub fn push_lowest_slot(&mut self, id: Pubkey, min: Slot) {
|
||||
let now = timestamp();
|
||||
let entry = CrdsValue::new_signed(
|
||||
CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(id, root, min, slots, vec![compressed], now),
|
||||
),
|
||||
&self.keypair,
|
||||
);
|
||||
self.gossip
|
||||
.process_push_message(&self.id(), vec![entry], now);
|
||||
let last = self
|
||||
.gossip
|
||||
.crds
|
||||
.lookup(&CrdsValueLabel::LowestSlot(self.id()))
|
||||
.and_then(|x| x.lowest_slot())
|
||||
.map(|x| x.lowest)
|
||||
.unwrap_or(0);
|
||||
if min > last {
|
||||
let entry = CrdsValue::new_signed(
|
||||
CrdsData::LowestSlot(0, LowestSlot::new(id, min, now)),
|
||||
&self.keypair,
|
||||
);
|
||||
self.gossip
|
||||
.process_push_message(&self.id(), vec![entry], now);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_epoch_slots(&mut self, update: &[Slot]) {
|
||||
let mut num = 0;
|
||||
let mut current_slots: Vec<_> = (0..crds_value::MAX_EPOCH_SLOTS)
|
||||
.filter_map(|ix| {
|
||||
Some((
|
||||
self.gossip
|
||||
.crds
|
||||
.lookup(&CrdsValueLabel::EpochSlots(ix, self.id()))
|
||||
.and_then(CrdsValue::epoch_slots)
|
||||
.and_then(|x| Some((x.wallclock, x.first_slot()?)))?,
|
||||
ix,
|
||||
))
|
||||
})
|
||||
.collect();
|
||||
current_slots.sort();
|
||||
let min_slot: Slot = current_slots
|
||||
.iter()
|
||||
.map(|((_, s), _)| *s)
|
||||
.min()
|
||||
.unwrap_or(0);
|
||||
let max_slot: Slot = update.iter().max().cloned().unwrap_or(0);
|
||||
let total_slots = max_slot as isize - min_slot as isize;
|
||||
// WARN if CRDS is not storing at least a full epoch worth of slots
|
||||
if DEFAULT_SLOTS_PER_EPOCH as isize > total_slots
|
||||
&& crds_value::MAX_EPOCH_SLOTS as usize <= current_slots.len()
|
||||
{
|
||||
inc_new_counter_warn!("cluster_info-epoch_slots-filled", 1);
|
||||
warn!(
|
||||
"EPOCH_SLOTS are filling up FAST {}/{}",
|
||||
total_slots,
|
||||
current_slots.len()
|
||||
);
|
||||
}
|
||||
let mut reset = false;
|
||||
let mut epoch_slot_index = current_slots.last().map(|(_, x)| *x).unwrap_or(0);
|
||||
while num < update.len() {
|
||||
let ix = (epoch_slot_index % crds_value::MAX_EPOCH_SLOTS) as u8;
|
||||
let now = timestamp();
|
||||
let mut slots = if !reset {
|
||||
self.lookup_epoch_slots(ix)
|
||||
} else {
|
||||
EpochSlots::new(self.id(), now)
|
||||
};
|
||||
let n = slots.fill(&update[num..], now);
|
||||
if n > 0 {
|
||||
let entry = CrdsValue::new_signed(CrdsData::EpochSlots(ix, slots), &self.keypair);
|
||||
self.gossip
|
||||
.process_push_message(&self.id(), vec![entry], now);
|
||||
}
|
||||
num += n;
|
||||
if num < update.len() {
|
||||
epoch_slot_index += 1;
|
||||
reset = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_message(&mut self, message: CrdsValue) {
|
||||
let now = message.wallclock();
|
||||
let id = message.pubkey();
|
||||
self.gossip.process_push_message(&id, vec![message], now);
|
||||
}
|
||||
|
||||
pub fn push_accounts_hashes(&mut self, accounts_hashes: Vec<(Slot, Hash)>) {
|
||||
if accounts_hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
warn!(
|
||||
"accounts hashes too large, ignored: {}",
|
||||
accounts_hashes.len(),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let message = CrdsData::AccountsHashes(SnapshotHash::new(self.id(), accounts_hashes));
|
||||
self.push_message(CrdsValue::new_signed(message, &self.keypair));
|
||||
}
|
||||
|
||||
pub fn push_snapshot_hashes(&mut self, snapshot_hashes: Vec<(Slot, Hash)>) {
|
||||
if snapshot_hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
warn!(
|
||||
"snapshot_hashes too large, ignored: {}",
|
||||
snapshot_hashes.len()
|
||||
"snapshot hashes too large, ignored: {}",
|
||||
snapshot_hashes.len(),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let now = timestamp();
|
||||
let entry = CrdsValue::new_signed(
|
||||
CrdsData::SnapshotHash(SnapshotHash::new(self.id(), snapshot_hashes, now)),
|
||||
&self.keypair,
|
||||
);
|
||||
self.gossip
|
||||
.process_push_message(&self.id(), vec![entry], now);
|
||||
let message = CrdsData::SnapshotHashes(SnapshotHash::new(self.id(), snapshot_hashes));
|
||||
self.push_message(CrdsValue::new_signed(message, &self.keypair));
|
||||
}
|
||||
|
||||
pub fn push_vote(&mut self, tower_index: usize, vote: Transaction) {
|
||||
@ -482,23 +474,23 @@ impl ClusterInfo {
|
||||
/// since. This allows the bank to query for new votes only.
|
||||
///
|
||||
/// * return - The votes, and the max timestamp from the new set.
|
||||
pub fn get_votes(&self, since: u64) -> (Vec<Transaction>, u64) {
|
||||
let votes: Vec<_> = self
|
||||
pub fn get_votes(&self, since: u64) -> (Vec<CrdsValueLabel>, Vec<Transaction>, u64) {
|
||||
let mut max_ts = since;
|
||||
let (labels, txs): (Vec<CrdsValueLabel>, Vec<Transaction>) = self
|
||||
.gossip
|
||||
.crds
|
||||
.table
|
||||
.values()
|
||||
.filter(|x| x.insert_timestamp > since)
|
||||
.filter_map(|x| {
|
||||
.iter()
|
||||
.filter(|(_, x)| x.insert_timestamp > since)
|
||||
.filter_map(|(label, x)| {
|
||||
max_ts = std::cmp::max(x.insert_timestamp, max_ts);
|
||||
x.value
|
||||
.vote()
|
||||
.map(|v| (x.insert_timestamp, v.transaction.clone()))
|
||||
.map(|v| (label.clone(), v.transaction.clone()))
|
||||
})
|
||||
.collect();
|
||||
let max_ts = votes.iter().map(|x| x.0).max().unwrap_or(since);
|
||||
let txs: Vec<Transaction> = votes.into_iter().map(|x| x.1).collect();
|
||||
.unzip();
|
||||
inc_new_counter_info!("cluster_info-get_votes-count", txs.len());
|
||||
(txs, max_ts)
|
||||
(labels, txs, max_ts)
|
||||
}
|
||||
|
||||
pub fn get_snapshot_hash(&self, slot: Slot) -> Vec<(Pubkey, Hash)> {
|
||||
@ -518,29 +510,55 @@ impl ClusterInfo {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn get_accounts_hash_for_node(&self, pubkey: &Pubkey) -> Option<&Vec<(Slot, Hash)>> {
|
||||
self.gossip
|
||||
.crds
|
||||
.table
|
||||
.get(&CrdsValueLabel::AccountsHashes(*pubkey))
|
||||
.map(|x| &x.value.accounts_hash().unwrap().hashes)
|
||||
}
|
||||
|
||||
pub fn get_snapshot_hash_for_node(&self, pubkey: &Pubkey) -> Option<&Vec<(Slot, Hash)>> {
|
||||
self.gossip
|
||||
.crds
|
||||
.table
|
||||
.get(&CrdsValueLabel::SnapshotHash(*pubkey))
|
||||
.get(&CrdsValueLabel::SnapshotHashes(*pubkey))
|
||||
.map(|x| &x.value.snapshot_hash().unwrap().hashes)
|
||||
}
|
||||
|
||||
pub fn get_epoch_state_for_node(
|
||||
pub fn get_lowest_slot_for_node(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
since: Option<u64>,
|
||||
) -> Option<(&EpochSlots, u64)> {
|
||||
) -> Option<(&LowestSlot, u64)> {
|
||||
self.gossip
|
||||
.crds
|
||||
.table
|
||||
.get(&CrdsValueLabel::EpochSlots(*pubkey))
|
||||
.get(&CrdsValueLabel::LowestSlot(*pubkey))
|
||||
.filter(|x| {
|
||||
since
|
||||
.map(|since| x.insert_timestamp > since)
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.map(|x| (x.value.epoch_slots().unwrap(), x.insert_timestamp))
|
||||
.map(|x| (x.value.lowest_slot().unwrap(), x.insert_timestamp))
|
||||
}
|
||||
|
||||
pub fn get_epoch_slots_since(&self, since: Option<u64>) -> (Vec<EpochSlots>, Option<u64>) {
|
||||
let vals: Vec<_> = self
|
||||
.gossip
|
||||
.crds
|
||||
.table
|
||||
.values()
|
||||
.filter(|x| {
|
||||
since
|
||||
.map(|since| x.insert_timestamp > since)
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.filter_map(|x| Some((x.value.epoch_slots()?, x.insert_timestamp)))
|
||||
.collect();
|
||||
let max = vals.iter().map(|x| x.1).max().or(since);
|
||||
let vec = vals.into_iter().map(|x| x.0).cloned().collect();
|
||||
(vec, max)
|
||||
}
|
||||
|
||||
pub fn get_contact_info_for_node(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
|
||||
@ -684,8 +702,8 @@ impl ClusterInfo {
|
||||
&& x.shred_version == me.shred_version
|
||||
&& ContactInfo::is_valid_address(&x.serve_repair)
|
||||
&& {
|
||||
self.get_epoch_state_for_node(&x.id, None)
|
||||
.map(|(epoch_slots, _)| epoch_slots.lowest <= slot)
|
||||
self.get_lowest_slot_for_node(&x.id, None)
|
||||
.map(|(lowest_slot, _)| lowest_slot.lowest <= slot)
|
||||
.unwrap_or_else(|| /* fallback to legacy behavior */ true)
|
||||
}
|
||||
})
|
||||
@ -1615,7 +1633,7 @@ impl ClusterInfo {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
|
||||
pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
|
||||
ContactInfo {
|
||||
id: *id,
|
||||
gossip,
|
||||
@ -2256,30 +2274,62 @@ mod tests {
|
||||
#[test]
|
||||
fn test_push_vote() {
|
||||
let keys = Keypair::new();
|
||||
let now = timestamp();
|
||||
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
|
||||
// make sure empty crds is handled correctly
|
||||
let (votes, max_ts) = cluster_info.get_votes(now);
|
||||
let now = timestamp();
|
||||
let (_, votes, max_ts) = cluster_info.get_votes(now);
|
||||
assert_eq!(votes, vec![]);
|
||||
assert_eq!(max_ts, now);
|
||||
|
||||
// add a vote
|
||||
let tx = test_tx();
|
||||
cluster_info.push_vote(0, tx.clone());
|
||||
let index = 1;
|
||||
cluster_info.push_vote(index, tx.clone());
|
||||
|
||||
// -1 to make sure that the clock is strictly lower then when insert occurred
|
||||
let (votes, max_ts) = cluster_info.get_votes(now - 1);
|
||||
let (labels, votes, max_ts) = cluster_info.get_votes(now - 1);
|
||||
assert_eq!(votes, vec![tx]);
|
||||
assert_eq!(labels.len(), 1);
|
||||
match labels[0] {
|
||||
CrdsValueLabel::Vote(_, pubkey) => {
|
||||
assert_eq!(pubkey, keys.pubkey());
|
||||
}
|
||||
|
||||
_ => panic!("Bad match"),
|
||||
}
|
||||
assert!(max_ts >= now - 1);
|
||||
|
||||
// make sure timestamp filter works
|
||||
let (votes, new_max_ts) = cluster_info.get_votes(max_ts);
|
||||
let (_, votes, new_max_ts) = cluster_info.get_votes(max_ts);
|
||||
assert_eq!(votes, vec![]);
|
||||
assert_eq!(max_ts, new_max_ts);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_push_epoch_slots() {
|
||||
let keys = Keypair::new();
|
||||
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let (slots, since) = cluster_info.get_epoch_slots_since(None);
|
||||
assert!(slots.is_empty());
|
||||
assert!(since.is_none());
|
||||
cluster_info.push_epoch_slots(&[0]);
|
||||
|
||||
let (slots, since) = cluster_info.get_epoch_slots_since(Some(std::u64::MAX));
|
||||
assert!(slots.is_empty());
|
||||
assert_eq!(since, Some(std::u64::MAX));
|
||||
|
||||
let (slots, since) = cluster_info.get_epoch_slots_since(None);
|
||||
assert_eq!(slots.len(), 1);
|
||||
assert!(since.is_some());
|
||||
|
||||
let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
|
||||
assert!(slots.is_empty());
|
||||
assert_eq!(since2, since);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_entrypoint() {
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
@ -2333,20 +2383,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_split_messages_large() {
|
||||
let mut btree_slots = BTreeSet::new();
|
||||
for i in 0..128 {
|
||||
btree_slots.insert(i);
|
||||
}
|
||||
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
let value = CrdsValue::new_unsigned(CrdsData::LowestSlot(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots: btree_slots,
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
LowestSlot::new(Pubkey::default(), 0, 0),
|
||||
));
|
||||
test_split_messages(value);
|
||||
}
|
||||
@ -2358,39 +2397,19 @@ mod tests {
|
||||
let payload: Vec<CrdsValue> = vec![];
|
||||
let vec_size = serialized_size(&payload).unwrap();
|
||||
let desired_size = MAX_PROTOCOL_PAYLOAD_SIZE - vec_size;
|
||||
let mut value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots: BTreeSet::new(),
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
));
|
||||
let mut value = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash {
|
||||
from: Pubkey::default(),
|
||||
hashes: vec![],
|
||||
wallclock: 0,
|
||||
}));
|
||||
|
||||
let mut i = 0;
|
||||
while value.size() <= desired_size {
|
||||
let slots = (0..i).collect::<BTreeSet<_>>();
|
||||
if slots.len() > 200 {
|
||||
panic!(
|
||||
"impossible to match size: last {:?} vs desired {:?}",
|
||||
serialized_size(&value).unwrap(),
|
||||
desired_size
|
||||
);
|
||||
}
|
||||
value.data = CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots,
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
);
|
||||
value.data = CrdsData::SnapshotHashes(SnapshotHash {
|
||||
from: Pubkey::default(),
|
||||
hashes: vec![(0, Hash::default()); i],
|
||||
wallclock: 0,
|
||||
});
|
||||
i += 1;
|
||||
}
|
||||
let split = ClusterInfo::split_gossip_messages(vec![value.clone()]);
|
||||
@ -2520,26 +2539,17 @@ mod tests {
|
||||
node_keypair,
|
||||
);
|
||||
for i in 0..10 {
|
||||
let mut peer_root = 5;
|
||||
let mut peer_lowest = 0;
|
||||
if i >= 5 {
|
||||
// make these invalid for the upcoming repair request
|
||||
peer_root = 15;
|
||||
peer_lowest = 10;
|
||||
}
|
||||
let other_node_pubkey = Pubkey::new_rand();
|
||||
let other_node = ContactInfo::new_localhost(&other_node_pubkey, timestamp());
|
||||
cluster_info.insert_info(other_node.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
let value = CrdsValue::new_unsigned(CrdsData::LowestSlot(
|
||||
0,
|
||||
EpochSlots::new(
|
||||
other_node_pubkey,
|
||||
peer_root,
|
||||
peer_lowest,
|
||||
BTreeSet::new(),
|
||||
vec![],
|
||||
timestamp(),
|
||||
),
|
||||
LowestSlot::new(other_node_pubkey, peer_lowest, timestamp()),
|
||||
));
|
||||
let _ = cluster_info.gossip.crds.insert(value, timestamp());
|
||||
}
|
||||
@ -2549,7 +2559,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_max_bloom_size() {
|
||||
assert_eq!(MAX_BLOOM_SIZE, max_bloom_size());
|
||||
// check that the constant fits into the dynamic size
|
||||
assert!(MAX_BLOOM_SIZE <= max_bloom_size());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -2602,36 +2613,24 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compress_incomplete_slots() {
|
||||
let mut incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
|
||||
|
||||
assert_eq!(
|
||||
EpochIncompleteSlots::default(),
|
||||
ClusterInfo::compress_incomplete_slots(&incomplete_slots)
|
||||
fn test_push_epoch_slots_large() {
|
||||
use rand::Rng;
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
let mut cluster_info = ClusterInfo::new(
|
||||
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
|
||||
node_keypair,
|
||||
);
|
||||
|
||||
incomplete_slots.insert(100);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(100, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(104);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(100, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(80);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(80, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(10000);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(80, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
let mut range: Vec<Slot> = vec![];
|
||||
//random should be hard to compress
|
||||
for _ in 0..32000 {
|
||||
let last = *range.last().unwrap_or(&0);
|
||||
range.push(last + rand::thread_rng().gen_range(1, 32));
|
||||
}
|
||||
cluster_info.push_epoch_slots(&range[..16000]);
|
||||
cluster_info.push_epoch_slots(&range[16000..]);
|
||||
let (slots, since) = cluster_info.get_epoch_slots_since(None);
|
||||
let slots: Vec<_> = slots.iter().flat_map(|x| x.to_slots(0)).collect();
|
||||
assert_eq!(slots, range);
|
||||
assert!(since.is_some());
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
348
core/src/cluster_slots.rs
Normal file
348
core/src/cluster_slots.rs
Normal file
@ -0,0 +1,348 @@
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, contact_info::ContactInfo, epoch_slots::EpochSlots,
|
||||
serve_repair::RepairType,
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::epoch_stakes::NodeIdToVoteAccounts;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
pub type SlotPubkeys = HashMap<Arc<Pubkey>, u64>;
|
||||
pub type ClusterSlotsMap = RwLock<HashMap<Slot, Arc<RwLock<SlotPubkeys>>>>;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ClusterSlots {
|
||||
cluster_slots: ClusterSlotsMap,
|
||||
keys: RwLock<HashSet<Arc<Pubkey>>>,
|
||||
since: RwLock<Option<u64>>,
|
||||
validator_stakes: RwLock<Arc<NodeIdToVoteAccounts>>,
|
||||
epoch: RwLock<Option<u64>>,
|
||||
self_id: RwLock<Pubkey>,
|
||||
}
|
||||
|
||||
impl ClusterSlots {
|
||||
pub fn lookup(&self, slot: Slot) -> Option<Arc<RwLock<SlotPubkeys>>> {
|
||||
self.cluster_slots.read().unwrap().get(&slot).cloned()
|
||||
}
|
||||
pub fn update(
|
||||
&self,
|
||||
root: Slot,
|
||||
cluster_info: &RwLock<ClusterInfo>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
) {
|
||||
self.update_peers(cluster_info, bank_forks);
|
||||
let since = *self.since.read().unwrap();
|
||||
let epoch_slots = cluster_info.read().unwrap().get_epoch_slots_since(since);
|
||||
self.update_internal(root, epoch_slots);
|
||||
}
|
||||
fn update_internal(&self, root: Slot, epoch_slots: (Vec<EpochSlots>, Option<u64>)) {
|
||||
let (epoch_slots_list, since) = epoch_slots;
|
||||
for epoch_slots in epoch_slots_list {
|
||||
let slots = epoch_slots.to_slots(root);
|
||||
for slot in &slots {
|
||||
if *slot <= root {
|
||||
continue;
|
||||
}
|
||||
let pubkey = Arc::new(epoch_slots.from);
|
||||
let exists = self.keys.read().unwrap().get(&pubkey).is_some();
|
||||
if !exists {
|
||||
self.keys.write().unwrap().insert(pubkey.clone());
|
||||
}
|
||||
let from = self.keys.read().unwrap().get(&pubkey).unwrap().clone();
|
||||
let balance = self
|
||||
.validator_stakes
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&from)
|
||||
.map(|v| v.total_stake)
|
||||
.unwrap_or(0);
|
||||
|
||||
let mut slot_pubkeys = self.cluster_slots.read().unwrap().get(slot).cloned();
|
||||
if slot_pubkeys.is_none() {
|
||||
let new_slot_pubkeys = Arc::new(RwLock::new(HashMap::default()));
|
||||
self.cluster_slots
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(*slot, new_slot_pubkeys.clone());
|
||||
slot_pubkeys = Some(new_slot_pubkeys);
|
||||
}
|
||||
|
||||
slot_pubkeys
|
||||
.unwrap()
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(from.clone(), balance);
|
||||
}
|
||||
}
|
||||
self.cluster_slots.write().unwrap().retain(|x, _| *x > root);
|
||||
self.keys
|
||||
.write()
|
||||
.unwrap()
|
||||
.retain(|x| Arc::strong_count(x) > 1);
|
||||
*self.since.write().unwrap() = since;
|
||||
}
|
||||
pub fn collect(&self, id: &Pubkey) -> HashSet<Slot> {
|
||||
self.cluster_slots
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.filter(|(_, keys)| keys.read().unwrap().get(id).is_some())
|
||||
.map(|(slot, _)| slot)
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn update_peers(&self, cluster_info: &RwLock<ClusterInfo>, bank_forks: &RwLock<BankForks>) {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
let root_epoch = root_bank.epoch();
|
||||
let my_epoch = *self.epoch.read().unwrap();
|
||||
|
||||
if Some(root_epoch) != my_epoch {
|
||||
let validator_stakes = root_bank
|
||||
.epoch_stakes(root_epoch)
|
||||
.expect(
|
||||
"Bank must have epoch stakes
|
||||
for its own epoch",
|
||||
)
|
||||
.node_id_to_vote_accounts()
|
||||
.clone();
|
||||
|
||||
*self.validator_stakes.write().unwrap() = validator_stakes;
|
||||
let id = cluster_info.read().unwrap().id();
|
||||
*self.self_id.write().unwrap() = id;
|
||||
*self.epoch.write().unwrap() = Some(root_epoch);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<(u64, usize)> {
|
||||
let slot_peers = self.lookup(slot);
|
||||
repair_peers
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, x)| {
|
||||
let peer_stake = slot_peers
|
||||
.as_ref()
|
||||
.and_then(|v| v.read().unwrap().get(&x.id).cloned())
|
||||
.unwrap_or(0);
|
||||
(
|
||||
1 + peer_stake
|
||||
+ self
|
||||
.validator_stakes
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&x.id)
|
||||
.map(|v| v.total_stake)
|
||||
.unwrap_or(0),
|
||||
i,
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_repairs_for_missing_slots(
|
||||
&self,
|
||||
self_id: &Pubkey,
|
||||
root: Slot,
|
||||
) -> Vec<RepairType> {
|
||||
let my_slots = self.collect(self_id);
|
||||
self.cluster_slots
|
||||
.read()
|
||||
.unwrap()
|
||||
.keys()
|
||||
.filter(|x| **x > root)
|
||||
.filter(|x| !my_slots.contains(*x))
|
||||
.map(|x| RepairType::HighestShred(*x, 0))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_runtime::epoch_stakes::NodeVoteAccounts;
|
||||
|
||||
#[test]
|
||||
fn test_default() {
|
||||
let cs = ClusterSlots::default();
|
||||
assert!(cs.cluster_slots.read().unwrap().is_empty());
|
||||
assert!(cs.since.read().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_noop() {
|
||||
let cs = ClusterSlots::default();
|
||||
cs.update_internal(0, (vec![], None));
|
||||
assert!(cs.cluster_slots.read().unwrap().is_empty());
|
||||
assert!(cs.since.read().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_empty() {
|
||||
let cs = ClusterSlots::default();
|
||||
let epoch_slot = EpochSlots::default();
|
||||
cs.update_internal(0, (vec![epoch_slot], Some(0)));
|
||||
assert_eq!(*cs.since.read().unwrap(), Some(0));
|
||||
assert!(cs.lookup(0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_rooted() {
|
||||
//root is 0, so it should clear out the slot
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[0], 0);
|
||||
cs.update_internal(0, (vec![epoch_slot], Some(0)));
|
||||
assert_eq!(*cs.since.read().unwrap(), Some(0));
|
||||
assert!(cs.lookup(0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_new_slot() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[1], 0);
|
||||
cs.update_internal(0, (vec![epoch_slot], Some(0)));
|
||||
assert_eq!(*cs.since.read().unwrap(), Some(0));
|
||||
assert!(cs.lookup(0).is_none());
|
||||
assert!(cs.lookup(1).is_some());
|
||||
assert_eq!(
|
||||
cs.lookup(1)
|
||||
.unwrap()
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&Pubkey::default()),
|
||||
Some(&0)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_weights() {
|
||||
let cs = ClusterSlots::default();
|
||||
let ci = ContactInfo::default();
|
||||
assert_eq!(cs.compute_weights(0, &[ci]), vec![(1, 0)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_best_peer_2() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut c1 = ContactInfo::default();
|
||||
let mut c2 = ContactInfo::default();
|
||||
let mut map = HashMap::new();
|
||||
let k1 = Pubkey::new_rand();
|
||||
let k2 = Pubkey::new_rand();
|
||||
map.insert(Arc::new(k1.clone()), std::u64::MAX / 2);
|
||||
map.insert(Arc::new(k2.clone()), 0);
|
||||
cs.cluster_slots
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(0, Arc::new(RwLock::new(map)));
|
||||
c1.id = k1;
|
||||
c2.id = k2;
|
||||
assert_eq!(
|
||||
cs.compute_weights(0, &[c1, c2]),
|
||||
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_best_peer_3() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut c1 = ContactInfo::default();
|
||||
let mut c2 = ContactInfo::default();
|
||||
let mut map = HashMap::new();
|
||||
let k1 = Pubkey::new_rand();
|
||||
let k2 = Pubkey::new_rand();
|
||||
map.insert(Arc::new(k2.clone()), 0);
|
||||
cs.cluster_slots
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(0, Arc::new(RwLock::new(map)));
|
||||
//make sure default weights are used as well
|
||||
let validator_stakes: HashMap<_, _> = vec![(
|
||||
*Arc::new(k1.clone()),
|
||||
NodeVoteAccounts {
|
||||
total_stake: std::u64::MAX / 2,
|
||||
vote_accounts: vec![Pubkey::default()],
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
*cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
|
||||
c1.id = k1;
|
||||
c2.id = k2;
|
||||
assert_eq!(
|
||||
cs.compute_weights(0, &[c1, c2]),
|
||||
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_new_staked_slot() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[1], 0);
|
||||
|
||||
let map = Arc::new(
|
||||
vec![(
|
||||
Pubkey::default(),
|
||||
NodeVoteAccounts {
|
||||
total_stake: 1,
|
||||
vote_accounts: vec![Pubkey::default()],
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
);
|
||||
|
||||
*cs.validator_stakes.write().unwrap() = map;
|
||||
cs.update_internal(0, (vec![epoch_slot], None));
|
||||
assert!(cs.lookup(1).is_some());
|
||||
assert_eq!(
|
||||
cs.lookup(1)
|
||||
.unwrap()
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&Pubkey::default()),
|
||||
Some(&1)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generate_repairs() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[1], 0);
|
||||
cs.update_internal(0, (vec![epoch_slot], None));
|
||||
let self_id = Pubkey::new_rand();
|
||||
assert_eq!(
|
||||
cs.generate_repairs_for_missing_slots(&self_id, 0),
|
||||
vec![RepairType::HighestShred(1, 0)]
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_collect_my_slots() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[1], 0);
|
||||
let self_id = epoch_slot.from;
|
||||
cs.update_internal(0, (vec![epoch_slot], None));
|
||||
let slots: Vec<Slot> = cs.collect(&self_id).into_iter().collect();
|
||||
assert_eq!(slots, vec![1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generate_repairs_existing() {
|
||||
let cs = ClusterSlots::default();
|
||||
let mut epoch_slot = EpochSlots::default();
|
||||
epoch_slot.fill(&[1], 0);
|
||||
let self_id = epoch_slot.from;
|
||||
cs.update_internal(0, (vec![epoch_slot], None));
|
||||
assert!(cs
|
||||
.generate_repairs_for_missing_slots(&self_id, 0)
|
||||
.is_empty());
|
||||
}
|
||||
}
|
@ -1,3 +1,6 @@
|
||||
use crate::consensus::VOTE_THRESHOLD_SIZE;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
|
||||
@ -31,17 +34,40 @@ impl BlockCommitment {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
#[derive(Default)]
|
||||
pub struct BlockCommitmentCache {
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for BlockCommitmentCache {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("BlockCommitmentCache")
|
||||
.field("block_commitment", &self.block_commitment)
|
||||
.field("total_stake", &self.total_stake)
|
||||
.field(
|
||||
"bank",
|
||||
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
|
||||
)
|
||||
.field("root", &self.root)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockCommitmentCache {
|
||||
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
|
||||
pub fn new(
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
) -> Self {
|
||||
Self {
|
||||
block_commitment,
|
||||
total_stake,
|
||||
bank,
|
||||
root,
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,38 +79,62 @@ impl BlockCommitmentCache {
|
||||
self.total_stake
|
||||
}
|
||||
|
||||
pub fn get_block_with_depth_commitment(
|
||||
&self,
|
||||
minimum_depth: usize,
|
||||
minimum_stake_percentage: f64,
|
||||
) -> Option<Slot> {
|
||||
self.block_commitment
|
||||
.iter()
|
||||
.filter(|&(_, block_commitment)| {
|
||||
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.sum();
|
||||
fork_stake_minimum_depth as f64 / self.total_stake as f64
|
||||
>= minimum_stake_percentage
|
||||
})
|
||||
.map(|(slot, _)| *slot)
|
||||
.max()
|
||||
pub fn bank(&self) -> Arc<Bank> {
|
||||
self.bank.clone()
|
||||
}
|
||||
|
||||
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
|
||||
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.bank.slot()
|
||||
}
|
||||
|
||||
pub fn root(&self) -> Slot {
|
||||
self.root
|
||||
}
|
||||
|
||||
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
|
||||
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
|
||||
}
|
||||
|
||||
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
|
||||
// stake is locked out
|
||||
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
|
||||
self.get_block_commitment(slot).map(|block_commitment| {
|
||||
let iterator = block_commitment.commitment.iter().enumerate().rev();
|
||||
let mut sum = 0;
|
||||
for (i, stake) in iterator {
|
||||
sum += stake;
|
||||
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
|
||||
return i + 1;
|
||||
}
|
||||
}
|
||||
0
|
||||
})
|
||||
}
|
||||
#[cfg(test)]
|
||||
pub fn new_for_tests() -> Self {
|
||||
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
|
||||
block_commitment.insert(0, BlockCommitment::default());
|
||||
Self {
|
||||
block_commitment,
|
||||
total_stake: 42,
|
||||
..Self::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CommitmentAggregationData {
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
}
|
||||
|
||||
impl CommitmentAggregationData {
|
||||
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
|
||||
Self { bank, total_staked }
|
||||
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
|
||||
Self {
|
||||
bank,
|
||||
root,
|
||||
total_staked,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -144,14 +194,24 @@ impl AggregateCommitmentService {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
aggregation_data.root,
|
||||
);
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
aggregate_commitment_time.stop();
|
||||
inc_new_counter_info!(
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -230,7 +290,7 @@ impl AggregateCommitmentService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
@ -246,84 +306,31 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_block_with_depth_commitment() {
|
||||
fn test_get_confirmations() {
|
||||
let bank = Arc::new(Bank::default());
|
||||
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 15);
|
||||
cache0.increase_confirmation_stake(2, 25);
|
||||
cache0.increase_confirmation_stake(1, 5);
|
||||
cache0.increase_confirmation_stake(2, 40);
|
||||
|
||||
let mut cache1 = BlockCommitment::default();
|
||||
cache1.increase_confirmation_stake(1, 10);
|
||||
cache1.increase_confirmation_stake(2, 20);
|
||||
cache1.increase_confirmation_stake(1, 40);
|
||||
cache1.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut cache2 = BlockCommitment::default();
|
||||
cache2.increase_confirmation_stake(1, 20);
|
||||
cache2.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
block_commitment.entry(2).or_insert(cache2.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50, bank, 0);
|
||||
|
||||
// Neither slot has rooted votes
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.1),
|
||||
None
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.6 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
|
||||
None
|
||||
);
|
||||
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
|
||||
Some(1)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at depth 0
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
|
||||
None
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_rooted_block_with_commitment() {
|
||||
// Build BlockCommitmentCache with rooted votes
|
||||
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
|
||||
// Only slot 0 meets the minimum level of commitment 0.66 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.66),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.9),
|
||||
None
|
||||
);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0),);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -1,3 +1,4 @@
|
||||
use crate::progress_map::ProgressMap;
|
||||
use chrono::prelude::*;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::bank::Bank;
|
||||
@ -355,6 +356,17 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn check_switch_threshold(
|
||||
&self,
|
||||
_slot: u64,
|
||||
_ancestors: &HashMap<Slot, HashSet<u64>>,
|
||||
_descendants: &HashMap<Slot, HashSet<u64>>,
|
||||
_progress: &ProgressMap,
|
||||
_total_stake: u64,
|
||||
) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Update lockouts for all the ancestors
|
||||
fn update_ancestor_lockouts(
|
||||
stake_lockouts: &mut HashMap<Slot, StakeLockout>,
|
||||
@ -468,7 +480,12 @@ impl Tower {
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::replay_stage::{ForkProgress, ReplayStage};
|
||||
use crate::{
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
@ -511,9 +528,9 @@ pub mod test {
|
||||
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
|
||||
validator_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
my_keypairs: &ValidatorVoteKeypairs,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
progress: &mut ProgressMap,
|
||||
tower: &mut Tower,
|
||||
) -> Vec<VoteFailures> {
|
||||
) -> Vec<HeaviestForkFailures> {
|
||||
let node = self
|
||||
.find_node_and_update_simulation(vote_slot)
|
||||
.expect("Vote to simulate must be for a slot in the tree");
|
||||
@ -550,7 +567,7 @@ pub mod test {
|
||||
info!("parent of {} is {}", missing_slot, parent_bank.slot(),);
|
||||
progress
|
||||
.entry(missing_slot)
|
||||
.or_insert_with(|| ForkProgress::new(parent_bank.last_blockhash()));
|
||||
.or_insert_with(|| ForkProgress::new(parent_bank.last_blockhash(), None, None));
|
||||
|
||||
// Create the missing bank
|
||||
let new_bank =
|
||||
@ -595,6 +612,10 @@ pub mod test {
|
||||
&mut frozen_banks,
|
||||
tower,
|
||||
progress,
|
||||
&VoteTracker::default(),
|
||||
&ClusterSlots::default(),
|
||||
bank_forks,
|
||||
&mut HashSet::new(),
|
||||
);
|
||||
|
||||
let bank = bank_forks
|
||||
@ -611,17 +632,24 @@ pub mod test {
|
||||
info!("lockouts: {:?}", fork_progress.fork_stats.stake_lockouts);
|
||||
let mut failures = vec![];
|
||||
if fork_progress.fork_stats.is_locked_out {
|
||||
failures.push(VoteFailures::LockedOut(vote_slot));
|
||||
failures.push(HeaviestForkFailures::LockedOut(vote_slot));
|
||||
}
|
||||
if !fork_progress.fork_stats.vote_threshold {
|
||||
failures.push(VoteFailures::FailedThreshold(vote_slot));
|
||||
failures.push(HeaviestForkFailures::FailedThreshold(vote_slot));
|
||||
}
|
||||
if !failures.is_empty() {
|
||||
return failures;
|
||||
}
|
||||
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
|
||||
if let Some(new_root) = tower.record_bank_vote(vote) {
|
||||
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None);
|
||||
ReplayStage::handle_new_root(
|
||||
new_root,
|
||||
bank_forks,
|
||||
progress,
|
||||
&None,
|
||||
&mut 0,
|
||||
&mut HashSet::new(),
|
||||
);
|
||||
}
|
||||
|
||||
// Mark the vote for this bank under this node's pubkey so it will be
|
||||
@ -671,22 +699,17 @@ pub mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub(crate) enum VoteFailures {
|
||||
LockedOut(u64),
|
||||
FailedThreshold(u64),
|
||||
}
|
||||
|
||||
// Setup BankForks with bank 0 and all the validator accounts
|
||||
pub(crate) fn initialize_state(
|
||||
validator_keypairs_map: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
) -> (BankForks, HashMap<u64, ForkProgress>) {
|
||||
stake: u64,
|
||||
) -> (BankForks, ProgressMap) {
|
||||
let validator_keypairs: Vec<_> = validator_keypairs_map.values().collect();
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
voting_keypair: _,
|
||||
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs);
|
||||
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs, stake);
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
|
||||
@ -695,8 +718,8 @@ pub mod test {
|
||||
}
|
||||
|
||||
bank0.freeze();
|
||||
let mut progress = HashMap::new();
|
||||
progress.insert(0, ForkProgress::new(bank0.last_blockhash()));
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(0, ForkProgress::new(bank0.last_blockhash(), None, None));
|
||||
(BankForks::new(0, bank0), progress)
|
||||
}
|
||||
|
||||
@ -728,7 +751,7 @@ pub mod test {
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
|
||||
keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
progress: &mut ProgressMap,
|
||||
) -> bool {
|
||||
// Check that within some reasonable time, validator can make a new
|
||||
// root on this fork
|
||||
@ -773,7 +796,7 @@ pub mod test {
|
||||
);
|
||||
|
||||
// Initialize BankForks
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs);
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
|
||||
let bank_forks = RwLock::new(bank_forks);
|
||||
|
||||
// Create the tree of banks
|
||||
@ -853,7 +876,7 @@ pub mod test {
|
||||
votes.extend((45..=50).into_iter());
|
||||
|
||||
let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs);
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
|
||||
let bank_forks = RwLock::new(bank_forks);
|
||||
|
||||
// Simulate the votes. Should fail on trying to come back to the main fork
|
||||
|
@ -1,5 +1,8 @@
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::deprecated;
|
||||
use crate::epoch_slots::EpochSlots;
|
||||
use bincode::{serialize, serialized_size};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
hash::Hash,
|
||||
@ -16,7 +19,8 @@ use std::{
|
||||
pub type VoteIndex = u8;
|
||||
pub const MAX_VOTES: VoteIndex = 32;
|
||||
|
||||
pub type EpochSlotIndex = u8;
|
||||
pub type EpochSlotsIndex = u8;
|
||||
pub const MAX_EPOCH_SLOTS: EpochSlotsIndex = 255;
|
||||
|
||||
/// CrdsValue that is replicated across the cluster
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
@ -48,6 +52,7 @@ impl Signable for CrdsValue {
|
||||
.verify(&self.pubkey().as_ref(), self.signable_data().borrow());
|
||||
let data_check = match &self.data {
|
||||
CrdsData::Vote(ix, _) => *ix < MAX_VOTES,
|
||||
CrdsData::EpochSlots(ix, _) => *ix < MAX_EPOCH_SLOTS,
|
||||
_ => true,
|
||||
};
|
||||
sig_check && data_check
|
||||
@ -56,33 +61,16 @@ impl Signable for CrdsValue {
|
||||
|
||||
/// CrdsData that defines the different types of items CrdsValues can hold
|
||||
/// * Merge Strategy - Latest wallclock is picked
|
||||
/// * LowestSlot index is deprecated
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub enum CrdsData {
|
||||
ContactInfo(ContactInfo),
|
||||
Vote(VoteIndex, Vote),
|
||||
EpochSlots(EpochSlotIndex, EpochSlots),
|
||||
SnapshotHash(SnapshotHash),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub enum CompressionType {
|
||||
Uncompressed,
|
||||
GZip,
|
||||
BZip2,
|
||||
}
|
||||
|
||||
impl Default for CompressionType {
|
||||
fn default() -> Self {
|
||||
Self::Uncompressed
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
|
||||
pub struct EpochIncompleteSlots {
|
||||
pub first: Slot,
|
||||
pub compression: CompressionType,
|
||||
pub compressed_list: Vec<u8>,
|
||||
LowestSlot(u8, LowestSlot),
|
||||
SnapshotHashes(SnapshotHash),
|
||||
EpochSlots(EpochSlotsIndex, EpochSlots),
|
||||
AccountsHashes(SnapshotHash),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
@ -93,40 +81,32 @@ pub struct SnapshotHash {
|
||||
}
|
||||
|
||||
impl SnapshotHash {
|
||||
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>, wallclock: u64) -> Self {
|
||||
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>) -> Self {
|
||||
Self {
|
||||
from,
|
||||
hashes,
|
||||
wallclock,
|
||||
wallclock: timestamp(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct EpochSlots {
|
||||
pub struct LowestSlot {
|
||||
pub from: Pubkey,
|
||||
pub root: Slot,
|
||||
root: Slot, //deprecated
|
||||
pub lowest: Slot,
|
||||
pub slots: BTreeSet<Slot>,
|
||||
pub stash: Vec<EpochIncompleteSlots>,
|
||||
slots: BTreeSet<Slot>, //deprecated
|
||||
stash: Vec<deprecated::EpochIncompleteSlots>, //deprecated
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl EpochSlots {
|
||||
pub fn new(
|
||||
from: Pubkey,
|
||||
root: Slot,
|
||||
lowest: Slot,
|
||||
slots: BTreeSet<Slot>,
|
||||
stash: Vec<EpochIncompleteSlots>,
|
||||
wallclock: u64,
|
||||
) -> Self {
|
||||
impl LowestSlot {
|
||||
pub fn new(from: Pubkey, lowest: Slot, wallclock: u64) -> Self {
|
||||
Self {
|
||||
from,
|
||||
root,
|
||||
root: 0,
|
||||
lowest,
|
||||
slots,
|
||||
stash,
|
||||
slots: BTreeSet::new(),
|
||||
stash: vec![],
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
@ -155,8 +135,10 @@ impl Vote {
|
||||
pub enum CrdsValueLabel {
|
||||
ContactInfo(Pubkey),
|
||||
Vote(VoteIndex, Pubkey),
|
||||
EpochSlots(Pubkey),
|
||||
SnapshotHash(Pubkey),
|
||||
LowestSlot(Pubkey),
|
||||
SnapshotHashes(Pubkey),
|
||||
EpochSlots(EpochSlotsIndex, Pubkey),
|
||||
AccountsHashes(Pubkey),
|
||||
}
|
||||
|
||||
impl fmt::Display for CrdsValueLabel {
|
||||
@ -164,8 +146,10 @@ impl fmt::Display for CrdsValueLabel {
|
||||
match self {
|
||||
CrdsValueLabel::ContactInfo(_) => write!(f, "ContactInfo({})", self.pubkey()),
|
||||
CrdsValueLabel::Vote(ix, _) => write!(f, "Vote({}, {})", ix, self.pubkey()),
|
||||
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
|
||||
CrdsValueLabel::SnapshotHash(_) => write!(f, "SnapshotHash({})", self.pubkey()),
|
||||
CrdsValueLabel::LowestSlot(_) => write!(f, "LowestSlot({})", self.pubkey()),
|
||||
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHash({})", self.pubkey()),
|
||||
CrdsValueLabel::EpochSlots(ix, _) => write!(f, "EpochSlots({}, {})", ix, self.pubkey()),
|
||||
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -175,8 +159,10 @@ impl CrdsValueLabel {
|
||||
match self {
|
||||
CrdsValueLabel::ContactInfo(p) => *p,
|
||||
CrdsValueLabel::Vote(_, p) => *p,
|
||||
CrdsValueLabel::EpochSlots(p) => *p,
|
||||
CrdsValueLabel::SnapshotHash(p) => *p,
|
||||
CrdsValueLabel::LowestSlot(p) => *p,
|
||||
CrdsValueLabel::SnapshotHashes(p) => *p,
|
||||
CrdsValueLabel::EpochSlots(_, p) => *p,
|
||||
CrdsValueLabel::AccountsHashes(p) => *p,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -201,24 +187,30 @@ impl CrdsValue {
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
|
||||
CrdsData::Vote(_, vote) => vote.wallclock,
|
||||
CrdsData::EpochSlots(_, vote) => vote.wallclock,
|
||||
CrdsData::SnapshotHash(hash) => hash.wallclock,
|
||||
CrdsData::LowestSlot(_, obj) => obj.wallclock,
|
||||
CrdsData::SnapshotHashes(hash) => hash.wallclock,
|
||||
CrdsData::EpochSlots(_, p) => p.wallclock,
|
||||
CrdsData::AccountsHashes(hash) => hash.wallclock,
|
||||
}
|
||||
}
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(contact_info) => contact_info.id,
|
||||
CrdsData::Vote(_, vote) => vote.from,
|
||||
CrdsData::EpochSlots(_, slots) => slots.from,
|
||||
CrdsData::SnapshotHash(hash) => hash.from,
|
||||
CrdsData::LowestSlot(_, slots) => slots.from,
|
||||
CrdsData::SnapshotHashes(hash) => hash.from,
|
||||
CrdsData::EpochSlots(_, p) => p.from,
|
||||
CrdsData::AccountsHashes(hash) => hash.from,
|
||||
}
|
||||
}
|
||||
pub fn label(&self) -> CrdsValueLabel {
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
|
||||
CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
|
||||
CrdsData::EpochSlots(_, _) => CrdsValueLabel::EpochSlots(self.pubkey()),
|
||||
CrdsData::SnapshotHash(_) => CrdsValueLabel::SnapshotHash(self.pubkey()),
|
||||
CrdsData::LowestSlot(_, _) => CrdsValueLabel::LowestSlot(self.pubkey()),
|
||||
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
|
||||
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
|
||||
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
|
||||
}
|
||||
}
|
||||
pub fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
@ -241,16 +233,30 @@ impl CrdsValue {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn epoch_slots(&self) -> Option<&EpochSlots> {
|
||||
pub fn lowest_slot(&self) -> Option<&LowestSlot> {
|
||||
match &self.data {
|
||||
CrdsData::EpochSlots(_, slots) => Some(slots),
|
||||
CrdsData::LowestSlot(_, slots) => Some(slots),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn snapshot_hash(&self) -> Option<&SnapshotHash> {
|
||||
match &self.data {
|
||||
CrdsData::SnapshotHash(slots) => Some(slots),
|
||||
CrdsData::SnapshotHashes(slots) => Some(slots),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn accounts_hash(&self) -> Option<&SnapshotHash> {
|
||||
match &self.data {
|
||||
CrdsData::AccountsHashes(slots) => Some(slots),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn epoch_slots(&self) -> Option<&EpochSlots> {
|
||||
match &self.data {
|
||||
CrdsData::EpochSlots(_, slots) => Some(slots),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@ -259,10 +265,12 @@ impl CrdsValue {
|
||||
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
|
||||
let mut labels = vec![
|
||||
CrdsValueLabel::ContactInfo(*key),
|
||||
CrdsValueLabel::EpochSlots(*key),
|
||||
CrdsValueLabel::SnapshotHash(*key),
|
||||
CrdsValueLabel::LowestSlot(*key),
|
||||
CrdsValueLabel::SnapshotHashes(*key),
|
||||
CrdsValueLabel::AccountsHashes(*key),
|
||||
];
|
||||
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
|
||||
labels.extend((0..MAX_EPOCH_SLOTS).map(|ix| CrdsValueLabel::EpochSlots(ix, *key)));
|
||||
labels
|
||||
}
|
||||
|
||||
@ -310,14 +318,18 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_labels() {
|
||||
let mut hits = [false; 3 + MAX_VOTES as usize];
|
||||
let mut hits = [false; 4 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
|
||||
// this method should cover all the possible labels
|
||||
for v in &CrdsValue::record_labels(&Pubkey::default()) {
|
||||
match v {
|
||||
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
|
||||
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
|
||||
CrdsValueLabel::SnapshotHash(_) => hits[2] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 3] = true,
|
||||
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
|
||||
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
|
||||
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
|
||||
CrdsValueLabel::EpochSlots(ix, _) => {
|
||||
hits[*ix as usize + MAX_VOTES as usize + 4] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(hits.iter().all(|x| *x));
|
||||
@ -337,13 +349,13 @@ mod test {
|
||||
let key = v.clone().vote().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));
|
||||
|
||||
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
|
||||
0,
|
||||
EpochSlots::new(Pubkey::default(), 0, 0, BTreeSet::new(), vec![], 0),
|
||||
LowestSlot::new(Pubkey::default(), 0, 0),
|
||||
));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().epoch_slots().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key));
|
||||
let key = v.clone().lowest_slot().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -360,10 +372,9 @@ mod test {
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
let btreeset: BTreeSet<Slot> = vec![1, 2, 3, 6, 8].into_iter().collect();
|
||||
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
|
||||
0,
|
||||
EpochSlots::new(keypair.pubkey(), 0, 0, btreeset, vec![], timestamp()),
|
||||
LowestSlot::new(keypair.pubkey(), 0, timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
}
|
||||
@ -381,6 +392,18 @@ mod test {
|
||||
assert!(!vote.verify());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_epoch_slots_index() {
|
||||
let keypair = Keypair::new();
|
||||
let item = CrdsValue::new_signed(
|
||||
CrdsData::EpochSlots(
|
||||
MAX_EPOCH_SLOTS,
|
||||
EpochSlots::new(keypair.pubkey(), timestamp()),
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert!(!item.verify());
|
||||
}
|
||||
#[test]
|
||||
fn test_compute_vote_index_empty() {
|
||||
for i in 0..MAX_VOTES {
|
||||
|
21
core/src/deprecated.rs
Normal file
21
core/src/deprecated.rs
Normal file
@ -0,0 +1,21 @@
|
||||
use solana_sdk::clock::Slot;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
enum CompressionType {
|
||||
Uncompressed,
|
||||
GZip,
|
||||
BZip2,
|
||||
}
|
||||
|
||||
impl Default for CompressionType {
|
||||
fn default() -> Self {
|
||||
Self::Uncompressed
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
|
||||
pub(crate) struct EpochIncompleteSlots {
|
||||
first: Slot,
|
||||
compression: CompressionType,
|
||||
compressed_list: Vec<u8>,
|
||||
}
|
401
core/src/epoch_slots.rs
Normal file
401
core/src/epoch_slots.rs
Normal file
@ -0,0 +1,401 @@
|
||||
use crate::cluster_info::MAX_CRDS_OBJECT_SIZE;
|
||||
use bincode::serialized_size;
|
||||
use bv::BitVec;
|
||||
use flate2::{Compress, Compression, Decompress, FlushCompress, FlushDecompress};
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct Uncompressed {
|
||||
pub first_slot: Slot,
|
||||
pub num: usize,
|
||||
pub slots: BitVec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
|
||||
pub struct Flate2 {
|
||||
pub first_slot: Slot,
|
||||
pub num: usize,
|
||||
pub compressed: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
CompressError,
|
||||
DecompressError,
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl std::convert::From<flate2::CompressError> for Error {
|
||||
fn from(_e: flate2::CompressError) -> Error {
|
||||
Error::CompressError
|
||||
}
|
||||
}
|
||||
impl std::convert::From<flate2::DecompressError> for Error {
|
||||
fn from(_e: flate2::DecompressError) -> Error {
|
||||
Error::DecompressError
|
||||
}
|
||||
}
|
||||
|
||||
impl Flate2 {
|
||||
fn deflate(mut unc: Uncompressed) -> Result<Self> {
|
||||
let mut compressed = Vec::with_capacity(unc.slots.block_capacity());
|
||||
let mut compressor = Compress::new(Compression::best(), false);
|
||||
let first_slot = unc.first_slot;
|
||||
let num = unc.num;
|
||||
unc.slots.shrink_to_fit();
|
||||
let bits = unc.slots.into_boxed_slice();
|
||||
compressor.compress_vec(&bits, &mut compressed, FlushCompress::Finish)?;
|
||||
let rv = Self {
|
||||
first_slot,
|
||||
num,
|
||||
compressed,
|
||||
};
|
||||
let _ = rv.inflate()?;
|
||||
Ok(rv)
|
||||
}
|
||||
pub fn inflate(&self) -> Result<Uncompressed> {
|
||||
//add some head room for the decompressor which might spill more bits
|
||||
let mut uncompressed = Vec::with_capacity(32 + (self.num + 4) / 8);
|
||||
let mut decompress = Decompress::new(false);
|
||||
decompress.decompress_vec(&self.compressed, &mut uncompressed, FlushDecompress::Finish)?;
|
||||
Ok(Uncompressed {
|
||||
first_slot: self.first_slot,
|
||||
num: self.num,
|
||||
slots: BitVec::from_bits(&uncompressed),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Uncompressed {
|
||||
pub fn new(max_size: usize) -> Self {
|
||||
Self {
|
||||
num: 0,
|
||||
first_slot: 0,
|
||||
slots: BitVec::new_fill(false, 8 * max_size as u64),
|
||||
}
|
||||
}
|
||||
pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
|
||||
let mut rv = vec![];
|
||||
let start = if min_slot < self.first_slot {
|
||||
0 as usize
|
||||
} else {
|
||||
(min_slot - self.first_slot) as usize
|
||||
};
|
||||
for i in start..self.num {
|
||||
if self.slots.get(i as u64) {
|
||||
rv.push(self.first_slot + i as Slot);
|
||||
}
|
||||
}
|
||||
rv
|
||||
}
|
||||
pub fn add(&mut self, slots: &[Slot]) -> usize {
|
||||
for (i, s) in slots.iter().enumerate() {
|
||||
if self.num == 0 {
|
||||
self.first_slot = *s;
|
||||
}
|
||||
if *s < self.first_slot {
|
||||
return i;
|
||||
}
|
||||
if *s - self.first_slot >= self.slots.capacity() {
|
||||
return i;
|
||||
}
|
||||
self.slots.set(*s - self.first_slot, true);
|
||||
self.num = std::cmp::max(self.num, 1 + (*s - self.first_slot) as usize);
|
||||
}
|
||||
slots.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub enum CompressedSlots {
|
||||
Flate2(Flate2),
|
||||
Uncompressed(Uncompressed),
|
||||
}
|
||||
|
||||
impl Default for CompressedSlots {
|
||||
fn default() -> Self {
|
||||
CompressedSlots::new(0)
|
||||
}
|
||||
}
|
||||
|
||||
impl CompressedSlots {
|
||||
fn new(max_size: usize) -> Self {
|
||||
CompressedSlots::Uncompressed(Uncompressed::new(max_size))
|
||||
}
|
||||
|
||||
pub fn first_slot(&self) -> Slot {
|
||||
match self {
|
||||
CompressedSlots::Uncompressed(a) => a.first_slot,
|
||||
CompressedSlots::Flate2(b) => b.first_slot,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_slots(&self) -> usize {
|
||||
match self {
|
||||
CompressedSlots::Uncompressed(a) => a.num,
|
||||
CompressedSlots::Flate2(b) => b.num,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add(&mut self, slots: &[Slot]) -> usize {
|
||||
match self {
|
||||
CompressedSlots::Uncompressed(vals) => vals.add(slots),
|
||||
CompressedSlots::Flate2(_) => 0,
|
||||
}
|
||||
}
|
||||
pub fn to_slots(&self, min_slot: Slot) -> Result<Vec<Slot>> {
|
||||
match self {
|
||||
CompressedSlots::Uncompressed(vals) => Ok(vals.to_slots(min_slot)),
|
||||
CompressedSlots::Flate2(vals) => {
|
||||
let unc = vals.inflate()?;
|
||||
Ok(unc.to_slots(min_slot))
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn deflate(&mut self) -> Result<()> {
|
||||
match self {
|
||||
CompressedSlots::Uncompressed(vals) => {
|
||||
let unc = vals.clone();
|
||||
let compressed = Flate2::deflate(unc)?;
|
||||
let mut new = CompressedSlots::Flate2(compressed);
|
||||
std::mem::swap(self, &mut new);
|
||||
Ok(())
|
||||
}
|
||||
CompressedSlots::Flate2(_) => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
|
||||
pub struct EpochSlots {
|
||||
pub from: Pubkey,
|
||||
pub slots: Vec<CompressedSlots>,
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl EpochSlots {
|
||||
pub fn new(from: Pubkey, now: u64) -> Self {
|
||||
Self {
|
||||
from,
|
||||
wallclock: now,
|
||||
slots: vec![],
|
||||
}
|
||||
}
|
||||
pub fn fill(&mut self, slots: &[Slot], now: u64) -> usize {
|
||||
let mut num = 0;
|
||||
self.wallclock = std::cmp::max(now, self.wallclock + 1);
|
||||
while num < slots.len() {
|
||||
num += self.add(&slots[num..]);
|
||||
if num < slots.len() {
|
||||
if self.deflate().is_err() {
|
||||
return num;
|
||||
}
|
||||
let space = self.max_compressed_slot_size();
|
||||
if space > 0 {
|
||||
let cslot = CompressedSlots::new(space as usize);
|
||||
self.slots.push(cslot);
|
||||
} else {
|
||||
return num;
|
||||
}
|
||||
}
|
||||
}
|
||||
num
|
||||
}
|
||||
pub fn add(&mut self, slots: &[Slot]) -> usize {
|
||||
let mut num = 0;
|
||||
for s in &mut self.slots {
|
||||
num += s.add(&slots[num..]);
|
||||
if num >= slots.len() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
num
|
||||
}
|
||||
pub fn deflate(&mut self) -> Result<()> {
|
||||
for s in self.slots.iter_mut() {
|
||||
s.deflate()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
pub fn max_compressed_slot_size(&self) -> isize {
|
||||
let len_header = serialized_size(self).unwrap();
|
||||
let len_slot = serialized_size(&CompressedSlots::default()).unwrap();
|
||||
MAX_CRDS_OBJECT_SIZE as isize - (len_header + len_slot) as isize
|
||||
}
|
||||
|
||||
pub fn first_slot(&self) -> Option<Slot> {
|
||||
self.slots.iter().map(|s| s.first_slot()).min()
|
||||
}
|
||||
|
||||
pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
|
||||
self.slots
|
||||
.iter()
|
||||
.filter(|s| min_slot < s.first_slot() + s.num_slots() as u64)
|
||||
.filter_map(|s| s.to_slots(min_slot).ok())
|
||||
.flatten()
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_epoch_slots_max_size() {
|
||||
let epoch_slots = EpochSlots::default();
|
||||
assert!(epoch_slots.max_compressed_slot_size() > 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_1() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
assert_eq!(slots.slots.capacity(), 8);
|
||||
assert_eq!(slots.add(&[1]), 1);
|
||||
assert_eq!(slots.to_slots(1), vec![1]);
|
||||
assert!(slots.to_slots(2).is_empty());
|
||||
}
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_2() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
assert_eq!(slots.add(&[1, 2]), 2);
|
||||
assert_eq!(slots.to_slots(1), vec![1, 2]);
|
||||
}
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_3a() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
assert_eq!(slots.add(&[1, 3, 2]), 3);
|
||||
assert_eq!(slots.to_slots(1), vec![1, 2, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_3b() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
assert_eq!(slots.add(&[1, 10, 2]), 1);
|
||||
assert_eq!(slots.to_slots(1), vec![1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_3c() {
|
||||
let mut slots = Uncompressed::new(2);
|
||||
assert_eq!(slots.add(&[1, 10, 2]), 3);
|
||||
assert_eq!(slots.to_slots(1), vec![1, 2, 10]);
|
||||
assert_eq!(slots.to_slots(2), vec![2, 10]);
|
||||
assert_eq!(slots.to_slots(3), vec![10]);
|
||||
assert_eq!(slots.to_slots(11).is_empty(), true);
|
||||
}
|
||||
#[test]
|
||||
fn test_epoch_slots_compressed() {
|
||||
let mut slots = Uncompressed::new(100);
|
||||
slots.add(&[1, 701, 2]);
|
||||
assert_eq!(slots.num, 701);
|
||||
let compressed = Flate2::deflate(slots).unwrap();
|
||||
assert_eq!(compressed.first_slot, 1);
|
||||
assert_eq!(compressed.num, 701);
|
||||
assert!(compressed.compressed.len() < 32);
|
||||
let slots = compressed.inflate().unwrap();
|
||||
assert_eq!(slots.first_slot, 1);
|
||||
assert_eq!(slots.num, 701);
|
||||
assert_eq!(slots.to_slots(1), vec![1, 2, 701]);
|
||||
}
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_range() {
|
||||
let range: Vec<Slot> = (0..5000).into_iter().collect();
|
||||
let mut slots = EpochSlots::default();
|
||||
assert_eq!(slots.fill(&range, 1), 5000);
|
||||
assert_eq!(slots.wallclock, 1);
|
||||
assert_eq!(slots.to_slots(0), range);
|
||||
assert_eq!(slots.to_slots(4999), vec![4999]);
|
||||
assert_eq!(slots.to_slots(5000).is_empty(), true);
|
||||
}
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_sparce_range() {
|
||||
let range: Vec<Slot> = (0..5000).into_iter().map(|x| x * 3).collect();
|
||||
let mut slots = EpochSlots::default();
|
||||
assert_eq!(slots.fill(&range, 2), 5000);
|
||||
assert_eq!(slots.wallclock, 2);
|
||||
assert_eq!(slots.slots.len(), 3);
|
||||
assert_eq!(slots.slots[0].first_slot(), 0);
|
||||
assert_ne!(slots.slots[0].num_slots(), 0);
|
||||
let next = slots.slots[0].num_slots() as u64 + slots.slots[0].first_slot();
|
||||
assert!(slots.slots[1].first_slot() >= next);
|
||||
assert_ne!(slots.slots[1].num_slots(), 0);
|
||||
assert_ne!(slots.slots[2].num_slots(), 0);
|
||||
assert_eq!(slots.to_slots(0), range);
|
||||
assert_eq!(slots.to_slots(4999 * 3), vec![4999 * 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_large_sparce_range() {
|
||||
let range: Vec<Slot> = (0..5000).into_iter().map(|x| x * 7).collect();
|
||||
let mut slots = EpochSlots::default();
|
||||
assert_eq!(slots.fill(&range, 2), 5000);
|
||||
assert_eq!(slots.to_slots(0), range);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_uncompressed_random_range() {
|
||||
use rand::Rng;
|
||||
for _ in 0..10 {
|
||||
let mut range: Vec<Slot> = vec![];
|
||||
for _ in 0..5000 {
|
||||
let last = *range.last().unwrap_or(&0);
|
||||
range.push(last + rand::thread_rng().gen_range(1, 5));
|
||||
}
|
||||
let sz = EpochSlots::default().max_compressed_slot_size();
|
||||
let mut slots = Uncompressed::new(sz as usize);
|
||||
let sz = slots.add(&range);
|
||||
let slots = slots.to_slots(0);
|
||||
assert_eq!(slots.len(), sz);
|
||||
assert_eq!(slots[..], range[..sz]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_compressed_random_range() {
|
||||
use rand::Rng;
|
||||
for _ in 0..10 {
|
||||
let mut range: Vec<Slot> = vec![];
|
||||
for _ in 0..5000 {
|
||||
let last = *range.last().unwrap_or(&0);
|
||||
range.push(last + rand::thread_rng().gen_range(1, 5));
|
||||
}
|
||||
let sz = EpochSlots::default().max_compressed_slot_size();
|
||||
let mut slots = Uncompressed::new(sz as usize);
|
||||
let sz = slots.add(&range);
|
||||
let mut slots = CompressedSlots::Uncompressed(slots);
|
||||
slots.deflate().unwrap();
|
||||
let slots = slots.to_slots(0).unwrap();
|
||||
assert_eq!(slots.len(), sz);
|
||||
assert_eq!(slots[..], range[..sz]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_fill_random_range() {
|
||||
use rand::Rng;
|
||||
for _ in 0..10 {
|
||||
let mut range: Vec<Slot> = vec![];
|
||||
for _ in 0..5000 {
|
||||
let last = *range.last().unwrap_or(&0);
|
||||
range.push(last + rand::thread_rng().gen_range(1, 5));
|
||||
}
|
||||
let mut slots = EpochSlots::default();
|
||||
let sz = slots.fill(&range, 1);
|
||||
let last = range[sz - 1];
|
||||
assert_eq!(
|
||||
last,
|
||||
slots.slots.last().unwrap().first_slot()
|
||||
+ slots.slots.last().unwrap().num_slots() as u64
|
||||
- 1
|
||||
);
|
||||
for s in &slots.slots {
|
||||
assert!(s.to_slots(0).is_ok());
|
||||
}
|
||||
let slots = slots.to_slots(0);
|
||||
assert_eq!(slots[..], range[..slots.len()]);
|
||||
assert_eq!(sz, slots.len())
|
||||
}
|
||||
}
|
||||
}
|
@ -1,14 +1,14 @@
|
||||
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
|
||||
|
||||
use crate::banking_stage::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET;
|
||||
use crate::packet::PacketsRecycler;
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use crate::result::{Error, Result};
|
||||
use crate::streamer::{self, PacketReceiver, PacketSender};
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
|
||||
use solana_perf::packet::PacketsRecycler;
|
||||
use solana_perf::recycler::Recycler;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_streamer::streamer::{self, PacketReceiver, PacketSender};
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::{channel, RecvTimeoutError};
|
||||
|
@ -1 +0,0 @@
|
||||
pub use solana_ledger::genesis_utils::*;
|
@ -2,13 +2,13 @@
|
||||
|
||||
use crate::cluster_info::{ClusterInfo, VALIDATOR_PORT_RANGE};
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::streamer;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_client::thin_client::{create_client, ThinClient};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_perf::recycler::Recycler;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_streamer::streamer;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
|
@ -1,6 +1,8 @@
|
||||
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
|
||||
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::blockstore_db::Result as BlockstoreResult;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_debug;
|
||||
use solana_sdk::clock::Slot;
|
||||
use std::string::ToString;
|
||||
@ -11,13 +13,22 @@ use std::thread;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
// - To try and keep the RocksDB size under 512GB:
|
||||
// Seeing about 1600b/shred, using 2000b/shred for margin, so 250m shreds can be stored in 512gb.
|
||||
// at 5k shreds/slot at 50k tps, this is 500k slots (~5.5 hours).
|
||||
// At idle, 60 shreds/slot this is about 4m slots (18 days)
|
||||
// This is chosen to allow enough time for
|
||||
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
|
||||
// - A validator to download a snapshot from a peer and boot from it
|
||||
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
|
||||
// to catch back up to where it was when it stopped
|
||||
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
|
||||
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
|
||||
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 250_000_000;
|
||||
|
||||
// Check for removing slots at this interval so we don't purge too often
|
||||
// and starve other blockstore users.
|
||||
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
|
||||
|
||||
// Remove a limited number of slots at a time, so the operation
|
||||
// does not take too long and block other blockstore users.
|
||||
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
|
||||
|
||||
pub struct LedgerCleanupService {
|
||||
@ -36,7 +47,7 @@ impl LedgerCleanupService {
|
||||
max_ledger_slots
|
||||
);
|
||||
let exit = exit.clone();
|
||||
let mut next_purge_batch = max_ledger_slots;
|
||||
let mut last_purge_slot = 0;
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-ledger-cleanup".to_string())
|
||||
.spawn(move || loop {
|
||||
@ -47,7 +58,8 @@ impl LedgerCleanupService {
|
||||
&new_root_receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
&mut next_purge_batch,
|
||||
&mut last_purge_slot,
|
||||
DEFAULT_PURGE_SLOT_INTERVAL,
|
||||
) {
|
||||
match e {
|
||||
RecvTimeoutError::Disconnected => break,
|
||||
@ -59,45 +71,123 @@ impl LedgerCleanupService {
|
||||
Self { t_cleanup }
|
||||
}
|
||||
|
||||
fn find_slots_to_clean(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
root: Slot,
|
||||
max_ledger_shreds: u64,
|
||||
) -> (u64, Slot, Slot) {
|
||||
let mut shreds = Vec::new();
|
||||
let mut iterate_time = Measure::start("iterate_time");
|
||||
let mut total_shreds = 0;
|
||||
let mut first_slot = 0;
|
||||
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
|
||||
if i == 0 {
|
||||
first_slot = slot;
|
||||
debug!("purge: searching from slot: {}", slot);
|
||||
}
|
||||
// Not exact since non-full slots will have holes
|
||||
total_shreds += meta.received;
|
||||
shreds.push((slot, meta.received));
|
||||
if slot > root {
|
||||
break;
|
||||
}
|
||||
}
|
||||
iterate_time.stop();
|
||||
info!(
|
||||
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
|
||||
max_ledger_shreds,
|
||||
shreds.len(),
|
||||
total_shreds,
|
||||
iterate_time
|
||||
);
|
||||
if (total_shreds as u64) < max_ledger_shreds {
|
||||
return (0, 0, 0);
|
||||
}
|
||||
let mut cur_shreds = 0;
|
||||
let mut lowest_slot_to_clean = shreds[0].0;
|
||||
for (slot, num_shreds) in shreds.iter().rev() {
|
||||
cur_shreds += *num_shreds as u64;
|
||||
if cur_shreds > max_ledger_shreds {
|
||||
lowest_slot_to_clean = *slot;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
(cur_shreds, lowest_slot_to_clean, first_slot)
|
||||
}
|
||||
|
||||
fn cleanup_ledger(
|
||||
new_root_receiver: &Receiver<Slot>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
max_ledger_slots: u64,
|
||||
next_purge_batch: &mut u64,
|
||||
max_ledger_shreds: u64,
|
||||
last_purge_slot: &mut u64,
|
||||
purge_interval: u64,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
|
||||
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
// Notify blockstore of impending purge
|
||||
if root > *next_purge_batch {
|
||||
//cleanup
|
||||
let lowest_slot = root - max_ledger_slots;
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
|
||||
blockstore.purge_slots(0, Some(lowest_slot));
|
||||
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
|
||||
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
// Get the newest root
|
||||
while let Ok(new_root) = new_root_receiver.try_recv() {
|
||||
root = new_root;
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
|
||||
(disk_utilization_pre, disk_utilization_post)
|
||||
{
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", disk_utilization_pre as i64, i64),
|
||||
("disk_utilization_post", disk_utilization_post as i64, i64),
|
||||
(
|
||||
"disk_utilization_delta",
|
||||
(disk_utilization_pre as i64 - disk_utilization_post as i64),
|
||||
i64
|
||||
)
|
||||
if root - *last_purge_slot > purge_interval {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
info!(
|
||||
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
|
||||
root, last_purge_slot, purge_interval, disk_utilization_pre
|
||||
);
|
||||
*last_purge_slot = root;
|
||||
|
||||
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
|
||||
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
|
||||
|
||||
if num_shreds_to_clean > 0 {
|
||||
debug!(
|
||||
"cleaning up to: {} shreds: {} first: {}",
|
||||
lowest_slot_to_clean, num_shreds_to_clean, first_slot
|
||||
);
|
||||
loop {
|
||||
let current_lowest =
|
||||
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
|
||||
|
||||
let mut slot_update_time = Measure::start("slot_update");
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
|
||||
slot_update_time.stop();
|
||||
|
||||
let mut clean_time = Measure::start("ledger_clean");
|
||||
blockstore.purge_slots(first_slot, Some(current_lowest));
|
||||
clean_time.stop();
|
||||
|
||||
debug!(
|
||||
"ledger purge {} -> {}: {} {}",
|
||||
first_slot, current_lowest, slot_update_time, clean_time
|
||||
);
|
||||
first_slot += DEFAULT_PURGE_BATCH_SIZE;
|
||||
if current_lowest == lowest_slot_to_clean {
|
||||
break;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
|
||||
if let (Ok(pre), Ok(post)) = (pre, post) {
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", pre as i64, i64),
|
||||
("disk_utilization_post", post as i64, i64),
|
||||
("disk_utilization_delta", (pre as i64 - post as i64), i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cleanup.join()
|
||||
}
|
||||
@ -111,6 +201,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_cleanup() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
let (shreds, _) = make_many_slot_entries(0, 50, 5);
|
||||
@ -118,10 +209,10 @@ mod tests {
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
//send a signal to kill slots 0-40
|
||||
let mut next_purge_slot = 0;
|
||||
//send a signal to kill all but 5 shreds, which will be in the newest slots
|
||||
let mut last_purge_slot = 0;
|
||||
sender.send(50).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
|
||||
.unwrap();
|
||||
|
||||
//check that 0-40 don't exist
|
||||
@ -134,6 +225,62 @@ mod tests {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cleanup_speed() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
blockstore.set_no_compaction(true);
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let mut first_insert = Measure::start("first_insert");
|
||||
let initial_slots = 50;
|
||||
let initial_entries = 5;
|
||||
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
first_insert.stop();
|
||||
info!("{}", first_insert);
|
||||
|
||||
let mut last_purge_slot = 0;
|
||||
let mut slot = initial_slots;
|
||||
let mut num_slots = 6;
|
||||
for _ in 0..5 {
|
||||
let mut insert_time = Measure::start("insert time");
|
||||
let batch_size = 2;
|
||||
let batches = num_slots / batch_size;
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
if i % 100 == 0 {
|
||||
info!("inserting..{} of {}", i, batches);
|
||||
}
|
||||
}
|
||||
insert_time.stop();
|
||||
|
||||
let mut time = Measure::start("purge time");
|
||||
sender.send(slot + num_slots).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
initial_slots,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
time.stop();
|
||||
info!(
|
||||
"slot: {} size: {} {} {}",
|
||||
slot, num_slots, insert_time, time
|
||||
);
|
||||
slot += num_slots;
|
||||
num_slots *= 2;
|
||||
}
|
||||
|
||||
drop(blockstore);
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compaction() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
@ -142,7 +289,7 @@ mod tests {
|
||||
let n = 10_000;
|
||||
let batch_size = 100;
|
||||
let batches = n / batch_size;
|
||||
let max_ledger_slots = 100;
|
||||
let max_ledger_shreds = 100;
|
||||
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
|
||||
@ -158,8 +305,9 @@ mod tests {
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
max_ledger_shreds,
|
||||
&mut next_purge_batch,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -170,7 +318,7 @@ mod tests {
|
||||
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
|
||||
|
||||
// check that early slots don't exist
|
||||
let max_slot = n - max_ledger_slots;
|
||||
let max_slot = n - max_ledger_shreds - 1;
|
||||
blockstore
|
||||
.slot_meta_iterator(0)
|
||||
.unwrap()
|
||||
|
@ -5,16 +5,18 @@
|
||||
//! command-line tools to spin up validators and a Rust library
|
||||
//!
|
||||
|
||||
pub mod accounts_cleanup_service;
|
||||
pub mod accounts_hash_verifier;
|
||||
pub mod banking_stage;
|
||||
pub mod broadcast_stage;
|
||||
pub mod cluster_info_vote_listener;
|
||||
pub mod commitment;
|
||||
mod deprecated;
|
||||
pub mod shred_fetch_stage;
|
||||
#[macro_use]
|
||||
pub mod contact_info;
|
||||
pub mod blockstream;
|
||||
pub mod blockstream_service;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_slots;
|
||||
pub mod consensus;
|
||||
pub mod crds;
|
||||
pub mod crds_gossip;
|
||||
@ -22,16 +24,15 @@ pub mod crds_gossip_error;
|
||||
pub mod crds_gossip_pull;
|
||||
pub mod crds_gossip_push;
|
||||
pub mod crds_value;
|
||||
pub mod epoch_slots;
|
||||
pub mod fetch_stage;
|
||||
pub mod gen_keys;
|
||||
pub mod genesis_utils;
|
||||
pub mod gossip_service;
|
||||
pub mod ledger_cleanup_service;
|
||||
pub mod local_vote_signer_service;
|
||||
pub mod packet;
|
||||
pub mod poh_recorder;
|
||||
pub mod poh_service;
|
||||
pub mod recvmmsg;
|
||||
pub mod progress_map;
|
||||
pub mod repair_service;
|
||||
pub mod replay_stage;
|
||||
mod result;
|
||||
@ -42,7 +43,6 @@ pub mod rpc_pubsub;
|
||||
pub mod rpc_pubsub_service;
|
||||
pub mod rpc_service;
|
||||
pub mod rpc_subscriptions;
|
||||
pub mod sendmmsg;
|
||||
pub mod serve_repair;
|
||||
pub mod serve_repair_service;
|
||||
pub mod sigverify;
|
||||
@ -50,11 +50,11 @@ pub mod sigverify_shreds;
|
||||
pub mod sigverify_stage;
|
||||
pub mod snapshot_packager_service;
|
||||
pub mod storage_stage;
|
||||
pub mod streamer;
|
||||
pub mod tpu;
|
||||
pub mod transaction_status_service;
|
||||
pub mod tvu;
|
||||
pub mod validator;
|
||||
pub mod verified_vote_packets;
|
||||
pub mod weighted_shuffle;
|
||||
pub mod window_service;
|
||||
|
||||
|
@ -511,8 +511,8 @@ impl PohRecorder {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use bincode::serialize;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path};
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
|
@ -120,8 +120,8 @@ impl PohService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use crate::poh_recorder::WorkingBank;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_perf::test_tx::test_tx;
|
||||
|
571
core/src/progress_map.rs
Normal file
571
core/src/progress_map.rs
Normal file
@ -0,0 +1,571 @@
|
||||
use crate::{
|
||||
cluster_info_vote_listener::SlotVoteTracker, cluster_slots::SlotPubkeys,
|
||||
consensus::StakeLockout, replay_stage::SUPERMINORITY_THRESHOLD,
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
rc::Rc,
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct ReplaySlotStats(ConfirmationTiming);
|
||||
impl std::ops::Deref for ReplaySlotStats {
|
||||
type Target = ConfirmationTiming;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
impl std::ops::DerefMut for ReplaySlotStats {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl ReplaySlotStats {
|
||||
pub fn report_stats(&self, slot: Slot, num_entries: usize, num_shreds: u64) {
|
||||
datapoint_info!(
|
||||
"replay-slot-stats",
|
||||
("slot", slot as i64, i64),
|
||||
("fetch_entries_time", self.fetch_elapsed as i64, i64),
|
||||
(
|
||||
"fetch_entries_fail_time",
|
||||
self.fetch_fail_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
("entry_verification_time", self.verify_elapsed as i64, i64),
|
||||
("replay_time", self.replay_elapsed as i64, i64),
|
||||
(
|
||||
"replay_total_elapsed",
|
||||
self.started.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
("total_entries", num_entries as i64, i64),
|
||||
("total_shreds", num_shreds as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ValidatorStakeInfo {
|
||||
pub validator_vote_pubkey: Pubkey,
|
||||
pub stake: u64,
|
||||
pub total_epoch_stake: u64,
|
||||
}
|
||||
|
||||
impl Default for ValidatorStakeInfo {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
stake: 0,
|
||||
validator_vote_pubkey: Pubkey::default(),
|
||||
total_epoch_stake: 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorStakeInfo {
|
||||
pub fn new(validator_vote_pubkey: Pubkey, stake: u64, total_epoch_stake: u64) -> Self {
|
||||
Self {
|
||||
validator_vote_pubkey,
|
||||
stake,
|
||||
total_epoch_stake,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ForkProgress {
|
||||
pub(crate) is_dead: bool,
|
||||
pub(crate) fork_stats: ForkStats,
|
||||
pub(crate) propagated_stats: PropagatedStats,
|
||||
pub(crate) replay_stats: ReplaySlotStats,
|
||||
pub(crate) replay_progress: ConfirmationProgress,
|
||||
}
|
||||
|
||||
impl ForkProgress {
|
||||
pub fn new(
|
||||
last_entry: Hash,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
validator_stake_info: Option<ValidatorStakeInfo>,
|
||||
) -> Self {
|
||||
let (
|
||||
is_leader_slot,
|
||||
propagated_validators_stake,
|
||||
propagated_validators,
|
||||
is_propagated,
|
||||
total_epoch_stake,
|
||||
) = validator_stake_info
|
||||
.map(|info| {
|
||||
(
|
||||
true,
|
||||
info.stake,
|
||||
vec![Rc::new(info.validator_vote_pubkey)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
{
|
||||
if info.total_epoch_stake == 0 {
|
||||
true
|
||||
} else {
|
||||
info.stake as f64 / info.total_epoch_stake as f64
|
||||
> SUPERMINORITY_THRESHOLD
|
||||
}
|
||||
},
|
||||
info.total_epoch_stake,
|
||||
)
|
||||
})
|
||||
.unwrap_or((false, 0, HashSet::new(), false, 0));
|
||||
Self {
|
||||
is_dead: false,
|
||||
fork_stats: ForkStats::default(),
|
||||
replay_stats: ReplaySlotStats::default(),
|
||||
replay_progress: ConfirmationProgress::new(last_entry),
|
||||
propagated_stats: PropagatedStats {
|
||||
prev_leader_slot,
|
||||
is_leader_slot,
|
||||
propagated_validators_stake,
|
||||
propagated_validators,
|
||||
is_propagated,
|
||||
total_epoch_stake,
|
||||
..PropagatedStats::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_bank(
|
||||
bank: &Bank,
|
||||
my_pubkey: &Pubkey,
|
||||
voting_pubkey: &Pubkey,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
) -> Self {
|
||||
let validator_fork_info = {
|
||||
if bank.collector_id() == my_pubkey {
|
||||
let stake = bank.epoch_vote_account_stake(voting_pubkey);
|
||||
Some(ValidatorStakeInfo::new(
|
||||
*voting_pubkey,
|
||||
stake,
|
||||
bank.total_epoch_stake(),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
Self::new(bank.last_blockhash(), prev_leader_slot, validator_fork_info)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub(crate) struct ForkStats {
|
||||
pub(crate) weight: u128,
|
||||
pub(crate) fork_weight: u128,
|
||||
pub(crate) total_staked: u64,
|
||||
pub(crate) slot: Slot,
|
||||
pub(crate) block_height: u64,
|
||||
pub(crate) has_voted: bool,
|
||||
pub(crate) is_recent: bool,
|
||||
pub(crate) is_empty: bool,
|
||||
pub(crate) vote_threshold: bool,
|
||||
pub(crate) is_locked_out: bool,
|
||||
pub(crate) stake_lockouts: HashMap<u64, StakeLockout>,
|
||||
pub(crate) confirmation_reported: bool,
|
||||
pub(crate) computed: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct PropagatedStats {
|
||||
pub(crate) propagated_validators: HashSet<Rc<Pubkey>>,
|
||||
pub(crate) propagated_node_ids: HashSet<Rc<Pubkey>>,
|
||||
pub(crate) propagated_validators_stake: u64,
|
||||
pub(crate) is_propagated: bool,
|
||||
pub(crate) is_leader_slot: bool,
|
||||
pub(crate) prev_leader_slot: Option<Slot>,
|
||||
pub(crate) slot_vote_tracker: Option<Arc<RwLock<SlotVoteTracker>>>,
|
||||
pub(crate) cluster_slot_pubkeys: Option<Arc<RwLock<SlotPubkeys>>>,
|
||||
pub(crate) total_epoch_stake: u64,
|
||||
}
|
||||
|
||||
impl PropagatedStats {
|
||||
pub fn add_vote_pubkey(
|
||||
&mut self,
|
||||
vote_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
stake: u64,
|
||||
) {
|
||||
if !self.propagated_validators.contains(vote_pubkey) {
|
||||
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(vote_pubkey).cloned();
|
||||
if cached_pubkey.is_none() {
|
||||
let new_pubkey = Rc::new(*vote_pubkey);
|
||||
all_pubkeys.insert(new_pubkey.clone());
|
||||
cached_pubkey = Some(new_pubkey);
|
||||
}
|
||||
let vote_pubkey = cached_pubkey.unwrap();
|
||||
self.propagated_validators.insert(vote_pubkey);
|
||||
self.propagated_validators_stake += stake;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_node_pubkey(
|
||||
&mut self,
|
||||
node_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
bank: &Bank,
|
||||
) {
|
||||
if !self.propagated_node_ids.contains(node_pubkey) {
|
||||
let node_vote_accounts = bank
|
||||
.epoch_vote_accounts_for_node_id(&node_pubkey)
|
||||
.map(|v| &v.vote_accounts);
|
||||
|
||||
if let Some(node_vote_accounts) = node_vote_accounts {
|
||||
self.add_node_pubkey_internal(
|
||||
node_pubkey,
|
||||
all_pubkeys,
|
||||
node_vote_accounts,
|
||||
bank.epoch_vote_accounts(bank.epoch())
|
||||
.expect("Epoch stakes for bank's own epoch must exist"),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn add_node_pubkey_internal(
|
||||
&mut self,
|
||||
node_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
vote_account_pubkeys: &[Pubkey],
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) {
|
||||
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(node_pubkey).cloned();
|
||||
if cached_pubkey.is_none() {
|
||||
let new_pubkey = Rc::new(*node_pubkey);
|
||||
all_pubkeys.insert(new_pubkey.clone());
|
||||
cached_pubkey = Some(new_pubkey);
|
||||
}
|
||||
let node_pubkey = cached_pubkey.unwrap();
|
||||
self.propagated_node_ids.insert(node_pubkey);
|
||||
for vote_account_pubkey in vote_account_pubkeys.iter() {
|
||||
let stake = epoch_vote_accounts
|
||||
.get(vote_account_pubkey)
|
||||
.map(|(stake, _)| *stake)
|
||||
.unwrap_or(0);
|
||||
self.add_vote_pubkey(vote_account_pubkey, all_pubkeys, stake);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct ProgressMap {
|
||||
progress_map: HashMap<Slot, ForkProgress>,
|
||||
}
|
||||
|
||||
impl std::ops::Deref for ProgressMap {
|
||||
type Target = HashMap<Slot, ForkProgress>;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.progress_map
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for ProgressMap {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.progress_map
|
||||
}
|
||||
}
|
||||
|
||||
impl ProgressMap {
|
||||
pub fn insert(&mut self, slot: Slot, fork_progress: ForkProgress) {
|
||||
self.progress_map.insert(slot, fork_progress);
|
||||
}
|
||||
|
||||
pub fn get_propagated_stats(&self, slot: Slot) -> Option<&PropagatedStats> {
|
||||
self.progress_map
|
||||
.get(&slot)
|
||||
.map(|fork_progress| &fork_progress.propagated_stats)
|
||||
}
|
||||
|
||||
pub fn get_propagated_stats_mut(&mut self, slot: Slot) -> Option<&mut PropagatedStats> {
|
||||
self.progress_map
|
||||
.get_mut(&slot)
|
||||
.map(|fork_progress| &mut fork_progress.propagated_stats)
|
||||
}
|
||||
|
||||
pub fn get_fork_stats(&self, slot: Slot) -> Option<&ForkStats> {
|
||||
self.progress_map
|
||||
.get(&slot)
|
||||
.map(|fork_progress| &fork_progress.fork_stats)
|
||||
}
|
||||
|
||||
pub fn get_fork_stats_mut(&mut self, slot: Slot) -> Option<&mut ForkStats> {
|
||||
self.progress_map
|
||||
.get_mut(&slot)
|
||||
.map(|fork_progress| &mut fork_progress.fork_stats)
|
||||
}
|
||||
|
||||
pub fn is_propagated(&self, slot: Slot) -> bool {
|
||||
let leader_slot_to_check = self.get_latest_leader_slot(slot);
|
||||
|
||||
// prev_leader_slot doesn't exist because already rooted
|
||||
// or this validator hasn't been scheduled as a leader
|
||||
// yet. In both cases the latest leader is vacuously
|
||||
// confirmed
|
||||
leader_slot_to_check
|
||||
.map(|leader_slot_to_check| {
|
||||
// If the leader's stats are None (isn't in the
|
||||
// progress map), this means that prev_leader slot is
|
||||
// rooted, so return true
|
||||
self.get_propagated_stats(leader_slot_to_check)
|
||||
.map(|stats| stats.is_propagated)
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
pub fn get_latest_leader_slot(&self, slot: Slot) -> Option<Slot> {
|
||||
let propagated_stats = self
|
||||
.get_propagated_stats(slot)
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
if propagated_stats.is_leader_slot {
|
||||
Some(slot)
|
||||
} else {
|
||||
propagated_stats.prev_leader_slot
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option<Slot> {
|
||||
let parent_slot = bank.parent_slot();
|
||||
self.get_propagated_stats(parent_slot)
|
||||
.map(|stats| {
|
||||
if stats.is_leader_slot {
|
||||
Some(parent_slot)
|
||||
} else {
|
||||
stats.prev_leader_slot
|
||||
}
|
||||
})
|
||||
.unwrap_or(None)
|
||||
}
|
||||
|
||||
pub fn handle_new_root(&mut self, bank_forks: &BankForks) {
|
||||
self.progress_map
|
||||
.retain(|k, _| bank_forks.get(*k).is_some());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_add_vote_pubkey() {
|
||||
let mut stats = PropagatedStats::default();
|
||||
let mut all_pubkeys = HashSet::new();
|
||||
let mut vote_pubkey = Pubkey::new_rand();
|
||||
all_pubkeys.insert(Rc::new(vote_pubkey.clone()));
|
||||
|
||||
// Add a vote pubkey, the number of references in all_pubkeys
|
||||
// should be 2
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 1);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
|
||||
|
||||
// Adding it again should change no state since the key already existed
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 1);
|
||||
|
||||
// Addding another pubkey should succeed
|
||||
vote_pubkey = Pubkey::new_rand();
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 3);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_node_pubkey_internal() {
|
||||
let num_vote_accounts = 10;
|
||||
let staked_vote_accounts = 5;
|
||||
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
|
||||
.take(num_vote_accounts)
|
||||
.collect();
|
||||
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
|
||||
.iter()
|
||||
.skip(num_vote_accounts - staked_vote_accounts)
|
||||
.map(|pubkey| (*pubkey, (1, Account::default())))
|
||||
.collect();
|
||||
|
||||
let mut stats = PropagatedStats::default();
|
||||
let mut all_pubkeys = HashSet::new();
|
||||
let mut node_pubkey = Pubkey::new_rand();
|
||||
all_pubkeys.insert(Rc::new(node_pubkey.clone()));
|
||||
|
||||
// Add a vote pubkey, the number of references in all_pubkeys
|
||||
// should be 2
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
|
||||
// Adding it again should not change any state
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
|
||||
// Addding another pubkey with same vote accounts should succeed, but stake
|
||||
// shouldn't increase
|
||||
node_pubkey = Pubkey::new_rand();
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
|
||||
// Addding another pubkey with different vote accounts should succeed
|
||||
// and increase stake
|
||||
node_pubkey = Pubkey::new_rand();
|
||||
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
|
||||
.take(num_vote_accounts)
|
||||
.collect();
|
||||
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
|
||||
.iter()
|
||||
.skip(num_vote_accounts - staked_vote_accounts)
|
||||
.map(|pubkey| (*pubkey, (1, Account::default())))
|
||||
.collect();
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
2 * staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
}
|
||||
|
||||
#[test]
fn test_is_propagated_status_on_construction() {
    // Convenience constructor: builds a ForkProgress whose previous
    // leader slot is 9, carrying the given validator stake info.
    let progress_with = |stake_info: Option<ValidatorStakeInfo>| {
        ForkProgress::new(Hash::default(), Some(9), stake_info)
    };

    // No ValidatorStakeInfo means this is not a leader slot, so the
    // propagation flag must start out false.
    assert!(!progress_with(None).propagated_stats.is_propagated);

    // A total epoch stake of zero makes the threshold trivially met.
    let zero_epoch_stake = ValidatorStakeInfo {
        total_epoch_stake: 0,
        ..ValidatorStakeInfo::default()
    };
    assert!(
        progress_with(Some(zero_epoch_stake))
            .propagated_stats
            .is_propagated
    );

    // With nonzero total stake the threshold is not met unless this
    // validator alone holds enough stake to clear it.
    let unstaked_validator = ValidatorStakeInfo {
        total_epoch_stake: 2,
        ..ValidatorStakeInfo::default()
    };
    assert!(
        !progress_with(Some(unstaked_validator))
            .propagated_stats
            .is_propagated
    );

    // Give the validator half the epoch stake by itself, which is
    // enough to pass the threshold on construction.
    let self_sufficient_validator = ValidatorStakeInfo {
        stake: 1,
        total_epoch_stake: 2,
        ..ValidatorStakeInfo::default()
    };
    assert!(
        progress_with(Some(self_sufficient_validator))
            .propagated_stats
            .is_propagated
    );

    // The all-default ValidatorStakeInfo must produce a ForkProgress
    // with is_propagated == false; otherwise propagation tests would
    // silently auto-pass without exercising the real checks.
    assert!(
        !progress_with(Some(ValidatorStakeInfo::default()))
            .propagated_stats
            .is_propagated
    );
}
|
||||
|
||||
#[test]
fn test_is_propagated() {
    let mut progress_map = ProgressMap::default();

    // Seed the map with leader slot 9 (has ValidatorStakeInfo, no
    // previous leader) and its descendant slot 10 (not a leader slot,
    // previous leader slot == 9).
    let leader_slot_progress =
        ForkProgress::new(Hash::default(), None, Some(ValidatorStakeInfo::default()));
    progress_map.insert(9, leader_slot_progress);
    let descendant_progress = ForkProgress::new(Hash::default(), Some(9), None);
    progress_map.insert(10, descendant_progress);

    // Neither slot has a confirmed parent yet.
    assert!(!progress_map.is_propagated(9));
    assert!(!progress_map.is_propagated(10));

    // Slot 8 names slot 7 as its previous leader, but 7 is absent from
    // the progress map. A missing previous leader implies the parent is
    // rooted, so is_propagated(8) must report true.
    progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None));
    assert!(progress_map.is_propagated(8));

    // Setting the flag on slot 9 directly makes it report propagated.
    let slot_9_stats = progress_map.get_propagated_stats_mut(9).unwrap();
    slot_9_stats.is_propagated = true;
    assert!(progress_map.is_propagated(9));
    assert!(progress_map.get(&9).unwrap().propagated_stats.is_propagated);

    // Confirming slot 9 also confirms slot 10, because 9 is the last
    // leader slot preceding 10.
    assert!(progress_map.is_propagated(10));

    // But once slot 10 becomes a leader slot itself, its own
    // confirmation is required: the confirmed previous leader slot 9
    // no longer suffices.
    let slot_10_stats = progress_map.get_propagated_stats_mut(10).unwrap();
    slot_10_stats.is_leader_slot = true;
    assert!(!progress_map.is_propagated(10));
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user