Compare commits
301 Commits
SHA1 | Author | Date |
---|---|---|
c53f163ef5 | |||
ca35854417 | |||
ab1fda2a54 | |||
a6ec77c230 | |||
1d7894f1be | |||
4866a1fc39 | |||
60c5e59a5e | |||
fd93bdadf6 | |||
6089db2a07 | |||
462d0cfc6c | |||
e6d6fc4391 | |||
092556ae5e | |||
ad9fa54a47 | |||
2d68170747 | |||
20f3d18458 | |||
be79efe9b7 | |||
5db377f743 | |||
9c2f45a1e0 | |||
8646918d00 | |||
7c44fc3561 | |||
686403eb1d | |||
6cf9b60a9c | |||
aca142df16 | |||
b2582196db | |||
85a77bec5f | |||
e781cbf4ba | |||
59956e4543 | |||
303417f981 | |||
fea03fdf33 | |||
e8160efc46 | |||
e0ba0d581c | |||
36eda29fc9 | |||
2ec73db6bd | |||
ef6ce2765e | |||
8acbb4ab2f | |||
a49f5378e2 | |||
f39e74f0d7 | |||
22b767308a | |||
36aa876833 | |||
06ba0b7279 | |||
a38e1a81ef | |||
da925142d1 | |||
5feeb257bb | |||
06c547094a | |||
a40c5cf185 | |||
deb83cdef6 | |||
20db335aed | |||
407db65336 | |||
9c5a3cd277 | |||
138a49e820 | |||
36c9e22e3d | |||
aa0f8538ed | |||
4177c56c51 | |||
425ac8d520 | |||
ada4d16c4c | |||
4069ef2e02 | |||
ace98bba08 | |||
e59b53dfa8 | |||
aacb38864c | |||
33d13a3aea | |||
1f0f947ed2 | |||
6854c64a09 | |||
4a32bc48d2 | |||
b430762a23 | |||
f8523db51d | |||
48b11d1841 | |||
3600a926df | |||
c228792967 | |||
7ea522e851 | |||
63503ad589 | |||
9800e09431 | |||
2e2b1881f5 | |||
61483c18ca | |||
a5279bb835 | |||
357554b209 | |||
41fbdc6e08 | |||
8bd1c57448 | |||
2562e48b9d | |||
46bb79df29 | |||
6bc0d2a0cb | |||
465cd45833 | |||
b4484b89c3 | |||
c029f069f0 | |||
fdb57bc5db | |||
e43a634944 | |||
2da7c7fbd3 | |||
5683282c94 | |||
44967abd1c | |||
8b41a5d725 | |||
07c183bb84 | |||
7fd879b417 | |||
dc5c6e7cf8 | |||
bd633d2b81 | |||
feeaad619a | |||
b44d8c394e | |||
0ff9c4cd8e | |||
9cafd1f85e | |||
7fe10ba060 | |||
cc48773b03 | |||
8fbf0e2d9f | |||
d86358eedc | |||
fe04fb4cd3 | |||
de3f7e9634 | |||
5e8fcdbe1d | |||
3ee7256c0c | |||
2a7a9fdf03 | |||
5bf87de136 | |||
97a136ea20 | |||
735dfab02e | |||
b5f65ce49c | |||
a283863694 | |||
25908feef9 | |||
b91ad6fd96 | |||
02abf422df | |||
3fe5f886d7 | |||
4c6a6d63bf | |||
589a9d3a72 | |||
bd884a56bf | |||
119467df59 | |||
ee68b9800e | |||
c6b4a3a706 | |||
b1ac8f933b | |||
9e3758983d | |||
34c0537e9b | |||
8628f33d0b | |||
ed05aeaef8 | |||
e1444a9b00 | |||
9514169bf6 | |||
fa8394f526 | |||
1cd8c1865e | |||
e3f895d7d4 | |||
8abf22f34b | |||
a016bc2736 | |||
470debef16 | |||
c147dc3028 | |||
bdd95b2286 | |||
efe676bc94 | |||
fc34687687 | |||
6042ccf496 | |||
f1197e1b1f | |||
8c1b9a0b67 | |||
0da9ac1a47 | |||
c1f316721a | |||
8e86014311 | |||
d807217be7 | |||
bc44516eb4 | |||
b78a13d42c | |||
0dcdc37fec | |||
dd1c3514a8 | |||
767efab941 | |||
288a3bdcd9 | |||
8019bff391 | |||
575a897ffc | |||
697228a484 | |||
ca907f37c3 | |||
439e7cc26a | |||
3217a1d70c | |||
6dbba86cc6 | |||
8cc863ea6c | |||
1d957b6b80 | |||
e56430c9fb | |||
e4d8ea11ac | |||
a4035a3c65 | |||
807c69d97c | |||
9259d342ac | |||
b4d4edb645 | |||
966b6999d1 | |||
73491e3ca1 | |||
d1d53c3fb6 | |||
a77e576cd9 | |||
9e14cde461 | |||
a2a7c86c0d | |||
38aeed02fc | |||
64d63966c7 | |||
38ae54b720 | |||
a18c0e34f4 | |||
be3a0b6b10 | |||
9f6496d38a | |||
1fa31c9410 | |||
2b5e757d57 | |||
0dbe5ee559 | |||
6926e89e86 | |||
ec0007217d | |||
91b23f8316 | |||
2fd8d57504 | |||
0595109f98 | |||
9f46b2a6ce | |||
a357d08524 | |||
177c9cc026 | |||
0c4cb76acf | |||
8676b5d40c | |||
efab896c9e | |||
97b9d57b62 | |||
487826a539 | |||
4acb764589 | |||
9de4c1dcd9 | |||
e8c4302d6d | |||
a9f73ea321 | |||
66c41b3e8c | |||
8435fbfa0b | |||
9a4c449135 | |||
ac6dbf8f04 | |||
b55927370b | |||
002fbc4d53 | |||
53deb7919c | |||
8e46c44f3e | |||
37c2fa1d8d | |||
fdaa939892 | |||
c9d63204eb | |||
cfab54511b | |||
492cc93850 | |||
fd9fd43e83 | |||
191483f4ee | |||
688f8a669a | |||
46eea85022 | |||
1c765124e7 | |||
194491ae96 | |||
2ae595294c | |||
ead947e710 | |||
82df267ec9 | |||
53275cc678 | |||
44835a91db | |||
ee42040e6b | |||
2b98a16ec6 | |||
aa4a7b0c73 | |||
8f50c3dd2e | |||
9c47ce30a7 | |||
3433b08b8c | |||
d26fd27bf9 | |||
5c98c1d306 | |||
51aacfe3ca | |||
82bd2df986 | |||
aa88c40a9e | |||
8ec5a47027 | |||
5bd3eb4557 | |||
e9cb4a12dc | |||
de5cad9211 | |||
e3365529de | |||
ce2ce76958 | |||
16f2fb5c09 | |||
d77c98530f | |||
fe40b75ac6 | |||
e7129757c9 | |||
3635a68129 | |||
70a16e91a5 | |||
41daf1ef0c | |||
ff77789718 | |||
a77775cb58 | |||
167e15a5ae | |||
dea663d509 | |||
9754e551cb | |||
40a4ac15f1 | |||
c56052ff16 | |||
482ef51502 | |||
e4ca3900ae | |||
3574469052 | |||
e15246746d | |||
ec5cca41bc | |||
bc1368ba3e | |||
c0a161afe8 | |||
d343c409e6 | |||
64e8a21d73 | |||
ce04d2bfc2 | |||
1c1d83bd56 | |||
028e111fbc | |||
9670788bf5 | |||
d2f9625878 | |||
182096dc1a | |||
2d284ba6db | |||
1de805e7cd | |||
d642125f68 | |||
b8aff218e2 | |||
045d4d5294 | |||
d67dd8ce1f | |||
4d6679906b | |||
4537f54532 | |||
39b40dfff8 | |||
c82f4a1b6d | |||
7a021dff05 | |||
348c2263ba | |||
b5324063f1 | |||
6ed071c4dd | |||
4404634b14 | |||
6a1de33138 | |||
c05c3e69ca | |||
534244b322 | |||
335dfdc4d5 | |||
a7ef409c2b | |||
14594217db | |||
c8a03c7b3d | |||
9fcd162412 | |||
441fed7a5b | |||
ff31ffbd54 | |||
0e26ee854b | |||
5340800cea | |||
13c2e50b38 | |||
dd39b2b056 | |||
65f89d6729 | |||
1eceb4831d | |||
50303c9ede | |||
ed6a438c51 |
.appveyor.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
+os: Visual Studio 2017
+version: '{build}'
+
+branches:
+  only:
+    - master
+    - /^v[0-9.]+/
+
+cache:
+  - '%USERPROFILE%\.cargo'
+  - '%APPVEYOR_BUILD_FOLDER%\target'
+
+build_script:
+  - bash ci/publish-tarball.sh
+
+notifications:
+  - provider: Slack
+    incoming_webhook:
+      secure: 6HTXVh+FBz29LGJb+taFOo9dqoADfo9xyAszeyXZF5Ub9t5NERytKAR35B2wb+uIOOCBF8+JhmH4437Cgf/ti4IqvURzW1QReXK7eQhn1EI=
+    channel: ci-status
+    on_build_success: false
+    on_build_failure: true
+    on_build_status_changed: true
+
+deploy:
+  - provider: S3
+    access_key_id:
+      secure: ptvqM/yvgeTeA12XOzybH1KYNh95AdfEvqoH9mvP2ic=
+    secret_access_key:
+      secure: IkrgBlz5hdxvwcJdMXyyHUrpWhKa6fXLOD/8rm/rjKqYCdrba9B8V1nLZVrzXGGy
+    bucket: release.solana.com
+    region: us-west-1
+    set_public: true
+
+  - provider: GitHub
+    auth_token:
+      secure: vQ3jMl5LQrit6+TQONA3ZgQjZ/Ej62BN2ReVb2NSOwjITHMu1131hjc3dOrMEZL6
+    draft: false
+    prerelease: false
+    on:
+      appveyor_repo_tag: true

.buildkite/env/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
+/secrets_unencrypted.ejson

.buildkite/env/secrets.ejson (vendored)
@@ -1,12 +1,14 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:jA0WqO70coUtF0iokRdgtCR/lF/lETAI:d/Wl8Tdl6xVh/B39cTf1DaQkomR7I/2vMhvxd1msJ++BjI2l3p2dFoGsXqWT+/os8VgiPg==]",
-    "CRATES_IO_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:2FaZ6k4RGH8luyNRaN6yeZUQDNAu2KwC:XeYe0tCAivYE0F9HEWM79mAI6kNbfYaqP7k7yY+SBDvs0341U9BdGZp7SErbHleS]",
-    "GITHUB_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:9kh4DGPiGDcUU7ejSFWg3gTW8nrOM09Q:b+GE07Wu6/bEnkDZcUtf48vTKAFphrCSt3tNNER9h6A+wZ80k499edw4pbDdl9kEvxB30fFwrLQ=]",
-    "INFLUX_DATABASE": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:rCHsYi0rc7dmvr1V3wEgNoaNIyr+9ClM:omjVcOqM7vwt44kJ+As4BjJL]",
-    "INFLUX_PASSWORD": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:bP5Gw1Vy66viKFKO41o2Gho998XajH/5:khkCYz2LFvkJkk7R4xY1Hfz1yU3/NENjauiUkPhXA+dmg1qOIToxEagCgIkRwyeCiYaoCR6CZyw=]",
-    "INFLUX_USERNAME": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:ZamCvza2W9/bZRGSkqDu55xNN04XKKhp:5jlmCOdFbpL7EFez41zCbLfk3ZZlfmhI]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:Oi2nsRxnvWnnBYsB6KwEDzLPcYgpYojU:ELbvjXkXKlgFCMES45R+fxG7Ex43WHWErjMbxZoqasxyr7GSH66hQzUWqiQSJyT4ukYrRhRC9YrsKKGkjACLU57X4EGIy9TuLgTnyBYhPnxLYStC3y/7o/MB5FCTt5wHJw3/A9p+me5+T4UmyZ7OeP21NhDUCGQcb0040VwYWS78klW2aQESJJ6wTI1xboE8/zC0vtnB/u50+LydbKEyb21r6y3OH9FYNEpSwIspWKcgpruJdQSCnDoKxP9YR1yzvk2rabss13LJNdV1Y6mQNIdP4OIFQhCs6dXT253RTl5qdZ0MruHwlp8wX4btOuYDcCoM5exr]"
+    "CODECOV_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:ks2/ElgxwgxqgmFcxTHANNLmj23YH74h:U4uzRONRfiQyqy6HrPQ/e7OnBUY4HkW37R0iekkF3KJ9UGnHqT1UvwgVbDqLahtDIJ4rWw==]",
+    "CRATES_IO_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:lKMh3aLW+jyRrfS/c7yvkpB+TaPhXqLq:j0v27EbaPgwRdHZAbsM0FlAnt3r9ScQrFbWJYOAZtM3qestEiByTlKpZ0eyF/823]",
+    "GITHUB_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:Ll78c3jGpYqnTwR7HJq3mNNUC7pOv9Lu:GrInO2r8MjmP5c54szkyygdsrW5KQYkDgJQUVyFEPyG8SWfchyM9Gur8RV0a+cdwuxNkHLi4U2M=]",
+    "INFLUX_DATABASE": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:IlH/ZLTXv3SwlY3TVyAPCX2KzLRY6iG3:gGmUGSU/kCfR/mTwKONaUC/X]",
+    "INFLUX_PASSWORD": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:o2qm95GU4VrrcC4OU06jjPvCwKZy/CZF:OW2ga3kLOQJvaDEdGRJ+gn3L2ckFm8AJZtv9wj/GeUIKDH2A4uBPTHsAH9PMe6zujpuHGk3qbeg=]",
+    "INFLUX_USERNAME": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:yDWW/uIHsJqOTDYskZoSx3pzoB1vztWY:2z31oTA3g0Xs9fCczGNJRcx8xf/hFCed]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:RqRaHlYUvGPNFJa6gmciaYM3tRJTURUH:q78/3GTHCN3Uqx9z4nOBjPZcO1lOazNoB/mdhGRDFsnAqVd2hU8zbKkqLrZfLlGqyD8WQOFuw5oTJR9qWg6L9LcOyj3pGL8jWF2yjgZxdtNMXnkbSrCWLooWBBLT61jYQnEwg73gT8ld3Q8EVv3T+MeSMu6FnPz+0+bqQCAGgfqksP4hsUAJGzgZu+i0tNOdlT7fxnh5KJK/yFM/CKgN2sRwEjukA9hXsffyB61g2zqzTDJxCUDLbCVrCkA/bfUk7Of/t0W5t0nK1H3oyGZEc/lRMauCknDBka3Gz11dVss2QT19WQNh0u7bHVaT/U4lepX1j9Zv]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wFDl3INEnA3EQDHRX40avqGe1OMoJxyy:6ncCRVRTIRuYI5o/gayeuWCudWvmKNYr8KEHAWeTq34a5bdcKInBdKhjmjX+wLHqsEwQ5gcyhcxy4Ri2mbuN6AHazfZOZlubQkGlyUOAIYO5D5jkbyIh40DAtjVzo1MD/0HsW9zdGOzqUKp5xJJeDsbR4F153jbxa7fvwF90Q4UQjYFTKAtExEmHtDGSJG48ToVwTabTV/OnISMIggDZBviIv2QWHvXgK07b2mUj34rHJywEDGN1nj5rITTDdUeRcB1x4BAMOe94kTFPSTaj/OszvYlGECt8rkKFqbm092qL+XLfiBaImqe/WJHRCnAj6Don]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wAh+dBuZopv6vruVOYegUcq/aBnbksT1:qIJfCfDvDWiqicMOkmbJs/0n7UJLKNmgMQaKzeQ8J7Q60YpXbtWzKVW3tS6lzlgf64m3MrPXyo1C+mWh6jkjsb18T/OfggZy1ZHM4AcsOC6/ldUkV5YtuxUQuAmd5jCuV/R7iuYY8Z66AcfAevlb+bnLpgIifdA8fh/IktOo58nZUQwZDdppAacmftsLc6Frn5Er6A6+EXpxK1nmnlmLJ4AJztqlh6X0r+JvE2O7qeoZUXrIegnkxo7Aay7I/dd8zdYpp7ICSiTEtfVN/xNIu/5QmTRU7gWoz7cPl9epq4aiEALzPOzb6KVOiRcsOg+TlFvLQ71Ik5o=]"
   }
 }

@@ -1,6 +1,8 @@
 CI_BUILD_START=$(date +%s)
 export CI_BUILD_START
 
+source ci/env.sh
+
 #
 # Kill any running docker containers, which are potentially left over from the
 # previous CI job

.gitignore (vendored)
@@ -2,9 +2,10 @@
 /book/src/img/
 /book/src/tests.ok
 /farf/
-/metrics/scripts/lib/
 /solana-release/
-solana-release.tar.bz2
+/solana-release.tar.bz2
+/solana-metrics/
+/solana-metrics.tar.bz2
 /target/
 
 **/*.rs.bk

.travis.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
+os:
+  - osx
+
+language: rust
+cache: cargo
+rust:
+  - 1.35.0
+
+install:
+  - source ci/rust-version.sh
+  - test $rust_stable = $TRAVIS_RUST_VERSION # Update .travis.yml rust version above when this fails
+
+script:
+  - source ci/env.sh
+  - ci/publish-tarball.sh
+
+branches:
+  only:
+    - master
+    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
+
+notifications:
+  slack:
+    on_success: change
+    secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=
+
+deploy:
+  - provider: s3
+    access_key_id: $AWS_ACCESS_KEY_ID
+    secret_access_key: $AWS_SECRET_ACCESS_KEY
+    bucket: release.solana.com
+    region: us-west-1
+    skip_cleanup: true
+    acl: public_read
+    local_dir: travis-s3-upload
+    on:
+      all_branches: true
+  - provider: releases
+    api_key: $GITHUB_TOKEN
+    skip_cleanup: true
+    file_glob: true
+    file: travis-release-upload/*
+    on:
+      tags: true

Cargo.lock (generated, 1174 lines changed)
File diff suppressed because it is too large.

@@ -3,6 +3,7 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
+    "chacha-sys",
     "client",
     "core",
     "drone",
@@ -14,10 +15,12 @@ members = [
     "kvstore",
     "ledger-tool",
     "logger",
+    "merkle-tree",
     "metrics",
     "netutil",
     "programs/bpf",
-    "programs/bpf_loader",
+    "programs/bpf_loader_api",
+    "programs/bpf_loader_program",
     "programs/budget_api",
     "programs/budget_program",
     "programs/config_api",

README.md
@@ -30,6 +30,40 @@ Before you jump into the code, review the online book [Solana: Blockchain Rebuil
 
 (The _latest_ development version of the online book is also [available here](https://solana-labs.github.io/book-edge/).)
 
+Release Binaries
+===
+Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
+
+Additionally we provide pre-release binaries for the latest code on the edge and
+beta channels. Note that these pre-release binaries may be less stable than an
+official release.
+
+### Edge channel
+#### Linux (x86_64-unknown-linux-gnu)
+* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
+* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
+#### mac OS (x86_64-apple-darwin)
+* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
+* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
+#### Windows (x86_64-pc-windows-msvc)
+* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
+* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
+#### All platforms
+* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
+
+### Beta channel
+#### Linux (x86_64-unknown-linux-gnu)
+* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
+* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
+#### mac OS (x86_64-apple-darwin)
+* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
+* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
+#### Windows (x86_64-pc-windows-msvc)
+* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
+* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
+#### All platforms
+* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
+
 Developing
 ===
 

@@ -2,39 +2,41 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.15.0"
+version = "0.16.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
+publish = false
 
 [dependencies]
+bincode = "1.1.4"
 bs58 = "0.2.0"
 clap = "2.32.0"
-bincode = "1.1.4"
 env_logger = "0.6.0"
 itertools = "0.8.0"
 log = "0.4.6"
-num-traits = "0.2"
 num-derive = "0.2"
+num-traits = "0.2"
 rand = "0.6.5"
-rayon = "1.0.3"
-serde = "1.0.91"
-serde_derive = "1.0.91"
-serde_json = "1.0.38"
+rayon = "1.1.0"
+serde = "1.0.92"
+serde_derive = "1.0.92"
+serde_json = "1.0.39"
+serde_yaml = "0.8.9"
 # solana-runtime = { path = "../solana/runtime"}
-solana = { path = "../core", version = "0.15.0" }
-solana-client = { path = "../client", version = "0.15.0" }
-solana-drone = { path = "../drone", version = "0.15.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.15.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.15.0" }
-solana-logger = { path = "../logger", version = "0.15.0" }
-solana-metrics = { path = "../metrics", version = "0.15.0" }
-solana-netutil = { path = "../netutil", version = "0.15.0" }
-solana-runtime = { path = "../runtime", version = "0.15.0" }
-solana-sdk = { path = "../sdk", version = "0.15.0" }
-ws = "0.8.1"
+solana = { path = "../core", version = "0.16.2" }
+solana-client = { path = "../client", version = "0.16.2" }
+solana-drone = { path = "../drone", version = "0.16.2" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.2" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.2" }
+solana-logger = { path = "../logger", version = "0.16.2" }
+solana-metrics = { path = "../metrics", version = "0.16.2" }
+solana-netutil = { path = "../netutil", version = "0.16.2" }
+solana-runtime = { path = "../runtime", version = "0.16.2" }
+solana-sdk = { path = "../sdk", version = "0.16.2" }
 untrusted = "0.6.2"
+ws = "0.8.1"
 
 [features]
 cuda = ["solana/cuda"]
-erasure = []

@@ -20,9 +20,12 @@ use solana_sdk::system_instruction;
 use solana_sdk::timing::{duration_as_ms, duration_as_s};
 use solana_sdk::transaction::Transaction;
 use std::cmp;
-use std::collections::VecDeque;
+use std::collections::{HashMap, VecDeque};
+use std::fs::File;
+use std::io::prelude::*;
 use std::mem;
 use std::net::SocketAddr;
+use std::path::Path;
 use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
@@ -48,6 +51,8 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
+    pub client_ids_and_stake_file: String,
+    pub read_from_client_file: bool,
 }
 
 impl Default for Config {
@@ -61,10 +66,38 @@ impl Default for Config {
             batch_size: 10,
             chunk_size: 10,
             account_groups: 100,
+            client_ids_and_stake_file: String::new(),
+            read_from_client_file: false,
         }
     }
 }
 
+pub fn create_client_accounts_file(
+    client_ids_and_stake_file: &str,
+    batch_size: usize,
+    account_groups: usize,
+    fund_amount: u64,
+) {
+    let accounts_in_groups = batch_size * account_groups;
+    const NUM_KEYPAIR_GROUPS: u64 = 2;
+    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
+
+    let keypairs = generate_keypairs(total_keys);
+
+    let mut accounts = HashMap::new();
+    keypairs.iter().for_each(|keypair| {
+        accounts.insert(
+            serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
+            fund_amount,
+        );
+    });
+
+    let serialized = serde_yaml::to_string(&accounts).unwrap();
+    let path = Path::new(&client_ids_and_stake_file);
+    let mut file = File::create(path).unwrap();
+    file.write_all(&serialized.into_bytes()).unwrap();
+}
+
 pub fn do_bench_exchange<T>(clients: Vec<T>, config: Config)
 where
     T: 'static + Client + Send + Sync,
@@ -78,6 +111,8 @@ where
         batch_size,
         chunk_size,
         account_groups,
+        client_ids_and_stake_file,
+        read_from_client_file,
     } = config;
 
     info!(
@@ -92,35 +127,55 @@ where
     );
 
     let accounts_in_groups = batch_size * account_groups;
-    let exit_signal = Arc::new(AtomicBool::new(false));
+    const NUM_KEYPAIR_GROUPS: u64 = 2;
+    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
+
+    let mut signer_keypairs = if read_from_client_file {
+        let path = Path::new(&client_ids_and_stake_file);
+        let file = File::open(path).unwrap();
+
+        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
+        accounts
+            .into_iter()
+            .map(|(keypair, _)| {
+                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
+                Keypair::from_bytes(&bytes).unwrap()
+            })
+            .collect()
+    } else {
+        info!("Generating {:?} signer keys", total_keys);
+        generate_keypairs(total_keys)
+    };
+
+    let trader_signers: Vec<_> = signer_keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+    let swapper_signers: Vec<_> = signer_keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+
     let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
     let client = clients[0].as_ref();
 
-    const NUM_KEYPAIR_GROUPS: u64 = 4;
-    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
-    info!("Generating {:?} keys", total_keys);
-    let mut keypairs = generate_keypairs(total_keys);
-    let trader_signers: Vec<_> = keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
-    let swapper_signers: Vec<_> = keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
-    let src_pubkeys: Vec<_> = keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
-    let profit_pubkeys: Vec<_> = keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
+    if !read_from_client_file {
+        info!("Fund trader accounts");
+        fund_keys(client, &identity, &trader_signers, fund_amount);
+        info!("Fund swapper accounts");
+        fund_keys(client, &identity, &swapper_signers, fund_amount);
+    }
 
-    info!("Fund trader accounts");
-    fund_keys(client, &identity, &trader_signers, fund_amount);
-    info!("Fund swapper accounts");
-    fund_keys(client, &identity, &swapper_signers, fund_amount);
+    info!("Generating {:?} account keys", total_keys);
+    let mut account_keypairs = generate_keypairs(total_keys);
+    let src_pubkeys: Vec<_> = account_keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
+    let profit_pubkeys: Vec<_> = account_keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
 
     info!("Create {:?} source token accounts", src_pubkeys.len());
     create_token_accounts(client, &trader_signers, &src_pubkeys);
@@ -136,6 +191,7 @@ where
         transfer_delay
     );
 
+    let exit_signal = Arc::new(AtomicBool::new(false));
     let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
     let total_txs_sent_count = Arc::new(AtomicUsize::new(0));
     let s_threads: Vec<_> = (0..threads)
@@ -892,7 +948,7 @@ pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair,
 #[cfg(test)]
 mod tests {
     use super::*;
-    use solana::gossip_service::{discover_cluster, get_clients};
+    use solana::gossip_service::{discover_cluster, get_multi_client};
     use solana::local_cluster::{ClusterConfig, LocalCluster};
     use solana::validator::ValidatorConfig;
     use solana_drone::drone::run_local_drone;
@@ -907,7 +963,6 @@ mod tests {
         solana_logger::setup();
 
         const NUM_NODES: usize = 1;
-        let validator_config = ValidatorConfig::default();
 
         let mut config = Config::default();
         config.identity = Keypair::new();
@@ -929,7 +984,7 @@ mod tests {
         let cluster = LocalCluster::new(&ClusterConfig {
             node_stakes: vec![100_000; NUM_NODES],
             cluster_lamports: 100_000_000_000_000,
-            validator_config,
+            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
            native_instruction_processors: [solana_exchange_program!()].to_vec(),
             ..ClusterConfig::default()
         });
@@ -952,25 +1007,20 @@ mod tests {
             exit(1);
         });
 
-        let clients = get_clients(&nodes);
+        let (client, num_clients) = get_multi_client(&nodes);
 
-        if clients.len() < NUM_NODES {
-            error!(
-                "Error: Insufficient nodes discovered. Expecting {} or more",
-                NUM_NODES
-            );
-            exit(1);
-        }
+        info!("clients: {}", num_clients);
+        assert!(num_clients >= NUM_NODES);
 
         const NUM_SIGNERS: u64 = 2;
         airdrop_lamports(
-            &clients[0],
+            &client,
             &drone_addr,
             &config.identity,
             fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
         );
 
-        do_bench_exchange(clients, config);
+        do_bench_exchange(vec![client], config);
     }
 
     #[test]

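The hunks above give bench-exchange an offline client-accounts path: `create_client_accounts_file` serializes each generated keypair's 64-byte secret as a JSON string and writes a YAML map of keypair to lamports, and `do_bench_exchange` can later rebuild the signers from that file instead of regenerating them. A minimal sketch of that round-trip, assuming the `solana-sdk` `Keypair` API and the `serde_json`/`serde_yaml` crates already used in the diff:

```rust
// Sketch only: mirrors the write/read round-trip added by this change.
// Assumes solana-sdk's Keypair/KeypairUtil and the serde_json/serde_yaml crates.
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashMap;

fn main() {
    // Write side: key the map by the JSON-encoded keypair bytes, value is the fund amount.
    let keypair = Keypair::new();
    let mut accounts: HashMap<String, u64> = HashMap::new();
    accounts.insert(
        serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
        10_000, // illustrative fund_amount
    );
    let yaml = serde_yaml::to_string(&accounts).unwrap();

    // Read side: recover the keypairs; the stored balances are ignored by bench-exchange.
    let restored: HashMap<String, u64> = serde_yaml::from_str(&yaml).unwrap();
    for (encoded, _lamports) in restored {
        let bytes: Vec<u8> = serde_json::from_str(&encoded).unwrap();
        let recovered = Keypair::from_bytes(&bytes).unwrap();
        println!("recovered {}", recovered.pubkey());
    }
}
```
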
@@ -18,6 +18,9 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
+    pub client_ids_and_stake_file: String,
+    pub write_to_client_file: bool,
+    pub read_from_client_file: bool,
 }
 
 impl Default for Config {
@@ -34,6 +37,9 @@ impl Default for Config {
             batch_size: 100,
             chunk_size: 100,
             account_groups: 100,
+            client_ids_and_stake_file: String::new(),
+            write_to_client_file: false,
+            read_from_client_file: false,
         }
     }
 }
@@ -141,6 +147,20 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .default_value("10")
                 .help("Number of account groups to cycle for each batch"),
         )
+        .arg(
+            Arg::with_name("write-client-keys")
+                .long("write-client-keys")
+                .value_name("FILENAME")
+                .takes_value(true)
+                .help("Generate client keys and stakes and write the list to YAML file"),
+        )
+        .arg(
+            Arg::with_name("read-client-keys")
+                .long("read-client-keys")
+                .value_name("FILENAME")
+                .takes_value(true)
+                .help("Read client keys and stakes from the YAML file"),
+        )
 }
 
 pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
@@ -184,5 +204,15 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     args.account_groups = value_t!(matches.value_of("account-groups"), usize)
         .expect("Failed to parse account-groups");
 
+    if let Some(s) = matches.value_of("write-client-keys") {
+        args.write_to_client_file = true;
+        args.client_ids_and_stake_file = s.to_string();
+    }
+
+    if let Some(s) = matches.value_of("read-client-keys") {
+        assert!(!args.write_to_client_file);
+        args.read_from_client_file = true;
+        args.client_ids_and_stake_file = s.to_string();
+    }
     args
 }

@@ -6,9 +6,9 @@ pub mod order_book;
 #[macro_use]
 extern crate solana_exchange_program;
 
-use crate::bench::{airdrop_lamports, do_bench_exchange, Config};
+use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
 use log::*;
-use solana::gossip_service::{discover_cluster, get_clients};
+use solana::gossip_service::{discover_cluster, get_multi_client};
 use solana_sdk::signature::KeypairUtil;
 
 fn main() {
@@ -30,33 +30,12 @@ fn main() {
         batch_size,
         chunk_size,
         account_groups,
+        client_ids_and_stake_file,
+        write_to_client_file,
+        read_from_client_file,
         ..
     } = cli_config;
 
-    info!("Connecting to the cluster");
-    let (nodes, _replicators) =
-        discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
-            panic!("Failed to discover nodes");
-        });
-
-    let clients = get_clients(&nodes);
-
-    info!("{} nodes found", clients.len());
-    if clients.len() < num_nodes {
-        panic!("Error: Insufficient nodes discovered");
-    }
-
-    info!("Funding keypair: {}", identity.pubkey());
-
-    let accounts_in_groups = batch_size * account_groups;
-    const NUM_SIGNERS: u64 = 2;
-    airdrop_lamports(
-        &clients[0],
-        &drone_addr,
-        &identity,
-        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
-    );
-
     let config = Config {
         identity,
         threads,
@@ -66,7 +45,43 @@ fn main() {
         batch_size,
         chunk_size,
         account_groups,
+        client_ids_and_stake_file,
+        read_from_client_file,
     };
 
-    do_bench_exchange(clients, config);
+    if write_to_client_file {
+        create_client_accounts_file(
+            &config.client_ids_and_stake_file,
+            config.batch_size,
+            config.account_groups,
+            config.fund_amount,
+        );
+    } else {
+        info!("Connecting to the cluster");
+        let (nodes, _replicators) =
+            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
+                panic!("Failed to discover nodes");
+            });
+
+        let (client, num_clients) = get_multi_client(&nodes);
+
+        info!("{} nodes found", num_clients);
+        if num_clients < num_nodes {
+            panic!("Error: Insufficient nodes discovered");
+        }
+
+        if !read_from_client_file {
+            info!("Funding keypair: {}", config.identity.pubkey());
+
+            let accounts_in_groups = batch_size * account_groups;
+            const NUM_SIGNERS: u64 = 2;
+            airdrop_lamports(
+                &client,
+                &drone_addr,
+                &config.identity,
+                fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
+            );
+        }
+        do_bench_exchange(vec![client], config);
+    }
 }

@@ -2,17 +2,17 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.15.0"
+version = "0.16.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
 clap = "2.33.0"
-solana = { path = "../core", version = "0.15.0" }
-solana-logger = { path = "../logger", version = "0.15.0" }
-solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana = { path = "../core", version = "0.16.2" }
+solana-logger = { path = "../logger", version = "0.16.2" }
+solana-netutil = { path = "../netutil", version = "0.16.2" }
 
 [features]
 cuda = ["solana/cuda"]
-erasure = []

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.15.0"
+version = "0.16.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,17 +10,20 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 log = "0.4.6"
-rayon = "1.0.3"
+rayon = "1.1.0"
+serde = "1.0.92"
+serde_derive = "1.0.92"
 serde_json = "1.0.39"
-solana = { path = "../core", version = "0.15.0" }
-solana-client = { path = "../client", version = "0.15.0" }
-solana-drone = { path = "../drone", version = "0.15.0" }
-solana-logger = { path = "../logger", version = "0.15.0" }
-solana-metrics = { path = "../metrics", version = "0.15.0" }
-solana-netutil = { path = "../netutil", version = "0.15.0" }
-solana-runtime = { path = "../runtime", version = "0.15.0" }
-solana-sdk = { path = "../sdk", version = "0.15.0" }
+serde_yaml = "0.8.9"
+solana = { path = "../core", version = "0.16.2" }
+solana-client = { path = "../client", version = "0.16.2" }
+solana-drone = { path = "../drone", version = "0.16.2" }
+solana-logger = { path = "../logger", version = "0.16.2" }
+solana-metrics = { path = "../metrics", version = "0.16.2" }
+solana-netutil = { path = "../netutil", version = "0.16.2" }
+solana-runtime = { path = "../runtime", version = "0.16.2" }
+solana-sdk = { path = "../sdk", version = "0.16.2" }
 
 [features]
 cuda = ["solana/cuda"]
-erasure = []

@@ -17,7 +17,6 @@ use solana_sdk::transaction::Transaction;
 use std::cmp;
 use std::collections::VecDeque;
 use std::net::SocketAddr;
-use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::sleep;
@@ -25,8 +24,15 @@ use std::thread::Builder;
 use std::time::Duration;
 use std::time::Instant;
 
-pub const MAX_SPENDS_PER_TX: usize = 4;
-pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 20;
+pub const MAX_SPENDS_PER_TX: u64 = 4;
+pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
+
+#[derive(Debug)]
+pub enum BenchTpsError {
+    AirdropFailure,
+}
+
+pub type Result<T> = std::result::Result<T, BenchTpsError>;
 
 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
 
@@ -335,8 +341,13 @@ fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64)
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Keypair], lamports: u64) {
-    let total = lamports * dests.len() as u64;
+pub fn fund_keys<T: Client>(
+    client: &T,
+    source: &Keypair,
+    dests: &[Keypair],
+    total: u64,
+    lamports_per_signature: u64,
+) {
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Keypair> = dests.iter().collect();
 
@@ -346,12 +357,12 @@ pub fn fund_keys<T: Client>(
         let mut to_fund = vec![];
         println!("creating from... {}", funded.len());
         for f in &mut funded {
-            let max_units = cmp::min(notfunded.len(), MAX_SPENDS_PER_TX);
+            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
             if max_units == 0 {
                 break;
             }
-            let start = notfunded.len() - max_units;
-            let per_unit = f.1 / (max_units as u64);
+            let start = notfunded.len() - max_units as usize;
+            let per_unit = (f.1 - max_units * lamports_per_signature) / max_units;
             let moves: Vec<_> = notfunded[start..]
                 .iter()
                 .map(|k| (k.pubkey(), per_unit))
@@ -442,7 +453,7 @@ pub fn airdrop_lamports<T: Client>(
     drone_addr: &SocketAddr,
     id: &Keypair,
     tx_count: u64,
-) {
+) -> Result<()> {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     println!("starting balance {}", starting_balance);
@@ -491,9 +502,10 @@ pub fn airdrop_lamports<T: Client>(
                 current_balance,
                 starting_balance
             );
-            exit(1);
+            return Err(BenchTpsError::AirdropFailure);
         }
     }
+    Ok(())
 }
 
 fn compute_and_report_stats(
@@ -570,19 +582,16 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
     i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
 }
 
-pub fn generate_keypairs(seed_keypair: &Keypair, count: usize) -> Vec<Keypair> {
+pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
     let mut rnd = GenKeys::new(seed);
 
-    let mut total_keys = 0;
-    let mut target = count;
-    while target > 1 {
-        total_keys += target;
-        // Use the upper bound for this division otherwise it may not generate enough keys
-        target = (target + MAX_SPENDS_PER_TX - 1) / MAX_SPENDS_PER_TX;
+    let mut total_keys = 1;
+    while total_keys < count {
+        total_keys *= MAX_SPENDS_PER_TX;
     }
-    rnd.gen_n_keypairs(total_keys as u64)
+    rnd.gen_n_keypairs(total_keys)
 }
 
 pub fn generate_and_fund_keypairs<T: Client>(
@@ -591,9 +600,9 @@ pub fn generate_and_fund_keypairs<T: Client>(
     funding_pubkey: &Keypair,
     tx_count: usize,
     lamports_per_account: u64,
-) -> (Vec<Keypair>, u64) {
+) -> Result<(Vec<Keypair>, u64)> {
     info!("Creating {} keypairs...", tx_count * 2);
-    let mut keypairs = generate_keypairs(funding_pubkey, tx_count * 2);
+    let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
 
     info!("Get lamports...");
 
@@ -604,19 +613,27 @@ pub fn generate_and_fund_keypairs<T: Client>(
         .unwrap_or(0);
 
     if lamports_per_account > last_keypair_balance {
-        let extra = lamports_per_account - last_keypair_balance;
+        let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
+        let extra =
+            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
         let total = extra * (keypairs.len() as u64);
         if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total);
+            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
        }
         info!("adding more lamports {}", extra);
-        fund_keys(client, funding_pubkey, &keypairs, extra);
+        fund_keys(
+            client,
+            funding_pubkey,
+            &keypairs,
+            total,
+            fee_calculator.max_lamports_per_signature,
+        );
     }
 
     // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
     keypairs.truncate(2 * tx_count);
 
-    (keypairs, last_keypair_balance)
+    Ok((keypairs, last_keypair_balance))
 }
 
 #[cfg(test)]
@@ -651,12 +668,11 @@ mod tests {
     #[test]
     fn test_bench_tps_local_cluster() {
         solana_logger::setup();
-        let validator_config = ValidatorConfig::default();
         const NUM_NODES: usize = 1;
         let cluster = LocalCluster::new(&ClusterConfig {
             node_stakes: vec![999_990; NUM_NODES],
             cluster_lamports: 2_000_000,
-            validator_config,
+            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
             ..ClusterConfig::default()
         });
 
@@ -683,7 +699,8 @@ mod tests {
             &config.id,
             config.tx_count,
             lamports_per_account,
-        );
+        )
+        .unwrap();
 
         let total = do_bench_tps(vec![client], config, keypairs, 0);
         assert!(total > 100);
@@ -701,7 +718,7 @@ mod tests {
         config.duration = Duration::from_secs(5);
 
         let (keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20);
+            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20).unwrap();
 
         do_bench_tps(clients, config, keypairs, 0);
     }
@@ -715,11 +732,10 @@ mod tests {
         let lamports = 20;
 
         let (keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports);
+            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
 
         for kp in &keypairs {
-            // TODO: This should be >= lamports, but fails at the moment
-            assert_ne!(client.get_balance(&kp.pubkey()).unwrap(), 0);
+            assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports);
         }
     }
 }

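The reworked `generate_keypairs` above no longer sums per-round targets; it simply grows the key count by factors of `MAX_SPENDS_PER_TX` until it covers the request, which is why `generate_and_fund_keypairs` truncates the result afterwards. A small sketch of that rounding rule, with the constant value taken from the diff and everything else illustrative:

```rust
// Sketch of the key-count rounding used by the new generate_keypairs loop.
// MAX_SPENDS_PER_TX = 4 comes from the diff; keypair generation itself is elided.
const MAX_SPENDS_PER_TX: u64 = 4;

fn keys_to_generate(count: u64) -> u64 {
    let mut total_keys = 1;
    while total_keys < count {
        total_keys *= MAX_SPENDS_PER_TX;
    }
    total_keys
}

fn main() {
    // Requesting 1000 keys (tx_count = 500, doubled) rounds up to 4^5 = 1024,
    // so callers can expect up to MAX_SPENDS_PER_TX-1 extra keys per level.
    assert_eq!(keys_to_generate(1000), 1024);
    assert_eq!(keys_to_generate(4), 4);
    println!("ok");
}
```
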
@@ -4,6 +4,7 @@ use std::time::Duration;
 
 use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
 use solana_drone::drone::DRONE_PORT;
+use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
 
 /// Holds the configuration for a single run of the benchmark
@@ -17,6 +18,10 @@ pub struct Config {
     pub tx_count: usize,
     pub thread_batch_sleep_ms: usize,
     pub sustained: bool,
+    pub client_ids_and_stake_file: String,
+    pub write_to_client_file: bool,
+    pub read_from_client_file: bool,
+    pub target_lamports_per_signature: u64,
 }
 
 impl Default for Config {
@@ -31,6 +36,10 @@ impl Default for Config {
             tx_count: 500_000,
             thread_batch_sleep_ms: 0,
             sustained: false,
+            client_ids_and_stake_file: String::new(),
+            write_to_client_file: false,
+            read_from_client_file: false,
+            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
         }
     }
 }
@@ -106,6 +115,30 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true)
                 .help("Per-thread-per-iteration sleep in ms"),
         )
+        .arg(
+            Arg::with_name("write-client-keys")
+                .long("write-client-keys")
+                .value_name("FILENAME")
+                .takes_value(true)
+                .help("Generate client keys and stakes and write the list to YAML file"),
+        )
+        .arg(
+            Arg::with_name("read-client-keys")
+                .long("read-client-keys")
+                .value_name("FILENAME")
+                .takes_value(true)
+                .help("Read client keys and stakes from the YAML file"),
+        )
+        .arg(
+            Arg::with_name("target_lamports_per_signature")
+                .long("target-lamports-per-signature")
+                .value_name("LAMPORTS")
+                .takes_value(true)
+                .help(
+                    "The cost in lamports that the cluster will charge for signature \
+                     verification when the cluster is operating at target-signatures-per-slot",
+                ),
+        )
 }
 
 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -163,5 +196,20 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
 
     args.sustained = matches.is_present("sustained");
 
+    if let Some(s) = matches.value_of("write-client-keys") {
+        args.write_to_client_file = true;
+        args.client_ids_and_stake_file = s.to_string();
+    }
+
+    if let Some(s) = matches.value_of("read-client-keys") {
+        assert!(!args.write_to_client_file);
+        args.read_from_client_file = true;
+        args.client_ids_and_stake_file = s.to_string();
+    }
+
+    if let Some(v) = matches.value_of("target_lamports_per_signature") {
+        args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
+    }
+
     args
 }

@@ -1,10 +1,21 @@
 mod bench;
 mod cli;

-use crate::bench::{do_bench_tps, generate_and_fund_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT};
-use solana::gossip_service::{discover_cluster, get_clients};
+use crate::bench::{
+    do_bench_tps, generate_and_fund_keypairs, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
+};
+use solana::gossip_service::{discover_cluster, get_multi_client};
+use solana_sdk::fee_calculator::FeeCalculator;
+use solana_sdk::signature::Keypair;
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::Path;
 use std::process::exit;

+/// Number of signatures for all transactions in ~1 week at ~100K TPS
+pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
+
 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-tps");
@@ -22,15 +33,44 @@ fn main() {
         tx_count,
         thread_batch_sleep_ms,
         sustained,
+        client_ids_and_stake_file,
+        write_to_client_file,
+        read_from_client_file,
+        target_lamports_per_signature,
     } = cli_config;

+    if write_to_client_file {
+        let keypairs = generate_keypairs(&id, tx_count as u64 * 2);
+        let num_accounts = keypairs.len() as u64;
+        let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
+        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
+            / num_accounts
+            + NUM_LAMPORTS_PER_ACCOUNT;
+        let mut accounts = HashMap::new();
+        keypairs.iter().for_each(|keypair| {
+            accounts.insert(
+                serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
+                num_lamports_per_account,
+            );
+        });
+
+        let serialized = serde_yaml::to_string(&accounts).unwrap();
+        let path = Path::new(&client_ids_and_stake_file);
+        let mut file = File::create(path).unwrap();
+        file.write_all(&serialized.into_bytes()).unwrap();
+        return;
+    }
+
     println!("Connecting to the cluster");
     let (nodes, _replicators) =
         discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|err| {
             eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
             exit(1);
         });
-    if nodes.len() < num_nodes {
+
+    let (client, num_clients) = get_multi_client(&nodes);
+
+    if nodes.len() < num_clients {
         eprintln!(
             "Error: Insufficient nodes discovered. Expecting {} or more",
             num_nodes
@@ -38,15 +78,33 @@ fn main() {
         exit(1);
     }

-    let clients = get_clients(&nodes);
-
-    let (keypairs, keypair_balance) = generate_and_fund_keypairs(
-        &clients[0],
-        Some(drone_addr),
-        &id,
-        tx_count,
-        NUM_LAMPORTS_PER_ACCOUNT,
-    );
+    let (keypairs, keypair_balance) = if read_from_client_file {
+        let path = Path::new(&client_ids_and_stake_file);
+        let file = File::open(path).unwrap();
+
+        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
+        let mut keypairs = vec![];
+        let mut last_balance = 0;
+
+        accounts.into_iter().for_each(|(keypair, balance)| {
+            let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
+            keypairs.push(Keypair::from_bytes(&bytes).unwrap());
+            last_balance = balance;
+        });
+        (keypairs, last_balance)
+    } else {
+        generate_and_fund_keypairs(
+            &client,
+            Some(drone_addr),
+            &id,
+            tx_count,
+            NUM_LAMPORTS_PER_ACCOUNT,
+        )
+        .unwrap_or_else(|e| {
+            eprintln!("Error could not fund keys: {:?}", e);
+            exit(1);
+        })
+    };

     let config = Config {
         id,
@@ -57,5 +115,5 @@ fn main() {
         sustained,
     };

-    do_bench_tps(clients, config, keypairs, keypair_balance);
+    do_bench_tps(vec![client], config, keypairs, keypair_balance);
 }
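The per-account funding computed in the `write_to_client_file` branch above is easier to follow with concrete numbers. The following is a minimal, standalone sketch of the same arithmetic, assuming illustrative values for the transaction count, the maximum per-signature fee, and the base per-account balance (the real values come from the CLI config, `FeeCalculator`, and `bench.rs`):

```rust
fn main() {
    // Same constant as in main.rs: ~1 week of signatures at ~100K TPS.
    const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
    // Illustrative stand-ins; not the values used by the real binary.
    const NUM_LAMPORTS_PER_ACCOUNT: u64 = 20;
    let tx_count: u64 = 10_000; // hypothetical --tx_count
    let max_fee: u64 = 42; // hypothetical max lamports charged per signature

    // generate_keypairs(&id, tx_count * 2) produces two keypairs per transaction.
    let num_accounts = tx_count * 2;
    // Spread the worst-case fee burden across all accounts (rounding up),
    // then add the base working balance each account needs.
    let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
        / num_accounts
        + NUM_LAMPORTS_PER_ACCOUNT;
    println!(
        "fund each of {} accounts with {} lamports",
        num_accounts, num_lamports_per_account
    );
}
```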
@@ -1,7 +1,8 @@
 BOB_SRCS=$(wildcard art/*.bob)
+MSC_SRCS=$(wildcard art/*.msc)
 MD_SRCS=$(wildcard src/*.md)

-SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg)
+SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg) $(MSC_SRCS:art/%.msc=src/img/%.svg)

 all: html/index.html

@@ -17,6 +18,10 @@ src/img/%.svg: art/%.bob
 	@mkdir -p $(@D)
 	svgbob < $< > $@

+src/img/%.svg: art/%.msc
+	@mkdir -p $(@D)
+	mscgen -T svg -i $< -o $@
+
 src/%.md: %.md
 	@mkdir -p $(@D)
 	@cp $< $@
@@ -5,6 +5,8 @@
 - [Terminology](terminology.md)

 - [Getting Started](getting-started.md)
+  - [Testnet Participation](testnet-participation.md)
+  - [Testnet Replicator](testnet-replicator.md)
   - [Example: Web Wallet](webwallet.md)

 - [Programming Model](programs.md)
@@ -16,10 +18,10 @@
   - [Leader Rotation](leader-rotation.md)
   - [Fork Generation](fork-generation.md)
   - [Managing Forks](managing-forks.md)
-  - [Data Plane Fanout](data-plane-fanout.md)
+  - [Turbine Block Propagation](turbine-block-propagation.md)
   - [Ledger Replication](ledger-replication.md)
   - [Secure Vote Signing](vote-signing.md)
-  - [Staking Delegation and Rewards](stake-delegation-and-rewards.md)
+  - [Stake Delegation and Rewards](stake-delegation-and-rewards.md)
   - [Performance Metrics](performance-metrics.md)

 - [Anatomy of a Validator](validator.md)
@@ -39,7 +41,6 @@
   - [Ledger Replication](ledger-replication-to-implement.md)
   - [Secure Vote Signing](vote-signing-to-implement.md)
   - [Staking Rewards](staking-rewards.md)
-  - [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
   - [Cluster Economics](ed_overview.md)
   - [Validation-client Economics](ed_validation_client_economics.md)
   - [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md)
@@ -55,15 +56,17 @@
   - [References](ed_references.md)
   - [Cluster Test Framework](cluster-test-framework.md)
   - [Credit-only Accounts](credit-only-credit-debit-accounts.md)
-  - [Deterministic Transaction Fees](transaction-fees.md)
   - [Validator](validator-proposal.md)

 - [Implemented Design Proposals](implemented-proposals.md)
+  - [Blocktree](blocktree.md)
+  - [Cluster Software Installation and Updates](installer.md)
+  - [Deterministic Transaction Fees](transaction-fees.md)
   - [Fork Selection](fork-selection.md)
   - [Leader-to-Leader Transition](leader-leader-transition.md)
   - [Leader-to-Validator Transition](leader-validator-transition.md)
-  - [Testnet Participation](testnet-participation.md)
-  - [Testing Programs](testing-programs.md)
-  - [Reliable Vote Transmission](reliable-vote-transmission.md)
+  - [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
   - [Persistent Account Storage](persistent-account-storage.md)
-  - [Cluster Software Installation and Updates](installer.md)
+  - [Reliable Vote Transmission](reliable-vote-transmission.md)
+  - [Repair Service](repair-service.md)
+  - [Testing Programs](testing-programs.md)
@@ -161,7 +161,7 @@ This will dump all the threads stack traces into gdb.txt
 In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.

 ```bash
-$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --duration 60
+$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
 ```

 You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
@@ -12,18 +12,18 @@ updates is managed using an on-chain update manifest program.
 #### Fetch and run a pre-built installer using a bootstrap curl/shell script
 The easiest install method for supported platforms:
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh
 ```

 This script will check github for the latest tagged release and download and run the
-`solana-install` binary from there.
+`solana-install-init` binary from there.


 If additional arguments need to be specified during the installation, the
 following shell syntax is used:
 ```bash
-$ init_args=.... # arguments for `solana-installer init ...`
+$ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s - ${init_args}
 ```

 #### Fetch and run a pre-built installer from a Github release
@@ -31,9 +31,9 @@ With a well-known release URL, a pre-built binary can be obtained for supported
 platforms:

 ```bash
-$ curl -o solana-install https://github.com/solana-labs/solana/releases/download/v0.13.0/solana-install-x86_64-apple-darwin
+$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.16.0/solana-install-init-x86_64-apple-darwin
-$ chmod +x ./solana-install
+$ chmod +x ./solana-install-init
-$ ./solana-install --help
+$ ./solana-install-init --help
 ```

 #### Build and run the installer from source
@@ -49,7 +49,7 @@ $ cargo run -- --help
 Given a solana release tarball (as created by `ci/publish-tarball.sh`) that has already been uploaded to a publicly accessible URL,
 the following commands will deploy the update:
 ```bash
-$ solana-keygen -o update-manifest.json # <-- only generated once, the public key is shared with users
+$ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users
 $ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json
 ```

@@ -119,7 +119,7 @@ It manages the following files and directories in the user's home directory:

 #### Command-line Interface
 ```manpage
-solana-install 0.13.0
+solana-install 0.16.0
 The solana cluster software installer

 USAGE:
@@ -25,11 +25,13 @@ Methods
 * [getAccountInfo](#getaccountinfo)
 * [getBalance](#getbalance)
 * [getClusterNodes](#getclusternodes)
+* [getProgramAccounts](#getprogramaccounts)
 * [getRecentBlockhash](#getrecentblockhash)
 * [getSignatureStatus](#getsignaturestatus)
 * [getSlotLeader](#getslotleader)
 * [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
 * [getTransactionCount](#gettransactioncount)
+* [getTotalSupply](#gettotalsupply)
 * [getEpochVoteAccounts](#getepochvoteaccounts)
 * [requestAirdrop](#requestairdrop)
 * [sendTransaction](#sendtransaction)
@@ -95,6 +97,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
 {"jsonrpc":"2.0","result":true,"id":1}
 ```

+---
+
+### getAccountInfo
+Returns all information associated with the account of provided Pubkey
+
+##### Parameters:
+* `string` - Pubkey of account to query, as base-58 encoded string
+
+##### Results:
+The result field will be a JSON object with the following sub fields:
+
+* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
+* `owner`, array of 32 bytes representing the program this account has been assigned to
+* `data`, array of bytes representing any data associated with the account
+* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
+
+##### Example:
+```bash
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+```
+
 ---

 ### getBalance
@@ -125,7 +153,7 @@ None

 ##### Results:
 The result field will be an array of JSON objects, each with the following sub fields:
-* `id` - Node identifier, as base-58 encoded string
+* `pubkey` - Node public key, as base-58 encoded string
 * `gossip` - Gossip network address for the node
 * `tpu` - TPU network address for the node
 * `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
@@ -136,33 +164,34 @@ The result field will be an array of JSON objects, each with the following sub f
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","id":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
+{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
 ```

 ---

-### getAccountInfo
-Returns all information associated with the account of provided Pubkey
+### getProgramAccounts
+Returns all accounts owned by the provided program Pubkey

 ##### Parameters:
-* `string` - Pubkey of account to query, as base-58 encoded string
+* `string` - Pubkey of program, as base-58 encoded string

 ##### Results:
-The result field will be a JSON object with the following sub fields:
+The result field will be an array of arrays. Each sub array will contain:
+* `string` - the account Pubkey, as base-58 encoded string
+and a JSON object, with the following sub fields:

 * `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
 * `owner`, array of 32 bytes representing the program this account has been assigned to
 * `data`, array of bytes representing any data associated with the account
 * `executable`, boolean indicating if the account contains a program (and is strictly read-only)
-* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all

 ##### Example:
 ```bash
 // Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]}], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]}]],"id":1}
 ```

 ---
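Since the `getProgramAccounts` result is an array of `[pubkey, account]` pairs rather than a plain object, a client has to unpack it in two steps. The following is a small illustrative sketch of doing that with `serde_json`; the field names follow the documentation above, but the snippet is not part of any shipped RPC client, and the response literal is a simplified, well-formed version of the example:

```rust
use serde_json::Value;

fn main() {
    // A response shaped like the getProgramAccounts example above (owner array shortened).
    let body = r#"{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383",
        {"executable":false,"owner":[0],"lamports":1,"data":[]}]],"id":1}"#;
    let response: Value = serde_json::from_str(body).unwrap();
    // Each entry is [pubkey, account-object].
    for pair in response["result"].as_array().unwrap() {
        let pubkey = pair[0].as_str().unwrap();
        let lamports = pair[1]["lamports"].as_u64().unwrap();
        println!("{}: {} lamports", pubkey, lamports);
    }
}
```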
@@ -275,6 +304,26 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m

 ---

+### getTotalSupply
+Returns the current total supply in Lamports
+
+##### Parameters:
+None
+
+##### Results:
+* `integer` - Total supply, as unsigned 64-bit integer
+
+##### Example:
+```bash
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":10126,"id":1}
+```
+
+---
+
 ### getEpochVoteAccounts
 Returns the account info and associated stake for all the voting accounts in the current epoch.

@@ -282,19 +331,11 @@ Returns the account info and associated stake for all the voting accounts in the
 None

 ##### Results:
-An array consisting of vote accounts:
-* `string` - the vote account's Pubkey as base-58 encoded string
-* `integer` - the stake, in lamports, delegated to this vote account
-* `VoteState` - the vote account's state
-
-Each VoteState will be a JSON object with the following sub fields:
-
-* `votes`, array of most recent vote lockouts
-* `node_pubkey`, the pubkey of the node that votes using this account
-* `authorized_voter_pubkey`, the pubkey of the authorized vote signer for this account
+The result field will be an array of JSON objects, each with the following sub fields:
+* `votePubkey` - Vote account public key, as base-58 encoded string
+* `nodePubkey` - Node public key, as base-58 encoded string
+* `stake` - the stake, in lamports, delegated to this vote account
 * `commission`, a 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
-* `root_slot`, the most recent slot this account has achieved maximum lockout
-* `credits`, credits accrued by this account for reaching lockouts

 ##### Example:
 ```bash
@@ -302,7 +343,7 @@ Each VoteState will be a JSON object with the following sub fields:
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochVoteAccounts"}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":[[[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],10000000,{"authorized_voter_pubkey":[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],"commission":0,"credits":0,"node_pubkey":[49,139,227,211,47,39,69,86,131,244,160,144,228,169,84,143,142,253,83,81,212,110,254,12,242,71,219,135,30,60,157,213],"root_slot":null,"votes":[{"confirmation_count":1,"slot":0}]}]],"id":1}
+{"jsonrpc":"2.0","result":[{"commission":0,"nodePubkey":"Et2RaZJdJRTzTkodUwiHr4H6sLkVmijBFv8tkd7oSSFY","stake":42,"votePubkey":"B4CdWq3NBSoH2wYsVE1CaZSWPo2ZtopE4SJipQhZ3srF"}],"id":1}
 ```

 ---

@@ -389,7 +430,7 @@ for a given account public key changes

 ##### Notification Format:
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```

 ---
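The reshaped `getEpochVoteAccounts` response makes simple aggregations straightforward. As an illustrative sketch (not part of any shipped client), the total stake delegated to the epoch's vote accounts can be computed by summing the `stake` field of each entry:

```rust
use serde_json::Value;

fn main() {
    // A response shaped like the getEpochVoteAccounts example above.
    let body = r#"{"jsonrpc":"2.0","result":[{"commission":0,
        "nodePubkey":"Et2RaZJdJRTzTkodUwiHr4H6sLkVmijBFv8tkd7oSSFY","stake":42,
        "votePubkey":"B4CdWq3NBSoH2wYsVE1CaZSWPo2ZtopE4SJipQhZ3srF"}],"id":1}"#;
    let response: Value = serde_json::from_str(body).unwrap();
    let total_stake: u64 = response["result"]
        .as_array()
        .unwrap()
        .iter()
        .map(|account| account["stake"].as_u64().unwrap())
        .sum();
    println!("total delegated stake: {} lamports", total_stake);
}
```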
@@ -45,7 +45,7 @@ The upsides compared to guards:
 * The timeout is not fixed.

 * The timeout is local to the leader, and therefore can be clever. The leader's
-heuristic can take into account avalanche performance.
+heuristic can take into account turbine performance.

 * This design doesn't require a ledger hard fork to update.
@@ -1,19 +1,18 @@
 # Ledger Replication

 At full capacity on a 1gbps network solana will generate 4 petabytes of data
-per year. To prevent the network from centralizing around full nodes that have
+per year. To prevent the network from centralizing around validators that have
 to store the full data set this protocol proposes a way for mining nodes to
-provide storage capacity for pieces of the network.
+provide storage capacity for pieces of the data.

 The basic idea to Proof of Replication is encrypting a dataset with a public
 symmetric key using CBC encryption, then hash the encrypted dataset. The main
 problem with the naive approach is that a dishonest storage node can stream the
-encryption and delete the data as its hashed. The simple solution is to force
-the hash to be done on the reverse of the encryption, or perhaps with a random
-order. This ensures that all the data is present during the generation of the
-proof and it also requires the validator to have the entirety of the encrypted
-data present for verification of every proof of every identity. So the space
-required to validate is `number_of_proofs * data_size`
+encryption and delete the data as it's hashed. The simple solution is to periodically
+regenerate the hash based on a signed PoH value. This ensures that all the data is present
+during the generation of the proof and it also requires validators to have the
+entirety of the encrypted data present for verification of every proof of every identity.
+So the space required to validate is `number_of_proofs * data_size`

 ## Optimization with PoH

@@ -29,13 +28,12 @@ core. The total space required for verification is `1_ledger_segment +
 ## Network

 Validators for PoRep are the same validators that are verifying transactions.
-They have some stake that they have put up as collateral that ensures that
-their work is honest. If you can prove that a validator verified a fake PoRep,
-then the validator will not receive a reward for that storage epoch.
+If a replicator can prove that a validator verified a fake PoRep, then the
+validator will not receive a reward for that storage epoch.

-Replicators are specialized *light clients*. They download a part of the ledger
-and store it, and provide PoReps of storing the ledger. For each verified PoRep
-replicators earn a reward of sol from the mining pool.
+Replicators are specialized *light clients*. They download a part of the
+ledger (a.k.a Segment) and store it, and provide PoReps of storing the ledger.
+For each verified PoRep replicators earn a reward of sol from the mining pool.

 ## Constraints

@@ -55,9 +53,8 @@ changes to determine what rate it can validate storage proofs.

 1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The
 unit of storage for a replicator.
-2. NUM\_KEY\_ROTATION\_TICKS: Number of ticks to save a PoH value and cause a
-key generation for the section of ledger just generated and the rotation of
-another key in the set.
+2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which replicators
+regenerate their encryption keys and select a new dataset to store.
 3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof
 claim to be successfully rewarded.
 4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage
@@ -66,36 +63,40 @@ mining proof claim has to contain to be valid for a reward.
 proof.
 6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate
 encrypted state.
+7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or
+a "turn" of the PoRep game.

 ### Validator behavior

-1. Validator joins the network and submits a storage validation capacity
-transaction which tells the network how many proofs it can process in a given
-period defined by NUM\_KEY\_ROTATION\_TICKS.
-2. Every NUM\_KEY\_ROTATION\_TICKS the validator stores the PoH value at that
-height.
-3. Validator generates a storage proof confirmation transaction.
-4. The storage proof confirmation transaction is integrated into the ledger.
-6. Validator responds to RPC interfaces for what the last storage epoch PoH
-value is and its slot.
+1. Validators join the network and begin looking for replicator accounts at each
+storage epoch/turn boundary.
+2. Every turn, Validators sign the PoH value at the boundary and use that signature
+to randomly pick proofs to verify from each storage account found in the turn boundary.
+This signed value is also submitted to the validator's storage account and will be used by
+replicators at a later stage to cross-verify.
+3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value
+is also served to Replicators via RPC interfaces.
+4. For a given turn N, all validations get locked out until turn N+3 (a gap of 2 turn/epoch).
+At which point all validations during that turn are available for reward collection.
+5. Any incorrect validations will be marked during the turn in between.

 ### Replicator behavior

 1. Since a replicator is somewhat of a light client and not downloading all the
-ledger data, they have to rely on other full nodes (validators) for
-information. Any given validator may or may not be malicious and give incorrect
-information, although there are not any obvious attack vectors that this could
-accomplish besides having the replicator do extra wasted work. For many of the
-operations there are a number of options depending on how paranoid a replicator
-is:
+ledger data, they have to rely on other validators and replicators for information.
+Any given validator may or may not be malicious and give incorrect information, although
+there are not any obvious attack vectors that this could accomplish besides having the
+replicator do extra wasted work. For many of the operations there are a number of options
+depending on how paranoid a replicator is:
   - (a) replicator can ask a validator
   - (b) replicator can ask multiple validators
-  - (c) replicator can subscribe to the full transaction stream and generate
-  the information itself
-  - (d) replicator can subscribe to an abbreviated transaction stream to
-  generate the information itself
-2. A replicator obtains the PoH hash corresponding to the last key rotation
-along with its slot.
+  - (c) replicator can ask other replicators
+  - (d) replicator can subscribe to the full transaction stream and generate
+  the information itself (assuming the slot is recent enough)
+  - (e) replicator can subscribe to an abbreviated transaction stream to
+  generate the information itself (assuming the slot is recent enough)
+2. A replicator obtains the PoH hash corresponding to the last turn with its slot.
 3. The replicator signs the PoH hash with its keypair. That signature is the
 seed used to pick the segment to replicate and also the encryption key. The
 replicator mods the signature with the slot to get which segment to
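To make the turn and lockout schedule described in the validator behavior above concrete, here is a minimal sketch of the bookkeeping it implies. The three-turn gap comes from the text; the value of `NUM_SLOTS_PER_TURN` and the function names are hypothetical and only illustrate the arithmetic:

```rust
// Hypothetical value; the text defines the constant's role, not its size.
const NUM_SLOTS_PER_TURN: u64 = 1024;

/// Which turn of the PoRep game a slot belongs to.
fn turn_for_slot(slot: u64) -> u64 {
    slot / NUM_SLOTS_PER_TURN
}

/// Validations performed during turn N are locked until turn N + 3,
/// at which point they become available for reward collection.
fn reward_turn(validation_turn: u64) -> u64 {
    validation_turn + 3
}

fn main() {
    let slot = 5000;
    let turn = turn_for_slot(slot);
    println!(
        "slot {} is in turn {}; rewards collectable in turn {}",
        slot,
        turn,
        reward_turn(turn)
    );
}
```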
@@ -103,38 +104,67 @@ replicate.
 4. The replicator retrieves the ledger by asking peer validators and
 replicators. See 6.5.
 5. The replicator then encrypts that segment with the key with chacha algorithm
-in CBC mode with NUM\_CHACHA\_ROUNDS of encryption.
-6. The replicator initializes a chacha rng with the signature from step 2 as
+in CBC mode with `NUM_CHACHA_ROUNDS` of encryption.
+6. The replicator initializes a chacha rng with a signed recent PoH value as
 the seed.
-7. The replicator generates NUM\_STORAGE\_SAMPLES samples in the range of the
+7. The replicator generates `NUM_STORAGE_SAMPLES` samples in the range of the
 entry size and samples the encrypted segment with sha256 for 32-bytes at each
 offset value. Sampling the state should be faster than generating the encrypted
 segment.
 8. The replicator sends a PoRep proof transaction which contains its sha state
 at the end of the sampling operation, its seed and the samples it used to the
 current leader and it is put onto the ledger.
+9. During a given turn the replicator should submit many proofs for the same segment
+and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake.
+10. As the PoRep game enters the next turn, the replicator must submit a
+transaction with the mask of which proofs were fake during the last turn. This
+transaction will define the rewards for both replicators and validators.
+11. Finally for a turn N, as the PoRep game enters turn N + 3, replicator's proofs for
+turn N will be counted towards their rewards.
+
+### The PoRep Game
+
+The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep
+games can be in progress but each in a different stage.
+
+The 4 stages of the PoRep Game are as follows:
+
+1. Proof submission stage
+  - Replicators: submit as many proofs as possible during this stage
+  - Validators: No-op
+2. Proof verification stage
+  - Replicators: No-op
+  - Validators: Select replicators and verify their proofs from the previous turn
+3. Proof challenge stage
+  - Replicators: Submit the proof mask with justifications (for fake proofs submitted 2 turns ago)
+  - Validators: No-op
+4. Reward collection stage
+  - Replicators: Collect rewards for 3 turns ago
+  - Validators: Collect rewards for 3 turns ago
+
+For each turn of the PoRep game, both Validators and Replicators evaluate each
+stage. The stages are run as separate transactions on the storage program.

 ### Finding who has a given block of ledger

-1. Validators monitor the transaction stream for storage mining proofs, and
-keep a mapping of ledger segments by slot to public keys. When it sees
-a storage mining proof it updates this mapping and provides an RPC interface
-which takes a slot and hands back a list of public keys. The client
-then looks up in their cluster\_info table to see which network address that
-corresponds to and sends a repair request to retrieve the necessary blocks of
-ledger.
-2. Validators would need to prune this list which it could do by periodically
-looking at the oldest entries in its mappings and doing a network query to see
-if the storage host is still serving the first entry.
+1. Validators monitor the turns in the PoRep game and look at the rooted bank
+at turn boundaries for any proofs.
+2. Validators maintain a map of ledger segments and corresponding replicator public keys.
+The map is updated when a Validator processes a replicator's proofs for a segment.
+The validator provides an RPC interface to access this map. Using this API, clients
+can map a segment to a replicator's network address (correlating it via cluster_info table).
+The clients can then send repair requests to the replicator to retrieve segments.
+3. Validators would need to invalidate this list every N turns.

 ## Sybil attacks

 For any random seed, we force everyone to use a signature that is derived from
-a PoH hash. Everyone must use the same count, so the same PoH hash is signed by
-every participant. The signatures are then each cryptographically tied to the
-keypair, which prevents a leader from grinding on the resulting value for more
-than 1 identity.
+a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH
+hash is signed by every participant. The signatures are then each cryptographically
+tied to the keypair, which prevents a leader from grinding on the resulting
+value for more than 1 identity.

 Since there are many more client identities than encryption identities, we need
 to split the reward for multiple clients, and prevent Sybil attacks from
@@ -155,8 +185,7 @@ the network can reward long lived client identities more than new ones.
 showing the initial state for the hash.
 - If a validator marks real proofs as fake, no on-chain computation can be done
 to distinguish who is correct. Rewards would have to rely on the results from
-multiple validators in a stake-weighted fashion to catch bad actors and
-replicators from being locked out of the network.
+multiple validators to catch bad actors and replicators from being denied rewards.
 - Validator stealing mining proof results for itself. The proofs are derived
 from a signature from a replicator, since the validator does not know the
 private key used to generate the encryption key, it cannot be the generator of
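Step 7 of the replicator behavior above (sampling the encrypted segment with sha256 at rng-chosen offsets) can be sketched as follows. This is an illustrative outline only, assuming the `sha2` and `rand_chacha` crates; the real implementation lives in the replicator and storage program code and differs in detail:

```rust
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
use sha2::{Digest, Sha256};

/// Hash 32-byte windows of the encrypted segment at offsets drawn from a
/// chacha rng seeded with the replicator's signed PoH value (the "seed").
fn sample_segment(encrypted_segment: &[u8], seed: [u8; 32], num_samples: usize) -> Vec<u8> {
    let mut rng = ChaCha20Rng::from_seed(seed);
    let mut hasher = Sha256::new();
    for _ in 0..num_samples {
        // Keep the 32-byte window inside the segment.
        let limit = encrypted_segment.len().saturating_sub(32).max(1);
        let offset = (rng.next_u64() as usize) % limit;
        hasher.update(&encrypted_segment[offset..offset + 32]);
    }
    hasher.finalize().to_vec()
}

fn main() {
    let segment = vec![7u8; 1 << 16]; // stand-in for an encrypted ledger segment
    let state = sample_segment(&segment, [42u8; 32], 16);
    println!("sha state after sampling: {:02x?}", &state[..8]);
}
```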
@@ -76,21 +76,24 @@ this field can only modified by this entity

 ### StakeState

-A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.
+A StakeState takes one of two forms, StakeState::Stake and StakeState::MiningPool.

-### StakeState::Delegate
+### StakeState::Stake

-StakeState is the current delegation preference of the **staker**. StakeState
+Stake is the current delegation preference of the **staker**. Stake
 contains the following state information:

-* Account::lamports - The staked lamports.
-
 * `voter_pubkey` - The pubkey of the VoteState instance the lamports are
 delegated to.

 * `credits_observed` - The total credits claimed over the lifetime of the
 program.

+* `stake` - The actual activated stake.
+
+* Account::lamports - Lamports available for staking, including any earned as rewards.
+
 ### StakeState::MiningPool

 There are two approaches to the mining pool. The bank could allow the
@@ -105,11 +108,12 @@ tokens stored as `Account::lamports`.
 The stakes and the MiningPool are accounts that are owned by the same `Stake`
 program.

-### StakeInstruction::Initialize
+### StakeInstruction::DelegateStake(stake)

-* `account[0]` - RW - The StakeState::Delegate instance.
-`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
-`StakeState::Delegate::voter_pubkey` is initialized to `account[1]`
+* `account[0]` - RW - The StakeState::Stake instance.
+`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`.
+`StakeState::Stake::voter_pubkey` is initialized to `account[1]`
+`StakeState::Stake::stake` is initialized to `stake`, as long as it's less than account[0].lamports

 * `account[1]` - R - The VoteState instance.

@@ -124,7 +128,7 @@ deposited into the StakeState and as validator commission is proportional to

 * `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
 reward.
-* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
+* `account[1]` - RW - The StakeState::Stake instance that is redeeming votes
 credits.
 * `account[2]` - R - The VoteState instance, must be the same as
 `StakeState::voter_pubkey`
@@ -132,7 +136,7 @@ credits.
 Reward is paid out for the difference between `VoteState::credits` to
 `StakeState::Delgate.credits_observed`, and `credits_observed` is updated to
 `VoteState::credits`. The commission is deposited into the `VoteState` token
-balance, and the reward is deposited to the `StakeState::Delegate` token balance. The
+balance, and the reward is deposited to the `StakeState::Stake` token balance. The
 reward and the commission is weighted by the `StakeState::lamports` divided by total lamports staked.

 The Staker or the owner of the Stake program sends a transaction with this
@@ -146,7 +150,7 @@ stake_state.credits_observed = vote_state.credits;
 ```

 `credits_to_claim` is used to compute the reward and commission, and
-`StakeState::Delegate::credits_observed` is updated to the latest
+`StakeState::Stake::credits_observed` is updated to the latest
 `VoteState::credits` value.

 ### Collecting network fees into the MiningPool
@@ -175,13 +179,13 @@ many rewards to be claimed concurrently.

 ## Passive Delegation

-Any number of instances of StakeState::Delegate programs can delegate to a single
+Any number of instances of StakeState::Stake programs can delegate to a single
 VoteState program without an interactive action from the identity controlling
 the VoteState program or submitting votes to the program.

 The total stake allocated to a VoteState program can be calculated by the sum of
 all the StakeState programs that have the VoteState pubkey as the
-`StakeState::Delegate::voter_pubkey`.
+`StakeState::Stake::voter_pubkey`.

 ## Example Callflow
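The reward redemption described above (payout proportional to `VoteState::credits - StakeState::credits_observed`, with the commission taken as a `commission / MAX_U32` fraction) can be summarized in a small sketch. The helper name and the `lamports_per_credit` parameter are hypothetical, introduced only to show the arithmetic, not the actual Stake program code:

```rust
/// Split a redeemed reward between the vote account (commission) and the stake
/// account, following the RedeemVoteCredits description above.
fn split_reward(
    credits: u64,
    credits_observed: u64,
    commission: u32,
    lamports_per_credit: u64,
) -> (u64, u64) {
    let credits_to_claim = credits.saturating_sub(credits_observed);
    let total = credits_to_claim * lamports_per_credit;
    // commission is a fraction of MAX_U32, i.e. the percentage ceiling of the reward.
    let to_vote_account =
        ((total as u128 * commission as u128) / u128::from(u32::max_value())) as u64;
    (to_vote_account, total - to_vote_account)
}

fn main() {
    // Claim 50 new credits at 2 lamports each, with ~10% commission.
    let (commission_lamports, staker_lamports) =
        split_reward(150, 100, u32::max_value() / 10, 2);
    println!(
        "vote account gets {}, stake account gets {}",
        commission_lamports, staker_lamports
    );
}
```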
@@ -35,9 +35,9 @@ The different protocol strategies to address the above challenges:
 * Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
 * If `p.slot` does exist, update the parent of `p` based on `parents`

 Note: that once these empty slots are added to blocktree, the `Blob Repair` protocol should attempt to fill those slots.

 Note: Validators will only accept responses containing blobs within the current verifiable epoch (epoch the validator has a leader schedule for).

 3. Repairmen (Addresses Challenge #3):
 This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every blob that they receive against a known leader schedule.
@ -1,68 +1,195 @@
|
|||||||
# Stake Delegation and Rewards
|
# Stake Delegation and Rewards
|
||||||
|
|
||||||
Stakers are rewarded for helping validate the ledger. They do it by delegating
|
Stakers are rewarded for helping to validate the ledger. They do this by
|
||||||
their stake to fullnodes. Those fullnodes do the legwork and send votes to the
|
delegating their stake to validator nodes. Those validators do the legwork of
|
||||||
stakers' staking accounts. The rest of the cluster uses those stake-weighted
|
replaying the ledger and send votes to a per-node vote account to which stakers
|
||||||
votes to select a block when forks arise. Both the fullnode and staker need
|
can delegate their stakes. The rest of the cluster uses those stake-weighted
|
||||||
some economic incentive to play their part. The fullnode needs to be
|
votes to select a block when forks arise. Both the validator and staker need
|
||||||
compensated for its hardware and the staker needs to be compensated for risking
|
some economic incentive to play their part. The validator needs to be
|
||||||
getting its stake slashed. The economics are covered in [staking
|
compensated for its hardware and the staker needs to be compensated for the risk
|
||||||
|
of getting its stake slashed. The economics are covered in [staking
|
||||||
rewards](staking-rewards.md). This chapter, on the other hand, describes the
|
rewards](staking-rewards.md). This chapter, on the other hand, describes the
|
||||||
underlying mechanics of its implementation.
|
underlying mechanics of its implementation.
|
||||||
|
|
||||||
## Vote and Rewards accounts
|
## Basic Besign
|
||||||
|
|
||||||
The rewards process is split into two on-chain programs. The Vote program
|
The general idea is that the validator owns a Vote account. The Vote account
|
||||||
solves the problem of making stakes slashable. The Rewards account acts as
|
tracks validator votes, counts validator generated credits, and provides any
|
||||||
custodian of the rewards pool. It is responsible for paying out each staker
|
additional validator specific state. The Vote account is not aware of any
|
||||||
once the staker proves to the Rewards program that it participated in
|
stakes delegated to it and has no staking weight.
|
||||||
validating the ledger.
|
|
||||||
|
|
||||||
The Vote account contains the following state information:
|
A separate Stake account (created by a staker) names a Vote account to which the
|
||||||
|
stake is delegated. Rewards generated are proportional to the amount of
|
||||||
|
lamports staked. The Stake account is owned by the staker only. Lamports
|
||||||
|
stored in this account are the stake.
|
||||||
|
|
||||||
* votes - The submitted votes.
|
## Passive Delegation
|
||||||
|
|
||||||
* `delegate_pubkey` - An identity that may operate with the weight of this
|
Any number of Stake accounts can delegate to a single
|
||||||
account's stake. It is typically the identity of a fullnode, but may be any
|
Vote account without an interactive action from the identity controlling
|
||||||
identity involved in stake-weighted computations.
|
the Vote account or submitting votes to the account.
|
||||||
|
|
||||||
* `authorized_voter_pubkey` - Only this identity is authorized to submit votes.
|
The total stake allocated to a Vote account can be calculated by the sum of
|
||||||
|
all the Stake accounts that have the Vote account pubkey as the
|
||||||
|
`StakeState::Delegate::voter_pubkey`.
|
||||||
|
|
||||||
* `credits` - The amount of unclaimed rewards.
|
## Vote and Stake accounts
|
||||||
|
|
||||||
* `root_slot` - The last slot to reach the full lockout commitment necessary
|
The rewards process is split into two on-chain programs. The Vote program solves
|
||||||
for rewards.
|
the problem of making stakes slashable. The Stake account acts as custodian of
|
||||||
|
the rewards pool, and provides passive delegation. The Stake program is
|
||||||
|
responsible for paying out each staker once the staker proves to the Stake
|
||||||
|
program that its delegate has participated in validating the ledger.
|
||||||
|
|
||||||
The Rewards program is stateless and pays out reward when a staker submits its
|
### VoteState
|
||||||
Vote account to the program. Claiming a reward requires a transaction that
|
|
||||||
includes the following instructions:
|
|
||||||
|
|
||||||
1. `RewardsInstruction::RedeemVoteCredits`
|
VoteState is the current state of all the votes the validator has submitted to
|
||||||
2. `VoteInstruction::ClearCredits`
|
the network. VoteState contains the following state information:
|
||||||
|
|
||||||
The Rewards program transfers lamports from the Rewards account to the Vote
|
* votes - The submitted votes data structure.
|
||||||
account's public key. The Rewards program also ensures that the `ClearCredits`
|
|
||||||
instruction follows the `RedeemVoteCredits` instruction, such that a staker may
|
* credits - The total number of rewards this vote program has generated over its
|
||||||
not claim rewards for the same work more than once.
|
lifetime.
|
||||||
|
|
||||||
|
* root\_slot - The last slot to reach the full lockout commitment necessary for
|
||||||
|
rewards.
|
||||||
|
|
||||||
|
* commission - The commission taken by this VoteState for any rewards claimed by
|
||||||
|
staker's Stake accounts. This is the percentage ceiling of the reward.
|
||||||
|
|
||||||
|
* Account::lamports - The accumulated lamports from the commission. These do not
|
||||||
|
count as stakes.
|
||||||
|
|
||||||
|
* `authorized_vote_signer` - Only this identity is authorized to submit votes. This field can only modified by this identity.
|
||||||
|
|
||||||
|
### VoteInstruction::Initialize

* `account[0]` - RW - The VoteState.

`VoteState::authorized_vote_signer` is initialized to `account[0]`; other
VoteState members are defaulted.

### VoteInstruction::AuthorizeVoteSigner(Pubkey)

* `account[0]` - RW - The VoteState.

`VoteState::authorized_vote_signer` is set to `Pubkey`; the instruction must
be signed by `Pubkey`.

### VoteInstruction::Vote(Vec<Vote>)

* `account[0]` - RW - The VoteState.
  `VoteState::lockouts` and `VoteState::credits` are updated according to the
  voting lockout rules; see [Fork Selection](fork-selection.md).

* `account[1]` - RO - A list of some N most recent slots and their hashes for
  the vote to be verified against.
### StakeState

A StakeState takes one of two forms, StakeState::Delegate and
StakeState::MiningPool.

### StakeState::Delegate

StakeState is the current delegation preference of the **staker**. StakeState
contains the following state information:

* `Account::lamports` - The staked lamports.

* `voter_pubkey` - The pubkey of the VoteState instance the lamports are
  delegated to.

* `credits_observed` - The total credits claimed over the lifetime of the
  program.
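As a rough illustration of the two forms, a StakeState could be modeled as an
enum. The variant payload below simply restates the fields listed above and is
an assumption about layout, not the program's actual definition.

```rust,ignore
// Sketch only: the two StakeState forms described above, with the Delegate
// fields from the list. Account::lamports lives on the account itself, not
// in this enum.
type Pubkey = [u8; 32]; // stand-in for the SDK pubkey type (assumption)

pub enum StakeState {
    Delegate {
        /// Pubkey of the VoteState instance the lamports are delegated to.
        voter_pubkey: Pubkey,
        /// Total credits claimed over the lifetime of the program.
        credits_observed: u64,
    },
    MiningPool,
}
```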
### StakeState::MiningPool

There are two approaches to the mining pool. The bank could allow the
StakeState program to bypass the token balance check, or a program
representing the mining pool could run on the network. To avoid a single
network-wide lock, the pool can be split into several mining pools. This
design focuses on using StakeState::MiningPool instances as the cluster-wide
mining pools.

* 256 StakeState::MiningPool instances are initialized, each with 1/256 of the
  mining pool tokens stored as `Account::lamports`. A sketch of this split
  follows the list.

The stakes and the MiningPool are accounts that are owned by the same `Stake`
program.
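The even 1/256 split can be pictured with a short sketch; the pool count
constant and the genesis-time helper shown here are illustrative assumptions,
not actual initialization code.

```rust,ignore
// Illustrative only: divide the mining pool tokens evenly across 256
// MiningPool accounts at genesis. Remainder handling is an assumption.
const NUM_MINING_POOLS: u64 = 256;

fn split_mining_pool(total_pool_lamports: u64) -> Vec<u64> {
    let share = total_pool_lamports / NUM_MINING_POOLS;
    let mut balances = vec![share; NUM_MINING_POOLS as usize];
    // Park any rounding remainder in the first pool so no lamports are lost.
    balances[0] += total_pool_lamports % NUM_MINING_POOLS;
    balances
}
```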
### StakeInstruction::Initialize

* `account[0]` - RW - The StakeState::Delegate instance.
  `StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
  `StakeState::Delegate::voter_pubkey` is initialized to `account[1]`.

* `account[1]` - R - The VoteState instance.
### StakeInstruction::RedeemVoteCredits

The staker or the owner of the Stake account sends a transaction with this
instruction to claim rewards.

The Vote account and the Stake account pair maintain a lifetime counter of
total rewards generated and claimed. When claiming rewards, the total lamports
deposited into the Stake account and as validator commission is proportional
to `VoteState::credits - StakeState::credits_observed`.

* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill
  the reward.
* `account[1]` - RW - The StakeState::Delegate instance that is redeeming vote
  credits.
* `account[2]` - R - The VoteState instance, which must be the same as
  `StakeState::voter_pubkey`.

Reward is paid out for the difference between `VoteState::credits` and
`StakeState::Delegate::credits_observed`, and `credits_observed` is updated to
`VoteState::credits`. The commission is deposited into the Vote account token
balance, and the reward is deposited into the Stake account token balance.

The total lamports paid is a percentage rate of the lamports staked multiplied
by the ratio of rewards being redeemed to rewards that could have been
generated during the rate period.

Any random MiningPool can be used to redeem the credits.

```rust,ignore
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
stake_state.credits_observed = vote_state.credits;
```

`credits_to_claim` is used to compute the reward and commission, and
`StakeState::Delegate::credits_observed` is updated to the latest
`VoteState::credits` value.
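To make the flow above concrete, the following sketch computes a payout and its
commission split from `credits_to_claim`. The point-value input and the integer
math are assumptions for illustration; only the credits bookkeeping comes from
the design text.

```rust,ignore
// Illustrative only. `point_value` (lamports per redeemed credit for this
// rate period) is an assumed input; the design text does not fix its formula.
fn redeem(credits_to_claim: u64, point_value: u64, commission_percent: u8) -> (u64, u64) {
    let total = credits_to_claim * point_value;
    // The commission goes to the Vote account token balance...
    let commission = total * u64::from(commission_percent) / 100;
    // ...and the remainder is the reward deposited to the Stake account.
    let reward = total - commission;
    (reward, commission)
}
```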
## Collecting network fees into the MiningPool

At the end of the block, before the bank is frozen but after it has processed
all the transactions for the block, a virtual instruction is executed to
collect the transaction fees.

* A portion of the fees is deposited into the leader's account.
* A portion of the fees is deposited into the smallest StakeState::MiningPool
  account.
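A sketch of that virtual instruction's effect is below. The 50/50 split is an
assumption purely for illustration; the design text only says "a portion" goes
to each destination.

```rust,ignore
// Illustrative only: split collected fees between the leader and the
// smallest mining pool. The 50/50 ratio is an assumption.
fn collect_block_fees(collected_fees: u64, leader: &mut u64, mining_pools: &mut [u64]) {
    let leader_share = collected_fees / 2;
    *leader += leader_share;
    // Deposit the rest into the MiningPool with the smallest balance.
    if let Some(smallest) = mining_pools.iter_mut().min_by_key(|b| **b) {
        *smallest += collected_fees - leader_share;
    }
}
```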
## Authorizing a Vote Signer

`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
for its votes. That service is responsible for ensuring the vote won't cause
the staker to be slashed.

## Benefits of the design

* Single vote for all the stakers.

* Clearing of the credit variable is not necessary for claiming rewards.

* Each delegated stake can claim its rewards independently.

* Commission for the work is deposited when a reward is claimed by the
  delegated stake.

This proposal would benefit from the `read-only` accounts proposal to allow
for many rewards to be claimed concurrently.

## Example Callflow

<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>
@ -1,8 +1,8 @@
# Staking Rewards

A Proof of Stake (PoS) (i.e. using an in-protocol asset, SOL, to provide
secure consensus) design is outlined here. Solana implements a proof of stake
reward/security scheme for validator nodes in the cluster. The purpose is
threefold:

- Align validator incentives with that of the greater cluster through

@ -48,7 +48,7 @@ specific parameters will be necessary:

Solana's trustless sense of time and ordering provided by its PoH data
structure, along with its
[turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and
transmission design, should provide sub-second transaction confirmation times
that scale with the log of the number of nodes in the cluster. This means we
shouldn't have to restrict the number of validating nodes with a prohibitive
'minimum
@ -32,7 +32,7 @@ traversal issues. A cloud-hosted machine works best. **Ensure that IP ports

Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
MacOS or WSL users may build from source.

For a performance testnet with many transactions we have some preliminary recommended setups:

| | Low end | Medium end | High end | Notes |
| --- | ---------|------------|----------| -- |

@ -42,6 +42,13 @@ For a performance testnet with many transactions we have some preliminary recommended setups:

| Accounts Drive(s) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. |

#### GPU Requirements
CUDA is required to make use of the GPU on your system. The provided Solana
release binaries are built on Ubuntu 18.04 with <a
href="https://developer.nvidia.com/cuda-toolkit-archive">CUDA Toolkit 10.1
update 1</a>. If your machine is using a different CUDA version then you will
need to rebuild from source.

#### Confirm The Testnet Is Reachable
Before attaching a validator node, sanity check that the cluster is accessible
to your machine by running some simple commands. If any of the commands fail,

@ -64,11 +71,11 @@ for more detail on cluster activity.
##### Bootstrap with `solana-install`

The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 and mac OS systems.

```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
```

Alternatively build the `solana-install` program from source and run the

@ -78,11 +85,12 @@ $ solana-install init
```

After a successful install, `solana-install update` may be used to easily update the cluster
software to a newer version at any time.

##### Download Prebuilt Binaries
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.

###### Linux
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the

@ -92,6 +100,17 @@ $ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```

###### mac OS
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```

##### Build From Source
If you are unable to use the prebuilt binaries or prefer to build it yourself
from source, navigate to

@ -103,6 +122,12 @@ $ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH
```

If building for CUDA, include the `cuda` feature flag as well:
```bash
$ ./scripts/cargo-install-all.sh . cuda
$ export PATH=$PWD/bin:$PATH
```

### Starting The Validator
Sanity check that you are able to interact with the cluster by receiving a small
airdrop of lamports from the testnet drone:

@ -119,7 +144,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy

Now configure a key pair for your validator by running:
```bash
$ solana-keygen new -o ~/validator-keypair.json
```

Then use one of the following commands, depending on your installation

@ -128,22 +153,33 @@ choice, to start the node:
If this is a `solana-install`-installation:
```bash
$ clear-config.sh
$ validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates:
```bash
$ clear-config.sh
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

If you built from source:
```bash
$ USE_INSTALL=1 ./multinode-demo/clear-config.sh
$ USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

#### Enabling CUDA
By default CUDA is disabled. If your machine has a GPU with CUDA installed,
define the SOLANA_CUDA flag in your environment *before* running any of the
previously mentioned commands:
```bash
$ export SOLANA_CUDA=1
```

When your validator is started, look for the following log message to indicate that CUDA is enabled:
`"[<timestamp> solana::validator] CUDA is enabled"`

#### Controlling local network port allocation
By default the validator will dynamically select available network ports in the
8000-10000 range, and may be overridden with `--dynamic-port-range`. For

@ -164,7 +200,7 @@ accounts: ...

The **identity pubkey** for your validator can also be found by running:
```bash
$ solana-keygen pubkey ~/validator-keypair.json
```

From another console, confirm the IP address and **identity pubkey** of your validator is visible in the

@ -176,7 +212,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
Provide the **vote pubkey** to the `solana-wallet show-vote-account` command to view
the recent voting activity from your validator:
```bash
$ solana-wallet show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
```

The vote pubkey for the validator can also be found by running:

@ -187,13 +223,20 @@ $ solana-keygen pubkey ~/.local/share/solana/install/active_release/config-local
$ solana-keygen pubkey ./config-local/validator-vote-keypair.json
```

#### Validator Metrics
Metrics are available for local monitoring of your validator.

Docker must be installed and the current user added to the docker group. Then
download `solana-metrics.tar.bz2` from the Github Release and run
```bash
$ tar jxf solana-metrics.tar.bz2
$ cd solana-metrics/
$ ./start.sh
```

A local InfluxDB and Grafana instance is now running on your machine. Define
`SOLANA_METRICS_CONFIG` in your environment as described at the end of the
`start.sh` output and restart your validator.

Metrics should now be streaming and visible from your local Grafana dashboard.
book/src/testnet-replicator.md (new file, +154 lines)
@ -0,0 +1,154 @@
## Testnet Replicator
This document describes how to set up a replicator in the testnet.

Please note some of the information and instructions described here may change
in future releases.

### Overview
Replicators are specialized light clients. They download a part of the
ledger (a.k.a. Segment) and store it. They earn rewards for storing segments.

The testnet features a validator running at testnet.solana.com, which
serves as the entrypoint to the cluster for your replicator node.

Additionally there is a blockexplorer available at
[http://testnet.solana.com/](http://testnet.solana.com/).

The testnet is configured to reset the ledger daily, or sooner
should the hourly automated cluster sanity test fail.

### Machine Requirements
Replicators don't need specialized hardware. Anything with more than
128GB of disk space will be able to participate in the cluster as a replicator node.

Currently the disk space requirements are very low but we expect them to change
in the future.

Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended),
macOS, and Windows.

#### Confirm The Testnet Is Reachable
Before starting a replicator node, sanity check that the cluster is accessible
to your machine by running some simple commands. If any of the commands fail,
please retry 5-10 minutes later to confirm the testnet is not just restarting
itself before debugging further.

Fetch the current transaction count over JSON RPC:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
```

Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.

View the [metrics dashboard](
https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
for more detail on cluster activity.

### Replicator Setup
##### Obtaining The Software
##### Bootstrap with `solana-install`

The `solana-install` tool can be used to easily install and upgrade the cluster
software.

##### Linux and mac OS
```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
```

Alternatively build the `solana-install` program from source and run the
following command to obtain the same result:
```bash
$ solana-install init
```

##### Windows
Download and install **solana-install-init** from
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest)

After a successful install, `solana-install update` may be used to
easily update the software to a newer version at any time.

##### Download Prebuilt Binaries
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.

##### Linux
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### mac OS
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### Windows
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-pc-windows-msvc.tar.bz2**, then extract it into a folder.
It is a good idea to add this extracted folder to your windows PATH.

### Starting The Replicator
Try running the following command to join the gossip network and view all the other nodes in the cluster:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```

Now configure the keypairs for your replicator by running:

Navigate to the solana install location and open a cmd prompt
```bash
$ solana-keygen new -o replicator-keypair.json
$ solana-keygen new -o storage-keypair.json
```

Use solana-keygen to show the public keys for each of the keypairs;
they will be needed in the next step:
- Windows
```bash
# The replicator's identity
$ solana-keygen pubkey replicator-keypair.json
$ solana-keygen pubkey storage-keypair.json
```
- Linux and mac OS
```bash
$ export REPLICATOR_IDENTITY=$(solana-keygen pubkey replicator-keypair.json)
$ export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
```

Then set up the storage accounts for your replicator by running:
```bash
$ solana-wallet --keypair replicator-keypair.json airdrop 100000
$ solana-wallet --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
```
Note: Every time the testnet restarts, run the wallet steps to set up the replicator accounts again.

To start the replicator:
```bash
$ solana-replicator --entrypoint testnet.solana.com:8001 --identity replicator-keypair.json --storage-keypair storage-keypair.json --ledger replicator-ledger
```

### Verify Replicator Setup
From another console, confirm the IP address and **identity pubkey** of your replicator is visible in the
gossip network by running:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
```

Provide the **storage account pubkey** to the `solana-wallet show-storage-account` command to view
the recent mining activity from your replicator:
```bash
$ solana-wallet --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
```
@ -8,17 +8,14 @@ client won't know how much was collected until the transaction is confirmed by
the cluster and the remaining balance is checked. It smells of exactly what we
dislike about Ethereum's "gas", non-determinism.

### Congestion-driven fees

Each validator uses *signatures per slot* (SPS) to estimate network congestion
and *SPS target* to estimate the desired processing capacity of the cluster.
The validator learns the SPS target from the genesis block, whereas it
calculates SPS from recently processed transactions. The genesis block also
defines a target `lamports_per_signature`, which is the fee to charge per
signature when the cluster is operating at *SPS target*.

### Calculating fees

@ -37,8 +34,11 @@ lamports as returned by the fee calculator.
In the first implementation of this design, the only fee parameter is
`lamports_per_signature`. The more signatures the cluster needs to verify, the
higher the fee. The exact number of lamports is determined by the ratio of SPS
to the SPS target. At the end of each slot, the cluster lowers
`lamports_per_signature` when SPS is below the target and raises it when above
the target. The minimum value for `lamports_per_signature` is 50% of the
target `lamports_per_signature` and the maximum value is 10x the target
`lamports_per_signature`.

Future parameters might include:
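The slot-by-slot adjustment described above can be sketched as follows. The
step size used to move the fee toward its new value is an assumption; only the
direction of the adjustment and the 50%/10x clamp come from the text.

```rust,ignore
// Illustrative only: adjust lamports_per_signature once per slot based on
// observed SPS vs. the SPS target, clamped to [0.5x, 10x] of the target fee.
fn adjust_fee(current: u64, target_fee: u64, sps: u64, sps_target: u64) -> u64 {
    let step = (target_fee / 20).max(1); // assumed step size
    let proposed = if sps < sps_target {
        current.saturating_sub(step)
    } else {
        current + step
    };
    proposed.clamp(target_fee / 2, target_fee * 10)
}
```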
@ -1,12 +1,12 @@
# Turbine Block Propagation

A Solana cluster uses a multi-layer block propagation mechanism called *Turbine*
to broadcast transaction blobs to all nodes with a minimal amount of duplicate
messages. The cluster divides itself into small collections of nodes, called
*neighborhoods*. Each node is responsible for sharing any data it receives with
the other nodes in its neighborhood, as well as propagating the data on to a
small set of nodes in other neighborhoods. This way each node only has to
communicate with a small number of nodes.

During its slot, the leader node distributes blobs between the validator nodes
in the first neighborhood (layer 0). Each validator shares its data within its

@ -26,6 +26,14 @@ make up layer 0. These will automatically be the highest stake holders, allowing
the heaviest votes to come back to the leader first. Layer-0 and lower-layer
nodes use the same logic to find their neighbors and next layer peers.

To reduce the possibility of attack vectors, each blob is transmitted over a
random tree of neighborhoods. Each node uses the same set of nodes representing
the cluster. A random tree is generated from the set for each blob using
randomness derived from the blob itself. Since the random seed is not known in
advance, attacks that try to eclipse neighborhoods from certain leaders or
blocks become very difficult, and should require almost complete control of the
stake in the cluster.

## Layer and Neighborhood Structure

The current leader makes its initial broadcasts to at most `DATA_PLANE_FANOUT`
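As a rough sketch of the per-blob randomization, one can derive a seed from the
blob and reorder the cluster's node set before cutting it into neighborhoods of
`DATA_PLANE_FANOUT` nodes. The keyed ordering below is an assumption for
illustration, not the actual retransmit code.

```rust,ignore
// Illustrative only: build a blob-specific ordering of the cluster and slice
// it into neighborhoods. The seed derivation and ordering are assumptions.
const DATA_PLANE_FANOUT: usize = 200;

fn neighborhoods(mut node_ids: Vec<u64>, blob_seed: u64) -> Vec<Vec<u64>> {
    // Deterministic, seed-dependent ordering: every node computes the same
    // tree for the same blob, but the tree differs from blob to blob.
    node_ids.sort_by_key(|id| {
        id.wrapping_mul(0x9E37_79B9_7F4A_7C15)
            .rotate_left((blob_seed % 64) as u32)
            ^ blob_seed
    });
    node_ids
        .chunks(DATA_PLANE_FANOUT)
        .map(|chunk| chunk.to_vec())
        .collect()
}
```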
@ -284,6 +284,18 @@ ARGS:
    <PATH>    /path/to/program.o
```

```manpage
solana-wallet-fees
Display current cluster fees

USAGE:
    solana-wallet fees

FLAGS:
    -h, --help       Prints help information
    -V, --version    Prints version information
```

```manpage
solana-wallet-get-transaction-count
Get current transaction count
@ -1,23 +0,0 @@ (file deleted)
#!/usr/bin/env bash
#
# Builds perf-libs from the upstream source and installs them into the correct
# location in the tree
#
set -e
cd "$(dirname "$0")"

if [[ -d target/perf-libs ]]; then
  echo "target/perf-libs/ already exists, to continue run:"
  echo "$ rm -rf target/perf-libs"
  exit 1
fi

(
  set -x
  git clone git@github.com:solana-labs/solana-perf-libs.git target/perf-libs
  cd target/perf-libs
  make -j"$(nproc)"
  make DESTDIR=. install
)

./fetch-perf-libs.sh
chacha-sys/Cargo.toml (new file, +12 lines)
@ -0,0 +1,12 @@
[package]
name = "solana-chacha-sys"
version = "0.16.2"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[build-dependencies]
cc = "1.0.37"

chacha-sys/build.rs (new file, +8 lines)
@ -0,0 +1,8 @@
extern crate cc;

fn main() {
    cc::Build::new()
        .file("cpu-crypt/chacha20_core.c")
        .file("cpu-crypt/chacha_cbc.c")
        .compile("libcpu-crypt");
}

chacha-sys/cpu-crypt/.gitignore (new file, vendored, +1 line)
@ -0,0 +1 @@
release/

chacha-sys/cpu-crypt/Makefile (new file, +25 lines)
@ -0,0 +1,25 @@
V:=debug

LIB:=cpu-crypt

CFLAGS_common:=-Wall -Werror -pedantic -fPIC
CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
CFLAGS_debug:=-g $(CFLAGS_common)
CFLAGS:=$(CFLAGS_$V)

all: $V/lib$(LIB).a

$V/chacha20_core.o: chacha20_core.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/chacha_cbc.o: chacha_cbc.c chacha.h
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) -c $< -o $@

$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
	$(AR) rcs $@ $^

.PHONY:clean
clean:
	rm -rf $V

chacha-sys/cpu-crypt/chacha.h (new file, +35 lines)
@ -0,0 +1,35 @@
#ifndef HEADER_CHACHA_H
# define HEADER_CHACHA_H

#include <string.h>
#include <inttypes.h>
# include <stddef.h>
# ifdef __cplusplus
extern "C" {
# endif

typedef unsigned int u32;

#define CHACHA_KEY_SIZE 32
#define CHACHA_NONCE_SIZE 12
#define CHACHA_BLOCK_SIZE 64
#define CHACHA_ROUNDS 500

void chacha20_encrypt(const u32 input[16],
                      unsigned char output[64],
                      int num_rounds);

void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
                          const uint8_t key[CHACHA_KEY_SIZE],
                          const uint8_t nonce[CHACHA_NONCE_SIZE],
                          uint32_t counter);

void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
                             uint32_t len, const uint8_t* key,
                             unsigned char* ivec);

# ifdef __cplusplus
}
# endif

#endif
102
chacha-sys/cpu-crypt/chacha20_core.c
Normal file
102
chacha-sys/cpu-crypt/chacha20_core.c
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
#include "chacha.h"
|
||||||
|
|
||||||
|
#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
|
||||||
|
|
||||||
|
#define ROTATE(v, c) ROTL32((v), (c))
|
||||||
|
|
||||||
|
#define XOR(v, w) ((v) ^ (w))
|
||||||
|
|
||||||
|
#define PLUS(x, y) ((x) + (y))
|
||||||
|
|
||||||
|
#define U32TO8_LITTLE(p, v) \
|
||||||
|
{ (p)[0] = ((v) ) & 0xff; (p)[1] = ((v) >> 8) & 0xff; \
|
||||||
|
(p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }
|
||||||
|
|
||||||
|
#define U8TO32_LITTLE(p) \
|
||||||
|
(((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
|
||||||
|
((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
|
||||||
|
|
||||||
|
#define QUARTERROUND(a,b,c,d) \
|
||||||
|
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
|
||||||
|
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
|
||||||
|
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
|
||||||
|
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);
|
||||||
|
|
||||||
|
// sigma contains the ChaCha constants, which happen to be an ASCII string.
|
||||||
|
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
|
||||||
|
'2', '-', 'b', 'y', 't', 'e', ' ', 'k' };
|
||||||
|
|
||||||
|
void chacha20_encrypt(const u32 input[16],
|
||||||
|
unsigned char output[64],
|
||||||
|
int num_rounds)
|
||||||
|
{
|
||||||
|
u32 x[16];
|
||||||
|
int i;
|
||||||
|
memcpy(x, input, sizeof(u32) * 16);
|
||||||
|
for (i = num_rounds; i > 0; i -= 2) {
|
||||||
|
QUARTERROUND( 0, 4, 8,12)
|
||||||
|
QUARTERROUND( 1, 5, 9,13)
|
||||||
|
QUARTERROUND( 2, 6,10,14)
|
||||||
|
QUARTERROUND( 3, 7,11,15)
|
||||||
|
QUARTERROUND( 0, 5,10,15)
|
||||||
|
QUARTERROUND( 1, 6,11,12)
|
||||||
|
QUARTERROUND( 2, 7, 8,13)
|
||||||
|
QUARTERROUND( 3, 4, 9,14)
|
||||||
|
}
|
||||||
|
for (i = 0; i < 16; ++i) {
|
||||||
|
x[i] = PLUS(x[i], input[i]);
|
||||||
|
}
|
||||||
|
for (i = 0; i < 16; ++i) {
|
||||||
|
U32TO8_LITTLE(output + 4 * i, x[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
|
||||||
|
const uint8_t key[CHACHA_KEY_SIZE],
|
||||||
|
const uint8_t nonce[CHACHA_NONCE_SIZE],
|
||||||
|
uint32_t counter)
|
||||||
|
{
|
||||||
|
uint32_t input[16];
|
||||||
|
uint8_t buf[64];
|
||||||
|
size_t todo, i;
|
||||||
|
|
||||||
|
input[0] = U8TO32_LITTLE(sigma + 0);
|
||||||
|
input[1] = U8TO32_LITTLE(sigma + 4);
|
||||||
|
input[2] = U8TO32_LITTLE(sigma + 8);
|
||||||
|
input[3] = U8TO32_LITTLE(sigma + 12);
|
||||||
|
|
||||||
|
input[4] = U8TO32_LITTLE(key + 0);
|
||||||
|
input[5] = U8TO32_LITTLE(key + 4);
|
||||||
|
input[6] = U8TO32_LITTLE(key + 8);
|
||||||
|
input[7] = U8TO32_LITTLE(key + 12);
|
||||||
|
|
||||||
|
input[8] = U8TO32_LITTLE(key + 16);
|
||||||
|
input[9] = U8TO32_LITTLE(key + 20);
|
||||||
|
input[10] = U8TO32_LITTLE(key + 24);
|
||||||
|
input[11] = U8TO32_LITTLE(key + 28);
|
||||||
|
|
||||||
|
input[12] = counter;
|
||||||
|
input[13] = U8TO32_LITTLE(nonce + 0);
|
||||||
|
input[14] = U8TO32_LITTLE(nonce + 4);
|
||||||
|
input[15] = U8TO32_LITTLE(nonce + 8);
|
||||||
|
|
||||||
|
while (in_len > 0) {
|
||||||
|
todo = sizeof(buf);
|
||||||
|
if (in_len < todo) {
|
||||||
|
todo = in_len;
|
||||||
|
}
|
||||||
|
|
||||||
|
chacha20_encrypt(input, buf, 20);
|
||||||
|
for (i = 0; i < todo; i++) {
|
||||||
|
out[i] = in[i] ^ buf[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
out += todo;
|
||||||
|
in += todo;
|
||||||
|
in_len -= todo;
|
||||||
|
|
||||||
|
input[12]++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
72
chacha-sys/cpu-crypt/chacha_cbc.c
Normal file
72
chacha-sys/cpu-crypt/chacha_cbc.c
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
#include "chacha.h"
|
||||||
|
|
||||||
|
#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
|
||||||
|
# define STRICT_ALIGNMENT 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
|
||||||
|
uint32_t len, const uint8_t* key,
|
||||||
|
unsigned char* ivec)
|
||||||
|
{
|
||||||
|
size_t n;
|
||||||
|
unsigned char *iv = ivec;
|
||||||
|
(void)key;
|
||||||
|
|
||||||
|
if (len == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if !defined(OPENSSL_SMALL_FOOTPRINT)
|
||||||
|
if (STRICT_ALIGNMENT &&
|
||||||
|
((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
|
||||||
|
while (len >= CHACHA_BLOCK_SIZE) {
|
||||||
|
for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
|
||||||
|
out[n] = in[n] ^ iv[n];
|
||||||
|
//printf("%x ", out[n]);
|
||||||
|
}
|
||||||
|
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
|
||||||
|
iv = out;
|
||||||
|
len -= CHACHA_BLOCK_SIZE;
|
||||||
|
in += CHACHA_BLOCK_SIZE;
|
||||||
|
out += CHACHA_BLOCK_SIZE;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
while (len >= CHACHA_BLOCK_SIZE) {
|
||||||
|
for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
|
||||||
|
*(size_t *)(out + n) =
|
||||||
|
*(size_t *)(in + n) ^ *(size_t *)(iv + n);
|
||||||
|
//printf("%zu ", *(size_t *)(iv + n));
|
||||||
|
}
|
||||||
|
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
|
||||||
|
iv = out;
|
||||||
|
len -= CHACHA_BLOCK_SIZE;
|
||||||
|
in += CHACHA_BLOCK_SIZE;
|
||||||
|
out += CHACHA_BLOCK_SIZE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
while (len) {
|
||||||
|
for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
|
||||||
|
out[n] = in[n] ^ iv[n];
|
||||||
|
}
|
||||||
|
for (; n < CHACHA_BLOCK_SIZE; ++n) {
|
||||||
|
out[n] = iv[n];
|
||||||
|
}
|
||||||
|
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
|
||||||
|
iv = out;
|
||||||
|
if (len <= CHACHA_BLOCK_SIZE) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
len -= CHACHA_BLOCK_SIZE;
|
||||||
|
in += CHACHA_BLOCK_SIZE;
|
||||||
|
out += CHACHA_BLOCK_SIZE;
|
||||||
|
}
|
||||||
|
memcpy(ivec, iv, CHACHA_BLOCK_SIZE);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
|
||||||
|
const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
|
||||||
|
{
|
||||||
|
chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
|
||||||
|
}
|
chacha-sys/src/lib.rs (new file, +21 lines)
@ -0,0 +1,21 @@
extern "C" {
    fn chacha20_cbc_encrypt(
        input: *const u8,
        output: *mut u8,
        in_len: usize,
        key: *const u8,
        ivec: *mut u8,
    );
}

pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
    unsafe {
        chacha20_cbc_encrypt(
            input.as_ptr(),
            output.as_mut_ptr(),
            input.len(),
            key.as_ptr(),
            ivec.as_mut_ptr(),
        );
    }
}
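A short caller-side sketch of this wrapper follows. The key and IV values are
placeholders, and the buffer sizes simply follow the `CHACHA_KEY_SIZE` (32) and
`CHACHA_BLOCK_SIZE` (64) constants from `chacha.h`.

```rust,ignore
// Hypothetical usage of the solana-chacha-sys wrapper above.
fn main() {
    let plaintext = vec![0u8; 64];   // one ChaCha block of input
    let mut ciphertext = vec![0u8; 64];
    let key = [7u8; 32];             // placeholder key
    let mut ivec = [0u8; 64];        // CBC chaining state, one block wide
    solana_chacha_sys::chacha_cbc_encrypt(&plaintext, &mut ciphertext, &key, &mut ivec);
    assert_ne!(plaintext, ciphertext);
}
```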
|
@ -12,7 +12,7 @@
set -e
cd "$(dirname "$0")"/..

if [[ -n $CI_PULL_REQUEST ]]; then
  affectedFiles="$(buildkite-agent meta-data get affected_files)"
  echo "Affected files in this PR: $affectedFiles"
|
@ -2,13 +2,13 @@ steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 15
  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 30
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"

@ -21,7 +21,7 @@ steps:
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 40
# TODO: Fix and re-enable test-large-network.sh
# - command: "ci/test-large-network.sh || true"
#   name: "large-network [ignored]"
@ -89,11 +89,11 @@ BETA_CHANNEL_LATEST_TAG=${beta_tag:+v$beta_tag}
STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag}

if [[ $CI_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $CI_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $CI_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi
@ -64,11 +64,14 @@ fi
ARGS+=(
  --env BUILDKITE
  --env BUILDKITE_AGENT_ACCESS_TOKEN
  --env BUILDKITE_JOB_ID
  --env CI
  --env CI_BRANCH
  --env CI_BUILD_ID
  --env CI_COMMIT
  --env CI_JOB_ID
  --env CI_PULL_REQUEST
  --env CI_REPO_SLUG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN
)
@ -3,6 +3,7 @@ ARG date

RUN set -x \
 && rustup install nightly-$date \
 && rustup component add clippy --toolchain=nightly-$date \
 && rustup show \
 && rustc --version \
 && cargo --version \
@ -15,12 +15,12 @@ To update the pinned version:
1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
   or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
   specific YYYY-MM-DD that is desired (default is today's build).
1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD`
1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
   to confirm the new nightly image builds. Fix any issues as needed
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
1. Send a PR with the `ci/rust-version.sh` change and any codebase adjustments needed.

## Troubleshooting
@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.35.0

RUN set -x \
 && apt update \
@ -1,6 +1,7 @@
Docker image containing rust and some preinstalled packages used in CI.

This image is manually maintained:
1. Edit `Dockerfile` to match the desired rust version
2. Run `./build.sh` to publish the new image, if you are a member of the
   [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub organization.

ci/env.sh (new file, +89 lines)
@ -0,0 +1,89 @@
#
# Normalized CI environment variables
#
# |source| me
#

if [[ -n $CI ]]; then
  export CI=1
  if [[ -n $TRAVIS ]]; then
    export CI_BRANCH=$TRAVIS_BRANCH
    export CI_BUILD_ID=$TRAVIS_BUILD_ID
    export CI_COMMIT=$TRAVIS_COMMIT
    export CI_JOB_ID=$TRAVIS_JOB_ID
    if $TRAVIS_PULL_REQUEST; then
      export CI_PULL_REQUEST=true
    else
      export CI_PULL_REQUEST=
    fi
    export CI_OS_NAME=$TRAVIS_OS_NAME
    export CI_REPO_SLUG=$TRAVIS_REPO_SLUG
    export CI_TAG=$TRAVIS_TAG
  elif [[ -n $BUILDKITE ]]; then
    export CI_BRANCH=$BUILDKITE_BRANCH
    export CI_BUILD_ID=$BUILDKITE_BUILD_ID
    export CI_COMMIT=$BUILDKITE_COMMIT
    export CI_JOB_ID=$BUILDKITE_JOB_ID
    # The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
    # to how solana-ci-gate is used to trigger PR builds rather than using the
    # standard Buildkite PR trigger.
    if [[ $CI_BRANCH =~ pull/* ]]; then
      export CI_PULL_REQUEST=true
    else
      export CI_PULL_REQUEST=
    fi
    export CI_OS_NAME=linux
    if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
      # The solana-secondary pipeline should use the slug of the pipeline that
      # triggered it
      export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
    else
      export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
    fi
    # TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
    # the solana-secondary pipeline
    if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
      export CI_TAG=$TRIGGERED_BUILDKITE_TAG
    else
      export CI_TAG=$BUILDKITE_TAG
    fi
  elif [[ -n $APPVEYOR ]]; then
    export CI_BRANCH=$APPVEYOR_REPO_BRANCH
    export CI_BUILD_ID=$APPVEYOR_BUILD_ID
    export CI_COMMIT=$APPVEYOR_REPO_COMMIT
    export CI_JOB_ID=$APPVEYOR_JOB_ID
    if [[ -n $APPVEYOR_PULL_REQUEST_NUMBER ]]; then
      export CI_PULL_REQUEST=true
    else
      export CI_PULL_REQUEST=
    fi
    if [[ $CI_LINUX = True ]]; then
      export CI_OS_NAME=linux
    elif [[ $CI_WINDOWS = True ]]; then
      export CI_OS_NAME=windows
    fi
    export CI_REPO_SLUG=$APPVEYOR_REPO_NAME
    export CI_TAG=$APPVEYOR_REPO_TAG_NAME
  fi
else
  export CI=
  export CI_BRANCH=
  export CI_BUILD_ID=
  export CI_COMMIT=
  export CI_JOB_ID=
  export CI_OS_NAME=
  export CI_PULL_REQUEST=
  export CI_REPO_SLUG=
  export CI_TAG=
fi

cat <<EOF
CI=$CI
CI_BRANCH=$CI_BRANCH
CI_BUILD_ID=$CI_BUILD_ID
CI_COMMIT=$CI_COMMIT
CI_JOB_ID=$CI_JOB_ID
CI_OS_NAME=$CI_OS_NAME
CI_PULL_REQUEST=$CI_PULL_REQUEST
CI_TAG=$CI_TAG
EOF
@ -1,9 +0,0 @@ (file deleted)
#!/usr/bin/env bash
set -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#

[[ $BUILDKITE_BRANCH =~ pull/* ]]
@ -294,7 +294,7 @@ flag_error() {
}

if ! $skipSetup; then
  multinode-demo/setup.sh
else
  verifyLedger
fi

@ -307,7 +307,7 @@ while [[ $iteration -le $iterations ]]; do
  source multinode-demo/common.sh
  set -x
  client_keypair=/tmp/client-id.json-$$
  $solana_keygen new -f -o $client_keypair || exit $?
  $solana_gossip spy --num-nodes-exactly $numNodes || exit $?
  rm -rf $client_keypair
) || flag_error
@ -23,11 +23,13 @@ declare print_free_tree=(
  'metrics/src'
  'netutil/src'
  'runtime/src'
  'sdk/bpf/rust/rust-utils'
  'sdk/src'
  'programs/bpf/rust'
  'programs/stake_api/src'
  'programs/stake_program/src'
  'programs/vote_api/src'
  'programs/vote_program/src'
)

if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then
|
ci/order-crates-for-publishing.py  (new executable file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/env python
#
# This script figures the order in which workspace crates must be published to
# crates.io. Along the way it also ensures there are no circular dependencies
# that would cause a |cargo publish| to fail.
#
# On success an ordered list of Cargo.toml files is written to stdout
#

import os
import json
import subprocess
import sys;

def load_metadata():
    return json.loads(subprocess.Popen(
        'cargo metadata --no-deps --format-version=1',
        shell=True, stdout=subprocess.PIPE).communicate()[0])

def get_packages():
    metadata = load_metadata()

    manifest_path = dict()

    # Build dictionary of packages and their immediate solana-only dependencies
    dependency_graph = dict()
    for pkg in metadata['packages']:
        manifest_path[pkg['name']] = pkg['manifest_path'];
        dependency_graph[pkg['name']] = [x['name'] for x in pkg['dependencies'] if x['name'].startswith('solana')];

    # Check for direct circular dependencies
    circular_dependencies = set()
    for package, dependencies in dependency_graph.items():
        for dependency in dependencies:
            if dependency in dependency_graph and package in dependency_graph[dependency]:
                circular_dependencies.add(' <--> '.join(sorted([package, dependency])))

    for dependency in circular_dependencies:
        sys.stderr.write('Error: Circular dependency: {}\n'.format(dependency))

    if len(circular_dependencies) != 0:
        sys.exit(1)

    # Order dependencies
    sorted_dependency_graph = []
    max_iterations = pow(len(dependency_graph),2)
    while dependency_graph:
        if max_iterations == 0:
            # TODO: Be more helpful and find the actual cycle for the user
            sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys())))

        max_iterations -= 1
        for package, dependencies in dependency_graph.items():
            for dependency in dependencies:
                if dependency in dependency_graph:
                    break
            else:
                del dependency_graph[package]
                sorted_dependency_graph.append((package, manifest_path[package]))

    return sorted_dependency_graph

for package, manifest in get_packages():
    print os.path.relpath(manifest)
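The ordering pass above is a plain topological sort: each sweep emits every package whose solana-prefixed dependencies have already been emitted, and the iteration cap catches indirect cycles that the direct-cycle check misses. A toy run of the same idea on hypothetical package names:

# Toy run of the ordering idea used by ci/order-crates-for-publishing.py.
# The package names and edges below are hypothetical, not read from the workspace.
dependency_graph = {
    "solana-logger": [],
    "solana-sdk": ["solana-logger"],
    "solana-netutil": ["solana-logger"],
    "solana-client": ["solana-sdk", "solana-netutil"],
}

publish_order = []
max_iterations = pow(len(dependency_graph), 2)
while dependency_graph:
    assert max_iterations > 0, "circular dependency suspected"
    max_iterations -= 1
    for package, dependencies in list(dependency_graph.items()):
        if all(dep not in dependency_graph for dep in dependencies):
            del dependency_graph[package]
            publish_order.append(package)

print(publish_order)
# ['solana-logger', 'solana-sdk', 'solana-netutil', 'solana-client']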
@@ -13,7 +13,7 @@ echo --- create book repo
 git config user.email "maintainers@solana.com"
 git config user.name "$(basename "$0")"
 git add ./* ./.nojekyll
-git commit -m "${BUILDKITE_COMMIT:-local}"
+git commit -m "${CI_COMMIT:-local}"
 )

 eval "$(ci/channel-info.sh)"
@@ -3,43 +3,21 @@ set -e
 cd "$(dirname "$0")/.."
 source ci/semver_bash/semver.sh

-# List of internal crates to publish
-#
-# IMPORTANT: the order of the CRATES *is* significant. Crates must be published
-# before the crates that depend on them. Note that this information is already
-# expressed in the various Cargo.toml files, and ideally would not be duplicated
-# here. (TODO: figure the crate ordering dynamically)
-#
-CRATES=(
-kvstore
-logger
-netutil
-sdk
-keygen
-metrics
-client
-drone
-programs/{budget_api,config_api,stake_api,storage_api,token_api,vote_api,exchange_api}
-programs/{vote_program,budget_program,bpf_loader,config_program,exchange_program,failure_program}
-programs/{noop_program,stake_program,storage_program,token_program}
-runtime
-vote-signer
-core
-validator
-genesis
-gossip
-ledger-tool
-wallet
-install
-)
+# shellcheck disable=SC2086
+is_crate_version_uploaded() {
+name=$1
+version=$2
+curl https://crates.io/api/v1/crates/${name}/${version} | \
+python3 -c "import sys,json; print('version' in json.load(sys.stdin));"
+}

 # Only package/publish if this is a tagged release
-[[ -n $TRIGGERED_BUILDKITE_TAG ]] || {
-echo TRIGGERED_BUILDKITE_TAG unset, skipped
+[[ -n $CI_TAG ]] || {
+echo CI_TAG unset, skipped
 exit 0
 }

-semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
+semverParseInto "$CI_TAG" MAJOR MINOR PATCH SPECIAL
 expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"

 [[ -n "$CRATES_IO_TOKEN" ]] || {
@@ -49,25 +27,37 @@ expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"

 cargoCommand="cargo publish --token $CRATES_IO_TOKEN"

-for crate in "${CRATES[@]}"; do
-if [[ ! -r $crate/Cargo.toml ]]; then
-echo "Error: $crate/Cargo.toml does not exist"
-exit 1
-fi
-echo "-- $crate"
-grep -q "^version = \"$expectedCrateVersion\"$" "$crate"/Cargo.toml || {
-echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
+Cargo_tomls=$(ci/order-crates-for-publishing.py)
+
+for Cargo_toml in $Cargo_tomls; do
+echo "-- $Cargo_toml"
+grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
+echo "Error: $Cargo_toml version is not $expectedCrateVersion"
 exit 1
 }

 (
 set -x
+crate=$(dirname "$Cargo_toml")
 # TODO: the rocksdb package does not build with the stock rust docker image,
 # so use the solana rust docker image until this is resolved upstream
 source ci/rust-version.sh
 ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
-#ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
-)
+) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues
+
+# shellcheck disable=SC2086
+crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
+numRetries=30
+for ((i = 1 ; i <= numRetries ; i++)); do
+echo "Attempt ${i} of ${numRetries}"
+# shellcheck disable=SC2086
+if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then
+echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
+break
+fi
+echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
+sleep 2
+done
 done

 exit 0
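The retry loop above treats a crate as published only once the crates.io API response for name/version carries a top-level "version" field, which is exactly what the python3 one-liner inside is_crate_version_uploaded tests. A self-contained sketch of the same poll, with an example crate name and version:

# Standalone sketch of the crates.io visibility poll used above.
# The crate name and version below are examples only.
import json
import time
import urllib.request

def is_crate_version_uploaded(name, version):
    url = "https://crates.io/api/v1/crates/{}/{}".format(name, version)
    with urllib.request.urlopen(url) as response:
        return "version" in json.load(response)

for attempt in range(1, 31):
    print("Attempt {} of 30".format(attempt))
    if is_crate_version_uploaded("solana-sdk", "0.16.2"):
        print("found on crates.io")
        break
    time.sleep(2)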
@@ -45,7 +45,9 @@ beta)
 CHANNEL_BRANCH=$BETA_CHANNEL
 ;;
 stable)
-CHANNEL_BRANCH=$STABLE_CHANNEL
+# Set to whatever branch 'testnet' is on.
+# TODO: Revert to $STABLE_CHANNEL for TdS
+CHANNEL_BRANCH=$BETA_CHANNEL
 ;;
 *)
 echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"
@@ -53,7 +55,7 @@ stable)
 ;;
 esac

-if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
+if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then
 (
 cat <<EOF
 steps:
@@ -3,8 +3,20 @@ set -e

 cd "$(dirname "$0")/.."

+if [[ -n $APPVEYOR ]]; then
+# Bootstrap rust build environment
+source ci/env.sh
+source ci/rust-version.sh
+
+appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
+./rustup-init -yv --default-toolchain $rust_stable --default-host x86_64-pc-windows-msvc
+export PATH="$PATH:$USERPROFILE/.cargo/bin"
+rustc -vV
+cargo -vV
+fi
+
 DRYRUN=
-if [[ -z $BUILDKITE_BRANCH ]]; then
+if [[ -z $CI_BRANCH ]]; then
 DRYRUN="echo"
 CHANNEL=unknown
 fi
@@ -12,12 +24,9 @@ fi
 eval "$(ci/channel-info.sh)"

 TAG=
-if [[ -n "$BUILDKITE_TAG" ]]; then
-CHANNEL_OR_TAG=$BUILDKITE_TAG
-TAG="$BUILDKITE_TAG"
-elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
-CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
-TAG="$TRIGGERED_BUILDKITE_TAG"
+if [[ -n "$CI_TAG" ]]; then
+CHANNEL_OR_TAG=$CI_TAG
+TAG="$CI_TAG"
 else
 CHANNEL_OR_TAG=$CHANNEL
 fi
@@ -27,12 +36,17 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
 exit 1
 fi

-case "$(uname)" in
-Darwin)
+PERF_LIBS=false
+case "$CI_OS_NAME" in
+osx)
 TARGET=x86_64-apple-darwin
 ;;
-Linux)
+linux)
 TARGET=x86_64-unknown-linux-gnu
+PERF_LIBS=true
+;;
+windows)
+TARGET=x86_64-pc-windows-msvc
 ;;
 *)
 TARGET=unknown-unknown-unknown
@@ -56,18 +70,21 @@ echo --- Creating tarball
 source ci/rust-version.sh stable
 scripts/cargo-install-all.sh +"$rust_stable" solana-release

-rm -rf target/perf-libs
-./fetch-perf-libs.sh
-mkdir solana-release/target
-cp -a target/perf-libs solana-release/target/
-
-# shellcheck source=/dev/null
-source ./target/perf-libs/env.sh
-(
-cd validator
-cargo +"$rust_stable" install --path . --features=cuda --root ../solana-release-cuda
-)
-cp solana-release-cuda/bin/solana-validator solana-release/bin/solana-validator-cuda
+if $PERF_LIBS; then
+  rm -rf target/perf-libs
+  ./fetch-perf-libs.sh
+  mkdir solana-release/target
+  cp -a target/perf-libs solana-release/target/
+
+  # shellcheck source=/dev/null
+  source ./target/perf-libs/env.sh
+  (
+    cd validator
+    cargo +"$rust_stable" install --path . --features=cuda --root ../solana-release-cuda
+  )
+  cp solana-release-cuda/bin/solana-validator solana-release/bin/solana-validator-cuda
+fi

 cp -a scripts multinode-demo solana-release/

 # Add a wrapper script for validator.sh
@@ -88,41 +105,64 @@ EOF
 set -e
 cd "$(dirname "$0")"/..
 export USE_INSTALL=1
-exec multinode-demo/clear-validator-config.sh "$@"
+exec multinode-demo/clear-config.sh "$@"
 EOF
 chmod +x solana-release/bin/clear-config.sh

 tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
-cp solana-release/bin/solana-install solana-install-$TARGET
+cp solana-release/bin/solana-install-init solana-install-init-$TARGET
 )

-echo --- Saving build artifacts
-source ci/upload-ci-artifact.sh
-upload-ci-artifact solana-release-$TARGET.tar.bz2
-
-if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
-echo Skipped due to DO_NOT_PUBLISH_TAR
-exit 0
-fi
-
-for file in solana-release-$TARGET.tar.bz2 solana-install-$TARGET; do
-echo --- AWS S3 Store: $file
-(
-set -x
-$DRYRUN docker run \
---rm \
---env AWS_ACCESS_KEY_ID \
---env AWS_SECRET_ACCESS_KEY \
---volume "$PWD:/solana" \
-eremite/aws-cli:2018.12.18 \
-/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
-
-echo Published to:
-$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
-)
-
-if [[ -n $TAG ]]; then
-ci/upload-github-release-asset.sh $file
+# Metrics tarball is platform agnostic, only publish it from Linux
+MAYBE_METRICS_TARBALL=
+if [[ "$CI_OS_NAME" = linux ]]; then
+metrics/create-metrics-tarball.sh
+MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
+fi
+
+source ci/upload-ci-artifact.sh
+
+for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
+upload-ci-artifact "$file"
+
+if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
+continue
+fi
+
+if [[ -n $BUILDKITE ]]; then
+echo --- AWS S3 Store: "$file"
+(
+set -x
+$DRYRUN docker run \
+--rm \
+--env AWS_ACCESS_KEY_ID \
+--env AWS_SECRET_ACCESS_KEY \
+--volume "$PWD:/solana" \
+eremite/aws-cli:2018.12.18 \
+/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+
+echo Published to:
+$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+)
+
+if [[ -n $TAG ]]; then
+ci/upload-github-release-asset.sh "$file"
+fi
+elif [[ -n $TRAVIS ]]; then
+# .travis.yml uploads everything in the travis-s3-upload/ directory to release.solana.com
+mkdir -p travis-s3-upload/"$CHANNEL_OR_TAG"
+cp -v "$file" travis-s3-upload/"$CHANNEL_OR_TAG"/
+
+if [[ -n $TAG ]]; then
+# .travis.yaml uploads everything in the travis-release-upload/ directory to
+# the associated Github Release
+mkdir -p travis-release-upload/
+cp -v "$file" travis-release-upload/
+fi
+elif [[ -n $APPVEYOR ]]; then
+# Add artifacts for .appveyor.yml to upload
+appveyor PushArtifact "$file" -FileName "$CHANNEL_OR_TAG"/"$file"
 fi
 done
@@ -13,11 +13,14 @@
 # $ source ci/rust-version.sh
 #

-export rust_stable=1.34.0
-export rust_stable_docker_image=solanalabs/rust:1.34.0
+stable_version=1.35.0
+nightly_version=2019-06-20

-export rust_nightly=nightly-2019-05-01
-export rust_nightly_docker_image=solanalabs/rust-nightly:2019-05-01
+export rust_stable="$stable_version"
+export rust_stable_docker_image=solanalabs/rust:"$stable_version"
+
+export rust_nightly=nightly-"$nightly_version"
+export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"

 [[ -z $1 ]] || (
@@ -30,8 +30,8 @@ set -o pipefail
 export RUST_BACKTRACE=1

 UPLOAD_METRICS=""
-TARGET_BRANCH=$BUILDKITE_BRANCH
-if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
+TARGET_BRANCH=$CI_BRANCH
+if [[ -z $CI_BRANCH ]] || [[ -n $CI_PULL_REQUEST ]]; then
 TARGET_BRANCH=$EDGE_CHANNEL
 else
 UPLOAD_METRICS="upload"

@@ -40,6 +40,10 @@ fi
 BENCH_FILE=bench_output.log
 BENCH_ARTIFACT=current_bench_results.log

+# Clear the C dependency files, if dependeny moves these files are not regenerated
+test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
+test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
+
 # Ensure all dependencies are built
 _ cargo +$rust_nightly build --all --release
@@ -5,15 +5,37 @@ cd "$(dirname "$0")/.."

 source ci/_
 source ci/rust-version.sh stable
+source ci/rust-version.sh nightly

 export RUST_BACKTRACE=1
 export RUSTFLAGS="-D warnings"

+do_bpf_check() {
+_ cargo +"$rust_stable" fmt --all -- --check
+_ cargo +"$rust_nightly" clippy --all -- --version
+_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
+_ cargo +"$rust_stable" audit
+}
+
+(
+(
+cd sdk/bpf/rust/rust-utils
+do_bpf_check
+)
+for project in programs/bpf/rust/*/ ; do
+(
+cd "$project"
+do_bpf_check
+)
+done
+)
+
 _ cargo +"$rust_stable" fmt --all -- --check
 _ cargo +"$rust_stable" clippy --all -- --version
 _ cargo +"$rust_stable" clippy --all -- --deny=warnings
 _ cargo +"$rust_stable" audit
 _ ci/nits.sh
+_ ci/order-crates-for-publishing.py
 _ book/build.sh

 echo --- ok
@@ -25,7 +25,7 @@ source scripts/ulimit-n.sh

 scripts/coverage.sh

-report=coverage-"${BUILDKITE_COMMIT:0:9}".tar.gz
+report=coverage-"${CI_COMMIT:0:9}".tar.gz
 mv target/cov/report.tar.gz "$report"
 upload-ci-artifact "$report"
 annotate --style success --context lcov-report \

@@ -39,5 +39,5 @@ else
 bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info

 annotate --style success --context codecov.io \
-"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${BUILDKITE_COMMIT:0:9}"
+"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"
 fi
@@ -19,7 +19,14 @@ source scripts/ulimit-n.sh
 # Clear cached json keypair files
 rm -rf "$HOME/.config/solana"

-# Run tbe appropriate test based on entrypoint
+# Clear the C dependency files, if dependeny moves these files are not regenerated
+test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
+test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
+
+# Clear the BPF sysroot files, they are not automatically rebuilt
+rm -rf target/xargo # Issue #3105
+
+# Run the appropriate test based on entrypoint
 testName=$(basename "$0" .sh)
 case $testName in
 test-stable)

@@ -35,8 +42,10 @@ test-stable-perf)
 .rs$ \
 Cargo.lock$ \
 Cargo.toml$ \
-ci/test-stable-perf.sh \
-ci/test-stable.sh \
+^ci/test-stable-perf.sh \
+^ci/test-stable.sh \
+^core/build.rs \
+^fetch-perf-libs.sh \
 ^programs/ \
 ^sdk/ \
 || {
@@ -52,10 +61,8 @@ test-stable-perf)
 --no-default-features --features=bpf_c,bpf_rust

 # Run root package tests with these features
-ROOT_FEATURES=erasure,chacha
-if [[ $(uname) = Darwin ]]; then
-./build-perf-libs.sh
-else
+ROOT_FEATURES=
+if [[ $(uname) = Linux ]]; then
 # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
 # lengthy and unexpected delay the first time CUDA is involved when the driver
 # is not yet loaded.

@@ -65,7 +72,7 @@ test-stable-perf)
 ./fetch-perf-libs.sh
 # shellcheck source=/dev/null
 source ./target/perf-libs/env.sh
-ROOT_FEATURES=$ROOT_FEATURES,cuda
+ROOT_FEATURES=cuda
 fi

 # Run root package library tests
@@ -311,6 +311,9 @@ if ! $skipStart; then
 if [[ -n $NO_LEDGER_VERIFY ]]; then
 args+=(-o noLedgerVerify)
 fi
+if [[ -n $NO_INSTALL_CHECK ]]; then
+args+=(-o noInstallCheck)
+fi
 if [[ -n $maybeHashesPerTick ]]; then
 # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
 args+=($maybeHashesPerTick)

@@ -324,10 +327,11 @@ if ! $skipStart; then
 args+=(-F)
 fi

-# shellcheck disable=SC2154 # SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu comes from .buildkite/env/
-if $deployUpdateManifest && [[ -n $SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu ]]; then
-echo "$SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu" > update_manifest_keypair.json
-args+=(-i update_manifest_keypair.json)
+if $deployUpdateManifest; then
+rm -f update_manifest_keypair.json
+args+=(--deploy-update linux)
+args+=(--deploy-update osx)
+args+=(--deploy-update windows)
 fi

 # shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
@@ -132,19 +132,16 @@ case $TESTNET in
 testnet-edge|testnet-edge-perf)
 CHANNEL_OR_TAG=edge
 CHANNEL_BRANCH=$EDGE_CHANNEL
-: "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
 ;;
 testnet-beta|testnet-beta-perf)
 CHANNEL_OR_TAG=beta
 CHANNEL_BRANCH=$BETA_CHANNEL
-: "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
 ;;
 testnet)
 CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
 CHANNEL_BRANCH=$STABLE_CHANNEL
 : "${EC2_NODE_COUNT:=10}"
 : "${GCE_NODE_COUNT:=}"
-: "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
 ;;
 testnet-perf)
 CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG

@@ -155,7 +152,6 @@ testnet-demo)
 CHANNEL_BRANCH=$BETA_CHANNEL
 : "${GCE_NODE_COUNT:=150}"
 : "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
-: "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
 ;;
 *)
 echo "Error: Invalid TESTNET=$TESTNET"
@@ -188,7 +184,7 @@ if [[ -n $TESTNET_TAG ]]; then
 CHANNEL_OR_TAG=$TESTNET_TAG
 else

-if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
+if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then
 (
 cat <<EOF
 steps:
@@ -216,6 +212,7 @@ sanity() {
 testnet-edge)
 (
 set -x
+NO_INSTALL_CHECK=1 \
 NO_LEDGER_VERIFY=1 \
 ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
 )
@@ -232,6 +229,7 @@ sanity() {
 testnet-beta)
 (
 set -x
+NO_INSTALL_CHECK=1 \
 NO_LEDGER_VERIFY=1 \
 ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
 )
@@ -327,8 +325,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 )
 ;;
 testnet-edge-perf)
@@ -342,8 +339,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 )
 ;;
 testnet-beta)
@@ -355,8 +351,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 )
 ;;
 testnet-beta-perf)
@@ -370,8 +365,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 )
 ;;
 testnet)
@@ -415,8 +409,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 )
 ;;
 testnet-demo)
@@ -436,8 +429,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${maybeSkipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}

 if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]]; then
 # shellcheck disable=SC2068

@@ -448,8 +440,7 @@ deploy() {
 ${skipCreate:+-e} \
 ${skipStart:+-s} \
 ${maybeStop:+-S} \
-${maybeDelete:+-D} \
---hashes-per-tick auto
+${maybeDelete:+-D}
 fi
 )
 ;;
@@ -64,6 +64,7 @@ for zone in "$@"; do
 ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
 ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
 ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
+${NO_INSTALL_CHECK:+-o noInstallCheck} \
 $zone || ok=false

 net/net.sh logs
@@ -8,8 +8,6 @@
 #
 set -e

-REPO_SLUG=solana-labs/solana
-
 if [[ -z $1 ]]; then
 echo No files specified
 exit 1

@@ -20,31 +18,30 @@ if [[ -z $GITHUB_TOKEN ]]; then
 exit 1
 fi

-if [[ -n $BUILDKITE_TAG ]]; then
-TAG=$BUILDKITE_TAG
-elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
-TAG=$TRIGGERED_BUILDKITE_TAG
+if [[ -z $CI_TAG ]]; then
+echo Error: CI_TAG not defined
+exit 1
 fi

-if [[ -z $TAG ]]; then
-echo Error: TAG not defined
+if [[ -z $CI_REPO_SLUG ]]; then
+echo Error: CI_REPO_SLUG not defined
 exit 1
 fi

 releaseId=$( \
-curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
+curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
 | grep -m 1 \"id\": \
 | sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
 )
-echo "Github release id for $TAG is $releaseId"
+echo "Github release id for $CI_TAG is $releaseId"

 for file in "$@"; do
-echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
+echo "--- Uploading $file to tag $CI_TAG of $CI_REPO_SLUG"
 curl \
 --data-binary @"$file" \
 -H "Authorization: token $GITHUB_TOKEN" \
 -H "Content-Type: application/octet-stream" \
-"https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
+"https://uploads.github.com/repos/$CI_REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
 echo
 done
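The curl | grep | sed pipeline above exists only to pull the numeric id out of the release object for $CI_TAG; the upload that follows posts each asset against that id. A sketch of the same lookup done with a JSON parser instead of grep/sed, reading the same CI_* variables (the concrete values are placeholders):

# Sketch of the release-id lookup above using a JSON parser instead of grep/sed.
# CI_REPO_SLUG and CI_TAG come from ci/env.sh; their concrete values are placeholders.
import json
import os
import urllib.request

repo = os.environ["CI_REPO_SLUG"]   # e.g. an "owner/name" slug
tag = os.environ["CI_TAG"]          # e.g. a "vX.Y.Z" release tag
url = "https://api.github.com/repos/{}/releases/tags/{}".format(repo, tag)
with urllib.request.urlopen(url) as response:
    release_id = json.load(response)["id"]
print("Github release id for {} is {}".format(tag, release_id))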
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.15.0"
+version = "0.16.2"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"

@@ -11,16 +11,18 @@ edition = "2018"
 [dependencies]
 bincode = "1.1.4"
 bs58 = "0.2.0"
+jsonrpc-core = "12.0.0"
 log = "0.4.2"
-jsonrpc-core = "10.1.0"
-reqwest = "0.9.17"
-serde = "1.0.89"
-serde_derive = "1.0.91"
+rand = "0.6.5"
+rayon = "1.1.0"
+reqwest = "0.9.18"
+serde = "1.0.92"
+serde_derive = "1.0.92"
 serde_json = "1.0.39"
-solana-netutil = { path = "../netutil", version = "0.15.0" }
-solana-sdk = { path = "../sdk", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.16.2" }
+solana-sdk = { path = "../sdk", version = "0.16.2" }

 [dev-dependencies]
-jsonrpc-core = "10.1.0"
-jsonrpc-http-server = "10.1.0"
-solana-logger = { path = "../logger", version = "0.15.0" }
+jsonrpc-core = "12.0.0"
+jsonrpc-http-server = "12.0.0"
+solana-logger = { path = "../logger", version = "0.16.2" }
@@ -60,6 +60,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
 serde_json::to_value(response).unwrap()
 }
 RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
+RpcRequest::GetSlot => Value::Number(Number::from(0)),
 RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
 _ => Value::Null,
 };
|
@ -36,7 +36,18 @@ pub fn sample_txs<T>(
|
|||||||
total_elapsed = start_time.elapsed();
|
total_elapsed = start_time.elapsed();
|
||||||
let elapsed = now.elapsed();
|
let elapsed = now.elapsed();
|
||||||
now = Instant::now();
|
now = Instant::now();
|
||||||
let mut txs = client.get_transaction_count().expect("transaction count");
|
let mut txs;
|
||||||
|
match client.get_transaction_count() {
|
||||||
|
Err(e) => {
|
||||||
|
// ThinClient with multiple options should pick a better one now.
|
||||||
|
info!("Couldn't get transaction count {:?}", e);
|
||||||
|
sleep(Duration::from_secs(sample_period));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
Ok(tx_count) => {
|
||||||
|
txs = tx_count;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if txs < last_txs {
|
if txs < last_txs {
|
||||||
info!("Expected txs({}) >= last_txs({})", txs, last_txs);
|
info!("Expected txs({}) >= last_txs({})", txs, last_txs);
|
||||||
|
@ -75,6 +75,25 @@ impl RpcClient {
|
|||||||
Ok(result)
|
Ok(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn get_slot(&self) -> io::Result<u64> {
|
||||||
|
let response = self
|
||||||
|
.client
|
||||||
|
.send(&RpcRequest::GetSlot, None, 0)
|
||||||
|
.map_err(|err| {
|
||||||
|
io::Error::new(
|
||||||
|
io::ErrorKind::Other,
|
||||||
|
format!("GetSlot request failure: {:?}", err),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
serde_json::from_value(response).map_err(|err| {
|
||||||
|
io::Error::new(
|
||||||
|
io::ErrorKind::Other,
|
||||||
|
format!("GetSlot parse failure: {}", err),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
pub fn send_and_confirm_transaction<T: KeypairUtil>(
|
pub fn send_and_confirm_transaction<T: KeypairUtil>(
|
||||||
&self,
|
&self,
|
||||||
transaction: &mut Transaction,
|
transaction: &mut Transaction,
|
||||||
|
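The new get_slot() is a thin wrapper over the JSON-RPC getSlot method: build the request, send it through the shared client, and deserialize the numeric result. A sketch of the equivalent raw request, posted to a hypothetical locally running node:

# Raw JSON-RPC equivalent of the get_slot() wrapper added above.
# The endpoint address below is a hypothetical local node.
import json
import urllib.request

payload = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "getSlot"}).encode()
request = urllib.request.Request(
    "http://127.0.0.1:8899",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    reply = json.load(response)
print("current slot:", reply["result"])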
@@ -12,6 +12,7 @@ pub enum RpcRequest {
 GetNumBlocksSinceSignatureConfirmation,
 GetRecentBlockhash,
 GetSignatureStatus,
+GetSlot,
 GetSlotLeader,
 GetEpochVoteAccounts,
 GetStorageBlockhash,

@@ -39,6 +40,7 @@ impl RpcRequest {
 }
 RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
 RpcRequest::GetSignatureStatus => "getSignatureStatus",
+RpcRequest::GetSlot => "getSlot",
 RpcRequest::GetSlotLeader => "getSlotLeader",
 RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts",
 RpcRequest::GetStorageBlockhash => "getStorageBlockhash",

@@ -104,6 +106,10 @@ mod tests {
 let request = test_request.build_request_json(1, None);
 assert_eq!(request["method"], "getRecentBlockhash");

+let test_request = RpcRequest::GetSlot;
+let request = test_request.build_request_json(1, None);
+assert_eq!(request["method"], "getSlot");
+
 let test_request = RpcRequest::GetTransactionCount;
 let request = test_request.build_request_json(1, None);
 assert_eq!(request["method"], "getTransactionCount");
@@ -6,6 +6,7 @@
 use crate::rpc_client::RpcClient;
 use bincode::{serialize_into, serialized_size};
 use log::*;
+use solana_sdk::account::Account;
 use solana_sdk::client::{AsyncClient, Client, SyncClient};
 use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::hash::Hash;
@@ -15,17 +16,100 @@ use solana_sdk::packet::PACKET_DATA_SIZE;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
 use solana_sdk::system_instruction;
+use solana_sdk::timing::duration_as_ms;
 use solana_sdk::transaction::{self, Transaction};
 use solana_sdk::transport::Result as TransportResult;
 use std::io;
 use std::net::{SocketAddr, UdpSocket};
-use std::time::Duration;
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::RwLock;
+use std::time::{Duration, Instant};
+
+struct ClientOptimizer {
+cur_index: AtomicUsize,
+experiment_index: AtomicUsize,
+experiment_done: AtomicBool,
+times: RwLock<Vec<u64>>,
+num_clients: usize,
+}
+
+fn min_index(array: &[u64]) -> (u64, usize) {
+let mut min_time = std::u64::MAX;
+let mut min_index = 0;
+for (i, time) in array.iter().enumerate() {
+if *time < min_time {
+min_time = *time;
+min_index = i;
+}
+}
+(min_time, min_index)
+}
+
+impl ClientOptimizer {
+fn new(num_clients: usize) -> Self {
+Self {
+cur_index: AtomicUsize::new(0),
+experiment_index: AtomicUsize::new(0),
+experiment_done: AtomicBool::new(false),
+times: RwLock::new(vec![std::u64::MAX; num_clients]),
+num_clients,
+}
+}
+
+fn experiment(&self) -> usize {
+if self.experiment_index.load(Ordering::Relaxed) < self.num_clients {
+let old = self.experiment_index.fetch_add(1, Ordering::Relaxed);
+if old < self.num_clients {
+old
+} else {
+self.best()
+}
+} else {
+self.best()
+}
+}
+
+fn report(&self, index: usize, time_ms: u64) {
+if self.num_clients > 1
+&& (!self.experiment_done.load(Ordering::Relaxed) || time_ms == std::u64::MAX)
+{
+trace!(
+"report {} with {} exp: {}",
+index,
+time_ms,
+self.experiment_index.load(Ordering::Relaxed)
+);
+
+self.times.write().unwrap()[index] = time_ms;
+
+if index == (self.num_clients - 1) || time_ms == std::u64::MAX {
+let times = self.times.read().unwrap();
+let (min_time, min_index) = min_index(&times);
+trace!(
+"done experimenting min: {} time: {} times: {:?}",
+min_index,
+min_time,
+times
+);
+
+// Only 1 thread should grab the num_clients-1 index, so this should be ok.
+self.cur_index.store(min_index, Ordering::Relaxed);
+self.experiment_done.store(true, Ordering::Relaxed);
+}
+}
+}
+
+fn best(&self) -> usize {
+self.cur_index.load(Ordering::Relaxed)
+}
+}
+
 /// An object for querying and sending transactions to the network.
 pub struct ThinClient {
-transactions_addr: SocketAddr,
 transactions_socket: UdpSocket,
-rpc_client: RpcClient,
+transactions_addrs: Vec<SocketAddr>,
+rpc_clients: Vec<RpcClient>,
+optimizer: ClientOptimizer,
 }

 impl ThinClient {
@@ -59,12 +143,39 @@ impl ThinClient {
 rpc_client: RpcClient,
 ) -> Self {
 Self {
-rpc_client,
-transactions_addr,
 transactions_socket,
+transactions_addrs: vec![transactions_addr],
+rpc_clients: vec![rpc_client],
+optimizer: ClientOptimizer::new(0),
 }
 }

+pub fn new_from_addrs(
+transactions_addrs: Vec<SocketAddr>,
+transactions_socket: UdpSocket,
+rpc_sockets: Vec<SocketAddr>,
+) -> Self {
+assert!(!transactions_addrs.is_empty());
+assert!(!rpc_sockets.is_empty());
+assert_eq!(rpc_sockets.len(), transactions_addrs.len());
+let rpc_len = rpc_sockets.len();
+let rpc_clients: Vec<_> = rpc_sockets.into_iter().map(RpcClient::new_socket).collect();
+Self {
+transactions_addrs,
+transactions_socket,
+rpc_clients,
+optimizer: ClientOptimizer::new(rpc_len),
+}
+}
+
+fn transactions_addr(&self) -> &SocketAddr {
+&self.transactions_addrs[self.optimizer.best()]
+}
+
+fn rpc_client(&self) -> &RpcClient {
+&self.rpc_clients[self.optimizer.best()]
+}
+
 /// Retry a sending a signed Transaction to the server for processing.
 pub fn retry_transfer_until_confirmed(
 &self,
@@ -100,15 +211,19 @@ impl ThinClient {
 serialize_into(&mut wr, &transaction)
 .expect("serialize Transaction in pub fn transfer_signed");
 self.transactions_socket
-.send_to(&buf[..], &self.transactions_addr)?;
+.send_to(&buf[..], &self.transactions_addr())?;
 if self
 .poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks)
 .is_ok()
 {
 return Ok(transaction.signatures[0]);
 }
-info!("{} tries failed transfer to {}", x, self.transactions_addr);
-let (blockhash, _fee_calculator) = self.rpc_client.get_recent_blockhash()?;
+info!(
+"{} tries failed transfer to {}",
+x,
+self.transactions_addr()
+);
+let (blockhash, _fee_calculator) = self.rpc_client().get_recent_blockhash()?;
 transaction.sign(keypairs, blockhash);
 }
 Err(io::Error::new(
@@ -123,39 +238,40 @@ impl ThinClient {
 polling_frequency: &Duration,
 timeout: &Duration,
 ) -> io::Result<u64> {
-self.rpc_client
+self.rpc_client()
 .poll_balance_with_timeout(pubkey, polling_frequency, timeout)
 }

 pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
-self.rpc_client.poll_get_balance(pubkey)
+self.rpc_client().poll_get_balance(pubkey)
 }

 pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
-self.rpc_client.wait_for_balance(pubkey, expected_balance)
+self.rpc_client().wait_for_balance(pubkey, expected_balance)
 }

 /// Check a signature in the bank. This method blocks
 /// until the server sends a response.
 pub fn check_signature(&self, signature: &Signature) -> bool {
-self.rpc_client.check_signature(signature)
+self.rpc_client().check_signature(signature)
 }

 pub fn fullnode_exit(&self) -> io::Result<bool> {
-self.rpc_client.fullnode_exit()
+self.rpc_client().fullnode_exit()
 }

 pub fn get_num_blocks_since_signature_confirmation(
 &mut self,
 sig: &Signature,
 ) -> io::Result<usize> {
-self.rpc_client
+self.rpc_client()
 .get_num_blocks_since_signature_confirmation(sig)
 }
 }

 impl Client for ThinClient {
 fn transactions_addr(&self) -> String {
-self.transactions_addr.to_string()
+self.transactions_addr().to_string()
 }
 }
@@ -188,20 +304,40 @@ impl SyncClient for ThinClient {
 }

 fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> {
-Ok(self.rpc_client.get_account_data(pubkey).ok())
+Ok(self.rpc_client().get_account_data(pubkey).ok())
+}
+
+fn get_account(&self, pubkey: &Pubkey) -> TransportResult<Option<Account>> {
+Ok(self.rpc_client().get_account(pubkey).ok())
 }

 fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
-let balance = self.rpc_client.get_balance(pubkey)?;
+let balance = self.rpc_client().get_balance(pubkey)?;
 Ok(balance)
 }

+fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
+let index = self.optimizer.experiment();
+let now = Instant::now();
+let recent_blockhash = self.rpc_clients[index].get_recent_blockhash();
+match recent_blockhash {
+Ok(recent_blockhash) => {
+self.optimizer.report(index, duration_as_ms(&now.elapsed()));
+Ok(recent_blockhash)
+}
+Err(e) => {
+self.optimizer.report(index, std::u64::MAX);
+Err(e)?
+}
+}
+}
+
 fn get_signature_status(
 &self,
 signature: &Signature,
 ) -> TransportResult<Option<transaction::Result<()>>> {
 let status = self
-.rpc_client
+.rpc_client()
 .get_signature_status(&signature.to_string())
 .map_err(|err| {
 io::Error::new(
@@ -212,13 +348,29 @@ impl SyncClient for ThinClient {
 Ok(status)
 }

-fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
-Ok(self.rpc_client.get_recent_blockhash()?)
+fn get_slot(&self) -> TransportResult<u64> {
+let slot = self.rpc_client().get_slot().map_err(|err| {
+io::Error::new(
+io::ErrorKind::Other,
+format!("send_transaction failed with error {:?}", err),
+)
+})?;
+Ok(slot)
 }

 fn get_transaction_count(&self) -> TransportResult<u64> {
-let transaction_count = self.rpc_client.get_transaction_count()?;
-Ok(transaction_count)
+let index = self.optimizer.experiment();
+let now = Instant::now();
+match self.rpc_client().get_transaction_count() {
+Ok(transaction_count) => {
+self.optimizer.report(index, duration_as_ms(&now.elapsed()));
+Ok(transaction_count)
+}
+Err(e) => {
+self.optimizer.report(index, std::u64::MAX);
+Err(e)?
+}
+}
 }

 /// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
@@ -228,16 +380,17 @@ impl SyncClient for ThinClient {
 min_confirmed_blocks: usize,
 ) -> TransportResult<()> {
 Ok(self
-.rpc_client
+.rpc_client()
 .poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
 }

 fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
-Ok(self.rpc_client.poll_for_signature(signature)?)
+Ok(self.rpc_client().poll_for_signature(signature)?)
 }

 fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> {
-Ok(self.rpc_client.get_new_blockhash(blockhash)?)
+let new_blockhash = self.rpc_client().get_new_blockhash(blockhash)?;
+Ok(new_blockhash)
 }
 }
@@ -249,7 +402,7 @@ impl AsyncClient for ThinClient {
 .expect("serialize Transaction in pub fn transfer_signed");
 assert!(buf.len() < PACKET_DATA_SIZE);
 self.transactions_socket
-.send_to(&buf[..], &self.transactions_addr)?;
+.send_to(&buf[..], &self.transactions_addr())?;
 Ok(transaction.signatures[0])
 }
 fn async_send_message(
@@ -296,3 +449,28 @@ pub fn create_client_with_timeout(
 let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
 ThinClient::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout)
 }
+
+#[cfg(test)]
+mod tests {
+use super::*;
+use rayon::prelude::*;
+
+#[test]
+fn test_client_optimizer() {
+solana_logger::setup();
+
+const NUM_CLIENTS: usize = 5;
+let optimizer = ClientOptimizer::new(NUM_CLIENTS);
+(0..NUM_CLIENTS).into_par_iter().for_each(|_| {
+let index = optimizer.experiment();
+optimizer.report(index, (NUM_CLIENTS - index) as u64);
+});
+
+let index = optimizer.experiment();
+optimizer.report(index, 50);
+assert_eq!(optimizer.best(), NUM_CLIENTS - 1);
+
+optimizer.report(optimizer.best(), std::u64::MAX);
+assert_eq!(optimizer.best(), NUM_CLIENTS - 2);
+}
+}
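ClientOptimizer gives each RPC endpoint one timed experiment, then keeps routing to the fastest one; a failure is reported as u64::MAX, which demotes that endpoint the next time a minimum is taken, and the test_client_optimizer test above exercises exactly that. A compact sketch of the same selection policy (endpoint names are placeholders):

# Compact sketch of the ClientOptimizer policy above: time each endpoint once,
# keep using the fastest, and demote an endpoint whose call fails.
UNREACHABLE = float("inf")

class EndpointPicker:
    def __init__(self, endpoints):
        self.endpoints = list(endpoints)
        self.times = {endpoint: None for endpoint in self.endpoints}

    def pick(self):
        # Still experimenting: use the first endpoint without a measurement.
        for endpoint, elapsed in self.times.items():
            if elapsed is None:
                return endpoint
        return self.best()

    def report(self, endpoint, elapsed_ms):
        self.times[endpoint] = elapsed_ms

    def best(self):
        measured = {e: t for e, t in self.times.items() if t is not None}
        if not measured:
            return self.endpoints[0]
        return min(measured, key=measured.get)

picker = EndpointPicker(["rpc-a", "rpc-b", "rpc-c"])
for endpoint, ms in [("rpc-a", 40), ("rpc-b", 15), ("rpc-c", 90)]:
    picker.report(endpoint, ms)
assert picker.best() == "rpc-b"
picker.report("rpc-b", UNREACHABLE)  # a failed call demotes the current best
assert picker.best() == "rpc-a"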
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.15.0"
+version = "0.16.2"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -14,69 +14,73 @@ edition = "2018"
 codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

 [features]
-chacha = []
 cuda = []
-erasure = []
 kvstore = ["solana-kvstore"]

 [dependencies]
 bincode = "1.1.4"
 bs58 = "0.2.0"
-byteorder = "1.3.1"
+byteorder = "1.3.2"
 chrono = { version = "0.4.0", features = ["serde"] }
-crc = { version = "1.8.1", optional = true }
 core_affinity = "0.5.9"
+crc = { version = "1.8.1", optional = true }
 hashbrown = "0.2.0"
 indexmap = "1.0"
 itertools = "0.8.0"
-jsonrpc-core = "11.0.0"
-jsonrpc-derive = "11.0.0"
-jsonrpc-http-server = "11.0.0"
-jsonrpc-pubsub = "11.0.0"
-jsonrpc-ws-server = "11.0.0"
-libc = "0.2.55"
+jsonrpc-core = "12.0.0"
+jsonrpc-derive = "12.0.0"
+jsonrpc-http-server = "12.0.0"
+jsonrpc-pubsub = "12.0.0"
+jsonrpc-ws-server = "12.0.0"
+libc = "0.2.58"
 log = "0.4.2"
 memmap = { version = "0.7.0", optional = true }
-nix = "0.14.0"
+nix = "0.14.1"
+num-traits = "0.2"
 rand = "0.6.5"
 rand_chacha = "0.1.1"
-rayon = "1.0.0"
-reed-solomon-erasure = "3.1.1"
-reqwest = "0.9.17"
+rayon = "1.1.0"
+reqwest = "0.9.18"
 rocksdb = "0.11.0"
-serde = "1.0.89"
-serde_derive = "1.0.91"
+serde = "1.0.92"
+serde_derive = "1.0.92"
 serde_json = "1.0.39"
-solana-budget-api = { path = "../programs/budget_api", version = "0.15.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.15.0" }
-solana-client = { path = "../client", version = "0.15.0" }
-solana-drone = { path = "../drone", version = "0.15.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.16.2" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.16.2" }
+solana-chacha-sys = { path = "../chacha-sys", version = "0.16.2" }
+solana-client = { path = "../client", version = "0.16.2" }
+solana-config-program = { path = "../programs/config_program", version = "0.16.2" }
+solana-drone = { path = "../drone", version = "0.16.2" }
 solana-ed25519-dalek = "0.2.0"
-solana-kvstore = { path = "../kvstore", version = "0.15.0" , optional = true }
-solana-logger = { path = "../logger", version = "0.15.0" }
-solana-metrics = { path = "../metrics", version = "0.15.0" }
-solana-netutil = { path = "../netutil", version = "0.15.0" }
-solana-runtime = { path = "../runtime", version = "0.15.0" }
-solana-sdk = { path = "../sdk", version = "0.15.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.15.0" }
-solana-stake-program = { path = "../programs/stake_program", version = "0.15.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.15.0" }
-solana-storage-program = { path = "../programs/storage_program", version = "0.15.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.15.0" }
-solana-vote-program = { path = "../programs/vote_program", version = "0.15.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.15.0" }
-solana-config-program = { path = "../programs/config_program", version = "0.15.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.15.0" }
-sys-info = "0.5.6"
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.2" }
+solana-kvstore = { path = "../kvstore", version = "0.16.2", optional = true }
+solana-logger = { path = "../logger", version = "0.16.2" }
+solana-metrics = { path = "../metrics", version = "0.16.2" }
+solana-netutil = { path = "../netutil", version = "0.16.2" }
+solana-runtime = { path = "../runtime", version = "0.16.2" }
+solana-sdk = { path = "../sdk", version = "0.16.2" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.16.2" }
+solana-stake-program = { path = "../programs/stake_program", version = "0.16.2" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.16.2" }
+solana-storage-program = { path = "../programs/storage_program", version = "0.16.2" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.16.2" }
+solana-vote-program = { path = "../programs/vote_program", version = "0.16.2" }
+solana-vote-signer = { path = "../vote-signer", version = "0.16.2" }
+sys-info = "0.5.7"
 tokio = "0.1"
 tokio-codec = "0.1"
 untrusted = "0.6.2"
|
||||||
|
# reed-solomon-erasure's simd_c feature fails to build for x86_64-pc-windows-msvc, use pure-rust
|
||||||
|
[target.'cfg(windows)'.dependencies]
|
||||||
|
reed-solomon-erasure = { version = "3.1.1", features = ["pure-rust"] }
|
||||||
|
[target.'cfg(not(windows))'.dependencies]
|
||||||
|
reed-solomon-erasure = "3.1.1"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
hex-literal = "0.2.0"
|
hex-literal = "0.2.0"
|
||||||
matches = "0.1.6"
|
matches = "0.1.6"
|
||||||
|
|
||||||
|
|
||||||
[[bench]]
|
[[bench]]
|
||||||
name = "banking_stage"
|
name = "banking_stage"
|
||||||
|
|
||||||
@ -99,5 +103,5 @@ name = "sigverify_stage"
|
|||||||
name = "poh"
|
name = "poh"
|
||||||
|
|
||||||
[[bench]]
|
[[bench]]
|
||||||
required-features = ["chacha"]
|
|
||||||
name = "chacha"
|
name = "chacha"
|
||||||
|
required-features = ["chacha"]
|
||||||
|
@@ -22,7 +22,7 @@ use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::Signature;
 use solana_sdk::system_transaction;
 use solana_sdk::timing::{
-    duration_as_ms, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES,
+    duration_as_us, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES,
 };
 use std::iter;
 use std::sync::atomic::Ordering;
@@ -33,18 +33,20 @@ use test::Bencher;

 fn check_txs(receiver: &Arc<Receiver<WorkingBankEntries>>, ref_tx_count: usize) {
     let mut total = 0;
+    let now = Instant::now();
     loop {
         let entries = receiver.recv_timeout(Duration::new(1, 0));
         if let Ok((_, entries)) = entries {
             for (entry, _) in &entries {
                 total += entry.transactions.len();
             }
-        } else {
-            break;
         }
         if total >= ref_tx_count {
             break;
         }
+        if now.elapsed().as_secs() > 60 {
+            break;
+        }
     }
     assert_eq!(total, ref_tx_count);
 }
@@ -89,7 +91,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
     solana_logger::setup();
     let num_threads = BankingStage::num_threads() as usize;
     //  a multiple of packet chunk 2X duplicates to avoid races
-    let txes = 192 * num_threads * 2;
+    const CHUNKS: usize = 32;
+    let txes = 192 * num_threads * CHUNKS;
     let mint_total = 1_000_000_000_000;
     let GenesisBlockInfo {
         mut genesis_block,
@@ -167,7 +170,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
     );
     poh_recorder.lock().unwrap().set_bank(&bank);

-    let half_len = verified.len() / 2;
+    let chunk_len = verified.len() / CHUNKS;
     let mut start = 0;

     // This is so that the signal_receiver does not go out of scope after the closure.
@@ -177,18 +180,33 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
     let signal_receiver2 = signal_receiver.clone();
     bencher.iter(move || {
         let now = Instant::now();
-        for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
-            trace!("sending... {}..{} {}", start, start + half_len, timestamp());
+        let mut sent = 0;
+        for v in verified[start..start + chunk_len].chunks(verified.len() / num_threads) {
+            trace!(
+                "sending... {}..{} {}",
+                start,
+                start + chunk_len,
+                timestamp()
+            );
+            for xv in v {
+                sent += xv.0.packets.len();
+            }
             verified_sender.send(v.to_vec()).unwrap();
         }
-        check_txs(&signal_receiver2, txes / 2);
-        trace!(
-            "time: {} checked: {}",
-            duration_as_ms(&now.elapsed()),
-            txes / 2
-        );
+        check_txs(&signal_receiver2, txes / CHUNKS);
+
+        // This signature clear may not actually clear the signatures
+        // in this chunk, but since we rotate between 32 chunks then
+        // we should clear them by the time we come around again to re-use that chunk.
         bank.clear_signatures();
-        start += half_len;
+        trace!(
+            "time: {} checked: {} sent: {}",
+            duration_as_us(&now.elapsed()),
+            txes / CHUNKS,
+            sent,
+        );
+        start += chunk_len;
         start %= verified.len();
     });
     drop(vote_sender);
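The bench now rotates through CHUNKS slices of the pre-verified packets instead of alternating between two halves. A tiny self-contained sketch of that rotation arithmetic (an illustration, not code from the bench):

```rust
fn main() {
    const CHUNKS: usize = 32;
    // Stand-in for the pre-verified packet batches.
    let verified: Vec<u32> = (0..(192 * CHUNKS) as u32).collect();
    let chunk_len = verified.len() / CHUNKS;

    let mut start = 0;
    for iteration in 0..CHUNKS {
        // Each bench iteration sends exactly one chunk...
        let chunk = &verified[start..start + chunk_len];
        assert_eq!(chunk.len(), chunk_len);

        // ...then advances so the next iteration uses a different chunk,
        // wrapping around once every CHUNKS iterations.
        start += chunk_len;
        start %= verified.len();
        if iteration == CHUNKS - 1 {
            assert_eq!(start, 0); // back at the first chunk
        }
    }
}
```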
core/benches/poh_verify.rs (new file, +48)
@@ -0,0 +1,48 @@
+#![feature(test)]
+extern crate test;
+
+use solana::entry::EntrySlice;
+use solana::entry::{next_entry_mut, Entry};
+use solana_sdk::hash::{hash, Hash};
+use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::system_transaction;
+use test::Bencher;
+
+const NUM_HASHES: u64 = 400;
+const NUM_ENTRIES: usize = 800;
+
+#[bench]
+fn bench_poh_verify_ticks(bencher: &mut Bencher) {
+    let zero = Hash::default();
+    let mut cur_hash = hash(&zero.as_ref());
+    let start = *&cur_hash;
+
+    let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
+    for _ in 0..NUM_ENTRIES {
+        ticks.push(next_entry_mut(&mut cur_hash, NUM_HASHES, vec![]));
+    }
+
+    bencher.iter(|| {
+        ticks.verify(&start);
+    })
+}
+
+#[bench]
+fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
+    let zero = Hash::default();
+    let mut cur_hash = hash(&zero.as_ref());
+    let start = *&cur_hash;
+
+    let keypair1 = Keypair::new();
+    let pubkey1 = keypair1.pubkey();
+
+    let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
+    for _ in 0..NUM_ENTRIES {
+        let tx = system_transaction::create_user_account(&keypair1, &pubkey1, 42, cur_hash);
+        ticks.push(next_entry_mut(&mut cur_hash, NUM_HASHES, vec![tx]));
+    }
+
+    bencher.iter(|| {
+        ticks.verify(&start);
+    })
+}
@@ -5,44 +5,41 @@ use std::path::Path;
 fn main() {
     println!("cargo:rerun-if-changed=build.rs");

-    let perf_libs_dir = {
-        let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
-        let mut path = Path::new(&manifest_dir);
-        path = path.parent().unwrap();
-        path.join(Path::new("target/perf-libs"))
-    };
-    let perf_libs_dir = perf_libs_dir.to_str().unwrap();
-
-    // Ensure `perf_libs_dir` exists.  It's been observed that
-    // a cargo:rerun-if-changed= directive with a non-existent
-    // directory triggers a rebuild on every |cargo build| invocation
-    fs::create_dir_all(&perf_libs_dir).unwrap_or_else(|err| {
-        if err.kind() != std::io::ErrorKind::AlreadyExists {
-            panic!("Unable to create {}: {:?}", perf_libs_dir, err);
-        }
-    });
-
-    let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
-    let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
-
-    if chacha || cuda {
+    if env::var("CARGO_FEATURE_CUDA").is_ok() {
+        println!("cargo:rustc-cfg=cuda");
+
+        let perf_libs_dir = {
+            let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
+            let mut path = Path::new(&manifest_dir);
+            path = path.parent().unwrap();
+            let mut path = path.join(Path::new("target/perf-libs"));
+            path.push(
+                env::var("SOLANA_PERF_LIBS_CUDA")
+                    .unwrap_or_else(|err| panic!("SOLANA_PERF_LIBS_CUDA not defined: {}", err)),
+            );
+            path
+        };
+        let perf_libs_dir = perf_libs_dir.to_str().unwrap();
+
+        // Ensure `perf_libs_dir` exists.  It's been observed that
+        // a cargo:rerun-if-changed= directive with a non-existent
+        // directory triggers a rebuild on every |cargo build| invocation
+        fs::create_dir_all(&perf_libs_dir).unwrap_or_else(|err| {
+            if err.kind() != std::io::ErrorKind::AlreadyExists {
+                panic!("Unable to create {}: {:?}", perf_libs_dir, err);
+            }
+        });
         println!("cargo:rerun-if-changed={}", perf_libs_dir);
         println!("cargo:rustc-link-search=native={}", perf_libs_dir);
-    }
-    if chacha {
-        println!("cargo:rerun-if-changed={}/libcpu-crypt.a", perf_libs_dir);
-    }
-    if cuda {
-        let cuda_home = match env::var("CUDA_HOME") {
-            Ok(cuda_home) => cuda_home,
-            Err(_) => String::from("/usr/local/cuda"),
-        };
-
-        println!("cargo:rerun-if-changed={}/libcuda-crypt.a", perf_libs_dir);
-        println!("cargo:rustc-link-lib=static=cuda-crypt");
-        println!("cargo:rustc-link-search=native={}/lib64", cuda_home);
-        println!("cargo:rustc-link-lib=dylib=cudart");
-        println!("cargo:rustc-link-lib=dylib=cuda");
-        println!("cargo:rustc-link-lib=dylib=cudadevrt");
+        if cfg!(windows) {
+            println!("cargo:rerun-if-changed={}/libcuda-crypt.dll", perf_libs_dir);
+        } else if cfg!(target_os = "macos") {
+            println!(
+                "cargo:rerun-if-changed={}/libcuda-crypt.dylib",
+                perf_libs_dir
+            );
+        } else {
+            println!("cargo:rerun-if-changed={}/libcuda-crypt.so", perf_libs_dir);
+        }
     }
 }
@@ -1,10 +1,16 @@
 //! The `bank_forks` module implments BankForks a DAG of checkpointed Banks

-use hashbrown::{HashMap, HashSet};
+use bincode::{deserialize_from, serialize_into};
 use solana_metrics::inc_new_counter_info;
-use solana_runtime::bank::Bank;
+use solana_runtime::bank::{Bank, BankRc, StatusCacheRc};
+use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::timing;
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::fs::File;
+use std::io::{BufReader, BufWriter, Error, ErrorKind};
 use std::ops::Index;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Instant;

@@ -12,6 +18,8 @@ pub struct BankForks {
     banks: HashMap<u64, Arc<Bank>>,
     working_bank: Arc<Bank>,
     root: u64,
+    slots: HashSet<u64>,
+    snapshot_path: Option<String>,
 }

 impl Index<u64> for BankForks {
@@ -30,6 +38,8 @@ impl BankForks {
             banks,
             working_bank,
             root: 0,
+            slots: HashSet::new(),
+            snapshot_path: None,
         }
     }

@@ -45,6 +55,7 @@ impl BankForks {
     }

     /// Create a map of bank slot id to the set of all of its descendants
+    #[allow(clippy::or_fun_call)]
     pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
         let mut descendants = HashMap::new();
         for bank in self.banks.values() {
@@ -91,6 +102,8 @@ impl BankForks {
             root,
             banks,
             working_bank,
+            slots: HashSet::new(),
+            snapshot_path: None,
         }
     }

@@ -138,9 +151,211 @@ impl BankForks {
     }

     fn prune_non_root(&mut self, root: u64) {
+        let slots: HashSet<u64> = self
+            .banks
+            .iter()
+            .filter(|(_, b)| b.is_frozen())
+            .map(|(k, _)| *k)
+            .collect();
         let descendants = self.descendants();
         self.banks
-            .retain(|slot, _| descendants[&root].contains(slot))
+            .retain(|slot, _| descendants[&root].contains(slot));
+        if self.snapshot_path.is_some() {
+            let diff: HashSet<_> = slots.symmetric_difference(&self.slots).collect();
+            trace!("prune non root {} - {:?}", root, diff);
+            for slot in diff.iter() {
+                if **slot > root {
+                    let _ = self.add_snapshot(**slot, root);
+                } else {
+                    BankForks::remove_snapshot(**slot, &self.snapshot_path);
+                }
+            }
+        }
+        self.slots = slots.clone();
+    }
+
+    fn get_io_error(error: &str) -> Error {
+        warn!("BankForks error: {:?}", error);
+        Error::new(ErrorKind::Other, error)
+    }
+
+    fn get_snapshot_path(path: &Option<String>) -> PathBuf {
+        Path::new(&path.clone().unwrap()).to_path_buf()
+    }
+
+    pub fn add_snapshot(&self, slot: u64, root: u64) -> Result<(), Error> {
+        let path = BankForks::get_snapshot_path(&self.snapshot_path);
+        fs::create_dir_all(path.clone())?;
+        let bank_file = format!("{}", slot);
+        let bank_file_path = path.join(bank_file);
+        trace!("path: {:?}", bank_file_path);
+        let file = File::create(bank_file_path)?;
+        let mut stream = BufWriter::new(file);
+        let bank_slot = self.get(slot);
+        if bank_slot.is_none() {
+            return Err(BankForks::get_io_error("bank_forks get error"));
+        }
+        let bank = bank_slot.unwrap().clone();
+        serialize_into(&mut stream, &*bank)
+            .map_err(|_| BankForks::get_io_error("serialize bank error"))?;
+        let mut parent_slot: u64 = 0;
+        if let Some(parent_bank) = bank.parent() {
+            parent_slot = parent_bank.slot();
+        }
+        serialize_into(&mut stream, &parent_slot)
+            .map_err(|_| BankForks::get_io_error("serialize bank parent error"))?;
+        serialize_into(&mut stream, &root)
+            .map_err(|_| BankForks::get_io_error("serialize root error"))?;
+        serialize_into(&mut stream, &bank.src)
+            .map_err(|_| BankForks::get_io_error("serialize bank status cache error"))?;
+        serialize_into(&mut stream, &bank.rc)
+            .map_err(|_| BankForks::get_io_error("serialize bank accounts error"))?;
+        Ok(())
+    }
+
+    pub fn remove_snapshot(slot: u64, path: &Option<String>) {
+        let path = BankForks::get_snapshot_path(path);
+        let bank_file = format!("{}", slot);
+        let bank_file_path = path.join(bank_file);
+        let _ = fs::remove_file(bank_file_path);
+    }
+
+    pub fn set_snapshot_config(&mut self, path: Option<String>) {
+        self.snapshot_path = path;
+    }
+
+    fn load_snapshots(
+        names: &[u64],
+        bank0: &mut Bank,
+        bank_maps: &mut Vec<(u64, u64, Bank)>,
+        status_cache_rc: &StatusCacheRc,
+        snapshot_path: &Option<String>,
+    ) -> Option<u64> {
+        let path = BankForks::get_snapshot_path(snapshot_path);
+        let mut bank_root: Option<u64> = None;
+
+        for bank_slot in names.iter().rev() {
+            let bank_path = format!("{}", bank_slot);
+            let bank_file_path = path.join(bank_path.clone());
+            info!("Load from {:?}", bank_file_path);
+            let file = File::open(bank_file_path);
+            if file.is_err() {
+                warn!("Snapshot file open failed for {}", bank_slot);
+                continue;
+            }
+            let file = file.unwrap();
+            let mut stream = BufReader::new(file);
+            let bank: Result<Bank, std::io::Error> = deserialize_from(&mut stream)
+                .map_err(|_| BankForks::get_io_error("deserialize bank error"));
+            let slot: Result<u64, std::io::Error> = deserialize_from(&mut stream)
+                .map_err(|_| BankForks::get_io_error("deserialize bank parent error"));
+            let parent_slot = if slot.is_ok() { slot.unwrap() } else { 0 };
+            let root: Result<u64, std::io::Error> = deserialize_from(&mut stream)
+                .map_err(|_| BankForks::get_io_error("deserialize root error"));
+            let status_cache: Result<StatusCacheRc, std::io::Error> = deserialize_from(&mut stream)
+                .map_err(|_| BankForks::get_io_error("deserialize bank status cache error"));
+            if bank_root.is_none() && bank0.rc.update_from_stream(&mut stream).is_ok() {
+                bank_root = Some(root.unwrap());
+            }
+            if bank_root.is_some() {
+                match bank {
+                    Ok(v) => {
+                        if status_cache.is_ok() {
+                            status_cache_rc.append(&status_cache.unwrap());
+                        }
+                        bank_maps.push((*bank_slot, parent_slot, v));
+                    }
+                    Err(_) => warn!("Load snapshot failed for {}", bank_slot),
+                }
+            } else {
+                BankForks::remove_snapshot(*bank_slot, snapshot_path);
+                warn!("Load snapshot rc failed for {}", bank_slot);
+            }
+        }
+
+        bank_root
+    }
+
+    fn setup_banks(
+        bank_maps: &mut Vec<(u64, u64, Bank)>,
+        bank_rc: &BankRc,
+        status_cache_rc: &StatusCacheRc,
+    ) -> (HashMap<u64, Arc<Bank>>, HashSet<u64>, u64) {
+        let mut banks = HashMap::new();
+        let mut slots = HashSet::new();
+        let (last_slot, last_parent_slot, mut last_bank) = bank_maps.remove(0);
+        last_bank.set_bank_rc(&bank_rc, &status_cache_rc);
+
+        while let Some((slot, parent_slot, mut bank)) = bank_maps.pop() {
+            bank.set_bank_rc(&bank_rc, &status_cache_rc);
+            if parent_slot != 0 {
+                if let Some(parent) = banks.get(&parent_slot) {
+                    bank.set_parent(parent);
+                }
+            }
+            if slot > 0 {
+                banks.insert(slot, Arc::new(bank));
+                slots.insert(slot);
+            }
+        }
+        if last_parent_slot != 0 {
+            if let Some(parent) = banks.get(&last_parent_slot) {
+                last_bank.set_parent(parent);
+            }
+        }
+        banks.insert(last_slot, Arc::new(last_bank));
+        slots.insert(last_slot);

+        (banks, slots, last_slot)
+    }
+
+    pub fn load_from_snapshot(
+        genesis_block: &GenesisBlock,
+        account_paths: Option<String>,
+        snapshot_path: &Option<String>,
+    ) -> Result<Self, Error> {
+        let path = BankForks::get_snapshot_path(snapshot_path);
+        let paths = fs::read_dir(path)?;
+        let mut names = paths
+            .filter_map(|entry| {
+                entry.ok().and_then(|e| {
+                    e.path()
+                        .file_name()
+                        .and_then(|n| n.to_str().map(|s| s.parse::<u64>().unwrap()))
+                })
+            })
+            .collect::<Vec<u64>>();
+
+        names.sort();
+        let mut bank_maps = vec![];
+        let status_cache_rc = StatusCacheRc::default();
+        let id = (names[names.len() - 1] + 1) as usize;
+        let mut bank0 =
+            Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
+        bank0.freeze();
+        let bank_root = BankForks::load_snapshots(
+            &names,
+            &mut bank0,
+            &mut bank_maps,
+            &status_cache_rc,
+            snapshot_path,
+        );
+        if bank_maps.is_empty() || bank_root.is_none() {
+            BankForks::remove_snapshot(0, snapshot_path);
+            return Err(Error::new(ErrorKind::Other, "no snapshots loaded"));
+        }
+
+        let root = bank_root.unwrap();
+        let (banks, slots, last_slot) =
+            BankForks::setup_banks(&mut bank_maps, &bank0.rc, &status_cache_rc);
+        let working_bank = banks[&last_slot].clone();
+        Ok(BankForks {
+            banks,
+            working_bank,
+            root,
+            slots,
+            snapshot_path: snapshot_path.clone(),
+        })
     }
 }

@@ -150,6 +365,10 @@ mod tests {
     use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
     use solana_sdk::hash::Hash;
     use solana_sdk::pubkey::Pubkey;
+    use solana_sdk::signature::{Keypair, KeypairUtil};
+    use solana_sdk::system_transaction;
+    use std::env;
+    use std::fs::remove_dir_all;

     #[test]
     fn test_bank_forks() {
@@ -174,8 +393,8 @@ mod tests {
         let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
         bank_forks.insert(bank);
         let descendants = bank_forks.descendants();
-        let children: Vec<u64> = descendants[&0].iter().cloned().collect();
-        assert_eq!(children, vec![1, 2]);
+        let children: HashSet<u64> = [1u64, 2u64].to_vec().into_iter().collect();
+        assert_eq!(children, *descendants.get(&0).unwrap());
         assert!(descendants[&1].is_empty());
         assert!(descendants[&2].is_empty());
     }
@@ -219,4 +438,112 @@ mod tests {
         assert_eq!(bank_forks.active_banks(), vec![1]);
     }

+    struct TempPaths {
+        pub paths: String,
+    }
+
+    #[macro_export]
+    macro_rules! tmp_bank_accounts_name {
+        () => {
+            &format!("{}-{}", file!(), line!())
+        };
+    }
+
+    #[macro_export]
+    macro_rules! get_tmp_bank_accounts_path {
+        () => {
+            get_tmp_bank_accounts_path(tmp_bank_accounts_name!())
+        };
+    }
+
+    impl Drop for TempPaths {
+        fn drop(&mut self) {
+            let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
+            paths.iter().for_each(|p| {
+                let _ignored = remove_dir_all(p);
+            });
+        }
+    }
+
+    fn get_paths_vec(paths: &str) -> Vec<String> {
+        paths.split(',').map(|s| s.to_string()).collect()
+    }
+
+    fn get_tmp_snapshots_path() -> TempPaths {
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let path = format!("{}/snapshots", out_dir);
+        TempPaths {
+            paths: path.to_string(),
+        }
+    }
+
+    fn get_tmp_bank_accounts_path(paths: &str) -> TempPaths {
+        let vpaths = get_paths_vec(paths);
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let vpaths: Vec<_> = vpaths
+            .iter()
+            .map(|path| format!("{}/{}", out_dir, path))
+            .collect();
+        TempPaths {
+            paths: vpaths.join(","),
+        }
+    }
+
+    fn restore_from_snapshot(
+        genesis_block: &GenesisBlock,
+        bank_forks: BankForks,
+        account_paths: Option<String>,
+        last_slot: u64,
+    ) {
+        let new =
+            BankForks::load_from_snapshot(&genesis_block, account_paths, &bank_forks.snapshot_path)
+                .unwrap();
+        for (slot, _) in new.banks.iter() {
+            if *slot > 0 {
+                let bank = bank_forks.banks.get(slot).unwrap().clone();
+                let new_bank = new.banks.get(slot).unwrap();
+                bank.compare_bank(&new_bank);
+            }
+        }
+        assert_eq!(new.working_bank().slot(), last_slot);
+        for (slot, _) in new.banks.iter() {
+            BankForks::remove_snapshot(*slot, &bank_forks.snapshot_path);
+        }
+    }
+
+    #[test]
+    fn test_bank_forks_snapshot_n() {
+        solana_logger::setup();
+        let path = get_tmp_bank_accounts_path!();
+        let spath = get_tmp_snapshots_path();
+        let GenesisBlockInfo {
+            genesis_block,
+            mint_keypair,
+            ..
+        } = create_genesis_block(10_000);
+        for index in 0..10 {
+            let bank0 = Bank::new_with_paths(&genesis_block, Some(path.paths.clone()));
+            bank0.freeze();
+            let slot = bank0.slot();
+            let mut bank_forks = BankForks::new(0, bank0);
+            bank_forks.set_snapshot_config(Some(spath.paths.clone()));
+            bank_forks.add_snapshot(slot, 0).unwrap();
+            for forks in 0..index {
+                let bank = Bank::new_from_parent(&bank_forks[forks], &Pubkey::default(), forks + 1);
+                let key1 = Keypair::new().pubkey();
+                let tx = system_transaction::create_user_account(
+                    &mint_keypair,
+                    &key1,
+                    1,
+                    genesis_block.hash(),
+                );
+                assert_eq!(bank.process_transaction(&tx), Ok(()));
+                bank.freeze();
+                let slot = bank.slot();
+                bank_forks.insert(bank);
+                bank_forks.add_snapshot(slot, 0).unwrap();
+            }
+            restore_from_snapshot(&genesis_block, bank_forks, Some(path.paths.clone()), index);
+        }
+    }
 }
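The new `prune_non_root` logic above uses a symmetric difference between the previously frozen slot set and the current one to decide which slots get a fresh snapshot and which have their snapshot removed. A small self-contained sketch of just that decision (the `add_snapshot`/`remove_snapshot` calls are stand-ins, not the real methods):

```rust
use std::collections::HashSet;

// Slots that changed frozen-status since the last prune either get snapshotted
// (if they are past the root) or have their on-disk snapshot dropped.
fn prune_decisions(
    frozen_now: &HashSet<u64>,
    frozen_before: &HashSet<u64>,
    root: u64,
) -> (Vec<u64>, Vec<u64>) {
    let mut snapshot = vec![];
    let mut remove = vec![];
    for slot in frozen_now.symmetric_difference(frozen_before) {
        if *slot > root {
            snapshot.push(*slot); // would call add_snapshot(slot, root)
        } else {
            remove.push(*slot); // would call remove_snapshot(slot, path)
        }
    }
    (snapshot, remove)
}

fn main() {
    let before: HashSet<u64> = [1, 2, 3].iter().cloned().collect();
    let now: HashSet<u64> = [2, 3, 4, 5].iter().cloned().collect();
    let (mut snapshot, mut remove) = prune_decisions(&now, &before, 3);
    snapshot.sort();
    remove.sort();
    assert_eq!(snapshot, vec![4, 5]); // newly frozen past the root
    assert_eq!(remove, vec![1]);      // no longer tracked, at or below the root
}
```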
@@ -67,8 +67,7 @@ impl BankingStage {
             poh_recorder,
             verified_receiver,
             verified_vote_receiver,
-            2, // 1 for voting, 1 for banking.
-               // More than 2 threads is slower in testnet testing.
+            4,
         )
     }

@@ -86,7 +85,7 @@ impl BankingStage {
         // This thread talks to poh_service and broadcasts the entries once they have been recorded.
        // Once an entry has been recorded, its blockhash is registered with the bank.
         let exit = Arc::new(AtomicBool::new(false));
+        let my_pubkey = cluster_info.read().unwrap().id();
         // Many banks that process transactions in parallel.
         let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
             .map(|i| {
@@ -105,6 +104,7 @@ impl BankingStage {
                 .name("solana-banking-stage-tx".to_string())
                 .spawn(move || {
                     Self::process_loop(
+                        my_pubkey,
                         &verified_receiver,
                         &poh_recorder,
                         &cluster_info,
@@ -242,14 +242,13 @@ impl BankingStage {
     }

     fn process_buffered_packets(
+        my_pubkey: &Pubkey,
         socket: &std::net::UdpSocket,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         buffered_packets: &mut Vec<PacketsAndOffsets>,
         enable_forwarding: bool,
     ) -> Result<()> {
-        let rcluster_info = cluster_info.read().unwrap();
-
         let (decision, next_leader) = {
             let poh = poh_recorder.lock().unwrap();
             let next_leader = poh.next_slot_leader();
@@ -258,7 +257,7 @@ impl BankingStage {
                 next_leader,
                 poh.bank().is_some(),
                 poh.would_be_leader(DEFAULT_TICKS_PER_SLOT * 2),
-                &rcluster_info.id(),
+                my_pubkey,
             ),
             next_leader,
         )
@@ -266,28 +265,31 @@ impl BankingStage {

         match decision {
             BufferedPacketsDecision::Consume => {
-                let mut unprocessed = Self::consume_buffered_packets(
-                    &rcluster_info.id(),
-                    poh_recorder,
-                    buffered_packets,
-                )?;
+                let mut unprocessed =
+                    Self::consume_buffered_packets(my_pubkey, poh_recorder, buffered_packets)?;
                 buffered_packets.append(&mut unprocessed);
                 Ok(())
             }
             BufferedPacketsDecision::Forward => {
                 if enable_forwarding {
                     next_leader.map_or(Ok(()), |leader_pubkey| {
-                        rcluster_info
-                            .lookup(&leader_pubkey)
-                            .map_or(Ok(()), |leader| {
-                                let _ = Self::forward_buffered_packets(
-                                    &socket,
-                                    &leader.tpu_via_blobs,
-                                    &buffered_packets,
-                                );
-                                buffered_packets.clear();
-                                Ok(())
-                            })
+                        let leader_addr = {
+                            cluster_info
+                                .read()
+                                .unwrap()
+                                .lookup(&leader_pubkey)
+                                .map(|leader| leader.tpu_via_blobs)
+                        };
+
+                        leader_addr.map_or(Ok(()), |leader_addr| {
+                            let _ = Self::forward_buffered_packets(
+                                &socket,
+                                &leader_addr,
+                                &buffered_packets,
+                            );
+                            buffered_packets.clear();
+                            Ok(())
+                        })
                     })
                 } else {
                     buffered_packets.clear();
@@ -299,6 +301,7 @@ impl BankingStage {
     }

     pub fn process_loop(
+        my_pubkey: Pubkey,
         verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
@@ -311,6 +314,7 @@ impl BankingStage {
         loop {
             if !buffered_packets.is_empty() {
                 Self::process_buffered_packets(
+                    &my_pubkey,
                     &socket,
                     poh_recorder,
                     cluster_info,
@@ -331,11 +335,11 @@ impl BankingStage {
             };

             match Self::process_packets(
+                &my_pubkey,
                 &verified_receiver,
                 &poh_recorder,
                 recv_start,
                 recv_timeout,
-                cluster_info,
                 id,
             ) {
                 Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
@@ -370,26 +374,23 @@ impl BankingStage {
             .collect()
     }

-    fn record_transactions<'a, 'b>(
-        bank: &'a Bank,
-        txs: &'b [Transaction],
+    fn record_transactions(
+        bank_slot: u64,
+        txs: &[Transaction],
         results: &[transaction::Result<()>],
         poh: &Arc<Mutex<PohRecorder>>,
-        recordable_txs: &'b mut Vec<&'b Transaction>,
-    ) -> Result<LockedAccountsResults<'a, 'b, &'b Transaction>> {
+    ) -> Result<()> {
         let processed_transactions: Vec<_> = results
             .iter()
             .zip(txs.iter())
             .filter_map(|(r, x)| {
                 if Bank::can_commit(r) {
-                    recordable_txs.push(x);
                     Some(x.clone())
                 } else {
                     None
                 }
             })
             .collect();
-        let record_locks = bank.lock_record_accounts(recordable_txs);
         debug!("processed: {} ", processed_transactions.len());
         // unlock all the accounts with errors which are filtered by the above `filter_map`
         if !processed_transactions.is_empty() {
@@ -401,16 +402,16 @@ impl BankingStage {
             // record and unlock will unlock all the successful transactions
             poh.lock()
                 .unwrap()
-                .record(bank.slot(), hash, processed_transactions)?;
+                .record(bank_slot, hash, processed_transactions)?;
         }
-        Ok(record_locks)
+        Ok(())
     }

     fn process_and_record_transactions_locked(
         bank: &Bank,
         txs: &[Transaction],
         poh: &Arc<Mutex<PohRecorder>>,
-        lock_results: &LockedAccountsResults<Transaction>,
+        lock_results: &LockedAccountsResults,
     ) -> Result<()> {
         let now = Instant::now();
         // Use a shorter maximum age when adding transactions into the pipeline.  This will reduce
@@ -423,12 +424,10 @@ impl BankingStage {

         let freeze_lock = bank.freeze_lock();

-        let mut recordable_txs = vec![];
-        let (record_time, record_locks) = {
+        let record_time = {
             let now = Instant::now();
-            let record_locks =
-                Self::record_transactions(bank, txs, &results, poh, &mut recordable_txs)?;
-            (now.elapsed(), record_locks)
+            Self::record_transactions(bank.slot(), txs, &results, poh)?;
+            now.elapsed()
         };

         let commit_time = {
@@ -437,7 +436,6 @@ impl BankingStage {
             now.elapsed()
         };

-        drop(record_locks);
         drop(freeze_lock);

         debug!(
@@ -701,11 +699,11 @@ impl BankingStage {

     /// Process the incoming packets
     pub fn process_packets(
+        my_pubkey: &Pubkey,
         verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
         poh: &Arc<Mutex<PohRecorder>>,
         recv_start: &mut Instant,
         recv_timeout: Duration,
-        cluster_info: &Arc<RwLock<ClusterInfo>>,
         id: u32,
     ) -> Result<UnprocessedPackets> {
         let mms = verified_receiver
@@ -747,7 +745,6 @@ impl BankingStage {

         if processed < verified_txs_len {
             let next_leader = poh.lock().unwrap().next_slot_leader();
-            let my_pubkey = cluster_info.read().unwrap().id();
             // Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
             while let Some((msgs, vers)) = mms_iter.next() {
                 let packet_indexes = Self::generate_packet_indexes(vers);
@@ -1183,14 +1180,8 @@ mod tests {
         ];

         let mut results = vec![Ok(()), Ok(())];
-        BankingStage::record_transactions(
-            &bank,
-            &transactions,
-            &results,
-            &poh_recorder,
-            &mut vec![],
-        )
-        .unwrap();
+        BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder)
+            .unwrap();
         let (_, entries) = entry_receiver.recv().unwrap();
         assert_eq!(entries[0].0.transactions.len(), transactions.len());

@@ -1199,27 +1190,15 @@ mod tests {
             1,
             InstructionError::new_result_with_negative_lamports(),
         ));
-        BankingStage::record_transactions(
-            &bank,
-            &transactions,
-            &results,
-            &poh_recorder,
-            &mut vec![],
-        )
-        .unwrap();
+        BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder)
+            .unwrap();
         let (_, entries) = entry_receiver.recv().unwrap();
         assert_eq!(entries[0].0.transactions.len(), transactions.len());

         // Other TransactionErrors should not be recorded
         results[0] = Err(TransactionError::AccountNotFound);
-        BankingStage::record_transactions(
-            &bank,
-            &transactions,
-            &results,
-            &poh_recorder,
-            &mut vec![],
-        )
-        .unwrap();
+        BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder)
+            .unwrap();
         let (_, entries) = entry_receiver.recv().unwrap();
         assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
     }
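`record_transactions` above keeps only transactions whose execution result is committable before handing them to the PoH recorder. A self-contained sketch of that zip/filter_map step, with a toy error type standing in for `transaction::Result` and a simplified stand-in for `Bank::can_commit`:

```rust
#[derive(Debug, PartialEq)]
enum TxError {
    AccountNotFound,
    InstructionError, // committable: the failure itself is recorded on-chain
}

// A result is committable if it succeeded or failed in a way that should
// still be recorded (simplified stand-in for the Bank::can_commit idea).
fn can_commit(result: &Result<(), TxError>) -> bool {
    match result {
        Ok(()) => true,
        Err(TxError::InstructionError) => true,
        Err(_) => false,
    }
}

fn committable_txs(txs: &[&str], results: &[Result<(), TxError>]) -> Vec<String> {
    results
        .iter()
        .zip(txs.iter())
        .filter_map(|(r, tx)| {
            if can_commit(r) {
                Some(tx.to_string())
            } else {
                None
            }
        })
        .collect()
}

fn main() {
    let txs = ["tx0", "tx1", "tx2"];
    let results = [
        Ok(()),
        Err(TxError::InstructionError),
        Err(TxError::AccountNotFound),
    ];
    // tx2 is dropped entirely: an AccountNotFound result is not recorded at all.
    assert_eq!(committable_txs(&txs, &results), vec!["tx0", "tx1"]);
}
```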
@@ -10,10 +10,6 @@ use serde_json::json;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use std::cell::RefCell;
-use std::io::prelude::*;
-use std::net::Shutdown;
-use std::os::unix::net::UnixStream;
-use std::path::Path;

 pub trait EntryWriter: std::fmt::Debug {
     fn write(&self, payload: String) -> Result<()>;
@@ -48,16 +44,29 @@ pub struct EntrySocket {
     socket: String,
 }

-const MESSAGE_TERMINATOR: &str = "\n";
-
 impl EntryWriter for EntrySocket {
+    #[cfg(not(windows))]
     fn write(&self, payload: String) -> Result<()> {
+        use std::io::prelude::*;
+        use std::net::Shutdown;
+        use std::os::unix::net::UnixStream;
+        use std::path::Path;
+
+        const MESSAGE_TERMINATOR: &str = "\n";
+
         let mut socket = UnixStream::connect(Path::new(&self.socket))?;
         socket.write_all(payload.as_bytes())?;
         socket.write_all(MESSAGE_TERMINATOR.as_bytes())?;
         socket.shutdown(Shutdown::Write)?;
         Ok(())
     }
+    #[cfg(windows)]
+    fn write(&self, _payload: String) -> Result<()> {
+        Err(crate::result::Error::from(std::io::Error::new(
+            std::io::ErrorKind::Other,
+            "EntryWriter::write() not implemented for windows",
+        )))
+    }
 }

 pub trait BlockstreamEvents {
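The EntrySocket change above gates the Unix-socket write behind `#[cfg(not(windows))]` and adds a stub for Windows. The general pattern of compile-time per-platform method bodies looks like this (a generic sketch, not the blockstream code):

```rust
use std::io;

struct Writer;

impl Writer {
    // On non-Windows targets the real implementation runs; platform-only
    // imports can live inside the function so other targets never see them.
    #[cfg(not(windows))]
    fn write(&self, payload: &str) -> io::Result<()> {
        println!("writing {} bytes over a unix socket", payload.len());
        Ok(())
    }

    // On Windows the same method name exists but reports that the feature is
    // unavailable, so callers compile unchanged on every platform.
    #[cfg(windows)]
    fn write(&self, _payload: &str) -> io::Result<()> {
        Err(io::Error::new(
            io::ErrorKind::Other,
            "write() not implemented for windows",
        ))
    }
}

fn main() {
    let _ = Writer.write("hello");
}
```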
@@ -11,7 +11,7 @@ use solana_kvstore as kvstore;

 use bincode::deserialize;

-use hashbrown::HashMap;
+use std::collections::HashMap;

 #[cfg(not(feature = "kvstore"))]
 use rocksdb;
@@ -84,6 +84,7 @@ pub struct Blocktree {
     db: Arc<Database>,
     meta_cf: LedgerColumn<cf::SlotMeta>,
     data_cf: LedgerColumn<cf::Data>,
+    dead_slots_cf: LedgerColumn<cf::DeadSlots>,
     erasure_cf: LedgerColumn<cf::Coding>,
     erasure_meta_cf: LedgerColumn<cf::ErasureMeta>,
     orphans_cf: LedgerColumn<cf::Orphans>,
@@ -97,6 +98,8 @@ pub struct Blocktree {
 pub const META_CF: &str = "meta";
 // Column family for the data in a leader slot
 pub const DATA_CF: &str = "data";
+// Column family for slots that have been marked as dead
+pub const DEAD_SLOTS_CF: &str = "dead_slots";
 // Column family for erasure data
 pub const ERASURE_CF: &str = "erasure";
 pub const ERASURE_META_CF: &str = "erasure_meta";
@@ -124,6 +127,9 @@ impl Blocktree {
         // Create the data column family
         let data_cf = db.column();

+        // Create the dead slots column family
+        let dead_slots_cf = db.column();
+
         // Create the erasure column family
         let erasure_cf = db.column();

@@ -143,6 +149,7 @@ impl Blocktree {
             db,
             meta_cf,
             data_cf,
+            dead_slots_cf,
             erasure_cf,
             erasure_meta_cf,
             orphans_cf,
@@ -177,6 +184,15 @@ impl Blocktree {
         self.meta_cf.get(slot)
     }

+    pub fn is_full(&self, slot: u64) -> bool {
+        if let Ok(meta) = self.meta_cf.get(slot) {
+            if let Some(meta) = meta {
+                return meta.is_full();
+            }
+        }
+        false
+    }
+
     pub fn erasure_meta(&self, slot: u64, set_index: u64) -> Result<Option<ErasureMeta>> {
         self.erasure_meta_cf.get((slot, set_index))
     }
@@ -799,7 +815,17 @@ impl Blocktree {
         let result: HashMap<u64, Vec<u64>> = slots
             .iter()
             .zip(slot_metas)
-            .filter_map(|(height, meta)| meta.map(|meta| (*height, meta.next_slots)))
+            .filter_map(|(height, meta)| {
+                meta.map(|meta| {
+                    let valid_next_slots: Vec<u64> = meta
+                        .next_slots
+                        .iter()
+                        .cloned()
+                        .filter(|s| !self.is_dead(*s))
+                        .collect();
+                    (*height, valid_next_slots)
+                })
+            })
             .collect();

         Ok(result)
@@ -818,18 +844,12 @@ impl Blocktree {
         }
     }

-    pub fn set_root(&self, new_root: u64, prev_root: u64) -> Result<()> {
-        let mut current_slot = new_root;
+    pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> {
         unsafe {
             let mut batch_processor = self.db.batch_processor();
             let mut write_batch = batch_processor.batch()?;
-            if new_root == 0 {
-                write_batch.put::<cf::Root>(0, &true)?;
-            } else {
-                while current_slot != prev_root {
-                    write_batch.put::<cf::Root>(current_slot, &true)?;
-                    current_slot = self.meta(current_slot).unwrap().unwrap().parent_slot;
-                }
+            for slot in rooted_slots {
+                write_batch.put::<cf::Root>(*slot, &true)?;
             }

             batch_processor.write(write_batch)?;
@@ -837,6 +857,22 @@ impl Blocktree {
         Ok(())
     }

+    pub fn is_dead(&self, slot: u64) -> bool {
+        if let Some(true) = self
+            .db
+            .get::<cf::DeadSlots>(slot)
+            .expect("fetch from DeadSlots column family failed")
+        {
+            true
+        } else {
+            false
+        }
+    }
+
+    pub fn set_dead_slot(&self, slot: u64) -> Result<()> {
+        self.dead_slots_cf.put(slot, &true)
+    }
+
     pub fn get_orphans(&self, max: Option<usize>) -> Vec<u64> {
         let mut results = vec![];

@@ -3128,30 +3164,12 @@ pub mod tests {
     }

     #[test]
-    fn test_set_root() {
+    fn test_set_roots() {
         let blocktree_path = get_tmp_ledger_path!();
         let blocktree = Blocktree::open(&blocktree_path).unwrap();
-        blocktree.set_root(0, 0).unwrap();
         let chained_slots = vec![0, 2, 4, 7, 12, 15];

-        // Make a chain of slots
-        let all_blobs = make_chaining_slot_entries(&chained_slots, 10);
-
-        // Insert the chain of slots into the ledger
-        for (slot_blobs, _) in all_blobs {
-            blocktree.insert_data_blobs(&slot_blobs[..]).unwrap();
-        }
-
-        blocktree.set_root(4, 0).unwrap();
-        for i in &chained_slots[0..3] {
-            assert!(blocktree.is_root(*i));
-        }
-
-        for i in &chained_slots[3..] {
-            assert!(!blocktree.is_root(*i));
-        }
-
-        blocktree.set_root(15, 4).unwrap();
+        blocktree.set_roots(&chained_slots).unwrap();

         for i in chained_slots {
             assert!(blocktree.is_root(i));
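The new DeadSlots column lets slot-traversal queries drop children that were marked dead via `set_dead_slot`. A small sketch of that filtering with a plain `HashSet` standing in for the column family (an illustration, not the Blocktree implementation):

```rust
use std::collections::{HashMap, HashSet};

// `dead` stands in for the DeadSlots column family; `next_slots` plays the
// role of the children list stored in each SlotMeta.
fn valid_children(
    next_slots: &HashMap<u64, Vec<u64>>,
    dead: &HashSet<u64>,
) -> HashMap<u64, Vec<u64>> {
    next_slots
        .iter()
        .map(|(slot, children)| {
            let live: Vec<u64> = children
                .iter()
                .cloned()
                .filter(|s| !dead.contains(s))
                .collect();
            (*slot, live)
        })
        .collect()
}

fn main() {
    let mut next_slots = HashMap::new();
    next_slots.insert(0, vec![1, 2]);
    next_slots.insert(1, vec![3]);

    let mut dead = HashSet::new();
    dead.insert(2); // e.g. slot 2 was marked dead

    let filtered = valid_children(&next_slots, &dead);
    assert_eq!(filtered[&0], vec![1]); // the dead child is filtered out
    assert_eq!(filtered[&1], vec![3]);
}
```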
@@ -28,6 +28,10 @@ pub mod columns {
     /// Data Column
     pub struct Data;

+    #[derive(Debug)]
+    /// Data Column
+    pub struct DeadSlots;
+
     #[derive(Debug)]
     /// The erasure meta column
     pub struct ErasureMeta;
@@ -100,6 +100,25 @@ impl Column<Kvs> for cf::Data {
     }
 }

+impl Column<Kvs> for cf::DeadSlots {
+    const NAME: &'static str = super::DEAD_SLOTS;
+    type Index = u64;
+
+    fn key(slot: u64) -> Key {
+        let mut key = Key::default();
+        BigEndian::write_u64(&mut key.0[8..16], slot);
+        key
+    }
+
+    fn index(key: &Key) -> u64 {
+        BigEndian::read_u64(&key.0[8..16])
+    }
+}
+
+impl TypedColumn<Kvs> for cf::Root {
+    type Type = bool;
+}
+
 impl Column<Kvs> for cf::Orphans {
     const NAME: &'static str = super::ORPHANS_CF;
     type Index = u64;
|
@ -30,7 +30,7 @@ impl Backend for Rocks {
|
|||||||
type Error = rocksdb::Error;
|
type Error = rocksdb::Error;
|
||||||
|
|
||||||
fn open(path: &Path) -> Result<Rocks> {
|
fn open(path: &Path) -> Result<Rocks> {
|
||||||
use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, Root, SlotMeta};
|
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};
|
||||||
|
|
||||||
fs::create_dir_all(&path)?;
|
fs::create_dir_all(&path)?;
|
||||||
|
|
||||||
@ -40,6 +40,7 @@ impl Backend for Rocks {
|
|||||||
// Column family names
|
// Column family names
|
||||||
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
|
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
|
||||||
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
|
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
|
||||||
|
let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
|
||||||
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
|
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
|
||||||
let erasure_meta_cf_descriptor =
|
let erasure_meta_cf_descriptor =
|
||||||
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
|
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
|
||||||
@ -49,6 +50,7 @@ impl Backend for Rocks {
|
|||||||
let cfs = vec![
|
let cfs = vec![
|
||||||
meta_cf_descriptor,
|
meta_cf_descriptor,
|
||||||
data_cf_descriptor,
|
data_cf_descriptor,
|
||||||
|
dead_slots_cf_descriptor,
|
||||||
erasure_cf_descriptor,
|
erasure_cf_descriptor,
|
||||||
erasure_meta_cf_descriptor,
|
erasure_meta_cf_descriptor,
|
||||||
orphans_cf_descriptor,
|
orphans_cf_descriptor,
|
||||||
@ -62,11 +64,12 @@ impl Backend for Rocks {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn columns(&self) -> Vec<&'static str> {
|
fn columns(&self) -> Vec<&'static str> {
|
||||||
use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, Root, SlotMeta};
|
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};
|
||||||
|
|
||||||
vec![
|
vec![
|
||||||
Coding::NAME,
|
Coding::NAME,
|
||||||
ErasureMeta::NAME,
|
ErasureMeta::NAME,
|
||||||
|
DeadSlots::NAME,
|
||||||
Data::NAME,
|
Data::NAME,
|
||||||
Orphans::NAME,
|
Orphans::NAME,
|
||||||
Root::NAME,
|
Root::NAME,
|
||||||
@@ -161,6 +164,25 @@ impl Column<Rocks> for cf::Data {
     }
 }
 
+impl Column<Rocks> for cf::DeadSlots {
+    const NAME: &'static str = super::DEAD_SLOTS_CF;
+    type Index = u64;
+
+    fn key(slot: u64) -> Vec<u8> {
+        let mut key = vec![0; 8];
+        BigEndian::write_u64(&mut key[..], slot);
+        key
+    }
+
+    fn index(key: &[u8]) -> u64 {
+        BigEndian::read_u64(&key[..8])
+    }
+}
+
+impl TypedColumn<Rocks> for cf::DeadSlots {
+    type Type = bool;
+}
+
 impl Column<Rocks> for cf::Orphans {
     const NAME: &'static str = super::ORPHANS_CF;
     type Index = u64;
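The `DeadSlots` columns above key each entry by the slot number written as a fixed-width big-endian integer. A standalone sketch of that round trip follows (hypothetical `encode`/`decode` helper names; only the `byteorder` calls mirror the code above, and the `byteorder` crate is assumed as a dependency):

// Standalone sketch of the 8-byte big-endian slot key used by the DeadSlots column.
use byteorder::{BigEndian, ByteOrder};

fn encode(slot: u64) -> Vec<u8> {
    let mut key = vec![0; 8];
    BigEndian::write_u64(&mut key[..], slot);
    key
}

fn decode(key: &[u8]) -> u64 {
    BigEndian::read_u64(&key[..8])
}

fn main() {
    let key = encode(42);
    assert_eq!(key, vec![0, 0, 0, 0, 0, 0, 0, 42]);
    assert_eq!(decode(&key), 42);
    // Big-endian keys keep lexicographic byte order equal to numeric slot order.
    assert!(encode(1) < encode(256));
}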
@@ -51,7 +51,7 @@ mod tests {
     fn test_rooted_slot_iterator() {
         let blocktree_path = get_tmp_ledger_path("test_rooted_slot_iterator");
         let blocktree = Blocktree::open(&blocktree_path).unwrap();
-        blocktree.set_root(0, 0).unwrap();
+        blocktree.set_roots(&[0]).unwrap();
         let ticks_per_slot = 5;
         /*
             Build a blocktree in the ledger with the following fork structure:
@@ -98,7 +98,7 @@ mod tests {
         fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash);
 
         // Set a root
-        blocktree.set_root(3, 0).unwrap();
+        blocktree.set_roots(&[1, 2, 3]).unwrap();
 
         // Trying to get an iterator on a different fork will error
         assert!(RootedSlotIterator::new(4, &blocktree).is_err());
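These tests switch from the old one-root-at-a-time `set_root(slot, parent)` call to handing `Blocktree` a whole slice of rooted slots. A minimal sketch of that batch-style API shape, using a hypothetical `Ledger` type in place of `Blocktree`:

// Hypothetical stand-in for Blocktree's root bookkeeping; only the
// single-call vs. batch shape mirrors the change in the diff.
use std::collections::BTreeSet;

#[derive(Default)]
struct Ledger {
    roots: BTreeSet<u64>,
}

impl Ledger {
    // New-style API: mark many slots as rooted in one call.
    fn set_roots(&mut self, slots: &[u64]) {
        self.roots.extend(slots.iter().copied());
    }
}

fn main() {
    let mut ledger = Ledger::default();
    // Rough equivalent of blocktree.set_roots(&[1, 2, 3]).unwrap() in the test above.
    ledger.set_roots(&[1, 2, 3]);
    assert!(ledger.roots.contains(&3));
}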
@@ -3,18 +3,26 @@ use crate::blocktree::Blocktree;
 use crate::entry::{Entry, EntrySlice};
 use crate::leader_schedule_cache::LeaderScheduleCache;
 use rayon::prelude::*;
-use solana_metrics::{datapoint, inc_new_counter_debug};
+use rayon::ThreadPool;
+use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
 use solana_runtime::bank::Bank;
 use solana_runtime::locked_accounts_results::LockedAccountsResults;
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::timing::duration_as_ms;
 use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
 use solana_sdk::transaction::Result;
-use solana_sdk::transaction::Transaction;
 use std::result;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 
+pub const NUM_THREADS: u32 = 10;
+use std::cell::RefCell;
+
+thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
+                    .num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
+                    .build()
+                    .unwrap()));
+
 fn first_err(results: &[Result<()>]) -> Result<()> {
     for r in results {
         if r.is_err() {
@@ -24,37 +32,38 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
     Ok(())
 }
 
-fn par_execute_entries(
-    bank: &Bank,
-    entries: &[(&Entry, LockedAccountsResults<Transaction>)],
-) -> Result<()> {
+fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
     inc_new_counter_debug!("bank-par_execute_entries-count", entries.len());
-    let results: Vec<Result<()>> = entries
-        .into_par_iter()
-        .map(|(e, locked_accounts)| {
-            let results = bank.load_execute_and_commit_transactions(
-                &e.transactions,
-                locked_accounts,
-                MAX_RECENT_BLOCKHASHES,
-            );
-            let mut first_err = None;
-            for (r, tx) in results.iter().zip(e.transactions.iter()) {
-                if let Err(ref e) = r {
-                    if first_err.is_none() {
-                        first_err = Some(r.clone());
-                    }
-                    if !Bank::can_commit(&r) {
-                        warn!("Unexpected validator error: {:?}, tx: {:?}", e, tx);
-                        datapoint!(
-                            "validator_process_entry_error",
-                            ("error", format!("error: {:?}, tx: {:?}", e, tx), String)
-                        );
-                    }
-                }
-            }
-            first_err.unwrap_or(Ok(()))
-        })
-        .collect();
+    let results: Vec<Result<()>> = PAR_THREAD_POOL.with(|thread_pool| {
+        thread_pool.borrow().install(|| {
+            entries
+                .into_par_iter()
+                .map(|(e, locked_accounts)| {
+                    let results = bank.load_execute_and_commit_transactions(
+                        &e.transactions,
+                        locked_accounts,
+                        MAX_RECENT_BLOCKHASHES,
+                    );
+                    let mut first_err = None;
+                    for (r, tx) in results.iter().zip(e.transactions.iter()) {
+                        if let Err(ref e) = r {
+                            if first_err.is_none() {
+                                first_err = Some(r.clone());
+                            }
+                            if !Bank::can_commit(&r) {
+                                warn!("Unexpected validator error: {:?}, tx: {:?}", e, tx);
+                                datapoint_error!(
+                                    "validator_process_entry_error",
+                                    ("error", format!("error: {:?}, tx: {:?}", e, tx), String)
+                                );
+                            }
+                        }
+                    }
+                    first_err.unwrap_or(Ok(()))
+                })
+                .collect()
+        })
+    });
 
     first_err(&results)
 }
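Entry execution above now runs inside a thread-local Rayon pool (`PAR_THREAD_POOL`) instead of the global pool, so replay work is not starved by other parallel tasks. A minimal, self-contained sketch of the same pattern (the constant name and fixed pool size here are illustrative, not taken from the diff):

// Minimal sketch of the thread-local Rayon pool pattern used above.
use rayon::prelude::*;
use rayon::ThreadPool;
use std::cell::RefCell;

const FALLBACK_THREADS: usize = 10;

thread_local!(static POOL: RefCell<ThreadPool> = RefCell::new(
    rayon::ThreadPoolBuilder::new()
        .num_threads(FALLBACK_THREADS)
        .build()
        .unwrap()
));

fn main() {
    // `install` runs the closure on the dedicated pool, so the par_iter below
    // does not compete with work scheduled on the global Rayon pool.
    let sum: u64 = POOL.with(|pool| {
        pool.borrow()
            .install(|| (0..1000u64).into_par_iter().map(|x| x * 2).sum())
    });
    assert_eq!(sum, 999_000);
}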
@@ -155,7 +164,7 @@ pub fn process_blocktree(
         vec![(slot, meta, bank, entry_height, last_entry_hash)]
     };
 
-    blocktree.set_root(0, 0).expect("Couldn't set first root");
+    blocktree.set_roots(&[0]).expect("Couldn't set first root");
 
     let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);
 
@@ -420,7 +429,7 @@ pub mod tests {
         info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
         info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
 
-        blocktree.set_root(4, 0).unwrap();
+        blocktree.set_roots(&[4, 1, 0]).unwrap();
 
         let (bank_forks, bank_forks_info, _) =
             process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -494,8 +503,7 @@ pub mod tests {
         info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
         info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
 
-        blocktree.set_root(0, 0).unwrap();
-        blocktree.set_root(1, 0).unwrap();
+        blocktree.set_roots(&[0, 1]).unwrap();
 
         let (bank_forks, bank_forks_info, _) =
             process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -571,10 +579,11 @@ pub mod tests {
         }
 
         // Set a root on the last slot of the last confirmed epoch
-        blocktree.set_root(last_slot, 0).unwrap();
+        let rooted_slots: Vec<_> = (0..=last_slot).collect();
+        blocktree.set_roots(&rooted_slots).unwrap();
 
         // Set a root on the next slot of the confrimed epoch
-        blocktree.set_root(last_slot + 1, last_slot).unwrap();
+        blocktree.set_roots(&[last_slot + 1]).unwrap();
 
         // Check that we can properly restart the ledger / leader scheduler doesn't fail
         let (bank_forks, bank_forks_info, _) =
@@ -1,183 +1,87 @@
 //! A stage to broadcast data from a leader node to validators
-//!
+use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
+use self::standard_broadcast_run::StandardBroadcastRun;
 use crate::blocktree::Blocktree;
-use crate::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT};
-use crate::entry::EntrySlice;
+use crate::cluster_info::{ClusterInfo, ClusterInfoError};
 use crate::erasure::CodingGenerator;
-use crate::packet::index_blobs_with_genesis;
 use crate::poh_recorder::WorkingBankEntries;
 use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::staking_utils;
-use rayon::prelude::*;
+use rayon::ThreadPool;
 use solana_metrics::{
     datapoint, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info,
-    inc_new_counter_warn,
 };
-use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::duration_as_ms;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{Receiver, RecvTimeoutError};
 use std::sync::{Arc, RwLock};
 use std::thread::{self, Builder, JoinHandle};
-use std::time::{Duration, Instant};
+use std::time::Instant;
 
+mod broadcast_utils;
+mod fail_entry_verification_broadcast_run;
+mod standard_broadcast_run;
+
+pub const NUM_THREADS: u32 = 10;
+
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub enum BroadcastStageReturnType {
     ChannelDisconnected,
 }
 
-#[derive(Default)]
-struct BroadcastStats {
-    num_entries: Vec<usize>,
-    run_elapsed: Vec<u64>,
-    to_blobs_elapsed: Vec<u64>,
+#[derive(PartialEq, Clone, Debug)]
+pub enum BroadcastStageType {
+    Standard,
+    FailEntryVerification,
 }
 
-struct Broadcast {
-    id: Pubkey,
-    coding_generator: CodingGenerator,
-    stats: BroadcastStats,
+impl BroadcastStageType {
+    pub fn new_broadcast_stage(
+        &self,
+        sock: UdpSocket,
+        cluster_info: Arc<RwLock<ClusterInfo>>,
+        receiver: Receiver<WorkingBankEntries>,
+        exit_sender: &Arc<AtomicBool>,
+        blocktree: &Arc<Blocktree>,
+    ) -> BroadcastStage {
+        match self {
+            BroadcastStageType::Standard => BroadcastStage::new(
+                sock,
+                cluster_info,
+                receiver,
+                exit_sender,
+                blocktree,
+                StandardBroadcastRun::new(),
+            ),
+
+            BroadcastStageType::FailEntryVerification => BroadcastStage::new(
+                sock,
+                cluster_info,
+                receiver,
+                exit_sender,
+                blocktree,
+                FailEntryVerificationBroadcastRun::new(),
+            ),
+        }
+    }
 }
 
-impl Broadcast {
+trait BroadcastRun {
     fn run(
         &mut self,
+        broadcast: &mut Broadcast,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         receiver: &Receiver<WorkingBankEntries>,
         sock: &UdpSocket,
         blocktree: &Arc<Blocktree>,
-        genesis_blockhash: &Hash,
-    ) -> Result<()> {
-        let timer = Duration::new(1, 0);
-        let (mut bank, entries) = receiver.recv_timeout(timer)?;
-        let mut max_tick_height = bank.max_tick_height();
-
-        let run_start = Instant::now();
-        let mut num_entries = entries.len();
-        let mut ventries = Vec::new();
-        let mut last_tick = entries.last().map(|v| v.1).unwrap_or(0);
-        ventries.push(entries);
-
-        assert!(last_tick <= max_tick_height);
-        if last_tick != max_tick_height {
-            while let Ok((same_bank, entries)) = receiver.try_recv() {
-                // If the bank changed, that implies the previous slot was interrupted and we do not have to
-                // broadcast its entries.
-                if same_bank.slot() != bank.slot() {
-                    num_entries = 0;
-                    ventries.clear();
-                    bank = same_bank.clone();
-                    max_tick_height = bank.max_tick_height();
-                }
-                num_entries += entries.len();
-                last_tick = entries.last().map(|v| v.1).unwrap_or(0);
-                ventries.push(entries);
-                assert!(last_tick <= max_tick_height,);
-                if last_tick == max_tick_height {
-                    break;
-                }
-            }
-        }
-
-        let bank_epoch = bank.get_stakers_epoch(bank.slot());
-        let mut broadcast_table = cluster_info
-            .read()
-            .unwrap()
-            .sorted_tvu_peers(staking_utils::staked_nodes_at_epoch(&bank, bank_epoch).as_ref());
-
-        inc_new_counter_warn!("broadcast_service-num_peers", broadcast_table.len() + 1);
-        // Layer 1, leader nodes are limited to the fanout size.
-        broadcast_table.truncate(DATA_PLANE_FANOUT);
-
-        inc_new_counter_info!("broadcast_service-entries_received", num_entries);
-
-        let to_blobs_start = Instant::now();
-
-        let blobs: Vec<_> = ventries
-            .into_par_iter()
-            .map(|p| {
-                let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
-                entries.to_shared_blobs()
-            })
-            .flatten()
-            .collect();
-
-        let blob_index = blocktree
-            .meta(bank.slot())
-            .expect("Database error")
-            .map(|meta| meta.consumed)
-            .unwrap_or(0);
-
-        index_blobs_with_genesis(
-            &blobs,
-            &self.id,
-            genesis_blockhash,
-            blob_index,
-            bank.slot(),
-            bank.parent().map_or(0, |parent| parent.slot()),
-        );
-
-        let contains_last_tick = last_tick == max_tick_height;
-
-        if contains_last_tick {
-            blobs.last().unwrap().write().unwrap().set_is_last_in_slot();
-        }
-
-        blocktree.write_shared_blobs(&blobs)?;
-
-        let coding = self.coding_generator.next(&blobs);
-
-        let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
-
-        let broadcast_start = Instant::now();
-
-        // Send out data
-        ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
-
-        inc_new_counter_debug!("streamer-broadcast-sent", blobs.len());
-
-        // send out erasures
-        ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
-
-        self.update_broadcast_stats(
-            duration_as_ms(&broadcast_start.elapsed()),
-            duration_as_ms(&run_start.elapsed()),
-            num_entries,
-            to_blobs_elapsed,
-            blob_index,
-        );
-
-        Ok(())
-    }
-
-    fn update_broadcast_stats(
-        &mut self,
-        broadcast_elapsed: u64,
-        run_elapsed: u64,
-        num_entries: usize,
-        to_blobs_elapsed: u64,
-        blob_index: u64,
-    ) {
-        inc_new_counter_info!("broadcast_service-time_ms", broadcast_elapsed as usize);
-
-        self.stats.num_entries.push(num_entries);
-        self.stats.to_blobs_elapsed.push(to_blobs_elapsed);
-        self.stats.run_elapsed.push(run_elapsed);
-        if self.stats.num_entries.len() >= 16 {
-            info!(
-                "broadcast: entries: {:?} blob times ms: {:?} broadcast times ms: {:?}",
-                self.stats.num_entries, self.stats.to_blobs_elapsed, self.stats.run_elapsed
-            );
-            self.stats.num_entries.clear();
-            self.stats.to_blobs_elapsed.clear();
-            self.stats.run_elapsed.clear();
-        }
-
-        datapoint!("broadcast-service", ("transmit-index", blob_index, i64));
-    }
+    ) -> Result<()>;
+}
+
+struct Broadcast {
+    coding_generator: CodingGenerator,
+    thread_pool: ThreadPool,
 }
 
 // Implement a destructor for the BroadcastStage thread to signal it exited
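The rewrite above moves broadcast behaviour behind a `BroadcastRun` strategy trait, so the stage can be started either with the standard run or with the fault-injecting run used in tests. A simplified, hypothetical sketch of that dispatch shape (names are illustrative and not the actual `BroadcastStage` API):

// Hypothetical, simplified version of the BroadcastRun dispatch:
// a stage generic over a strategy trait, selected by an enum.
trait Run {
    fn run(&mut self) -> String;
}

struct Standard;
impl Run for Standard {
    fn run(&mut self) -> String {
        "broadcast entries verbatim".to_string()
    }
}

struct FailEntryVerification;
impl Run for FailEntryVerification {
    fn run(&mut self) -> String {
        "garble the last PoH hash before broadcasting".to_string()
    }
}

enum StageType {
    Standard,
    FailEntryVerification,
}

impl StageType {
    fn start(&self) -> String {
        match self {
            StageType::Standard => drive(Standard),
            StageType::FailEntryVerification => drive(FailEntryVerification),
        }
    }
}

// Mirrors the idea of BroadcastStage::new taking `impl BroadcastRun + Send + 'static`.
fn drive(mut run: impl Run) -> String {
    run.run()
}

fn main() {
    assert_eq!(StageType::Standard.start(), "broadcast entries verbatim");
    println!("{}", StageType::FailEntryVerification.start());
}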
@@ -209,20 +113,21 @@ impl BroadcastStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         receiver: &Receiver<WorkingBankEntries>,
         blocktree: &Arc<Blocktree>,
-        genesis_blockhash: &Hash,
+        mut broadcast_stage_run: impl BroadcastRun,
     ) -> BroadcastStageReturnType {
-        let me = cluster_info.read().unwrap().my_data().clone();
         let coding_generator = CodingGenerator::default();
 
         let mut broadcast = Broadcast {
-            id: me.id,
             coding_generator,
-            stats: BroadcastStats::default(),
+            thread_pool: rayon::ThreadPoolBuilder::new()
+                .num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
+                .build()
+                .unwrap(),
         };
 
         loop {
             if let Err(e) =
-                broadcast.run(&cluster_info, receiver, sock, blocktree, genesis_blockhash)
+                broadcast_stage_run.run(&mut broadcast, &cluster_info, receiver, sock, blocktree)
             {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => {
@@ -255,17 +160,16 @@ impl BroadcastStage {
     /// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
     /// completing the cycle.
     #[allow(clippy::too_many_arguments)]
-    pub fn new(
+    fn new(
         sock: UdpSocket,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         receiver: Receiver<WorkingBankEntries>,
         exit_sender: &Arc<AtomicBool>,
         blocktree: &Arc<Blocktree>,
-        genesis_blockhash: &Hash,
+        broadcast_stage_run: impl BroadcastRun + Send + 'static,
     ) -> Self {
         let blocktree = blocktree.clone();
         let exit_sender = exit_sender.clone();
-        let genesis_blockhash = *genesis_blockhash;
         let thread_hdl = Builder::new()
             .name("solana-broadcaster".to_string())
             .spawn(move || {
@@ -275,7 +179,7 @@ impl BroadcastStage {
                     &cluster_info,
                     &receiver,
                     &blocktree,
-                    &genesis_blockhash,
+                    broadcast_stage_run,
                 )
             })
             .unwrap();
@@ -302,6 +206,7 @@ mod test {
     use crate::service::Service;
     use solana_runtime::bank::Bank;
     use solana_sdk::hash::Hash;
+    use solana_sdk::pubkey::Pubkey;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use std::sync::atomic::AtomicBool;
    use std::sync::mpsc::channel;
@@ -347,7 +252,7 @@ mod test {
             entry_receiver,
             &exit_sender,
             &blocktree,
-            &Hash::default(),
+            StandardBroadcastRun::new(),
         );
 
         MockBroadcastStage {
core/src/broadcast_stage/broadcast_utils.rs (new file, 156 lines)
@@ -0,0 +1,156 @@
use crate::entry::Entry;
use crate::entry::EntrySlice;
use crate::erasure::CodingGenerator;
use crate::packet::{self, SharedBlob};
use crate::poh_recorder::WorkingBankEntries;
use crate::result::Result;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_runtime::bank::Bank;
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
use std::sync::mpsc::Receiver;
use std::sync::Arc;
use std::time::{Duration, Instant};

pub(super) struct ReceiveResults {
    pub ventries: Vec<Vec<(Entry, u64)>>,
    pub num_entries: usize,
    pub time_elapsed: Duration,
    pub bank: Arc<Bank>,
    pub last_tick: u64,
}

impl ReceiveResults {
    pub fn new(
        ventries: Vec<Vec<(Entry, u64)>>,
        num_entries: usize,
        time_elapsed: Duration,
        bank: Arc<Bank>,
        last_tick: u64,
    ) -> Self {
        Self {
            ventries,
            num_entries,
            time_elapsed,
            bank,
            last_tick,
        }
    }
}

pub(super) fn recv_slot_blobs(receiver: &Receiver<WorkingBankEntries>) -> Result<ReceiveResults> {
    let timer = Duration::new(1, 0);
    let (mut bank, entries) = receiver.recv_timeout(timer)?;
    let recv_start = Instant::now();
    let mut max_tick_height = bank.max_tick_height();
    let mut num_entries = entries.len();
    let mut ventries = Vec::new();
    let mut last_tick = entries.last().map(|v| v.1).unwrap_or(0);
    ventries.push(entries);

    assert!(last_tick <= max_tick_height);
    if last_tick != max_tick_height {
        while let Ok((same_bank, entries)) = receiver.try_recv() {
            // If the bank changed, that implies the previous slot was interrupted and we do not have to
            // broadcast its entries.
            if same_bank.slot() != bank.slot() {
                num_entries = 0;
                ventries.clear();
                bank = same_bank.clone();
                max_tick_height = bank.max_tick_height();
            }
            num_entries += entries.len();
            last_tick = entries.last().map(|v| v.1).unwrap_or(0);
            ventries.push(entries);
            assert!(last_tick <= max_tick_height,);
            if last_tick == max_tick_height {
                break;
            }
        }
    }

    let recv_end = recv_start.elapsed();
    let receive_results = ReceiveResults::new(ventries, num_entries, recv_end, bank, last_tick);
    Ok(receive_results)
}

pub(super) fn entries_to_blobs(
    ventries: Vec<Vec<(Entry, u64)>>,
    thread_pool: &ThreadPool,
    latest_blob_index: u64,
    last_tick: u64,
    bank: &Bank,
    keypair: &Keypair,
    coding_generator: &mut CodingGenerator,
) -> (Vec<SharedBlob>, Vec<SharedBlob>) {
    let blobs = generate_data_blobs(
        ventries,
        thread_pool,
        latest_blob_index,
        last_tick,
        &bank,
        &keypair,
    );

    let coding = generate_coding_blobs(&blobs, &thread_pool, coding_generator, &keypair);

    (blobs, coding)
}

pub(super) fn generate_data_blobs(
    ventries: Vec<Vec<(Entry, u64)>>,
    thread_pool: &ThreadPool,
    latest_blob_index: u64,
    last_tick: u64,
    bank: &Bank,
    keypair: &Keypair,
) -> Vec<SharedBlob> {
    let blobs: Vec<SharedBlob> = thread_pool.install(|| {
        ventries
            .into_par_iter()
            .map(|p| {
                let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
                entries.to_shared_blobs()
            })
            .flatten()
            .collect()
    });

    packet::index_blobs(
        &blobs,
        &keypair.pubkey(),
        latest_blob_index,
        bank.slot(),
        bank.parent().map_or(0, |parent| parent.slot()),
    );

    if last_tick == bank.max_tick_height() {
        blobs.last().unwrap().write().unwrap().set_is_last_in_slot();
    }

    // Make sure not to modify the blob header or data after signing it here
    thread_pool.install(|| {
        blobs.par_iter().for_each(|b| {
            b.write().unwrap().sign(keypair);
        })
    });

    blobs
}

pub(super) fn generate_coding_blobs(
    blobs: &[SharedBlob],
    thread_pool: &ThreadPool,
    coding_generator: &mut CodingGenerator,
    keypair: &Keypair,
) -> Vec<SharedBlob> {
    let coding = coding_generator.next(&blobs);

    thread_pool.install(|| {
        coding.par_iter().for_each(|c| {
            c.write().unwrap().sign(keypair);
        })
    });

    coding
}
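`recv_slot_blobs` above keeps draining the channel until it has every entry batch up to the slot's last tick, restarting the accumulation if the bank changes. A stripped-down sketch of that coalescing loop over a plain `std::sync::mpsc` channel (the types and completion condition are simplified placeholders, not the real `WorkingBankEntries`):

use std::sync::mpsc::{channel, Receiver};

// Drain buffered batches until the target tick is reached, mirroring the
// "keep receiving until last_tick == max_tick_height" loop above.
fn recv_until_last_tick(receiver: &Receiver<(u64, u64)>, max_tick: u64) -> Vec<(u64, u64)> {
    let mut batches = Vec::new();
    let mut last_tick = 0;
    while last_tick != max_tick {
        match receiver.try_recv() {
            Ok(batch) => {
                last_tick = batch.1;
                batches.push(batch);
            }
            Err(_) => break, // nothing buffered yet; the real loop blocks with a timeout
        }
    }
    batches
}

fn main() {
    let (sender, receiver) = channel();
    for tick in 1..=4u64 {
        sender.send((tick, tick)).unwrap();
    }
    let batches = recv_until_last_tick(&receiver, 4);
    assert_eq!(batches.len(), 4);
}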
@@ -0,0 +1,70 @@ (new file)
use super::*;
use solana_sdk::hash::Hash;

pub(super) struct FailEntryVerificationBroadcastRun {}

impl FailEntryVerificationBroadcastRun {
    pub(super) fn new() -> Self {
        Self {}
    }
}

impl BroadcastRun for FailEntryVerificationBroadcastRun {
    fn run(
        &mut self,
        broadcast: &mut Broadcast,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        receiver: &Receiver<WorkingBankEntries>,
        sock: &UdpSocket,
        blocktree: &Arc<Blocktree>,
    ) -> Result<()> {
        // 1) Pull entries from banking stage
        let mut receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
        let bank = receive_results.bank.clone();
        let last_tick = receive_results.last_tick;

        // 2) Convert entries to blobs + generate coding blobs. Set a garbage PoH on the last entry
        // in the slot to make verification fail on validators
        if last_tick == bank.max_tick_height() {
            let mut last_entry = receive_results
                .ventries
                .last_mut()
                .unwrap()
                .last_mut()
                .unwrap();
            last_entry.0.hash = Hash::default();
        }

        let keypair = &cluster_info.read().unwrap().keypair.clone();
        let latest_blob_index = blocktree
            .meta(bank.slot())
            .expect("Database error")
            .map(|meta| meta.consumed)
            .unwrap_or(0);

        let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
            receive_results.ventries,
            &broadcast.thread_pool,
            latest_blob_index,
            last_tick,
            &bank,
            &keypair,
            &mut broadcast.coding_generator,
        );

        blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;

        // 3) Start broadcast step
        let bank_epoch = bank.get_stakers_epoch(bank.slot());
        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

        // Broadcast data + erasures
        cluster_info.read().unwrap().broadcast(
            sock,
            data_blobs.iter().chain(coding_blobs.iter()),
            stakes.as_ref(),
        )?;

        Ok(())
    }
}
core/src/broadcast_stage/standard_broadcast_run.rs (new file, 116 lines)
@@ -0,0 +1,116 @@
use super::broadcast_utils;
use super::*;

#[derive(Default)]
struct BroadcastStats {
    num_entries: Vec<usize>,
    run_elapsed: Vec<u64>,
    to_blobs_elapsed: Vec<u64>,
}

pub(super) struct StandardBroadcastRun {
    stats: BroadcastStats,
}

impl StandardBroadcastRun {
    pub(super) fn new() -> Self {
        Self {
            stats: BroadcastStats::default(),
        }
    }

    fn update_broadcast_stats(
        &mut self,
        broadcast_elapsed: u64,
        run_elapsed: u64,
        num_entries: usize,
        to_blobs_elapsed: u64,
        blob_index: u64,
    ) {
        inc_new_counter_info!("broadcast_service-time_ms", broadcast_elapsed as usize);

        self.stats.num_entries.push(num_entries);
        self.stats.to_blobs_elapsed.push(to_blobs_elapsed);
        self.stats.run_elapsed.push(run_elapsed);
        if self.stats.num_entries.len() >= 16 {
            info!(
                "broadcast: entries: {:?} blob times ms: {:?} broadcast times ms: {:?}",
                self.stats.num_entries, self.stats.to_blobs_elapsed, self.stats.run_elapsed
            );
            self.stats.num_entries.clear();
            self.stats.to_blobs_elapsed.clear();
            self.stats.run_elapsed.clear();
        }

        datapoint!("broadcast-service", ("transmit-index", blob_index, i64));
    }
}

impl BroadcastRun for StandardBroadcastRun {
    fn run(
        &mut self,
        broadcast: &mut Broadcast,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        receiver: &Receiver<WorkingBankEntries>,
        sock: &UdpSocket,
        blocktree: &Arc<Blocktree>,
    ) -> Result<()> {
        // 1) Pull entries from banking stage
        let receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
        let receive_elapsed = receive_results.time_elapsed;
        let num_entries = receive_results.num_entries;
        let bank = receive_results.bank.clone();
        let last_tick = receive_results.last_tick;
        inc_new_counter_info!("broadcast_service-entries_received", num_entries);

        // 2) Convert entries to blobs + generate coding blobs
        let to_blobs_start = Instant::now();
        let keypair = &cluster_info.read().unwrap().keypair.clone();
        let latest_blob_index = blocktree
            .meta(bank.slot())
            .expect("Database error")
            .map(|meta| meta.consumed)
            .unwrap_or(0);

        let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
            receive_results.ventries,
            &broadcast.thread_pool,
            latest_blob_index,
            last_tick,
            &bank,
            &keypair,
            &mut broadcast.coding_generator,
        );

        blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;
        let to_blobs_elapsed = to_blobs_start.elapsed();

        // 3) Start broadcast step
        let broadcast_start = Instant::now();
        let bank_epoch = bank.get_stakers_epoch(bank.slot());
        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

        // Broadcast data + erasures
        cluster_info.read().unwrap().broadcast(
            sock,
            data_blobs.iter().chain(coding_blobs.iter()),
            stakes.as_ref(),
        )?;

        inc_new_counter_debug!(
            "streamer-broadcast-sent",
            data_blobs.len() + coding_blobs.len()
        );

        let broadcast_elapsed = broadcast_start.elapsed();
        self.update_broadcast_stats(
            duration_as_ms(&broadcast_elapsed),
            duration_as_ms(&(receive_elapsed + to_blobs_elapsed + broadcast_elapsed)),
            num_entries,
            duration_as_ms(&to_blobs_elapsed),
            latest_blob_index,
        );

        Ok(())
    }
}
@@ -6,32 +6,11 @@ use std::io::{BufWriter, Write};
 use std::path::Path;
 use std::sync::Arc;
 
+pub use solana_chacha_sys::chacha_cbc_encrypt;
+
 pub const CHACHA_BLOCK_SIZE: usize = 64;
 pub const CHACHA_KEY_SIZE: usize = 32;
 
-#[link(name = "cpu-crypt")]
-extern "C" {
-    fn chacha20_cbc_encrypt(
-        input: *const u8,
-        output: *mut u8,
-        in_len: usize,
-        key: *const u8,
-        ivec: *mut u8,
-    );
-}
-
-pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
-    unsafe {
-        chacha20_cbc_encrypt(
-            input.as_ptr(),
-            output.as_mut_ptr(),
-            input.len(),
-            key.as_ptr(),
-            ivec.as_mut_ptr(),
-        );
-    }
-}
-
 pub fn chacha_cbc_encrypt_ledger(
     blocktree: &Arc<Blocktree>,
     slice: u64,
@@ -154,7 +133,7 @@ mod tests {
         hasher.hash(&buf[..size]);
 
         // golden needs to be updated if blob stuff changes....
-        let golden: Hash = "9xb2Asf7UK5G8WqPwsvzo5xwLi4dixBSDiYKCtYRikA"
+        let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD"
            .parse()
            .unwrap();
 
@@ -1,6 +1,8 @@
+use solana_client::thin_client::ThinClient;
 use solana_sdk::pubkey::Pubkey;
 
 pub trait Cluster {
     fn get_node_pubkeys(&self) -> Vec<Pubkey>;
+    fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
     fn restart_node(&mut self, pubkey: Pubkey);
 }
@@ -24,12 +24,17 @@ use crate::repair_service::RepairType;
 use crate::result::Result;
 use crate::staking_utils;
 use crate::streamer::{BlobReceiver, BlobSender};
+use crate::weighted_shuffle::weighted_shuffle;
 use bincode::{deserialize, serialize};
 use core::cmp;
-use hashbrown::HashMap;
+use itertools::Itertools;
+use rand::SeedableRng;
 use rand::{thread_rng, Rng};
+use rand_chacha::ChaChaRng;
 use rayon::prelude::*;
-use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
+use solana_metrics::{
+    datapoint_debug, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_warn,
+};
 use solana_netutil::{
     bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range, PortRange,
 };
@@ -39,10 +44,11 @@ use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signable, Signature};
 use solana_sdk::timing::{duration_as_ms, timestamp};
 use solana_sdk::transaction::Transaction;
+use std::borrow::Borrow;
+use std::borrow::Cow;
 use std::cmp::min;
-use std::collections::BTreeSet;
+use std::collections::{BTreeSet, HashMap};
 use std::fmt;
-use std::io;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
@@ -83,7 +89,7 @@ pub struct ClusterInfo {
 pub struct Locality {
     /// The bounds of the neighborhood represented by this locality
     pub neighbor_bounds: (usize, usize),
-    /// The `avalanche` layer this locality is in
+    /// The `turbine` layer this locality is in
     pub layer_ix: usize,
     /// The bounds of the current layer
     pub layer_bounds: (usize, usize),
@@ -122,7 +128,7 @@ impl Signable for PruneData {
         self.pubkey
     }
 
-    fn signable_data(&self) -> Vec<u8> {
+    fn signable_data(&self) -> Cow<[u8]> {
         #[derive(Serialize)]
         struct SignData {
             pubkey: Pubkey,
@@ -136,7 +142,7 @@ impl Signable for PruneData {
             destination: self.destination,
             wallclock: self.wallclock,
         };
-        serialize(&data).expect("serialize PruneData")
+        Cow::Owned(serialize(&data).expect("serialize PruneData"))
     }
 
     fn get_signature(&self) -> Signature {
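`signable_data` now returns `Cow<[u8]>`, so an implementation that already holds the exact bytes to sign can lend them without allocating, while `PruneData` keeps returning an owned, freshly serialized buffer. A small illustrative sketch under that assumption (hypothetical trait and types, not the crate's `Signable`):

use std::borrow::Cow;

// Hypothetical trait mirroring only the return-type shape of the change above.
trait SignableData {
    fn signable_data(&self) -> Cow<[u8]>;
}

struct Raw(Vec<u8>);
impl SignableData for Raw {
    fn signable_data(&self) -> Cow<[u8]> {
        // Bytes already exist; lend them with no copy.
        Cow::Borrowed(self.0.as_slice())
    }
}

struct Derived {
    a: u32,
    b: u32,
}
impl SignableData for Derived {
    fn signable_data(&self) -> Cow<[u8]> {
        // Owned buffer, like Cow::Owned(serialize(&data)...) in the diff.
        let mut v = self.a.to_le_bytes().to_vec();
        v.extend_from_slice(&self.b.to_le_bytes());
        Cow::Owned(v)
    }
}

fn main() {
    assert_eq!(Raw(vec![1, 2, 3]).signable_data().len(), 3);
    assert_eq!(Derived { a: 1, b: 2 }.signable_data().len(), 8);
}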
@@ -490,57 +496,69 @@ impl ClusterInfo {
             && !ContactInfo::is_valid_address(&contact_info.tpu)
     }
 
-    fn sort_by_stake<S: std::hash::BuildHasher>(
+    fn stake_weighted_shuffle<S: std::hash::BuildHasher>(
         peers: &[ContactInfo],
         stakes: Option<&HashMap<Pubkey, u64, S>>,
+        rng: ChaChaRng,
     ) -> Vec<(u64, ContactInfo)> {
-        let mut peers_with_stakes: Vec<_> = peers
+        let (stake_weights, peers_with_stakes): (Vec<_>, Vec<_>) = peers
             .iter()
             .map(|c| {
-                (
-                    stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0)),
-                    c.clone(),
-                )
+                let stake = stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0));
+                // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is
+                // assumed to be missing entry. So let's make sure stake weights are atleast 1
+                (cmp::max(1, stake), (stake, c.clone()))
             })
+            .sorted_by(|(_, (l_stake, l_info)), (_, (r_stake, r_info))| {
+                if r_stake == l_stake {
+                    r_info.id.cmp(&l_info.id)
+                } else {
+                    r_stake.cmp(&l_stake)
+                }
+            })
+            .unzip();
+
+        let shuffle = weighted_shuffle(stake_weights, rng);
+
+        let mut out: Vec<(u64, ContactInfo)> = shuffle
+            .iter()
+            .map(|x| peers_with_stakes[*x].clone())
             .collect();
-        peers_with_stakes.sort_unstable_by(|(l_stake, l_info), (r_stake, r_info)| {
-            if r_stake == l_stake {
-                r_info.id.cmp(&l_info.id)
-            } else {
-                r_stake.cmp(&l_stake)
-            }
-        });
-        peers_with_stakes.dedup();
-        peers_with_stakes
+
+        out.dedup();
+        out
     }
 
     /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list
-    fn sorted_peers_and_index<S: std::hash::BuildHasher>(
+    pub fn shuffle_peers_and_index<S: std::hash::BuildHasher>(
         &self,
         stakes: Option<&HashMap<Pubkey, u64, S>>,
+        rng: ChaChaRng,
     ) -> (usize, Vec<ContactInfo>) {
         let mut peers = self.retransmit_peers();
         peers.push(self.lookup(&self.id()).unwrap().clone());
-        let contacts_and_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
+        let contacts_and_stakes: Vec<_> = ClusterInfo::stake_weighted_shuffle(&peers, stakes, rng);
         let mut index = 0;
         let peers: Vec<_> = contacts_and_stakes
             .into_iter()
             .enumerate()
-            .filter_map(|(i, (_, peer))| {
+            .map(|(i, (_, peer))| {
                 if peer.id == self.id() {
                     index = i;
-                    None
-                } else {
-                    Some(peer)
                 }
+                peer
             })
             .collect();
         (index, peers)
     }
 
-    pub fn sorted_tvu_peers(&self, stakes: Option<&HashMap<Pubkey, u64>>) -> Vec<ContactInfo> {
+    pub fn sorted_tvu_peers(
+        &self,
+        stakes: Option<&HashMap<Pubkey, u64>>,
+        rng: ChaChaRng,
+    ) -> Vec<ContactInfo> {
         let peers = self.tvu_peers();
-        let peers_with_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
+        let peers_with_stakes: Vec<_> = ClusterInfo::stake_weighted_shuffle(&peers, stakes, rng);
         peers_with_stakes
             .iter()
             .map(|(_, peer)| (*peer).clone())
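`stake_weighted_shuffle` above clamps every stake to at least 1 and then draws peers with probability proportional to their weight, using a ChaCha RNG seeded per blob so every node derives the same order. A dependency-free sketch of such a weighted shuffle (the tiny RNG and helper here are hypothetical stand-ins for `ChaChaRng::from_seed` and the crate's `weighted_shuffle`):

// Hypothetical stake-weighted shuffle: repeatedly draw an index with
// probability proportional to its remaining weight, deterministically per seed.
struct Lcg(u64);

impl Lcg {
    fn next(&mut self) -> u64 {
        // Knuth MMIX LCG constants; deterministic for a fixed seed.
        self.0 = self
            .0
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        self.0
    }
}

fn weighted_shuffle(mut weights: Vec<u64>, seed: u64) -> Vec<usize> {
    let mut rng = Lcg(seed);
    let mut order = Vec::with_capacity(weights.len());
    while weights.iter().any(|&w| w > 0) {
        let total: u64 = weights.iter().sum();
        let mut pick = rng.next() % total;
        for (i, w) in weights.iter_mut().enumerate() {
            if pick < *w {
                order.push(i);
                *w = 0; // remove the chosen peer from later draws
                break;
            }
            pick -= *w;
        }
    }
    order
}

fn main() {
    // Stakes clamped to at least 1, as in stake_weighted_shuffle above.
    let stakes = vec![100, 1, 10];
    let order = weighted_shuffle(stakes.clone(), 42);
    assert_eq!(order.len(), stakes.len());
    // Same seed, same order: a blob's seed deterministically fixes its broadcast table.
    assert_eq!(order, weighted_shuffle(stakes, 42));
    println!("{:?}", order);
}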
@@ -692,40 +710,45 @@ impl ClusterInfo {
 
     /// broadcast messages from the leader to layer 1 nodes
     /// # Remarks
-    pub fn broadcast(
-        id: &Pubkey,
-        contains_last_tick: bool,
-        broadcast_table: &[ContactInfo],
+    pub fn broadcast<I>(
+        &self,
         s: &UdpSocket,
-        blobs: &[SharedBlob],
-    ) -> Result<()> {
-        if broadcast_table.is_empty() {
-            debug!("{}:not enough peers in cluster_info table", id);
-            inc_new_counter_error!("cluster_info-broadcast-not_enough_peers_error", 1);
-            Err(ClusterInfoError::NoPeers)?;
-        }
+        blobs: I,
+        stakes: Option<&HashMap<Pubkey, u64>>,
+    ) -> Result<()>
+    where
+        I: IntoIterator,
+        I::Item: Borrow<SharedBlob>,
+    {
+        let mut last_err = Ok(());
+        let mut broadcast_table_len = 0;
+        let mut blobs_len = 0;
+        blobs.into_iter().for_each(|b| {
+            blobs_len += 1;
+            let blob = b.borrow().read().unwrap();
+            let broadcast_table = self.sorted_tvu_peers(stakes, ChaChaRng::from_seed(blob.seed()));
+            broadcast_table_len = cmp::max(broadcast_table_len, broadcast_table.len());
 
-        let orders = Self::create_broadcast_orders(contains_last_tick, blobs, broadcast_table);
-
-        trace!("broadcast orders table {}", orders.len());
-
-        let errs = Self::send_orders(id, s, orders);
-
-        for e in errs {
-            if let Err(e) = &e {
-                trace!("{}: broadcast result {:?}", id, e);
+            if !broadcast_table.is_empty() {
+                if let Err(e) = s.send_to(&blob.data[..blob.meta.size], &broadcast_table[0].tvu) {
+                    trace!("{}: broadcast result {:?}", self.id(), e);
+                    last_err = Err(e);
+                }
             }
-            e?;
-        }
+        });
 
-        inc_new_counter_debug!("cluster_info-broadcast-max_idx", blobs.len());
+        last_err?;
+
+        inc_new_counter_debug!("cluster_info-broadcast-max_idx", blobs_len);
+        if broadcast_table_len != 0 {
+            inc_new_counter_warn!("broadcast_service-num_peers", broadcast_table_len + 1);
+        }
 
         Ok(())
     }
 
     /// retransmit messages to a list of nodes
     /// # Remarks
-    /// We need to avoid having obj locked while doing any io, such as the `send_to`
+    /// We need to avoid having obj locked while doing a io, such as the `send_to`
     pub fn retransmit_to(
         obj: &Arc<RwLock<Self>>,
         peers: &[ContactInfo],
@@ -772,94 +795,6 @@ impl ClusterInfo {
         Ok(())
     }
 
-    fn send_orders(
-        id: &Pubkey,
-        s: &UdpSocket,
-        orders: Vec<(SharedBlob, Vec<&ContactInfo>)>,
-    ) -> Vec<io::Result<usize>> {
-        orders
-            .into_iter()
-            .flat_map(|(b, vs)| {
-                let blob = b.read().unwrap();
-
-                let ids_and_tvus = if log_enabled!(log::Level::Trace) {
-                    let v_ids = vs.iter().map(|v| v.id);
-                    let tvus = vs.iter().map(|v| v.tvu);
-                    let ids_and_tvus = v_ids.zip(tvus).collect();
-
-                    trace!(
-                        "{}: BROADCAST idx: {} sz: {} to {:?} coding: {}",
-                        id,
-                        blob.index(),
-                        blob.meta.size,
-                        ids_and_tvus,
-                        blob.is_coding()
-                    );
-
-                    ids_and_tvus
-                } else {
-                    vec![]
-                };
-
-                assert!(blob.meta.size <= BLOB_SIZE);
-                let send_errs_for_blob: Vec<_> = vs
-                    .iter()
-                    .map(move |v| {
-                        let e = s.send_to(&blob.data[..blob.meta.size], &v.tvu);
-                        trace!(
-                            "{}: done broadcast {} to {:?}",
-                            id,
-                            blob.meta.size,
-                            ids_and_tvus
-                        );
-                        e
-                    })
-                    .collect();
-                send_errs_for_blob
-            })
-            .collect()
-    }
-
-    pub fn create_broadcast_orders<'a, T>(
-        contains_last_tick: bool,
-        blobs: &[T],
-        broadcast_table: &'a [ContactInfo],
-    ) -> Vec<(T, Vec<&'a ContactInfo>)>
-    where
-        T: Clone,
-    {
-        // enumerate all the blobs in the window, those are the indices
-        // transmit them to nodes, starting from a different node.
-        if blobs.is_empty() {
-            return vec![];
-        }
-        let mut orders = Vec::with_capacity(blobs.len());
-
-        let x = thread_rng().gen_range(0, broadcast_table.len());
-        for (i, blob) in blobs.iter().enumerate() {
-            let br_idx = (x + i) % broadcast_table.len();
-
-            trace!("broadcast order data br_idx {}", br_idx);
-
-            orders.push((blob.clone(), vec![&broadcast_table[br_idx]]));
-        }
-
-        if contains_last_tick {
-            // Broadcast the last tick to everyone on the network so it doesn't get dropped
-            // (Need to maximize probability the next leader in line sees this handoff tick
-            // despite packet drops)
-            // If we had a tick at max_tick_height, then we know it must be the last
-            // Blob in the broadcast, There cannot be an entry that got sent after the
-            // last tick, guaranteed by the PohService).
-            orders.push((
-                blobs.last().unwrap().clone(),
-                broadcast_table.iter().collect(),
-            ));
-        }
-
-        orders
-    }
-
     pub fn window_index_request_bytes(&self, slot: u64, blob_index: u64) -> Result<Vec<u8>> {
         let req = Protocol::RequestWindowIndex(self.my_data().clone(), slot, blob_index);
         let out = serialize(&req)?;
@@ -887,33 +822,34 @@ impl ClusterInfo {
         }
         let n = thread_rng().gen::<usize>() % valid.len();
         let addr = valid[n].gossip; // send the request to the peer's gossip port
-        let out = {
-            match repair_request {
-                RepairType::Blob(slot, blob_index) => {
-                    datapoint_debug!(
-                        "cluster_info-repair",
-                        ("repair-slot", *slot, i64),
-                        ("repair-ix", *blob_index, i64)
-                    );
-                    self.window_index_request_bytes(*slot, *blob_index)?
-                }
-                RepairType::HighestBlob(slot, blob_index) => {
-                    datapoint_debug!(
-                        "cluster_info-repair_highest",
-                        ("repair-highest-slot", *slot, i64),
-                        ("repair-highest-ix", *blob_index, i64)
-                    );
-                    self.window_highest_index_request_bytes(*slot, *blob_index)?
-                }
-                RepairType::Orphan(slot) => {
-                    datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
-                    self.orphan_bytes(*slot)?
-                }
-            }
-        };
+        let out = self.map_repair_request(repair_request)?;
 
         Ok((addr, out))
     }
+
+    pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
+        match repair_request {
+            RepairType::Blob(slot, blob_index) => {
+                datapoint_debug!(
+                    "cluster_info-repair",
+                    ("repair-slot", *slot, i64),
+                    ("repair-ix", *blob_index, i64)
+                );
+                Ok(self.window_index_request_bytes(*slot, *blob_index)?)
+            }
+            RepairType::HighestBlob(slot, blob_index) => {
+                datapoint_debug!(
+                    "cluster_info-repair_highest",
+                    ("repair-highest-slot", *slot, i64),
+                    ("repair-highest-ix", *blob_index, i64)
+                );
+                Ok(self.window_highest_index_request_bytes(*slot, *blob_index)?)
+            }
+            RepairType::Orphan(slot) => {
+                datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
+                Ok(self.orphan_bytes(*slot)?)
+            }
+        }
+    }
     // If the network entrypoint hasn't been discovered yet, add it to the crds table
     fn add_entrypoint(&mut self, pulls: &mut Vec<(Pubkey, Bloom<Hash>, SocketAddr, CrdsValue)>) {
         match &self.entrypoint {
@@ -967,18 +903,18 @@ impl ClusterInfo {
     }
     fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> {
         let self_id = self.gossip.id;
-        let (_, peers, msgs) = self.gossip.new_push_messages(timestamp());
-        peers
+        let (_, push_messages) = self.gossip.new_push_messages(timestamp());
+        push_messages
             .into_iter()
-            .filter_map(|p| {
-                let peer_label = CrdsValueLabel::ContactInfo(p);
+            .filter_map(|(peer, messages)| {
+                let peer_label = CrdsValueLabel::ContactInfo(peer);
                 self.gossip
                     .crds
                     .lookup(&peer_label)
                     .and_then(CrdsValue::contact_info)
-                    .map(|p| p.gossip)
+                    .map(|p| (p.gossip, messages))
             })
-            .map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
+            .map(|(peer, msgs)| (peer, Protocol::PushMessage(self_id, msgs)))
             .collect()
     }
 
@@ -1156,7 +1092,7 @@ impl ClusterInfo {
         if caller.contact_info().is_none() {
             return vec![];
         }
-        let mut from = caller.contact_info().cloned().unwrap();
+        let from = caller.contact_info().unwrap();
         if from.id == self_id {
             warn!(
                 "PullRequest ignored, I'm talking to myself: me={} remoteme={}",
@@ -1174,15 +1110,10 @@ impl ClusterInfo {
         let len = data.len();
         trace!("get updates since response {}", len);
         let rsp = Protocol::PullResponse(self_id, data);
-        // The remote node may not know its public IP:PORT. Record what it looks like to us.
-        // This may or may not be correct for everybody, but it's better than leaving the remote with
-        // an unspecified address in our table
-        if from.gossip.ip().is_unspecified() {
-            inc_new_counter_debug!("cluster_info-window-request-updates-unspec-gossip", 1);
-            from.gossip = *from_addr;
-        }
+        // The remote node may not know its public IP:PORT. Instead of responding to the caller's
+        // gossip addr, respond to the origin addr.
         inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
-        to_shared_blob(rsp, from.gossip).ok().into_iter().collect()
+        to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
     }
     fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
         let len = data.len();
@@ -1487,7 +1418,7 @@ impl ClusterInfo {
         }
     }
 
-    /// Avalanche logic
+    /// Turbine logic
     /// 1 - For the current node find out if it is in layer 1
     /// 1.1 - If yes, then broadcast to all layer 1 nodes
     /// 1 - using the layer 1 index, broadcast to all layer 2 nodes assuming you know neighborhood size
@@ -1495,12 +1426,11 @@ impl ClusterInfo {
 /// 1 - also check if there are nodes in the next layer and repeat the layer 1 to layer 2 logic

 /// Returns Neighbor Nodes and Children Nodes `(neighbors, children)` for a given node based on its stake (Bank Balance)
-pub fn compute_retransmit_peers<S: std::hash::BuildHasher>(
-    stakes: Option<&HashMap<Pubkey, u64, S>>,
-    cluster_info: &Arc<RwLock<ClusterInfo>>,
+pub fn compute_retransmit_peers(
     fanout: usize,
+    my_index: usize,
+    peers: Vec<ContactInfo>,
 ) -> (Vec<ContactInfo>, Vec<ContactInfo>) {
-    let (my_index, peers) = cluster_info.read().unwrap().sorted_peers_and_index(stakes);
     //calc num_layers and num_neighborhoods using the total number of nodes
     let (num_layers, layer_indices) = ClusterInfo::describe_data_plane(peers.len(), fanout);

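With this signature change, compute_retransmit_peers no longer reads stakes out of a shared ClusterInfo handle; the caller is expected to produce the stake-sorted peer list and the node's own index and pass both in. A rough sketch of that calling pattern is below; Peer is a stand-in for ContactInfo, and the body of the function is a simplified placeholder rather than the real describe_data_plane layering.

    // Stand-in for ContactInfo plus a stake value; illustration only.
    #[derive(Clone, Debug)]
    struct Peer {
        id: u64,
        stake: u64,
    }

    // Placeholder mirroring the new signature: the caller supplies `my_index` and `peers`.
    // The real implementation derives neighbors/children from describe_data_plane; this
    // simply takes the next two fanout-sized windows so the example runs end to end.
    fn compute_retransmit_peers(fanout: usize, my_index: usize, peers: Vec<Peer>) -> (Vec<Peer>, Vec<Peer>) {
        let neighbors = peers.iter().skip(my_index + 1).take(fanout).cloned().collect();
        let children = peers.iter().skip(my_index + 1 + fanout).take(fanout).cloned().collect();
        (neighbors, children)
    }

    fn main() {
        let my_id = 7u64;
        let mut peers: Vec<Peer> = (0..20).map(|id| Peer { id, stake: 1000 - id }).collect();
        // Caller responsibility after this change: stake-sort the peers and locate itself.
        peers.sort_by(|a, b| b.stake.cmp(&a.stake).then(a.id.cmp(&b.id)));
        let my_index = peers.iter().position(|p| p.id == my_id).unwrap();
        let (neighbors, children) = compute_retransmit_peers(3, my_index, peers);
        println!("neighbors: {:?}", neighbors);
        println!("children: {:?}", children);
    }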
@@ -2091,11 +2021,14 @@ mod tests {
         let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
         cluster_info.set_leader(&leader.id);
         cluster_info.insert_info(peer.clone());
+        cluster_info.gossip.refresh_push_active_set(&HashMap::new());
         //check that all types of gossip messages are signed correctly
-        let (_, _, vals) = cluster_info.gossip.new_push_messages(timestamp());
+        let (_, push_messages) = cluster_info.gossip.new_push_messages(timestamp());
         // there should be some pushes ready
-        assert!(vals.len() > 0);
-        vals.par_iter().for_each(|v| assert!(v.verify()));
+        assert_eq!(push_messages.len() > 0, true);
+        push_messages
+            .values()
+            .for_each(|v| v.par_iter().for_each(|v| assert!(v.verify())));

         let (_, _, val) = cluster_info
             .gossip
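The updated test verifies signatures per peer batch: push_messages is now a map, so each value (a vector of signed gossip values) is checked with a parallel iterator. Below is the same pattern with a stand-in Signed type; it needs the rayon crate, and Signed::verify is a placeholder for CrdsValue::verify.

    use rayon::prelude::*;
    use std::collections::HashMap;

    // Stand-in for CrdsValue: anything with a verify() check.
    struct Signed {
        ok: bool,
    }

    impl Signed {
        fn verify(&self) -> bool {
            self.ok
        }
    }

    fn main() {
        let mut push_messages: HashMap<u64, Vec<Signed>> = HashMap::new();
        push_messages.insert(1, vec![Signed { ok: true }, Signed { ok: true }]);
        push_messages.insert(2, vec![Signed { ok: true }]);

        // There should be some pushes ready...
        assert!(!push_messages.is_empty());
        // ...and every value in every per-peer batch should verify.
        push_messages
            .values()
            .for_each(|batch| batch.par_iter().for_each(|v| assert!(v.verify())));
    }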
@@ -260,14 +260,16 @@ impl ClusterInfoRepairListener {
         num_slots_to_repair: usize,
         epoch_schedule: &EpochSchedule,
     ) -> Result<()> {
-        let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root + 1);
+        let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);

         if slot_iter.is_err() {
-            warn!("Root for repairee is on different fork OR replay_stage hasn't marked this slot as root yet");
+            info!(
+                "Root for repairee is on different fork. My root: {}, repairee_root: {}",
+                my_root, repairee_epoch_slots.root
+            );
             return Ok(());
         }

-        let slot_iter = slot_iter?;
+        let mut slot_iter = slot_iter?;

         let mut total_data_blobs_sent = 0;
         let mut total_coding_blobs_sent = 0;
@@ -276,6 +278,10 @@ impl ClusterInfoRepairListener {
             epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root);
         let max_confirmed_repairee_slot =
             epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);

+        // Skip the first slot in the iterator because we know it's the root slot which the repairee
+        // already has
+        slot_iter.next();
         for (slot, slot_meta) in slot_iter {
             if slot > my_root
                 || num_slots_repaired >= num_slots_to_repair
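The repairman's iterator now starts at the repairee's root itself (instead of root + 1) and pops the first element explicitly, since the repairee already has its root. A generic sketch of that skip-the-known-first-element pattern, with a plain range standing in for the blocktree iterator:

    fn main() {
        let repairee_root = 5u64;
        // Pretend this came from blocktree.rooted_slot_iterator(repairee_root): it yields
        // the root slot first, followed by later rooted slots.
        let mut slot_iter = repairee_root..=10;

        // Skip the first slot because we know it's the root slot the repairee already has.
        slot_iter.next();

        for slot in slot_iter {
            println!("would consider repairing slot {}", slot);
        }
    }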
@@ -620,8 +626,8 @@ mod tests {
         blocktree.insert_data_blobs(&blobs).unwrap();

         // Write roots so that these slots will qualify to be sent by the repairman
-        blocktree.set_root(0, 0).unwrap();
-        blocktree.set_root(num_slots - 1, 0).unwrap();
+        let roots: Vec<_> = (0..=num_slots - 1).collect();
+        blocktree.set_roots(&roots).unwrap();

         // Set up my information
         let my_pubkey = Pubkey::new_rand();
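Both test fixtures (here and in the next hunk) switch from two individual set_root calls to one batched set_roots over the full range, so every slot up to the last one is marked as a root rather than only the endpoints. A small sketch of building that roots vector; the blocktree call itself is left as a comment since it needs the real Blocktree type.

    fn main() {
        let num_slots: u64 = 8;

        // New approach: mark every slot in 0..num_slots as a root in one batched call.
        let roots: Vec<u64> = (0..=num_slots - 1).collect();
        assert_eq!(roots.len() as u64, num_slots);
        // blocktree.set_roots(&roots).unwrap(); // would be called here in the real test
        println!("roots = {:?}", roots);
    }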
@@ -696,8 +702,8 @@ mod tests {
         blocktree.insert_data_blobs(&blobs).unwrap();

         // Write roots so that these slots will qualify to be sent by the repairman
-        blocktree.set_root(0, 0).unwrap();
-        blocktree.set_root(slots_per_epoch * 2 - 1, 0).unwrap();
+        let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
+        blocktree.set_roots(&roots).unwrap();

         // Set up my information
         let my_pubkey = Pubkey::new_rand();
@@ -101,14 +101,10 @@ mod tests {
         let votes = (0..MAX_RECENT_VOTES)
             .map(|i| Vote::new(i as u64, Hash::default()))
             .collect::<Vec<_>>();
-        let vote_ix = vote_instruction::vote(
-            &node_keypair.pubkey(),
-            &vote_keypair.pubkey(),
-            &vote_keypair.pubkey(),
-            votes,
-        );
-
-        let mut vote_tx = Transaction::new_unsigned_instructions(vec![vote_ix]);
+        let vote_ix = vote_instruction::vote(&vote_keypair.pubkey(), &vote_keypair.pubkey(), votes);
+
+        let mut vote_tx = Transaction::new_with_payer(vec![vote_ix], Some(&node_keypair.pubkey()));
+
         vote_tx.partial_sign(&[&node_keypair], Hash::default());
         vote_tx.partial_sign(&[&vote_keypair], Hash::default());

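The vote test now builds the instruction in a single call and constructs the transaction with an explicit fee payer via Transaction::new_with_payer before partially signing with both keypairs. A minimal sketch of that build-then-partial-sign flow with stand-in types (not the real solana_sdk Transaction or Instruction):

    // Stand-ins for solana_sdk types; only the construction order is illustrated.
    #[derive(Debug)]
    struct Instruction(&'static str);

    #[derive(Debug)]
    struct Transaction {
        instructions: Vec<Instruction>,
        payer: Option<&'static str>,
        signers: Vec<&'static str>,
    }

    impl Transaction {
        // Mirrors the shape of Transaction::new_with_payer(instructions, Some(&payer.pubkey())).
        fn new_with_payer(instructions: Vec<Instruction>, payer: Option<&'static str>) -> Self {
            Self { instructions, payer, signers: vec![] }
        }

        // Mirrors partial_sign: each keypair adds its signature without requiring the others.
        fn partial_sign(&mut self, signer: &'static str) {
            self.signers.push(signer);
        }
    }

    fn main() {
        let vote_ix = Instruction("vote");
        let mut vote_tx = Transaction::new_with_payer(vec![vote_ix], Some("node_keypair"));
        vote_tx.partial_sign("node_keypair");
        vote_tx.partial_sign("vote_keypair");
        println!("{:?}", vote_tx);
    }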
@@ -8,11 +8,13 @@ use crate::contact_info::ContactInfo;
 use crate::entry::{Entry, EntrySlice};
 use crate::gossip_service::discover_cluster;
 use crate::locktower::VOTE_THRESHOLD_DEPTH;
+use hashbrown::HashSet;
 use solana_client::thin_client::create_client;
-use solana_runtime::epoch_schedule::MINIMUM_SLOT_LENGTH;
+use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
 use solana_sdk::client::SyncClient;
 use solana_sdk::hash::Hash;
 use solana_sdk::poh_config::PohConfig;
+use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
 use solana_sdk::system_transaction;
 use solana_sdk::timing::{
@@ -26,14 +28,18 @@ use std::time::Duration;
 const DEFAULT_SLOT_MILLIS: u64 = (DEFAULT_TICKS_PER_SLOT * 1000) / DEFAULT_NUM_TICKS_PER_SECOND;

 /// Spend and verify from every node in the network
-pub fn spend_and_verify_all_nodes(
+pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher>(
     entry_point_info: &ContactInfo,
     funding_keypair: &Keypair,
     nodes: usize,
+    ignore_nodes: HashSet<Pubkey, S>,
 ) {
     let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     for ingress_node in &cluster_nodes {
+        if ignore_nodes.contains(&ingress_node.id) {
+            continue;
+        }
         let random_keypair = Keypair::new();
         let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
         let bal = client
@@ -48,6 +54,9 @@ pub fn spend_and_verify_all_nodes(
         .retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 5, confs)
         .unwrap();
     for validator in &cluster_nodes {
+        if ignore_nodes.contains(&validator.id) {
+            continue;
+        }
         let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
         client.poll_for_signature_confirmation(&sig, confs).unwrap();
     }
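Callers of spend_and_verify_all_nodes now pass a set of node ids to leave out, and both loops simply continue past any node in that set. A sketch of the same skip pattern with plain types (the real function takes ContactInfos and a HashSet of Pubkeys):

    use std::collections::HashSet;

    #[derive(Debug)]
    struct Node {
        id: u64,
    }

    // Visit every cluster node except the ones explicitly ignored, mirroring the new
    // `if ignore_nodes.contains(&node.id) { continue; }` guards in both loops above.
    fn spend_and_verify(cluster_nodes: &[Node], ignore_nodes: &HashSet<u64>) {
        for ingress_node in cluster_nodes {
            if ignore_nodes.contains(&ingress_node.id) {
                continue;
            }
            println!("sending a transfer through node {}", ingress_node.id);
            for validator in cluster_nodes {
                if ignore_nodes.contains(&validator.id) {
                    continue;
                }
                println!("  confirming on validator {}", validator.id);
            }
        }
    }

    fn main() {
        let nodes: Vec<Node> = (0..4).map(|id| Node { id }).collect();
        let ignore: HashSet<u64> = [2u64].iter().cloned().collect();
        spend_and_verify(&nodes, &ignore);
    }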
@@ -141,7 +150,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
     let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
-    let first_two_epoch_slots = MINIMUM_SLOT_LENGTH * 3;
+    let first_two_epoch_slots = MINIMUM_SLOTS_PER_EPOCH * 3;

     for ingress_node in &cluster_nodes {
         client
@@ -6,6 +6,7 @@ use solana_sdk::rpc_port;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::signature::{Signable, Signature};
 use solana_sdk::timing::timestamp;
+use std::borrow::Cow;
 use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
 use std::net::{IpAddr, SocketAddr};

@@ -225,7 +226,7 @@ impl Signable for ContactInfo {
         self.id
     }

-    fn signable_data(&self) -> Vec<u8> {
+    fn signable_data(&self) -> Cow<[u8]> {
         #[derive(Serialize)]
         struct SignData {
             id: Pubkey,
@@ -251,7 +252,7 @@ impl Signable for ContactInfo {
             rpc_pubsub: me.rpc_pubsub,
             wallclock: me.wallclock,
         };
-        serialize(&data).expect("failed to serialize ContactInfo")
+        Cow::Owned(serialize(&data).expect("failed to serialize ContactInfo"))
     }

     fn get_signature(&self) -> Signature {
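signable_data now returns Cow<[u8]>, so implementations that must build the bytes on the fly (as ContactInfo does by serializing a SignData struct) return Cow::Owned, while implementations that already hold their sign-able bytes can hand out Cow::Borrowed without copying. A small sketch of both cases; Signable here is a simplified stand-in for the trait, not the solana_sdk definition.

    use std::borrow::Cow;

    // Simplified stand-in for the Signable trait's data accessor.
    trait Signable {
        fn signable_data(&self) -> Cow<[u8]>;
    }

    struct OwnedData {
        id: u32,
    }

    impl Signable for OwnedData {
        fn signable_data(&self) -> Cow<[u8]> {
            // Data has to be built on the fly (the ContactInfo case), so return an owned buffer.
            Cow::Owned(self.id.to_le_bytes().to_vec())
        }
    }

    struct BorrowedData {
        payload: Vec<u8>,
    }

    impl Signable for BorrowedData {
        fn signable_data(&self) -> Cow<[u8]> {
            // The bytes to sign already exist; no copy is needed.
            Cow::Borrowed(&self.payload)
        }
    }

    fn main() {
        let a = OwnedData { id: 7 };
        let b = BorrowedData { payload: vec![1, 2, 3] };
        println!("{:?} {:?}", a.signable_data(), b.signable_data());
    }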
Some files were not shown because too many files have changed in this diff.