Compare commits
182 Commits
| SHA1 |
| --- |
| ff9bd2f512 |
| 25810ce729 |
| 82c7f0e366 |
| 012d05f10b |
| f853595efb |
| 09e4f7e49c |
| cb37072ed7 |
| 0b109d3340 |
| dcdc5b8cf7 |
| 1a7c30bb86 |
| 3ebc14f965 |
| cf589efbbf |
| 94d5c64281 |
| 566de1fd0e |
| cb0f367084 |
| e08e1fe6ac |
| d36af917ea |
| c81733b41a |
| b6558a2ef3 |
| 634d8e25ee |
| fea212e64e |
| e3ab76f1a7 |
| 87f1bd58b9 |
| a056c1f18f |
| 8b34fd2c75 |
| b912ee7fdf |
| 3cf708f019 |
| 070e0e9613 |
| 3e678511d2 |
| 4ce2105548 |
| 721c6a7e2d |
| 08f0fb1e14 |
| f5f5281f85 |
| 1684a7bd18 |
| 8b1724bb70 |
| eebdfe8d73 |
| 82776b333d |
| e71ab55288 |
| fd60ef8a8d |
| aa0b67c93c |
| 15aa07f2a0 |
| e4536621df |
| a3c302c36a |
| d12705f9b0 |
| 0add5c1dc8 |
| a9e63455a1 |
| 4dc0495a1b |
| 5a79676b8a |
| b67b0bff05 |
| 4c200635b7 |
| b98200aca4 |
| d59c1cd412 |
| c4d9dff590 |
| cf91ff8694 |
| e867ce0944 |
| 29a25990d3 |
| 9a40ad76bd |
| 54b44977e0 |
| 9c7ccc0e2b |
| 7710ef8b2b |
| c969975fde |
| 3eed6a6090 |
| 1661a7a55f |
| 6293d324db |
| c1ecfec3b0 |
| 05b4dbf148 |
| 4efada6d84 |
| 23c01473a0 |
| f2e2106f62 |
| 0cbac26591 |
| 4e7e5ace9d |
| ab11327e34 |
| 3ba93aa8fe |
| c309cd80aa |
| d22a1c9b1f |
| 29698fcd38 |
| 7372ec9e1a |
| 840a64ee8b |
| 524bc2b9a6 |
| 62a29a41d1 |
| 5406d82d89 |
| de6af95061 |
| 43f7cd8149 |
| 69e67d06a7 |
| 4f47fc00bc |
| 4b04c37c36 |
| b27b515186 |
| 05bcb7f292 |
| 95a16426f3 |
| bec094bb3e |
| af9ebf1d1a |
| 6f2f7018e8 |
| 101d6b92ee |
| 349e8a9462 |
| c0bffb56df |
| 970cc32e65 |
| 3ab492ccf8 |
| d83a71d89f |
| efbb573316 |
| 85554087d1 |
| c3155a6e39 |
| 4abe95abec |
| e0acd48944 |
| afb00432d4 |
| 320bd66c84 |
| 1a9ac62f60 |
| 809b051f10 |
| baac21209e |
| 5fb8baed04 |
| 512bfc93cb |
| 0f88872650 |
| f4e40d2c41 |
| 6eac5951ed |
| 475a74d37f |
| b8ee952135 |
| 15bed29afa |
| 6dbe7e8bee |
| 2cd556e43c |
| 060793f451 |
| 7e409a13cd |
| aab410380e |
| 67b8ad6a0f |
| c1e39a3b98 |
| 7e1a7b1f64 |
| a9cfae486c |
| 8514d27c2f |
| 8999bfef65 |
| 96425fb520 |
| ce505d24b1 |
| f2187780d2 |
| 6a878602f2 |
| f8543a268f |
| e9b82bacda |
| 684e1c73dd |
| 901c74b653 |
| 2c0afe71b2 |
| 2f4a3ed190 |
| 26a7eb6fa5 |
| aa21f5343a |
| 9c2809db21 |
| 9ccd362461 |
| 596f611ede |
| 78d5ace754 |
| 2b3218b5f2 |
| d0fb55d9b1 |
| a2c8e3952f |
| beb8c7914e |
| 6bef16a6a1 |
| e03215c4c0 |
| 8d1fd29fa6 |
| 46f655eddd |
| ca36a6f4e0 |
| fdb12b54fa |
| 09dd4bb702 |
| 01657ddfe7 |
| 51a2988bb2 |
| 083090817a |
| f3676e2d03 |
| 4b8cb72977 |
| 2518e95fb0 |
| bc17edcda3 |
| eb185b9ea5 |
| aa6c82cfdc |
| b9bb5af4a5 |
| 1e20d449ce |
| e94f268346 |
| 7ec198b9cc |
| b2e762ccc6 |
| bee411e826 |
| 34344982a9 |
| f73d38739a |
| 63d66ece57 |
| a4b5493ba1 |
| 8d613f3977 |
| 0ff2bfdd0c |
| 141e25d567 |
| c67cc694ae |
| d77359914f |
| 9293a54234 |
| d9983905b3 |
| 3dc47a46d5 |
| 8638b3bb19 |
14 .buildkite/env/secrets.ejson (vendored)

@@ -1,12 +1,12 @@
 {
 "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
 "environment": {
-"CODECOV_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:rHBSqXK7uSnveA4qwUxARZjTNZcA0hXU:ko8lLGwPECpVm19znWBRxKEpMF7xpTHBCEzVOxRar2wDThw4lNDAKqTS61vtkJLtdkHtug==]",
+"CODECOV_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:jA0WqO70coUtF0iokRdgtCR/lF/lETAI:d/Wl8Tdl6xVh/B39cTf1DaQkomR7I/2vMhvxd1msJ++BjI2l3p2dFoGsXqWT+/os8VgiPg==]",
-"CRATES_IO_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:NzN6y0ooXJBYvxB589khepthSxhKFkLB:ZTTFZh2A/kB2SAgjJJAMbwAfanRlzxOCNMVcA2MXBCpQHJeeZGULg+0MLACYswfS]",
+"CRATES_IO_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:2FaZ6k4RGH8luyNRaN6yeZUQDNAu2KwC:XeYe0tCAivYE0F9HEWM79mAI6kNbfYaqP7k7yY+SBDvs0341U9BdGZp7SErbHleS]",
-"GITHUB_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:iy0Fnxeo0aslTCvgXc5Ddj2ly6ZsQ8gK:GNOOj/kZUJ2rYKxTbLyVKtajWNoGQ3PcChwfEB4HdN18qDHlB96Z7gx01Pcf0qeIHODOWRtxlH4=]",
+"GITHUB_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:9kh4DGPiGDcUU7ejSFWg3gTW8nrOM09Q:b+GE07Wu6/bEnkDZcUtf48vTKAFphrCSt3tNNER9h6A+wZ80k499edw4pbDdl9kEvxB30fFwrLQ=]",
-"INFLUX_DATABASE": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:Ly/TpIRF0oCxmiBWv225S3mX8s6pfQR+:+tXGB2c9rRCVDcgNO1IDOo89]",
+"INFLUX_DATABASE": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:rCHsYi0rc7dmvr1V3wEgNoaNIyr+9ClM:omjVcOqM7vwt44kJ+As4BjJL]",
-"INFLUX_PASSWORD": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:ycrq1uQLoSfI932czD+krUOaJeLWpeq6:2iS7ukp/C7wVD3IT0GvQVcwccWGyLr4UocStF/XiDi0OB/N3YKIKN8SQU4ob1b6StAPZ/XOHmag=]",
+"INFLUX_PASSWORD": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:bP5Gw1Vy66viKFKO41o2Gho998XajH/5:khkCYz2LFvkJkk7R4xY1Hfz1yU3/NENjauiUkPhXA+dmg1qOIToxEagCgIkRwyeCiYaoCR6CZyw=]",
-"INFLUX_USERNAME": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:35hBKofakZ4Db/u0TOW53RXoNWzJTIcl:HWREcMTrgZ8DGB0ZupgSzNWr/tVyE06P]",
+"INFLUX_USERNAME": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:ZamCvza2W9/bZRGSkqDu55xNN04XKKhp:5jlmCOdFbpL7EFez41zCbLfk3ZZlfmhI]",
-"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:kRz8CyJYKAg/AiwgLrcRNDJAmlRX2zvX:uV1XV6y2Fb+dN4Z9BIMPBRiNS3n+NL8GlJXyu1i7meIsph1DzfLg4Thcp5Mj9nUsFNLgqQgjnsa5C4XNY/h5AgMSzRrJxVj7RhVTRmDJ5/Vjq6v7wCMRfBOvF3rITsV4zTwWSV8yafFmS+ZQ+QJTRgtYsuoYAUNZ06IEebfDHcuNwws72hEGoD9w43hOLSpyEOmXbtZ9h1lIRxrgsrhYDpBlU5LkhDeTXAX5M5dwYxyquJFRwd5quGDV5DYsCh9bAkbjAyjWYymVJ78U9YJIQHT9izzQqTDlMQN49EbLo7MDIaC7O7HVtb7unDJs+DRejbHacoyWVulqVVwu3GRiZezu8zdjwzGHphMMxOtKQaidnqYgflNp/O01I8wZRgR1alsGcmIhEhI8YV/IvQ==]"
+"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:Oi2nsRxnvWnnBYsB6KwEDzLPcYgpYojU:ELbvjXkXKlgFCMES45R+fxG7Ex43WHWErjMbxZoqasxyr7GSH66hQzUWqiQSJyT4ukYrRhRC9YrsKKGkjACLU57X4EGIy9TuLgTnyBYhPnxLYStC3y/7o/MB5FCTt5wHJw3/A9p+me5+T4UmyZ7OeP21NhDUCGQcb0040VwYWS78klW2aQESJJ6wTI1xboE8/zC0vtnB/u50+LydbKEyb21r6y3OH9FYNEpSwIspWKcgpruJdQSCnDoKxP9YR1yzvk2rabss13LJNdV1Y6mQNIdP4OIFQhCs6dXT253RTl5qdZ0MruHwlp8wX4btOuYDcCoM5exr]"
 }
 }
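A note on the shape of this change: within each side of the diff, all seven encrypted values share the same first base64 field, and that shared field (together with the nonce and ciphertext fields) is what differs between the two sides, while `_public_key` is untouched. This is consistent with how ejson (the tool implied by the `.ejson` extension and the `EJ[1:...]` value format) re-encrypts every plaintext value against the file's `_public_key` on each run. A minimal sketch of the layout, with the field breakdown assumed from ejson's value scheme and `SOME_SECRET` purely illustrative:

```json
{
  "_public_key": "<recipient public key; plaintext values are encrypted against this>",
  "environment": {
    "SOME_SECRET": "EJ[1:<per-run session public key>:<nonce>:<ciphertext>]"
  }
}
```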
11 .gitignore (vendored)

@@ -1,10 +1,13 @@
-/target/
-/ledger-tool/target/
-/wallet/target/
-/core/target/
 /book/html/
 /book/src/img/
 /book/src/tests.ok
+/core/target/
+/farf/
+/ledger-tool/target/
+/solana-release/
+solana-release.tar.bz2
+/target/
+/wallet/target/

 **/*.rs.bk
 .cargo
483 Cargo.lock (generated)
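Most of the churn in this file is mechanical. In the lockfile format of this era, a dependency is recorded as a single `"name version (source)"` string, and workspace-internal packages carry no `(source)` suffix, so bumping the workspace from 0.13.0 to 0.14.0 rewrites one `version` field per `solana-*` package plus one dependency line in every package that names one. A trimmed sketch of one entry in that format (modeled on the entries below, not a verbatim excerpt):

```toml
[[package]]
name = "solana-runtime"
version = "0.14.0"
dependencies = [
 # registry dependency: version plus a source suffix
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 # workspace (path) dependency: version only, no source suffix
 "solana-sdk 0.14.0",
]
```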
@@ -347,6 +347,14 @@ dependencies = [
 "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "clear_on_drop"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "clicolors-control"
 version = "1.0.0"
@@ -536,6 +544,18 @@ dependencies = [
 "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "curve25519-dalek"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"subtle 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "difference"
 version = "2.0.0"
@@ -572,6 +592,18 @@ name = "dtoa"
 version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"

+[[package]]
+name = "ed25519-dalek"
+version = "1.0.0-pre.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"curve25519-dalek 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "either"
 version = "1.5.1"
@@ -856,7 +888,12 @@ dependencies = [

 [[package]]
 name = "hashbrown"
-version = "0.2.1"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "hashbrown"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
@@ -1435,6 +1472,16 @@ dependencies = [
 "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "num-derive"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
+"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "num-integer"
 version = "0.1.39"
@@ -1867,6 +1914,17 @@ dependencies = [
 "redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "reed-solomon-erasure"
+version = "3.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
+"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
+"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "regex"
 version = "1.1.2"
@@ -2136,14 +2194,15 @@ dependencies = [

 [[package]]
 name = "solana"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"ed25519-dalek 1.0.0-pre.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2160,113 +2219,146 @@ dependencies = [
 "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"reed-solomon-erasure 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "reqwest 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)",
-"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-budget-api 0.13.0",
-"solana-budget-program 0.13.0",
-"solana-client 0.13.0",
-"solana-drone 0.13.0",
-"solana-kvstore 0.13.0",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-netutil 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
-"solana-storage-api 0.13.0",
-"solana-vote-api 0.13.0",
-"solana-vote-program 0.13.0",
-"solana-vote-signer 0.13.0",
+"solana-budget-api 0.14.0",
+"solana-budget-program 0.14.0",
+"solana-client 0.14.0",
+"solana-drone 0.14.0",
+"solana-kvstore 0.14.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-netutil 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
+"solana-storage-api 0.14.0",
+"solana-vote-api 0.14.0",
+"solana-vote-program 0.14.0",
+"solana-vote-signer 0.14.0",
 "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "solana-bench-exchange"
+version = "0.14.0"
+dependencies = [
+"bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
+"solana 0.14.0",
+"solana-client 0.14.0",
+"solana-drone 0.14.0",
+"solana-exchange-api 0.14.0",
+"solana-exchange-program 0.14.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-netutil 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
+"untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"ws 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "solana-bench-streamer"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-logger 0.13.0",
-"solana-netutil 0.13.0",
+"solana 0.14.0",
+"solana-logger 0.14.0",
+"solana-netutil 0.14.0",
 ]

 [[package]]
 name = "solana-bench-tps"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-client 0.13.0",
-"solana-drone 0.13.0",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-netutil 0.13.0",
-"solana-sdk 0.13.0",
+"solana 0.14.0",
+"solana-client 0.14.0",
+"solana-drone 0.14.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-netutil 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-bpf-programs"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-bpfloader 0.13.0",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-bpfloader 0.14.0",
+"solana-logger 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 "solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-bpfloader"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
 "solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-budget-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-budget-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-budget-api 0.13.0",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
+"solana-budget-api 0.14.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-client"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2277,37 +2369,37 @@ dependencies = [
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-netutil 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-netutil 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-config-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-config-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-config-api 0.13.0",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
+"solana-config-api 0.14.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-drone"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2316,94 +2408,97 @@ dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-sdk 0.14.0",
 "tokio 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-exchange-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-exchange-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-exchange-api 0.13.0",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
+"solana-exchange-api 0.14.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-failure-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-fullnode"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-drone 0.13.0",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-netutil 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
-"solana-vote-api 0.13.0",
-"solana-vote-signer 0.13.0",
+"solana 0.14.0",
+"solana-drone 0.14.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-netutil 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
+"solana-vote-api 0.14.0",
+"solana-vote-signer 0.14.0",
 ]

 [[package]]
 name = "solana-genesis"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"hashbrown 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-budget-api 0.13.0",
-"solana-config-api 0.13.0",
-"solana-exchange-api 0.13.0",
-"solana-sdk 0.13.0",
-"solana-storage-api 0.13.0",
-"solana-token-api 0.13.0",
-"solana-vote-api 0.13.0",
+"solana 0.14.0",
+"solana-budget-api 0.14.0",
+"solana-config-api 0.14.0",
+"solana-exchange-api 0.14.0",
+"solana-sdk 0.14.0",
+"solana-stake-api 0.14.0",
+"solana-storage-api 0.14.0",
+"solana-token-api 0.14.0",
+"solana-vote-api 0.14.0",
 ]

 [[package]]
 name = "solana-gossip"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-netutil 0.13.0",
-"solana-sdk 0.13.0",
+"solana 0.14.0",
+"solana-client 0.14.0",
+"solana-netutil 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-install"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2420,27 +2515,27 @@ dependencies = [
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_yaml 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-client 0.13.0",
-"solana-config-api 0.13.0",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
-"tar 0.4.22 (registry+https://github.com/rust-lang/crates.io-index)",
+"solana-client 0.14.0",
+"solana-config-api 0.14.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
+"tar 0.4.23 (registry+https://github.com/rust-lang/crates.io-index)",
 "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-keygen"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-sdk 0.13.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-kvstore"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2455,40 +2550,40 @@ dependencies = [

 [[package]]
 name = "solana-ledger-tool"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "assert_cmd 0.11.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana 0.14.0",
+"solana-logger 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-logger"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-metrics"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "reqwest 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-sdk 0.13.0",
+"solana-sdk 0.14.0",
 "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "solana-netutil"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "ipnetwork 0.12.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2497,39 +2592,39 @@ dependencies = [
 "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "reqwest 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)",
 "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
+"solana-logger 0.14.0",
 ]

 [[package]]
 name = "solana-noop-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-replicator"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-logger 0.13.0",
-"solana-netutil 0.13.0",
-"solana-sdk 0.13.0",
+"solana 0.14.0",
+"solana-logger 0.14.0",
+"solana-netutil 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-runtime"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bv 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
 "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2539,26 +2634,29 @@ dependencies = [
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-sdk 0.13.0",
-"solana-vote-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-sdk 0.14.0",
+"solana-vote-api 0.14.0",
 ]

 [[package]]
 name = "solana-sdk"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"ed25519-dalek 1.0.0-pre.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "generic-array 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2568,109 +2666,111 @@ dependencies = [

 [[package]]
 name = "solana-stake-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
-"solana-vote-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
+"solana-vote-api 0.14.0",
 ]

 [[package]]
 name = "solana-stake-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
-"solana-stake-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
+"solana-stake-api 0.14.0",
 ]

 [[package]]
 name = "solana-storage-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-storage-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
-"solana-storage-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
+"solana-storage-api 0.14.0",
 ]

 [[package]]
 name = "solana-token-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-token-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
-"solana-token-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
+"solana-token-api 0.14.0",
 ]

 [[package]]
 name = "solana-upload-perf"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-metrics 0.13.0",
+"solana-metrics 0.14.0",
 ]

 [[package]]
 name = "solana-vote-api"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-metrics 0.13.0",
-"solana-runtime 0.13.0",
-"solana-sdk 0.13.0",
+"solana-logger 0.14.0",
+"solana-metrics 0.14.0",
+"solana-runtime 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-vote-program"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-logger 0.13.0",
-"solana-sdk 0.13.0",
-"solana-vote-api 0.13.0",
+"solana-logger 0.14.0",
+"solana-sdk 0.14.0",
+"solana-vote-api 0.14.0",
 ]

 [[package]]
 name = "solana-vote-signer"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2680,13 +2780,13 @@ dependencies = [
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana-metrics 0.13.0",
-"solana-sdk 0.13.0",
+"solana-metrics 0.14.0",
+"solana-sdk 0.14.0",
 ]

 [[package]]
 name = "solana-wallet"
-version = "0.13.0"
+version = "0.14.0"
 dependencies = [
 "bincode 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2694,17 +2794,18 @@ dependencies = [
 "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
-"solana 0.13.0",
-"solana-budget-api 0.13.0",
-"solana-budget-program 0.13.0",
-"solana-client 0.13.0",
-"solana-drone 0.13.0",
-"solana-logger 0.13.0",
-"solana-netutil 0.13.0",
-"solana-sdk 0.13.0",
-"solana-vote-api 0.13.0",
-"solana-vote-signer 0.13.0",
+"solana 0.14.0",
+"solana-budget-api 0.14.0",
+"solana-budget-program 0.14.0",
+"solana-client 0.14.0",
+"solana-drone 0.14.0",
+"solana-logger 0.14.0",
+"solana-netutil 0.14.0",
+"solana-sdk 0.14.0",
+"solana-vote-api 0.14.0",
+"solana-vote-signer 0.14.0",
 ]

 [[package]]
@ -2736,6 +2837,11 @@ name = "strsim"
|
|||||||
version = "0.8.0"
|
version = "0.8.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "subtle"
|
||||||
|
version = "2.0.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "syn"
|
name = "syn"
|
||||||
version = "0.11.11"
|
version = "0.11.11"
|
||||||
@ -2786,7 +2892,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tar"
|
name = "tar"
|
||||||
version = "0.4.22"
|
version = "0.4.23"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
"filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -3349,6 +3455,7 @@ dependencies = [
|
|||||||
"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
|
"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
|
||||||
"checksum clang-sys 0.26.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ef0c1bcf2e99c649104bd7a7012d8f8802684400e03db0ec0af48583c6fa0e4"
|
"checksum clang-sys 0.26.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ef0c1bcf2e99c649104bd7a7012d8f8802684400e03db0ec0af48583c6fa0e4"
|
||||||
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
|
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
|
||||||
|
"checksum clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17"
|
||||||
"checksum clicolors-control 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "73abfd4c73d003a674ce5d2933fca6ce6c42480ea84a5ffe0a2dc39ed56300f9"
|
"checksum clicolors-control 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "73abfd4c73d003a674ce5d2933fca6ce6c42480ea84a5ffe0a2dc39ed56300f9"
|
||||||
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
|
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
|
||||||
"checksum colored 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6e9a455e156a4271e12fd0246238c380b1e223e3736663c7a18ed8b6362028a9"
|
"checksum colored 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6e9a455e156a4271e12fd0246238c380b1e223e3736663c7a18ed8b6362028a9"
|
||||||
@ -3368,11 +3475,13 @@ dependencies = [
|
|||||||
"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
|
"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
|
||||||
"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
|
"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
|
||||||
"checksum crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"
|
"checksum crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"
|
||||||
|
"checksum curve25519-dalek 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e1f8a6fc0376eb52dc18af94915cc04dfdf8353746c0e8c550ae683a0815e5c1"
|
||||||
"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
|
"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
|
||||||
"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90"
|
"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90"
|
||||||
"checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c"
|
"checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c"
|
||||||
"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901"
|
"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901"
|
||||||
"checksum dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6d301140eb411af13d3115f9a562c85cc6b541ade9dfa314132244aaee7489dd"
|
"checksum dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6d301140eb411af13d3115f9a562c85cc6b541ade9dfa314132244aaee7489dd"
|
||||||
|
"checksum ed25519-dalek 1.0.0-pre.1 (registry+https://github.com/rust-lang/crates.io-index)" = "81956bcf7ef761fb4e1d88de3fa181358a0d26cbcb9755b587a08f9119824b86"
|
||||||
"checksum either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c67353c641dc847124ea1902d69bd753dee9bb3beff9aa3662ecf86c971d1fac"
|
"checksum either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c67353c641dc847124ea1902d69bd753dee9bb3beff9aa3662ecf86c971d1fac"
|
||||||
"checksum elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4841de15dbe0e49b9b62a417589299e3be0d557e0900d36acb87e6dae47197f5"
|
"checksum elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4841de15dbe0e49b9b62a417589299e3be0d557e0900d36acb87e6dae47197f5"
|
||||||
"checksum elfkit 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "02f182eda16a7360c80a2f8638d0726e9d5478173058f1505c42536ca666ecd2"
|
"checksum elfkit 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "02f182eda16a7360c80a2f8638d0726e9d5478173058f1505c42536ca666ecd2"
|
||||||
@ -3405,7 +3514,8 @@ dependencies = [
|
|||||||
"checksum h2 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "910a5e7be6283a9c91b3982fa5188368c8719cce2a3cf3b86048673bf9d9c36b"
|
"checksum h2 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "910a5e7be6283a9c91b3982fa5188368c8719cce2a3cf3b86048673bf9d9c36b"
|
||||||
"checksum hash32 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "12d790435639c06a7b798af9e1e331ae245b7ef915b92f70a39b4cf8c00686af"
|
"checksum hash32 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "12d790435639c06a7b798af9e1e331ae245b7ef915b92f70a39b4cf8c00686af"
|
||||||
"checksum hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da"
|
"checksum hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da"
|
||||||
"checksum hashbrown 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d22f2163f3350b00b15b96da81d4ec3a8616983c010e0b69f6e4d060a2db9cd4"
|
"checksum hashbrown 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4900fa4e80b3d15c78a08ec8a08433246063fa7577e7b2c6426b3b21b1f79"
|
||||||
|
"checksum hashbrown 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "570178d5e4952010d138b0f1d581271ff3a02406d990f887d1e87e3d6e43b0ac"
|
||||||
"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
|
"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
|
||||||
"checksum hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddc2928beef125e519d69ae1baa8c37ea2e0d3848545217f6db0179c5eb1d639"
|
"checksum hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddc2928beef125e519d69ae1baa8c37ea2e0d3848545217f6db0179c5eb1d639"
|
||||||
"checksum hex-literal-impl 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "520870c3213943eb8d7803e80180d12a6c7ceb4ae74602544529d1643dc4ddda"
|
"checksum hex-literal-impl 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "520870c3213943eb8d7803e80180d12a6c7ceb4ae74602544529d1643dc4ddda"
|
||||||
@ -3465,6 +3575,7 @@ dependencies = [
|
|||||||
"checksum nix 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46f0f3210768d796e8fa79ec70ee6af172dacbe7147f5e69be5240a47778302b"
|
"checksum nix 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46f0f3210768d796e8fa79ec70ee6af172dacbe7147f5e69be5240a47778302b"
|
||||||
"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
|
"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
|
||||||
"checksum nom 4.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22293d25d3f33a8567cc8a1dc20f40c7eeb761ce83d0fcca059858580790cac3"
|
"checksum nom 4.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22293d25d3f33a8567cc8a1dc20f40c7eeb761ce83d0fcca059858580790cac3"
|
||||||
|
"checksum num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2"
|
||||||
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
|
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
|
||||||
"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"
|
"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"
|
||||||
"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
|
"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
|
||||||
@ -3516,6 +3627,7 @@ dependencies = [
|
|||||||
"checksum redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)" = "423e376fffca3dfa06c9e9790a9ccd282fafb3cc6e6397d01dbf64f9bacc6b85"
|
"checksum redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)" = "423e376fffca3dfa06c9e9790a9ccd282fafb3cc6e6397d01dbf64f9bacc6b85"
|
||||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||||
"checksum redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe5204c3a17e97dde73f285d49be585df59ed84b50a872baf416e73b62c3828"
|
"checksum redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe5204c3a17e97dde73f285d49be585df59ed84b50a872baf416e73b62c3828"
|
||||||
|
"checksum reed-solomon-erasure 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "77cbbd4c02f53e345fe49e74255a1b10080731ffb2a03475e11df7fc8a043c37"
|
||||||
"checksum regex 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53ee8cfdddb2e0291adfb9f13d31d3bbe0a03c9a402c01b1e24188d86c35b24f"
|
"checksum regex 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53ee8cfdddb2e0291adfb9f13d31d3bbe0a03c9a402c01b1e24188d86c35b24f"
|
||||||
"checksum regex-syntax 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8c2f35eedad5295fdf00a63d7d4b238135723f92b434ec06774dad15c7ab0861"
|
"checksum regex-syntax 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8c2f35eedad5295fdf00a63d7d4b238135723f92b434ec06774dad15c7ab0861"
|
||||||
"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5"
|
"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5"
|
||||||
@ -3550,12 +3662,13 @@ dependencies = [
|
|||||||
"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
|
"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
|
||||||
"checksum string 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b639411d0b9c738748b5397d5ceba08e648f4f1992231aa859af1a017f31f60b"
|
"checksum string 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b639411d0b9c738748b5397d5ceba08e648f4f1992231aa859af1a017f31f60b"
|
||||||
"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||||
|
"checksum subtle 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "702662512f3ddeb74a64ce2fbbf3707ee1b6bb663d28bb054e0779bbc720d926"
|
||||||
"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
|
"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
|
||||||
"checksum syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1825685f977249735d510a242a6727b46efe914bb67e38d30c071b1b72b1d5c2"
|
"checksum syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1825685f977249735d510a242a6727b46efe914bb67e38d30c071b1b72b1d5c2"
|
||||||
"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
|
"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
|
||||||
"checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015"
|
"checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015"
|
||||||
"checksum sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "617f594d3869801871433390254b4a79f2a18176d7f4ad5784fa990bc8c12986"
|
"checksum sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "617f594d3869801871433390254b4a79f2a18176d7f4ad5784fa990bc8c12986"
|
||||||
"checksum tar 0.4.22 (registry+https://github.com/rust-lang/crates.io-index)" = "c2167ff53da2a661702b3299f71a91b61b1dffef36b4b2884b1f9c67254c0133"
|
"checksum tar 0.4.23 (registry+https://github.com/rust-lang/crates.io-index)" = "8acf894d8bd30d060f3a8e457463f341ccabe475329e0670896de56e29e11b49"
|
||||||
"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
|
"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
|
||||||
"checksum tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b86c784c88d98c801132806dadd3819ed29d8600836c4088e855cdf3e178ed8a"
|
"checksum tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b86c784c88d98c801132806dadd3819ed29d8600836c4088e855cdf3e178ed8a"
|
||||||
"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
|
"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
|
||||||
@@ -1,6 +1,7 @@
 [workspace]
 members = [
     ".",
+    "bench-exchange",
     "bench-streamer",
     "bench-tps",
     "drone",
3 bench-exchange/.gitignore vendored Normal file
@@ -0,0 +1,3 @@
/target/
/config/
/config-local/
39 bench-exchange/Cargo.toml Normal file
@@ -0,0 +1,39 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.14.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
bs58 = "0.2.0"
clap = "2.32.0"
bincode = "1.1.2"
env_logger = "0.6.0"
itertools = "0.8.0"
log = "0.4.6"
num-traits = "0.2"
num-derive = "0.2"
rayon = "1.0.3"
serde = "1.0.87"
serde_derive = "1.0.87"
serde_json = "1.0.38"
# solana-runtime = { path = "../solana/runtime"}
solana = { path = "../core", version = "0.14.0" }
solana-client = { path = "../client", version = "0.14.0" }
solana-drone = { path = "../drone", version = "0.14.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.14.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.14.0" }
solana-logger = { path = "../logger", version = "0.14.0" }
solana-metrics = { path = "../metrics", version = "0.14.0" }
solana-netutil = { path = "../netutil", version = "0.14.0" }
solana-runtime = { path = "../runtime", version = "0.14.0" }
solana-sdk = { path = "../sdk", version = "0.14.0" }
ws = "0.8.0"
untrusted = "0.6.2"

[features]
cuda = ["solana/cuda"]
erasure = []
483 bench-exchange/README.md Normal file
@@ -0,0 +1,483 @@
# token-exchange
Solana Token Exchange Bench

If you can't wait, jump to [Running the exchange](#Running-the-exchange) to
learn how to start and interact with the exchange.

### Table of Contents
[Overview](#Overview)<br>
[Premise](#Premise)<br>
[Exchange startup](#Exchange-startup)<br>
[Trade requests](#Trade-requests)<br>
[Trade cancellations](#Trade-cancellations)<br>
[Trade swaps](#Trade-swaps)<br>
[Exchange program operations](#Exchange-program-operations)<br>
[Quotes and OHLCV](#Quotes-and-OHLCV)<br>
[Investor strategies](#Investor-strategies)<br>
[Running the exchange](#Running-the-exchange)<br>

## Overview

An exchange is a marketplace where one asset can be traded for another. This
demo shows one way to host an exchange on the Solana blockchain by
emulating a currency exchange.

The assets are virtual tokens held by investors, who may post trade requests to
the exchange. A broker monitors the exchange and posts swap requests for
matching trade orders. All the transactions can execute concurrently.

## Premise

- Exchange
  - An exchange is a marketplace where one asset can be traded for another.
    The exchange in this demo is the on-chain program that implements the
    tokens and the policies for trading those tokens.
- Token
  - A virtual asset that can be owned, traded, and holds virtual intrinsic value
    compared to other assets. There are four types of tokens in this demo: A,
    B, C, and D. Each one may be traded for another.
- Token account
  - An account owned by the exchange that holds a quantity of one type of token.
- Account request
  - A request to create a token account.
- Token request
  - A request to deposit tokens of a particular type into a token account.
- Token pair
  - A unique ordered list of two tokens. For the four types of tokens used in
    this demo, the valid pairs are AB, AC, AD, BC, BD, CD.
- Direction of trade
  - Describes which token in the pair the investor wants to sell and buy, and can
    be either "To" or "From". For example, if an investor issues a "To" trade
    for "AB", then they wish to exchange A tokens for B tokens. A "From" order
    would read the other way, A tokens from B tokens.
- Price ratio
  - An expression of the relative prices of two tokens. It consists of the
    price of the primary token and the price of the secondary token. For
    simplicity's sake, the primary token's price is always 1, which forces the
    secondary to be the common denominator. For example, if token A was worth
    2 and token B was worth 6, the price ratio would be 1:3 or just 3. Price
    ratios are represented as fixed-point numbers. The fixed-point scaler is
    defined in
    [exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
    (see the sketch after this list).
- Trade request
  - A Solana transaction executed by the exchange requesting the trade of one
    type of token for another. Trade requests are made up of the token pair,
    the direction of the trade, the quantity of the primary token, the price ratio,
    and the two token accounts to be credited/deducted. An example trade
    request looks like "T AB 5 2", which reads "Exchange 5 A tokens to B tokens
    at a price ratio of 1:2". A fulfilled trade would result in 5 A tokens
    deducted and 10 B tokens credited to the trade initiator's token accounts.
    Successful trade requests result in a trade order.
- Trade order
  - The result of a successful trade request. Trade orders are stored in
    accounts owned by the submitter of the trade request. They can only be
    canceled by their owner but can be used by anyone in a trade swap. They
    contain the same information as the trade request.
- Price spread
  - The difference between the two matching trade orders. The spread is the
    profit of the broker initiating the swap request.
- Swap requirements
  - Policies that result in a successful trade swap.
- Swap request
  - A request to exchange tokens between two trade orders.
- Trade swap
  - A successful trade. A swap consists of two matching trade orders that meet
    swap requirements. A trade swap may not wholly satisfy one or both of the
    trade orders, in which case the trade orders are adjusted appropriately. As
    long as the swap requirements are met, there will be an exchange of tokens
    between accounts. Any price spread is deposited into the broker's profit
    account. All trade swaps are recorded in a new account for posterity.
- Investor
  - Individual investors who hold a number of tokens and wish to trade them on
    the exchange. Investors operate as Solana thin clients who own a set of
    accounts containing tokens and/or trade requests. Investors post
    transactions to the exchange in order to request tokens and post or cancel
    trade requests.
- Broker
  - An agent who facilitates trading between investors. Brokers operate as
    Solana thin clients who monitor all the trade orders looking for a trade
    match. Once found, the broker issues a swap request to the exchange.
    Brokers are the engine of the exchange and are rewarded for their efforts by
    accumulating the price spreads of the swaps they initiate. Brokers also
    provide current bid/ask price and OHLCV (Open, High, Low, Close, Volume)
    information on demand via a public network port.
- Transaction fees
  - Solana transaction fees are paid for by the transaction submitters, who are
    the Investors and Brokers.
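The fixed-point arithmetic behind price ratios is simple enough to sketch. The
following is a minimal illustration, not the program's actual code; the
`SCALER` value here is made up for the example (the real constant is defined in
`exchange_state.rs`, linked above):

```rust
/// Illustrative fixed-point scaler; the real constant is defined in
/// programs/exchange_api/src/exchange_state.rs (see the link above).
const SCALER: u64 = 1_000;

/// Secondary tokens owed for `tokens` primary tokens at scaled `price`,
/// where the primary token's price is fixed at SCALER (i.e. 1.0).
fn secondary_tokens(tokens: u64, price: u64) -> u64 {
    tokens * price / SCALER
}

fn main() {
    // "T AB 5 2": trade 5 A tokens at a 1:2 price ratio, so price = 2 * SCALER.
    let price = 2 * SCALER;
    assert_eq!(secondary_tokens(5, price), 10); // 5 A tokens -> 10 B tokens
}
```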
## Exchange startup

The exchange is up and running when it reaches a state where it can take
investors' trades and brokers' swap requests. To achieve this state, the
following must occur in order:

- Start the Solana blockchain
- Start the broker thin-client
- The broker subscribes to change notifications for all the accounts owned by
  the exchange program id. The subscription is managed via Solana's JSON RPC
  interface.
- The broker starts responding to queries for bid/ask price and OHLCV

The broker responding successfully to price and OHLCV requests is the signal to
the investors that trades submitted after that point will be analyzed. <!--This
is not ideal, and instead investors should be able to submit trades at any time,
and the broker could come and go without missing a trade. One way to achieve
this is for the broker to read the current state of all accounts looking for all
open trade orders.-->

Investors will initially query the exchange to discover their current balance
for each type of token. If the investor does not already have an account for
each type of token, they will submit account requests. Brokers will likewise
request accounts to hold the tokens they earn by initiating trade swaps.

```rust
/// Supported token types
pub enum Token {
    A,
    B,
    C,
    D,
}

/// Supported token pairs
pub enum TokenPair {
    AB,
    AC,
    AD,
    BC,
    BD,
    CD,
}

pub enum ExchangeInstruction {
    /// New token account
    /// key 0 - Signer
    /// key 1 - New token account
    AccountRequest,
}

/// Token accounts are populated with this structure
pub struct TokenAccountInfo {
    /// Investor who owns this account
    pub owner: Pubkey,
    /// Current number of tokens this account holds
    pub tokens: Tokens,
}
```

For this demo, investors or brokers can request more tokens from the exchange at
any time by submitting token requests. Outside of a demo, an exchange of this type
would instead provide a way to exchange a third-party asset for tokens.

To request tokens, investors submit transfer requests:

```rust
pub enum ExchangeInstruction {
    /// Transfer tokens between two accounts
    /// key 0 - Account to transfer tokens to
    /// key 1 - Account to transfer tokens from. This can be the exchange program itself;
    ///         the exchange has a limitless number of tokens it can transfer.
    TransferRequest(Token, u64),
}
```

## Trade requests

When an investor decides to exchange a token of one type for another, they
submit a transaction to the Solana blockchain containing a trade request, which,
if successful, is turned into a trade order. Trade orders do not expire but are
cancellable. <!-- Trade orders should have a timestamp to enable trade
expiration --> When a trade order is created, tokens are deducted from a token
account and the trade order acts as an escrow. The tokens are held until the
trade order is fulfilled or canceled. If the direction is `To`, then the number
of `tokens` is deducted from the primary account; if `From`, then `tokens`
multiplied by `price` are deducted from the secondary account. Trade orders are
no longer valid when the number of `tokens` goes to zero, at which point they
can no longer be used. <!-- Could support refilling trade orders, so trade order
accounts are refilled rather than accumulating -->

```rust
/// Direction of the exchange between two tokens in a pair
pub enum Direction {
    /// Trade first token type (primary) in the pair 'To' the second
    To,
    /// Trade first token type in the pair 'From' the second (secondary)
    From,
}

pub struct TradeRequestInfo {
    /// Direction of trade
    pub direction: Direction,

    /// Token pair to trade
    pub pair: TokenPair,

    /// Number of tokens to exchange; refers to the primary or the secondary depending on the direction
    pub tokens: u64,

    /// The price ratio of the primary price over the secondary price. The primary price is fixed
    /// and equal to the constant `SCALER`.
    pub price: u64,

    /// Token account to deposit tokens on successful swap
    pub dst_account: Pubkey,
}

pub enum ExchangeInstruction {
    /// Trade request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - Token account associated with this trade
    TradeRequest(TradeRequestInfo),
}

/// Trade accounts are populated with this structure
pub struct TradeOrderInfo {
    /// Owner of the trade order
    pub owner: Pubkey,
    /// Direction of the exchange
    pub direction: Direction,
    /// Token pair indicating two tokens to exchange, first is primary
    pub pair: TokenPair,
    /// Number of tokens to exchange; primary or secondary depending on direction
    pub tokens: u64,
    /// Scaled price of the secondary token given the primary is equal to the scale value
    /// If scale is 1 and price is 2 then ratio is 1:2 or 1 primary token for 2 secondary tokens
    pub price: u64,
    /// Account the tokens were sourced from. The trade account holds the tokens in escrow
    /// until they are used in one or more swaps or the trade is canceled.
    pub src_account: Pubkey,
    /// Account the tokens will be deposited into on a successful trade
    pub dst_account: Pubkey,
}
```

## Trade cancellations

An investor may cancel a trade at any time, but only trades they own. If the
cancellation is successful, any tokens held in escrow are returned to the
account from which they came.

```rust
pub enum ExchangeInstruction {
    /// Trade cancellation
    /// key 0 - Signer
    /// key 1 - Trade order to cancel
    TradeCancellation,
}
```
## Trade swaps

The broker monitors the accounts assigned to the exchange program and
builds a trade-order table. The trade-order table is used to identify
matching trade orders which could be fulfilled. When a match is found, the
broker issues a swap request. Swap requests may not satisfy the entirety
of either order, but the exchange will greedily fulfill as much as it can. Any leftover tokens
in either account will keep the trade order valid for further swap requests in
the future.

Matching trade orders are defined by the following swap requirements:

- Opposite polarity (one `To` and one `From`)
- Operate on the same token pair
- The price ratio of the `From` order is greater than or equal to that of the `To` order
- There are sufficient tokens to perform the trade

Orders can be written in the following format:

`investor direction pair quantity price-ratio`

For example:

- `1 T AB 2 1`
  - Investor 1 wishes to exchange 2 A tokens to B tokens at a ratio of 1 A to 1 B
- `2 F AC 6 1.2`
  - Investor 2 wishes to exchange A tokens from 6 C tokens at a ratio of 1 A
    from 1.2 C

An order table could look something like the following. Notice how the columns
are sorted low to high and high to low, respectively. Prices are exaggerated and
whole for clarity.

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 2 4  | 2 F AB 2 8 |
| 2 | 1 T AB 1 4  | 2 F AB 2 8 |
| 3 | 1 T AB 6 6  | 2 F AB 2 7 |
| 4 | 1 T AB 2 8  | 2 F AB 3 6 |
| 5 | 1 T AB 2 10 | 2 F AB 1 5 |

As part of a successful swap request, the exchange will credit tokens to the
broker's account equal to the difference in the price ratios of the two orders.
These tokens are considered the broker's profit for initiating the trade.

The broker would initiate the following swap on the order table above:

- Row 1, To: Investor 1 trades 2 A tokens to 8 B tokens
- Row 1, From: Investor 2 trades 2 A tokens from 8 B tokens
- Broker takes 8 B tokens as profit

Both row 1 trades are fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 1 4  | 2 F AB 2 8 |
| 2 | 1 T AB 6 6  | 2 F AB 2 7 |
| 3 | 1 T AB 2 8  | 2 F AB 3 6 |
| 4 | 1 T AB 2 10 | 2 F AB 1 5 |

The broker would initiate the following swap:

- Row 1, To: Investor 1 trades 1 A token to 4 B tokens
- Row 1, From: Investor 2 trades 1 A token from 4 B tokens
- Broker takes 4 B tokens as profit

Row 1 From is not fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 6 6  | 2 F AB 1 8 |
| 2 | 1 T AB 2 8  | 2 F AB 2 7 |
| 3 | 1 T AB 2 10 | 2 F AB 3 6 |
| 4 |             | 2 F AB 1 5 |

The broker would initiate the following swap:

- Row 1, To: Investor 1 trades 1 A token to 6 B tokens
- Row 1, From: Investor 2 trades 1 A token from 6 B tokens
- Broker takes 2 B tokens as profit

Row 1 From is now fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 5 6  | 2 F AB 2 7 |
| 2 | 1 T AB 2 8  | 2 F AB 3 5 |
| 3 | 1 T AB 2 10 | 2 F AB 1 5 |

The broker would initiate the following last swap:

- Row 1, To: Investor 1 trades 2 A tokens to 12 B tokens
- Row 1, From: Investor 2 trades 2 A tokens from 12 B tokens
- Broker takes 2 B tokens as profit

The table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 3 6  | 2 F AB 3 5 |
| 2 | 1 T AB 2 8  | 2 F AB 1 5 |
| 3 | 1 T AB 2 10 |            |

At this point the lowest To price is larger than the largest From price, so
no more swaps would be initiated until new orders come in.
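The matching rule and profit calculation used in the walkthrough above can be
sketched directly. This is an illustration rather than the exchange program's
implementation; with `SCALER = 1` it reproduces the whole-number tables:

```rust
/// Illustrative scaler; the walkthrough uses whole-number prices, i.e. SCALER = 1.
const SCALER: u64 = 1;

struct Order {
    tokens: u64, // primary tokens remaining in the order
    price: u64,  // scaled price ratio
}

/// Returns (primary tokens swapped, broker profit in secondary tokens),
/// or None when the swap requirements are not met.
fn try_swap(to: &Order, from: &Order) -> Option<(u64, u64)> {
    // Requirement: the From order's price ratio >= the To order's.
    if from.price < to.price {
        return None;
    }
    // Greedy partial fill: swap as many primary tokens as both orders allow.
    let tokens = to.tokens.min(from.tokens);
    if tokens == 0 {
        return None; // requirement: sufficient tokens to perform the trade
    }
    let profit = tokens * (from.price - to.price) / SCALER;
    Some((tokens, profit))
}

fn main() {
    // Row 1 of the first table: "1 T AB 2 4" matched against "2 F AB 2 8".
    let to = Order { tokens: 2, price: 4 };
    let from = Order { tokens: 2, price: 8 };
    let (tokens, profit) = try_swap(&to, &from).unwrap();
    assert_eq!((tokens, profit), (2, 8)); // broker takes 8 B tokens as profit
    println!("swapped {} A tokens, broker profit {} B tokens", tokens, profit);
}
```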
```rust
|
||||||
|
pub enum ExchangeInstruction {
|
||||||
|
/// Trade swap request
|
||||||
|
/// key 0 - Signer
|
||||||
|
/// key 1 - Account in which to record the swap
|
||||||
|
/// key 2 - 'To' trade order
|
||||||
|
/// key 3 - `From` trade order
|
||||||
|
/// key 4 - Token account associated with the To Trade
|
||||||
|
/// key 5 - Token account associated with From trade
|
||||||
|
/// key 6 - Token account in which to deposit the brokers profit from the swap.
|
||||||
|
SwapRequest,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Swap accounts are populated with this structure
|
||||||
|
pub struct TradeSwapInfo {
|
||||||
|
/// Pair swapped
|
||||||
|
pub pair: TokenPair,
|
||||||
|
/// `To` trade order
|
||||||
|
pub to_trade_order: Pubkey,
|
||||||
|
/// `From` trade order
|
||||||
|
pub from_trade_order: Pubkey,
|
||||||
|
/// Number of primary tokens exchanged
|
||||||
|
pub primary_tokens: u64,
|
||||||
|
/// Price the primary tokens were exchanged for
|
||||||
|
pub primary_price: u64,
|
||||||
|
/// Number of secondary tokens exchanged
|
||||||
|
pub secondary_tokens: u64,
|
||||||
|
/// Price the secondary tokens were exchanged for
|
||||||
|
pub secondary_price: u64,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Exchange program operations
|
||||||
|
|
||||||
|
Putting all the commands together from above, the following operations will be
|
||||||
|
supported by the on-chain exchange program:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub enum ExchangeInstruction {
|
||||||
|
/// New token account
|
||||||
|
/// key 0 - Signer
|
||||||
|
/// key 1 - New token account
|
||||||
|
AccountRequest,
|
||||||
|
|
||||||
|
/// Transfer tokens between two accounts
|
||||||
|
/// key 0 - Account to transfer tokens to
|
||||||
|
/// key 1 - Account to transfer tokens from. This can be the exchange program itself,
|
||||||
|
/// the exchange has a limitless number of tokens it can transfer.
|
||||||
|
TransferRequest(Token, u64),
|
||||||
|
|
||||||
|
/// Trade request
|
||||||
|
/// key 0 - Signer
|
||||||
|
/// key 1 - Account in which to record the swap
|
||||||
|
/// key 2 - Token account associated with this trade
|
||||||
|
TradeRequest(TradeRequestInfo),
|
||||||
|
|
||||||
|
/// Trade cancellation
|
||||||
|
/// key 0 - Signer
|
||||||
|
/// key 1 -Trade order to cancel
|
||||||
|
TradeCancellation,
|
||||||
|
|
||||||
|
/// Trade swap request
|
||||||
|
/// key 0 - Signer
|
||||||
|
/// key 1 - Account in which to record the swap
|
||||||
|
/// key 2 - 'To' trade order
|
||||||
|
/// key 3 - `From` trade order
|
||||||
|
/// key 4 - Token account associated with the To Trade
|
||||||
|
/// key 5 - Token account associated with From trade
|
||||||
|
/// key 6 - Token account in which to deposit the brokers profit from the swap.
|
||||||
|
SwapRequest,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quotes and OHLCV
|
||||||
|
|
||||||
|
The broker will provide current bid/ask price quotes based on trade actively and
|
||||||
|
also provide OHLCV based on some time window. The details of how the bid/ask
|
||||||
|
price quotes are calculated are yet to be decided.
|
||||||
|
|
||||||
|
## Investor strategies
|
||||||
|
|
||||||
|
To make a compelling demo, the investors needs to provide interesting trade
|
||||||
|
behavior. Something as simple as a randomly twiddled baseline would be a
|
||||||
|
minimum starting point.
|
||||||
|
|
||||||
|
## Running the exchange
|
||||||
|
|
||||||
|
The exchange bench posts trades and swaps matches as fast as it can.
|
||||||
|
|
||||||
|
You might want to bump the duration up
|
||||||
|
to 60 seconds and the batch size to 1000 for better numbers. You can modify those
|
||||||
|
in client_demo/src/demo.rs::test_exchange_local_cluster.
|
||||||
|
|
||||||
|
The following command runs the bench:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ RUST_LOG=solana_bench_exchange=info cargo test --release -- --nocapture test_exchange_local_cluster
|
||||||
|
```
|
||||||
|
|
||||||
|
To also see the cluster messages:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
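The benchmark also builds as a standalone binary. The following invocation is
illustrative (values are examples, not recommendations); the flags map to the
options defined in `src/cli.rs` below, and it can be run from the
`bench-exchange` directory:

```bash
$ cargo run --release -- \
    --network 127.0.0.1:8001 \
    --drone 127.0.0.1:9900 \
    --duration 60 \
    --batch-size 1000 \
    --chunk-size 500 \
    --account-groups 10
```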
1092 bench-exchange/src/bench.rs Normal file
File diff suppressed because it is too large
188 bench-exchange/src/cli.rs Normal file
@@ -0,0 +1,188 @@
use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, ArgMatches};
use solana::gen_keys::GenKeys;
use solana_drone::drone::DRONE_PORT;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;

pub struct Config {
    pub network_addr: SocketAddr,
    pub drone_addr: SocketAddr,
    pub identity: Keypair,
    pub threads: usize,
    pub num_nodes: usize,
    pub duration: Duration,
    pub transfer_delay: u64,
    pub fund_amount: u64,
    pub batch_size: usize,
    pub chunk_size: usize,
    pub account_groups: usize,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            network_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
            drone_addr: SocketAddr::from(([127, 0, 0, 1], DRONE_PORT)),
            identity: Keypair::new(),
            num_nodes: 1,
            threads: 4,
            duration: Duration::new(u64::max_value(), 0),
            transfer_delay: 0,
            fund_amount: 100_000,
            batch_size: 100,
            chunk_size: 100,
            account_groups: 100,
        }
    }
}

pub fn build_args<'a, 'b>() -> App<'a, 'b> {
    App::new(crate_name!())
        .about(crate_description!())
        .version(crate_version!())
        .arg(
            Arg::with_name("network")
                .short("n")
                .long("network")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(false)
                .default_value("127.0.0.1:8001")
                .help("Network's gossip entry point; defaults to 127.0.0.1:8001"),
        )
        .arg(
            Arg::with_name("drone")
                .short("d")
                .long("drone")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(false)
                .default_value("127.0.0.1:9900")
                .help("Location of the drone; defaults to 127.0.0.1:9900"),
        )
        .arg(
            Arg::with_name("identity")
                .short("i")
                .long("identity")
                .value_name("PATH")
                .takes_value(true)
                .help("File containing a client identity (keypair)"),
        )
        .arg(
            Arg::with_name("threads")
                .long("threads")
                .value_name("<threads>")
                .takes_value(true)
                .required(false)
                .default_value("1")
                .help("Number of threads submitting transactions"),
        )
        .arg(
            Arg::with_name("num-nodes")
                .long("num-nodes")
                .value_name("NUM")
                .takes_value(true)
                .required(false)
                .default_value("1")
                .help("Wait for NUM nodes to converge"),
        )
        .arg(
            Arg::with_name("duration")
                .long("duration")
                .value_name("SECS")
                .takes_value(true)
                .default_value("60")
                .help("Seconds to run benchmark, then exit; default is forever"),
        )
        .arg(
            Arg::with_name("transfer-delay")
                .long("transfer-delay")
                .value_name("<delay>")
                .takes_value(true)
                .required(false)
                .default_value("0")
                .help("Delay between each chunk"),
        )
        .arg(
            Arg::with_name("fund-amount")
                .long("fund-amount")
                .value_name("<fund>")
                .takes_value(true)
                .required(false)
                .default_value("100000")
                .help("Number of lamports to fund to each signer"),
        )
        .arg(
            Arg::with_name("batch-size")
                .long("batch-size")
                .value_name("<batch>")
                .takes_value(true)
                .required(false)
                .default_value("1000")
                .help("Number of transactions before the signer rolls over"),
        )
        .arg(
            Arg::with_name("chunk-size")
                .long("chunk-size")
                .value_name("<chunk>")
                .takes_value(true)
                .required(false)
                .default_value("500")
                .help("Number of transactions to generate and send at a time"),
        )
        .arg(
            Arg::with_name("account-groups")
                .long("account-groups")
                .value_name("<groups>")
                .takes_value(true)
                .required(false)
                .default_value("10")
                .help("Number of account groups to cycle for each batch"),
        )
}

pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
    let mut args = Config::default();

    args.network_addr = solana_netutil::parse_host_port(matches.value_of("network").unwrap())
        .unwrap_or_else(|e| {
            eprintln!("failed to parse network address: {}", e);
            exit(1)
        });

    args.drone_addr = solana_netutil::parse_host_port(matches.value_of("drone").unwrap())
        .unwrap_or_else(|e| {
            eprintln!("failed to parse drone address: {}", e);
            exit(1)
        });

    if matches.is_present("identity") {
        args.identity = read_keypair(matches.value_of("identity").unwrap())
            .expect("can't read client identity");
    } else {
        args.identity = {
            let seed = [42_u8; 32];
            let mut rnd = GenKeys::new(seed);
            rnd.gen_keypair()
        };
    }
    args.threads = value_t!(matches.value_of("threads"), usize).expect("Failed to parse threads");
    args.num_nodes =
        value_t!(matches.value_of("num-nodes"), usize).expect("Failed to parse num-nodes");
    let duration = value_t!(matches.value_of("duration"), u64).expect("Failed to parse duration");
    args.duration = Duration::from_secs(duration);
    args.transfer_delay =
        value_t!(matches.value_of("transfer-delay"), u64).expect("Failed to parse transfer-delay");
    args.fund_amount =
        value_t!(matches.value_of("fund-amount"), u64).expect("Failed to parse fund-amount");
    args.batch_size =
        value_t!(matches.value_of("batch-size"), usize).expect("Failed to parse batch-size");
    args.chunk_size =
        value_t!(matches.value_of("chunk-size"), usize).expect("Failed to parse chunk-size");
    args.account_groups = value_t!(matches.value_of("account-groups"), usize)
        .expect("Failed to parse account-groups");

    args
}
67 bench-exchange/src/main.rs Normal file
@@ -0,0 +1,67 @@
pub mod bench;
mod cli;
pub mod order_book;

use crate::bench::{airdrop_lamports, do_bench_exchange, get_clients, Config};
use log::*;
use solana::gossip_service::discover_nodes;
use solana_sdk::signature::KeypairUtil;

fn main() {
    solana_logger::setup();
    solana_metrics::set_panic_hook("bench-exchange");

    let matches = cli::build_args().get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
        network_addr,
        drone_addr,
        identity,
        threads,
        num_nodes,
        duration,
        transfer_delay,
        fund_amount,
        batch_size,
        chunk_size,
        account_groups,
        ..
    } = cli_config;

    info!("Connecting to the cluster");
    let nodes = discover_nodes(&network_addr, num_nodes).unwrap_or_else(|_| {
        panic!("Failed to discover nodes");
    });

    let clients = get_clients(&nodes);

    info!("{} nodes found", clients.len());
    if clients.len() < num_nodes {
        panic!("Error: Insufficient nodes discovered");
    }

    info!("Funding keypair: {}", identity.pubkey());

    let accounts_in_groups = batch_size * account_groups;
    const NUM_SIGNERS: u64 = 2;
    airdrop_lamports(
        &clients[0],
        &drone_addr,
        &identity,
        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
    );

    let config = Config {
        identity,
        threads,
        duration,
        transfer_delay,
        fund_amount,
        batch_size,
        chunk_size,
        account_groups,
    };

    do_bench_exchange(clients, config);
}
138 bench-exchange/src/order_book.rs Normal file
@@ -0,0 +1,138 @@
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_api::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {
    pub pubkey: Pubkey,
    pub info: TradeOrderInfo,
}

impl Ord for ToOrder {
    fn cmp(&self, other: &Self) -> Ordering {
        other.info.price.cmp(&self.info.price)
    }
}
impl PartialOrd for ToOrder {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FromOrder {
    pub pubkey: Pubkey,
    pub info: TradeOrderInfo,
}

impl Ord for FromOrder {
    fn cmp(&self, other: &Self) -> Ordering {
        self.info.price.cmp(&other.info.price)
    }
}
impl PartialOrd for FromOrder {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

#[derive(Default)]
pub struct OrderBook {
    // TODO scale to x token types
    to_ab: BinaryHeap<ToOrder>,
    from_ab: BinaryHeap<FromOrder>,
}
impl fmt::Display for OrderBook {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(
            f,
            "+-Order Book--------------------------+-------------------------------------+"
        )?;
        for (i, it) in self
            .to_ab
            .iter()
            .zip_longest(self.from_ab.iter())
            .enumerate()
        {
            match it {
                Both(to, from) => writeln!(
                    f,
                    "| T AB {:8} for {:8}/{:8} | F AB {:8} for {:8}/{:8} |{}",
                    to.info.tokens,
                    SCALER,
                    to.info.price,
                    from.info.tokens,
                    SCALER,
                    from.info.price,
                    i
                )?,
                Left(to) => writeln!(
                    f,
                    "| T AB {:8} for {:8}/{:8} |                                     |{}",
                    to.info.tokens, SCALER, to.info.price, i
                )?,
                Right(from) => writeln!(
                    f,
                    "|                                     | F AB {:8} for {:8}/{:8} |{}",
                    from.info.tokens, SCALER, from.info.price, i
                )?,
            }
        }
        write!(
            f,
            "+-------------------------------------+-------------------------------------+"
        )?;
        Ok(())
    }
}

impl OrderBook {
    // TODO
    // pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
    //     Ok(())
    // }
    pub fn push(
        &mut self,
        pubkey: Pubkey,
        info: TradeOrderInfo,
    ) -> Result<(), Box<dyn error::Error>> {
        check_trade(info.direction, info.tokens, info.price)?;
        match info.direction {
            Direction::To => {
                self.to_ab.push(ToOrder { pubkey, info });
            }
            Direction::From => {
                self.from_ab.push(FromOrder { pubkey, info });
            }
        }
        Ok(())
    }
    pub fn pop(&mut self) -> Option<(ToOrder, FromOrder)> {
        if let Some(pair) = Self::pop_pair(&mut self.to_ab, &mut self.from_ab) {
            return Some(pair);
        }
        None
    }
    pub fn get_num_outstanding(&self) -> (usize, usize) {
        (self.to_ab.len(), self.from_ab.len())
    }

    fn pop_pair(
        to_ab: &mut BinaryHeap<ToOrder>,
        from_ab: &mut BinaryHeap<FromOrder>,
    ) -> Option<(ToOrder, FromOrder)> {
        let to = to_ab.peek()?;
        let from = from_ab.peek()?;
        if from.info.price < to.info.price {
            debug!("Trade not viable");
            return None;
        }
        let to = to_ab.pop()?;
        let from = from_ab.pop()?;
        Some((to, from))
    }
}
@@ -2,16 +2,16 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 clap = "2.33.0"
-solana = { path = "../core", version = "0.13.0" }
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-netutil = { path = "../netutil", version = "0.13.0" }
+solana = { path = "../core", version = "0.14.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }

 [features]
 cuda = ["solana/cuda"]
@@ -1,5 +1,5 @@
 use clap::{crate_description, crate_name, crate_version, App, Arg};
-use solana::packet::{Packet, SharedPackets, BLOB_SIZE, PACKET_DATA_SIZE};
+use solana::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
 use solana::result::Result;
 use solana::streamer::{receiver, PacketReceiver};
 use std::cmp::max;
@@ -14,19 +14,19 @@ use std::time::SystemTime;

 fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     let send = UdpSocket::bind("0.0.0.0:0").unwrap();
-    let msgs = SharedPackets::default();
-    let msgs_ = msgs.clone();
-    msgs.write().unwrap().packets.resize(10, Packet::default());
-    for w in &mut msgs.write().unwrap().packets {
+    let mut msgs = Packets::default();
+    msgs.packets.resize(10, Packet::default());
+    for w in &mut msgs.packets {
         w.meta.size = PACKET_DATA_SIZE;
         w.meta.set_addr(&addr);
     }
+    let msgs_ = msgs.clone();
     spawn(move || loop {
         if exit.load(Ordering::Relaxed) {
             return;
         }
         let mut num = 0;
-        for p in &msgs_.read().unwrap().packets {
+        for p in &msgs_.packets {
             let a = p.meta.addr();
             assert!(p.meta.size < BLOB_SIZE);
             send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -43,7 +43,7 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> Join
         }
         let timer = Duration::new(1, 0);
         if let Ok(msgs) = r.recv_timeout(timer) {
-            rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
+            rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
         }
     })
 }
@@ -83,7 +83,7 @@ fn main() -> Result<()> {

         let (s_reader, r_reader) = channel();
         read_channels.push(r_reader);
-        read_threads.push(receiver(Arc::new(read), &exit, s_reader, "bench-streamer"));
+        read_threads.push(receiver(Arc::new(read), &exit, s_reader));
     }

     let t_producer1 = producer(&addr, exit.clone());
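The change above drops the shared, lock-wrapped packet buffer: the batch is fully initialized while the producer still owns it, and each thread then works from its own clone. A self-contained sketch of that pattern; the `Packet`/`Packets` types here are simplified stand-ins, not the ones from `solana::packet`:

```rust
use std::thread;

#[derive(Clone, Default)]
struct Packet {
    size: usize,
}

#[derive(Clone, Default)]
struct Packets {
    packets: Vec<Packet>,
}

fn main() {
    // Build the batch up front while we still hold exclusive ownership...
    let mut msgs = Packets::default();
    msgs.packets.resize(10, Packet::default());
    for p in &mut msgs.packets {
        p.size = 512;
    }
    // ...then clone it into the sender thread; no RwLock is needed because
    // the data is never mutated after this point.
    let msgs_ = msgs.clone();
    thread::spawn(move || {
        for p in &msgs_.packets {
            assert!(p.size > 0);
        }
    })
    .join()
    .unwrap();
}
```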
bench-tps/.gitignore (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
+/target/
+/config/
+/config-local/
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,13 +11,14 @@ homepage = "https://solana.com/"
 clap = "2.33.0"
 rayon = "1.0.3"
 serde_json = "1.0.39"
-solana = { path = "../core", version = "0.13.0" }
-solana-client = { path = "../client", version = "0.13.0" }
-solana-drone = { path = "../drone", version = "0.13.0" }
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-metrics = { path = "../metrics", version = "0.13.0" }
-solana-netutil = { path = "../netutil", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
+solana = { path = "../core", version = "0.14.0" }
+solana-client = { path = "../client", version = "0.14.0" }
+solana-drone = { path = "../drone", version = "0.14.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-metrics = { path = "../metrics", version = "0.14.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }
+solana-runtime = { path = "../runtime", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }

 [features]
 cuda = ["solana/cuda"]
@@ -1,18 +1,10 @@
 use solana_metrics;

-use crate::cli::Config;
 use rayon::prelude::*;
-use solana::cluster_info::FULLNODE_PORT_RANGE;
-use solana::contact_info::ContactInfo;
 use solana::gen_keys::GenKeys;
-use solana::gossip_service::discover_nodes;
-use solana_client::thin_client::create_client;
-use solana_client::thin_client::ThinClient;
 use solana_drone::drone::request_airdrop_transaction;
 use solana_metrics::influxdb;
-use solana_sdk::client::{AsyncClient, SyncClient};
-use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
+use solana_sdk::client::Client;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_instruction;
 use solana_sdk::system_transaction;
@@ -38,77 +30,54 @@ pub struct NodeStats {
 }

 pub const MAX_SPENDS_PER_TX: usize = 4;
+pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 20;

 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;

-pub fn do_bench_tps(config: Config) {
+pub struct Config {
+    pub id: Keypair,
+    pub threads: usize,
+    pub thread_batch_sleep_ms: usize,
+    pub duration: Duration,
+    pub tx_count: usize,
+    pub sustained: bool,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            id: Keypair::new(),
+            threads: 4,
+            thread_batch_sleep_ms: 0,
+            duration: Duration::new(std::u64::MAX, 0),
+            tx_count: 500_000,
+            sustained: false,
+        }
+    }
+}
+
+pub fn do_bench_tps<T>(
+    clients: Vec<T>,
+    config: Config,
+    gen_keypairs: Vec<Keypair>,
+    keypair0_balance: u64,
+) where
+    T: 'static + Client + Send + Sync,
+{
     let Config {
-        network_addr: network,
-        drone_addr,
         id,
         threads,
         thread_batch_sleep_ms,
-        num_nodes,
         duration,
         tx_count,
         sustained,
     } = config;

-    let nodes = discover_nodes(&network, num_nodes).unwrap_or_else(|err| {
-        eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
-        exit(1);
-    });
-    if nodes.len() < num_nodes {
-        eprintln!(
-            "Error: Insufficient nodes discovered. Expecting {} or more",
-            num_nodes
-        );
-        exit(1);
-    }
-    let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not?
-
-    let client = create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
-    let mut barrier_client =
-        create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
-
-    let mut seed = [0u8; 32];
-    seed.copy_from_slice(&id.public_key_bytes()[..32]);
-    let mut rnd = GenKeys::new(seed);
-
-    println!("Creating {} keypairs...", tx_count * 2);
-    let mut total_keys = 0;
-    let mut target = tx_count * 2;
-    while target > 0 {
-        total_keys += target;
-        target /= MAX_SPENDS_PER_TX;
-    }
-    let gen_keypairs = rnd.gen_n_keypairs(total_keys as u64);
-    let barrier_source_keypair = Keypair::new();
-    let barrier_dest_id = Pubkey::new_rand();
-
-    println!("Get lamports...");
-    let num_lamports_per_account = 20;
-
-    // Sample the first keypair, see if it has lamports, if so then resume
-    // to avoid lamport loss
-    let keypair0_balance = client
-        .poll_get_balance(&gen_keypairs.last().unwrap().pubkey())
-        .unwrap_or(0);
-
-    if num_lamports_per_account > keypair0_balance {
-        let extra = num_lamports_per_account - keypair0_balance;
-        let total = extra * (gen_keypairs.len() as u64);
-        airdrop_lamports(&client, &drone_addr, &id, total);
-        println!("adding more lamports {}", extra);
-        fund_keys(&client, &id, &gen_keypairs, extra);
-    }
+    let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
+    let client = &clients[0];
     let start = gen_keypairs.len() - (tx_count * 2) as usize;
     let keypairs = &gen_keypairs[start..];
-    airdrop_lamports(&barrier_client, &drone_addr, &barrier_source_keypair, 1);

-    println!("Get last ID...");
-    let mut blockhash = client.get_recent_blockhash().unwrap();
-    println!("Got last ID {:?}", blockhash);

     let first_tx_count = client.get_transaction_count().expect("transaction count");
     println!("Initial transaction count {}", first_tx_count);
@@ -120,15 +89,16 @@ pub fn do_bench_tps(config: Config) {
     let maxes = Arc::new(RwLock::new(Vec::new()));
     let sample_period = 1; // in seconds
     println!("Sampling TPS every {} second...", sample_period);
-    let v_threads: Vec<_> = nodes
-        .into_iter()
-        .map(|v| {
+    let v_threads: Vec<_> = clients
+        .iter()
+        .map(|client| {
             let exit_signal = exit_signal.clone();
             let maxes = maxes.clone();
+            let client = client.clone();
             Builder::new()
                 .name("solana-client-sample".to_string())
                 .spawn(move || {
-                    sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
+                    sample_tx_count(&exit_signal, &maxes, first_tx_count, sample_period, &client);
                 })
                 .unwrap()
         })
@@ -143,19 +113,19 @@
         .map(|_| {
             let exit_signal = exit_signal.clone();
             let shared_txs = shared_txs.clone();
-            let cluster_entrypoint = cluster_entrypoint.clone();
             let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
             let total_tx_sent_count = total_tx_sent_count.clone();
+            let client = client.clone();
             Builder::new()
                 .name("solana-client-sender".to_string())
                 .spawn(move || {
                     do_tx_transfers(
                         &exit_signal,
                         &shared_txs,
-                        &cluster_entrypoint,
                         &shared_tx_active_thread_count,
                         &total_tx_sent_count,
                         thread_batch_sleep_ms,
+                        &client,
                     );
                 })
                 .unwrap()
@@ -167,7 +137,7 @@ pub fn do_bench_tps(config: Config) {
     let mut reclaim_lamports_back_to_source_account = false;
     let mut i = keypair0_balance;
     while start.elapsed() < duration {
-        let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
+        let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
         metrics_submit_lamport_balance(balance);

         // ping-pong between source and destination accounts for each loop iteration
@@ -180,7 +150,7 @@ pub fn do_bench_tps(config: Config) {
             &keypairs[len..],
             threads,
             reclaim_lamports_back_to_source_account,
-            &cluster_entrypoint,
+            &client,
         );
         // In sustained mode overlap the transfers with generation
         // this has higher average performance but lower peak performance
@@ -190,18 +160,9 @@ pub fn do_bench_tps(config: Config) {
                 sleep(Duration::from_millis(100));
             }
         }
-        // It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
-        // transactions sent by `generate_txs()` so instead send and confirm a single transaction
-        // to validate the network is still functional.
-        send_barrier_transaction(
-            &mut barrier_client,
-            &mut blockhash,
-            &barrier_source_keypair,
-            &barrier_dest_id,
-        );

         i += 1;
-        if should_switch_directions(num_lamports_per_account, i) {
+        if should_switch_directions(NUM_LAMPORTS_PER_ACCOUNT, i) {
             reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
         }
     }
@@ -224,7 +185,7 @@ pub fn do_bench_tps(config: Config) {
         }
     }

-    let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
+    let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(balance);

     compute_and_report_stats(
@@ -245,29 +206,29 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
     );
 }

-fn sample_tx_count(
+fn sample_tx_count<T: Client>(
     exit_signal: &Arc<AtomicBool>,
-    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
+    maxes: &Arc<RwLock<Vec<(String, NodeStats)>>>,
     first_tx_count: u64,
-    v: &ContactInfo,
     sample_period: u64,
+    client: &Arc<T>,
 ) {
-    let client = create_client(v.client_facing_addr(), FULLNODE_PORT_RANGE);
     let mut now = Instant::now();
     let mut initial_tx_count = client.get_transaction_count().expect("transaction count");
     let mut max_tps = 0.0;
     let mut total;

-    let log_prefix = format!("{:21}:", v.tpu.to_string());
+    let log_prefix = format!("{:21}:", client.transactions_addr());

     loop {
-        let tx_count = client.get_transaction_count().expect("transaction count");
-        assert!(
-            tx_count >= initial_tx_count,
-            "expected tx_count({}) >= initial_tx_count({})",
-            tx_count,
-            initial_tx_count
-        );
+        let mut tx_count = client.get_transaction_count().expect("transaction count");
+        if tx_count < initial_tx_count {
+            println!(
+                "expected tx_count({}) >= initial_tx_count({})",
+                tx_count, initial_tx_count
+            );
+            tx_count = initial_tx_count;
+        }
         let duration = now.elapsed();
         now = Instant::now();
         let sample = tx_count - initial_tx_count;
@@ -295,97 +256,23 @@ fn sample_tx_count(
             tps: max_tps,
             tx: total,
         };
-        maxes.write().unwrap().push((v.tpu, stats));
+        maxes
+            .write()
+            .unwrap()
+            .push((client.transactions_addr(), stats));
         break;
     }
 }

-/// Send loopback payment of 0 lamports and confirm the network processed it
-fn send_barrier_transaction(
-    barrier_client: &mut ThinClient,
-    blockhash: &mut Hash,
-    source_keypair: &Keypair,
-    dest_id: &Pubkey,
-) {
-    let transfer_start = Instant::now();
-
-    let mut poll_count = 0;
-    loop {
-        if poll_count > 0 && poll_count % 8 == 0 {
-            println!(
-                "polling for barrier transaction confirmation, attempt {}",
-                poll_count
-            );
-        }
-
-        *blockhash = barrier_client.get_recent_blockhash().unwrap();
-
-        let transaction =
-            system_transaction::create_user_account(&source_keypair, dest_id, 0, *blockhash, 0);
-        let signature = barrier_client
-            .async_send_transaction(transaction)
-            .expect("Unable to send barrier transaction");
-
-        let confirmatiom = barrier_client.poll_for_signature(&signature);
-        let duration_ms = duration_as_ms(&transfer_start.elapsed());
-        if confirmatiom.is_ok() {
-            println!("barrier transaction confirmed in {} ms", duration_ms);
-
-            solana_metrics::submit(
-                influxdb::Point::new("bench-tps")
-                    .add_tag(
-                        "op",
-                        influxdb::Value::String("send_barrier_transaction".to_string()),
-                    )
-                    .add_field("poll_count", influxdb::Value::Integer(poll_count))
-                    .add_field("duration", influxdb::Value::Integer(duration_ms as i64))
-                    .to_owned(),
-            );
-
-            // Sanity check that the client balance is still 1
-            let balance = barrier_client
-                .poll_balance_with_timeout(
-                    &source_keypair.pubkey(),
-                    &Duration::from_millis(100),
-                    &Duration::from_secs(10),
-                )
-                .expect("Failed to get balance");
-            if balance != 1 {
-                panic!("Expected an account balance of 1 (balance: {}", balance);
-            }
-            break;
-        }
-
-        // Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a dev
-        // machine, some batches of transactions can take upwards of 1 minute...
-        if duration_ms > 1000 * 60 * 3 {
-            println!("Error: Couldn't confirm barrier transaction!");
-            exit(1);
-        }
-
-        let new_blockhash = barrier_client.get_recent_blockhash().unwrap();
-        if new_blockhash == *blockhash {
-            if poll_count > 0 && poll_count % 8 == 0 {
-                println!("blockhash is not advancing, still at {:?}", *blockhash);
-            }
-        } else {
-            *blockhash = new_blockhash;
-        }
-
-        poll_count += 1;
-    }
-}
-
-fn generate_txs(
+fn generate_txs<T: Client>(
     shared_txs: &SharedTransactions,
     source: &[Keypair],
     dest: &[Keypair],
     threads: usize,
     reclaim: bool,
-    contact_info: &ContactInfo,
+    client: &Arc<T>,
 ) {
-    let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
     let blockhash = client.get_recent_blockhash().unwrap();
     let tx_count = source.len();
     println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@@ -437,15 +324,14 @@ fn generate_txs(
     }
 }

-fn do_tx_transfers(
+fn do_tx_transfers<T: Client>(
     exit_signal: &Arc<AtomicBool>,
     shared_txs: &SharedTransactions,
-    contact_info: &ContactInfo,
     shared_tx_thread_count: &Arc<AtomicIsize>,
     total_tx_sent_count: &Arc<AtomicUsize>,
     thread_batch_sleep_ms: usize,
+    client: &Arc<T>,
 ) {
-    let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
     loop {
         if thread_batch_sleep_ms > 0 {
             sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
@@ -460,7 +346,7 @@ fn do_tx_transfers(
             println!(
                 "Transferring 1 unit {} times... to {}",
                 txs0.len(),
-                contact_info.tpu
+                client.as_ref().transactions_addr(),
             );
             let tx_len = txs0.len();
             let transfer_start = Instant::now();
@@ -495,7 +381,7 @@ fn do_tx_transfers(
     }
 }

-fn verify_funding_transfer(client: &ThinClient, tx: &Transaction, amount: u64) -> bool {
+fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
     for a in &tx.message().account_keys[1..] {
         if client.get_balance(a).unwrap_or(0) >= amount {
             return true;
@@ -508,7 +394,7 @@ fn verify_funding_transfer(client: &ThinClient, tx: &Transaction, amount: u64) -
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-fn fund_keys(client: &ThinClient, source: &Keypair, dests: &[Keypair], lamports: u64) {
+pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Keypair], lamports: u64) {
     let total = lamports * dests.len() as u64;
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Keypair> = dests.iter().collect();
@@ -610,8 +496,13 @@ fn fund_keys(client: &ThinClient, source: &Keypair, dests: &[Keypair], lamports:
     }
 }

-fn airdrop_lamports(client: &ThinClient, drone_addr: &SocketAddr, id: &Keypair, tx_count: u64) {
-    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
+pub fn airdrop_lamports<T: Client>(
+    client: &T,
+    drone_addr: &SocketAddr,
+    id: &Keypair,
+    tx_count: u64,
+) {
+    let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     println!("starting balance {}", starting_balance);

@@ -628,7 +519,14 @@ fn airdrop_lamports(client: &ThinClient, drone_addr: &SocketAddr, id: &Keypair,
         match request_airdrop_transaction(&drone_addr, &id.pubkey(), airdrop_amount, blockhash) {
             Ok(transaction) => {
                 let signature = client.async_send_transaction(transaction).unwrap();
-                client.poll_for_signature(&signature).unwrap();
+                client
+                    .poll_for_signature_confirmation(&signature, 1)
+                    .unwrap_or_else(|_| {
+                        panic!(
+                            "Error requesting airdrop: to addr: {:?} amount: {}",
+                            drone_addr, airdrop_amount
+                        )
+                    })
             }
             Err(err) => {
                 panic!(
@@ -638,7 +536,7 @@ fn airdrop_lamports(client: &ThinClient, drone_addr: &SocketAddr, id: &Keypair,
             }
         };

-        let current_balance = client.poll_get_balance(&id.pubkey()).unwrap_or_else(|e| {
+        let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
             println!("airdrop error {}", e);
             starting_balance
         });
@@ -658,7 +556,7 @@ fn airdrop_lamports(client: &ThinClient, drone_addr: &SocketAddr, id: &Keypair,
 }

 fn compute_and_report_stats(
-    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
+    maxes: &Arc<RwLock<Vec<(String, NodeStats)>>>,
     sample_period: u64,
     tx_send_elapsed: &Duration,
     total_tx_send_count: usize,
@@ -679,10 +577,7 @@ fn compute_and_report_stats(

         println!(
             "{:20} | {:13.2} | {} {}",
-            (*sock).to_string(),
-            stats.tps,
-            stats.tx,
-            maybe_flag
+            sock, stats.tps, stats.tx, maybe_flag
         );

         if stats.tps == 0.0 {
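The recurring edit in this file is a single generalization: helpers that used to build their own `ThinClient` from a `ContactInfo` now take `&Arc<T>` where `T: Client`, so a cluster-backed client and an in-process bank client can be swapped freely. A minimal sketch of why that works; the trait and mock below are illustrative stand-ins, not the real `solana_sdk::client::Client`:

```rust
use std::sync::Arc;
use std::thread;

// Simplified stand-in for the Client trait used by the benchmark threads.
trait Client: Send + Sync {
    fn get_transaction_count(&self) -> u64;
    fn transactions_addr(&self) -> String;
}

struct MockClient;

impl Client for MockClient {
    fn get_transaction_count(&self) -> u64 {
        42
    }
    fn transactions_addr(&self) -> String {
        "127.0.0.1:8899".to_string()
    }
}

// Mirrors the shape of sample_tx_count: the thread only needs the trait,
// never a concrete network client.
fn sample<T: 'static + Client>(client: Arc<T>) -> thread::JoinHandle<u64> {
    thread::spawn(move || client.get_transaction_count())
}

fn main() {
    let client = Arc::new(MockClient);
    assert_eq!(sample(client.clone()).join().unwrap(), 42);
}
```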
@@ -728,12 +623,31 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
     i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
 }

+pub fn generate_keypairs(id: &Keypair, tx_count: usize) -> Vec<Keypair> {
+    let mut seed = [0u8; 32];
+    seed.copy_from_slice(&id.to_bytes()[..32]);
+    let mut rnd = GenKeys::new(seed);
+
+    let mut total_keys = 0;
+    let mut target = tx_count * 2;
+    while target > 0 {
+        total_keys += target;
+        target /= MAX_SPENDS_PER_TX;
+    }
+    rnd.gen_n_keypairs(total_keys as u64)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    use solana::cluster_info::FULLNODE_PORT_RANGE;
     use solana::fullnode::FullnodeConfig;
     use solana::local_cluster::{ClusterConfig, LocalCluster};
+    use solana_client::thin_client::create_client;
     use solana_drone::drone::run_local_drone;
+    use solana_runtime::bank::Bank;
+    use solana_runtime::bank_client::BankClient;
+    use solana_sdk::genesis_block::GenesisBlock;
     use std::sync::mpsc::channel;

     #[test]
@@ -770,13 +684,33 @@ mod tests {
         run_local_drone(drone_keypair, addr_sender, None);
         let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

-        let mut cfg = Config::default();
-        cfg.network_addr = cluster.entry_point_info.gossip;
-        cfg.drone_addr = drone_addr;
-        cfg.tx_count = 100;
-        cfg.duration = Duration::from_secs(5);
-        cfg.num_nodes = NUM_NODES;
-
-        do_bench_tps(cfg);
+        let mut config = Config::default();
+        config.tx_count = 100;
+        config.duration = Duration::from_secs(5);
+
+        let keypairs = generate_keypairs(&config.id, config.tx_count);
+        let client = create_client(
+            (cluster.entry_point_info.gossip, drone_addr),
+            FULLNODE_PORT_RANGE,
+        );
+
+        do_bench_tps(vec![client], config, keypairs, 0);
+    }
+
+    #[test]
+    fn test_bench_tps_bank_client() {
+        let (genesis_block, id) = GenesisBlock::new(10_000);
+        let bank = Bank::new(&genesis_block);
+        let clients = vec![BankClient::new(bank)];
+
+        let mut config = Config::default();
+        config.id = id;
+        config.tx_count = 10;
+        config.duration = Duration::from_secs(5);
+
+        let keypairs = generate_keypairs(&config.id, config.tx_count);
+        fund_keys(&clients[0], &config.id, &keypairs, 20);
+
+        do_bench_tps(clients, config, keypairs, 0);
     }
 }
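Two helpers in this file are easy to sanity-check numerically: `generate_keypairs` sums a geometric series (each funding round fans out into at most `MAX_SPENDS_PER_TX` destinations), and `should_switch_directions` flips the transfer direction on a fixed cadence. A standalone check of both, copying their logic verbatim:

```rust
const MAX_SPENDS_PER_TX: usize = 4;

// Mirrors the loop in generate_keypairs: funding tx_count * 2 leaf accounts
// through a tree with fan-out MAX_SPENDS_PER_TX needs
// target + target/4 + target/16 + ... keypairs in total.
fn keys_needed(tx_count: usize) -> usize {
    let mut total_keys = 0;
    let mut target = tx_count * 2;
    while target > 0 {
        total_keys += target;
        target /= MAX_SPENDS_PER_TX;
    }
    total_keys
}

// Copied from bench.rs: switch every (n/4)th iteration once 3n/4 have passed.
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
    i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}

fn main() {
    // tx_count = 500 => 1000 + 250 + 62 + 15 + 3 = 1330 keypairs.
    assert_eq!(keys_needed(500), 1330);
    // With NUM_LAMPORTS_PER_ACCOUNT = 20, the direction flips at i = 15, 20, 25, 30.
    let flips: Vec<u64> = (1..=30).filter(|&i| should_switch_directions(20, i)).collect();
    assert_eq!(flips, vec![15, 20, 25, 30]);
}
```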
@@ -1,15 +1,91 @@
 mod bench;
 mod cli;

-use crate::bench::do_bench_tps;
+use crate::bench::{
+    airdrop_lamports, do_bench_tps, fund_keys, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
+};
+use solana::cluster_info::FULLNODE_PORT_RANGE;
+use solana::contact_info::ContactInfo;
+use solana::gossip_service::discover_nodes;
+use solana_client::thin_client::create_client;
+use solana_sdk::client::SyncClient;
+use solana_sdk::signature::KeypairUtil;
+use std::process::exit;

 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-tps");

     let matches = cli::build_args().get_matches();
+    let cli_config = cli::extract_args(&matches);

-    let cfg = cli::extract_args(&matches);
+    let cli::Config {
+        network_addr,
+        drone_addr,
+        id,
+        threads,
+        num_nodes,
+        duration,
+        tx_count,
+        thread_batch_sleep_ms,
+        sustained,
+    } = cli_config;

-    do_bench_tps(cfg);
+    println!("Connecting to the cluster");
+    let nodes = discover_nodes(&network_addr, num_nodes).unwrap_or_else(|err| {
+        eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
+        exit(1);
+    });
+    if nodes.len() < num_nodes {
+        eprintln!(
+            "Error: Insufficient nodes discovered. Expecting {} or more",
+            num_nodes
+        );
+        exit(1);
+    }
+    let clients: Vec<_> = nodes
+        .iter()
+        .filter_map(|node| {
+            let cluster_entrypoint = node.clone();
+            let cluster_addrs = cluster_entrypoint.client_facing_addr();
+            if ContactInfo::is_valid_address(&cluster_addrs.0)
+                && ContactInfo::is_valid_address(&cluster_addrs.1)
+            {
+                let client = create_client(cluster_addrs, FULLNODE_PORT_RANGE);
+                Some(client)
+            } else {
+                None
+            }
+        })
+        .collect();
+
+    println!("Creating {} keypairs...", tx_count * 2);
+    let keypairs = generate_keypairs(&id, tx_count);
+
+    println!("Get lamports...");
+
+    // Sample the first keypair, see if it has lamports, if so then resume.
+    // This logic is to prevent lamport loss on repeated solana-bench-tps executions
+    let keypair0_balance = clients[0]
+        .get_balance(&keypairs.last().unwrap().pubkey())
+        .unwrap_or(0);
+
+    if NUM_LAMPORTS_PER_ACCOUNT > keypair0_balance {
+        let extra = NUM_LAMPORTS_PER_ACCOUNT - keypair0_balance;
+        let total = extra * (keypairs.len() as u64);
+        airdrop_lamports(&clients[0], &drone_addr, &id, total);
+        println!("adding more lamports {}", extra);
+        fund_keys(&clients[0], &id, &keypairs, extra);
+    }
+
+    let config = Config {
+        id,
+        threads,
+        thread_batch_sleep_ms,
+        duration,
+        tx_count,
+        sustained,
+    };
+
+    do_bench_tps(clients, config, keypairs, keypair0_balance);
 }
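The `filter_map` above discards gossip entries whose client-facing addresses are not usable before a client is built for each node. A self-contained sketch of that filtering step; `is_valid_address` here is a stand-in with assumed semantics (reject unspecified IPs and zero ports), not the real `ContactInfo::is_valid_address`:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Stand-in validity check: gossip can advertise placeholder addresses
// (0.0.0.0 or port 0) that must not be dialed.
fn is_valid_address(addr: &SocketAddr) -> bool {
    !addr.ip().is_unspecified() && addr.port() != 0
}

fn main() {
    let advertised = vec![
        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 8899),
        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
    ];
    // Same shape as main() above: keep only nodes whose client-facing
    // addresses are real before building a client for each.
    let usable: Vec<&SocketAddr> = advertised.iter().filter(|a| is_valid_address(a)).collect();
    assert_eq!(usable.len(), 1);
}
```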
@@ -20,6 +20,7 @@
 - [Ledger Replication](ledger-replication.md)
 - [Secure Vote Signing](vote-signing.md)
 - [Staking Delegation and Rewards](stake-delegation-and-rewards.md)
+- [Performance Metrics](performance-metrics.md)

 - [Anatomy of a Fullnode](fullnode.md)
   - [TPU](tpu.md)
@@ -10,7 +10,7 @@ client's account.
 A drone is a simple signing service. It listens for requests to sign
 *transaction data*. Once received, the drone validates the request however it
 sees fit. It may, for example, only accept transaction data with a
-`SystemInstruction::Move` instruction transferring only up to a certain amount
+`SystemInstruction::Transfer` instruction transferring only up to a certain amount
 of tokens. If the drone accepts the transaction, it returns an `Ok(Signature)`
 where `Signature` is a signature of the transaction data using the drone's
 private key. If it rejects the transaction data, it returns a `DroneError`
@@ -76,7 +76,7 @@ beyond a certain *age*.

 If the transaction data size is smaller than the size of the returned signature
 (or descriptive error), a single client can flood the network. Considering
-that a simple `Move` operation requires two public keys (each 32 bytes) and a
+that a simple `Transfer` operation requires two public keys (each 32 bytes) and a
 `fee` field, and that the returned signature is 64 bytes (and a byte to
 indicate `Ok`), consideration for this attack may not be required.
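To make the policy described above concrete, here is an illustrative sketch of such a gate. The enum is a simplified stand-in for the real `SystemInstruction`, and the cap value is arbitrary:

```rust
// Illustrative only: a drone-style check that agrees to sign a request
// solely when it is a Transfer at or below a configured cap.
const MAX_TRANSFER_LAMPORTS: u64 = 1_000;

enum SystemInstruction {
    Transfer { lamports: u64 },
    Other,
}

fn should_sign(ix: &SystemInstruction) -> bool {
    match ix {
        SystemInstruction::Transfer { lamports } => *lamports <= MAX_TRANSFER_LAMPORTS,
        SystemInstruction::Other => false,
    }
}

fn main() {
    assert!(should_sign(&SystemInstruction::Transfer { lamports: 123 }));
    assert!(!should_sign(&SystemInstruction::Transfer { lamports: 10_000 }));
    assert!(!should_sign(&SystemInstruction::Other));
}
```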
@@ -24,8 +24,10 @@ Methods
 * [confirmTransaction](#confirmtransaction)
 * [getAccountInfo](#getaccountinfo)
 * [getBalance](#getbalance)
+* [getClusterNodes](#getclusternodes)
 * [getRecentBlockhash](#getrecentblockhash)
 * [getSignatureStatus](#getsignaturestatus)
+* [getSlotLeader](#getslotleader)
 * [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
 * [getTransactionCount](#gettransactioncount)
 * [requestAirdrop](#requestairdrop)
@@ -114,6 +116,30 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "

 ---
+
+### getClusterNodes
+Returns information about all the nodes participating in the cluster
+
+##### Parameters:
+None
+
+##### Results:
+The result field will be an array of JSON objects, each with the following sub fields:
+* `id` - Node identifier, as base-58 encoded string
+* `gossip` - Gossip network address for the node
+* `tpu` - TPU network address for the node
+* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
+
+##### Example:
+```bash
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","id":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
+```
+
+---

 ### getAccountInfo
 Returns all information associated with the account of provided Pubkey

@@ -183,7 +209,27 @@
 {"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
 ```

----
+-----
+
+### getSlotLeader
+Returns the current slot leader
+
+##### Parameters:
+None
+
+##### Results:
+* `string` - Node Id as base-58 encoded string
+
+##### Example:
+```bash
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotLeader"}' http://localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
+```
+
+-----

 ### getNumBlocksSinceSignatureConfirmation
 Returns the current number of blocks since signature has been confirmed.
book/src/performance-metrics.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+# Performance Metrics
+
+Solana cluster performance is measured as the average number of transactions per
+second that the network can sustain (TPS), and as the time it takes for a
+transaction to be confirmed by a super majority of the cluster (Confirmation Time).
+
+Each cluster node maintains various counters that are incremented on certain events.
+These counters are periodically uploaded to a cloud-based database. Solana's metrics
+dashboard fetches these counters, computes the performance metrics, and displays
+them on the dashboard.
+
+## TPS
+
+The leader node's banking stage maintains a count of transactions that it has processed.
+The dashboard displays the count averaged over a 2 second period in the TPS time series
+graph. The dashboard also shows the per-second mean, maximum, and total TPS as a running
+counter.
+
+## Confirmation Time
+
+Each validator node maintains a list of active ledger forks that are visible to the node.
+A fork is considered to be frozen when the node has received and processed all entries
+corresponding to the fork. A fork is considered to be confirmed when it receives a
+cumulative super majority vote, and when one of its child forks is frozen.
+
+The node assigns a timestamp to every new fork, and computes the time it took to confirm
+the fork. This time is reflected as validator confirmation time in performance metrics.
+The performance dashboard displays the average of each validator node's confirmation time
+as a time series graph.
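The TPS figure described above reduces to sampling a monotonic transaction counter over a window; a minimal sketch of the arithmetic (the 2 second window matches the dashboard's averaging period, and the function name is illustrative):

```rust
use std::time::Duration;

// Difference between two samples of the transaction counter, divided by the
// elapsed sampling window.
fn tps(prev_count: u64, cur_count: u64, elapsed: Duration) -> f64 {
    (cur_count - prev_count) as f64 / elapsed.as_secs_f64()
}

fn main() {
    // 5_000 transactions over a 2 second window => 2_500 TPS.
    assert_eq!(tps(10_000, 15_000, Duration::from_secs(2)), 2_500.0);
}
```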
@@ -3,8 +3,8 @@
 A client *app* interacts with a Solana cluster by sending it *transactions*
 with one or more *instructions*. The Solana *runtime* passes those instructions
 to user-contributed *programs*. An instruction might, for example, tell a
-program to move *lamports* from one *account* to another or create an interactive
-contract that governs how lamports are moved. Instructions are executed
+program to transfer *lamports* from one *account* to another or create an interactive
+contract that governs how lamports are transferred. Instructions are executed
 atomically. If any instruction is invalid, any changes made within the
 transaction are discarded.

@@ -67,7 +67,7 @@ data array and assign it to a Program.

 * `Assign` - Allows the user to assign an existing account to a program.

-* `Move` - Moves lamports between accounts.
+* `Transfer` - Transfers lamports between accounts.

 ## Program State Security
@ -1,30 +1,51 @@
|
|||||||
## Testnet Participation
|
## Testnet Participation
|
||||||
This document describes how to participate in the beta testnet as a
|
This document describes how to participate in the testnet as a
|
||||||
validator node.
|
validator node.
|
||||||
|
|
||||||
Please note some of the information and instructions described here may change
|
Please note some of the information and instructions described here may change
|
||||||
in future releases.
|
in future releases.
|
||||||
|
|
||||||
### Beta Testnet Overview
|
### Beta Testnet Overview
|
||||||
The beta testnet features a validator running at beta.testnet.solana.com, which
|
The testnet features a validator running at testnet.solana.com, which
|
||||||
serves as the entrypoint to the cluster for your validator.
|
serves as the entrypoint to the cluster for your validator.
|
||||||
|
|
||||||
Additionally there is a blockexplorer available at http://beta.testnet.solana.com/.
|
Additionally there is a blockexplorer available at
|
||||||
|
[http://testnet.solana.com/](http://testnet.solana.com/).
|
||||||
|
|
||||||
The beta testnet is configured to reset the ledger every 24hours, or sooner
|
The testnet is configured to reset the ledger daily, or sooner
|
||||||
should an hourly automated sanity test fail.
|
should the hourly automated cluster sanity test fail.
|
||||||
|
|
||||||
|
There is a **#validator-support** Discord channel available to reach other
|
||||||
|
testnet participants, https://discord.gg/pquxPsq.
|
||||||
|
|
||||||
### Machine Requirements
|
### Machine Requirements
|
||||||
Since the beta testnet is not intended for stress testing of max transaction
|
Since the testnet is not intended for stress testing of max transaction
|
||||||
throughput, a higher-end machine with a GPU is not necessary to participate.
|
throughput, a higher-end machine with a GPU is not necessary to participate.
|
||||||
|
|
||||||
However ensure the machine used is not behind a residential NAT to avoid NAT
|
However ensure the machine used is not behind a residential NAT to avoid NAT
|
||||||
traversal issues. A cloud-hosted machine works best. Ensure that IP ports
|
traversal issues. A cloud-hosted machine works best. **Ensure that IP ports
|
||||||
8000 through 10000 are not blocked for Internet traffic.
|
8000 through 10000 are not blocked for Internet inbound and outbound traffic.**
|
||||||
|
|
||||||
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
|
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
|
||||||
MacOS or WSL users may build from source.
|
MacOS or WSL users may build from source.
|
||||||
|
|
||||||
|
#### Confirm The Testnet Is Reachable
|
||||||
|
Before attaching a validator node, sanity check that the cluster is accessible
|
||||||
|
to your machine by running some simple commands. If any of the commands fail,
|
||||||
|
please retry 5-10 minutes later to confirm the testnet is not just restarting
|
||||||
|
itself before debugging further.
|
||||||
|
|
||||||
|
Fetch the current transaction count over JSON RPC:
|
||||||
|
```bash
|
||||||
|
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
|
||||||
|
```
|
||||||
|
|
||||||
|
Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
|
||||||
|
|
||||||
|
View the [metrics dashboard](
|
||||||
|
https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
|
||||||
|
for more detail on cluster activity.
|
||||||
|
|
||||||
### Validator Setup
|
### Validator Setup
|
||||||
#### Obtaining The Software
|
#### Obtaining The Software
|
||||||
##### Bootstrap with `solana-install`
|
##### Bootstrap with `solana-install`
|
||||||
@ -32,16 +53,15 @@ MacOS or WSL users may build from source.
|
|||||||
The `solana-install` tool can be used to easily install and upgrade the cluster
|
The `solana-install` tool can be used to easily install and upgrade the cluster
|
||||||
software on Linux x86_64 systems.
|
software on Linux x86_64 systems.
|
||||||
|
|
||||||
Install the latest release with a single shell command:
|
|
||||||
```bash
|
```bash
|
||||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | \
|
$ export SOLANA_RELEASE=v0.14.0 # skip this line to install the latest release
|
||||||
sh -c - --url https://api.beta.testnet.solana.com
|
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.14.0/install/solana-install-init.sh | sh -s
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively build the `solana-install` program from source and run the
|
Alternatively build the `solana-install` program from source and run the
|
||||||
following command to obtain the same result:
|
following command to obtain the same result:
|
||||||
```bash
|
```bash
|
||||||
$ solana-install init --url https://api.beta.testnet.solana.com
|
$ solana-install init
|
||||||
```
|
```
|
||||||
|
|
||||||
After a successful install, `solana-install update` may be used to easily update the cluster
|
After a successful install, `solana-install update` may be used to easily update the cluster
|
||||||
@ -50,87 +70,105 @@ software to a newer version.
|
|||||||
##### Download Prebuilt Binaries
|
##### Download Prebuilt Binaries
|
||||||
Binaries are available for Linux x86_64 systems.
|
Binaries are available for Linux x86_64 systems.
|
||||||
|
|
||||||
Download the binaries by navigating to https://github.com/solana-labs/solana/releases/latest, download
|
Download the binaries by navigating to
|
||||||
**solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
|
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||||
|
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
|
||||||
|
archive:
|
||||||
```bash
|
```bash
|
||||||
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
||||||
$ cd solana-release/
|
$ cd solana-release/
|
||||||
$ export PATH=$PWD/bin:$PATH
|
$ export PATH=$PWD/bin:$PATH
|
||||||
```
|
```
|
||||||
##### Build From Source
|
##### Build From Source
|
||||||
If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to:
|
If you are unable to use the prebuilt binaries or prefer to build it yourself
|
||||||
> https://github.com/solana-labs/solana/releases/latest
|
from source, navigate to
|
||||||
|
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||||
Download the source code tarball (solana-*[release]*.tar.gz) from our latest release tag. Extract the code and build the binaries with:
|
and download the **Source Code** archive. Extract the code and build the
|
||||||
|
binaries with:
|
||||||
```bash
|
```bash
|
||||||
$ ./scripts/cargo-install-all.sh .
|
$ ./scripts/cargo-install-all.sh .
|
||||||
$ export PATH=$PWD/bin:$PATH
|
$ export PATH=$PWD/bin:$PATH
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Confirm The Testnet Is Reachable
|
|
||||||
Before attaching a validator node, sanity check that the cluster is accessible
|
|
||||||
to your machine by running some simple wallet commands. If any of these
|
|
||||||
commands fail, please retry 5-10 minutes later to confirm the testnet is not
|
|
||||||
just restarting itself before debugging further.
|
|
||||||
|
|
||||||
Receive an airdrop of lamports from the testnet drone:
|
|
||||||
```bash
|
|
||||||
$ solana-wallet -n beta.testnet.solana.com airdrop 123
|
|
||||||
$ solana-wallet -n beta.testnet.solana.com balance
|
|
||||||
```
|
|
||||||
|
|
||||||
Fetch the current testnet transaction count over JSON RPC:
|
|
||||||
```bash
|
|
||||||
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
|
|
||||||
```
|
|
||||||
|
|
||||||
Inspect the blockexplorer at http://beta.testnet.solana.com/ for activity.
|
|
||||||
|
|
||||||
Run the following command to join the gossip network and view all the other nodes in the cluster:
|
|
||||||
```bash
|
|
||||||
$ solana-gossip --network beta.testnet.solana.com:8001
|
|
||||||
```
|
|
||||||
|
|
||||||
### Starting The Validator
|
### Starting The Validator
|
||||||
The following command will start a new validator node.
|
Sanity check that you are able to interact with the cluster by receiving a small
|
||||||
|
airdrop of lamports from the testnet drone:
|
||||||
|
```bash
|
||||||
|
$ solana-wallet -n testnet.solana.com airdrop 123
|
||||||
|
$ solana-wallet -n testnet.solana.com balance
|
||||||
|
```
|
||||||
|
|
||||||
|
Also try running following command to join the gossip network and view all the other nodes in the cluster:
|
||||||
|
```bash
|
||||||
|
$ solana-gossip --network testnet.solana.com:8001 spy
|
||||||
|
# Press ^C to exit
|
||||||
|
```
|
||||||
|
|
||||||
|
Then the following command will start a new validator node.
|
||||||
|
|
||||||
If this is a `solana-install`-installation:
|
If this is a `solana-install`-installation:
|
||||||
```bash
|
```bash
|
||||||
$ fullnode-x.sh --public-address --poll-for-new-genesis-block beta.testnet.solana.com:8001
|
$ clear-fullnode-config.sh
|
||||||
|
$ fullnode.sh --public-address --poll-for-new-genesis-block testnet.solana.com
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively, the `solana-install run` command can be used to run the validator
|
Alternatively, the `solana-install run` command can be used to run the validator
|
||||||
node while periodically checking for and applying software updates:
|
node while periodically checking for and applying software updates:
|
||||||
```bash
|
```bash
|
||||||
$ solana-install run fullnode-x.sh --public-address --poll-for-new-genesis-block beta.testnet.solana.com:8001
|
$ clear-fullnode-config.sh
|
||||||
|
$ solana-install run fullnode.sh -- --public-address --poll-for-new-genesis-block testnet.solana.com
|
||||||
```
|
```
|
||||||
|
|
||||||
When not using `solana-install`:
|
If you built from source:
|
||||||
```bash
|
```bash
|
||||||
$ USE_INSTALL=1 ./multinode-demo/fullnode-x.sh --public-address --poll-for-new-genesis-block beta.testnet.solana.com:8001
|
$ USE_INSTALL=1 ./multinode-demo/clear-fullnode-config.sh
|
||||||
|
$ USE_INSTALL=1 ./multinode-demo/fullnode.sh --public-address --poll-for-new-genesis-block testnet.solana.com
|
||||||
```
|
```
|
||||||
|
|
||||||
Then from another console, confirm the IP address if your node is now visible in
|
|
||||||
the gossip network by running:
|
|
||||||
```bash
|
|
||||||
$ solana-gossip --network beta.testnet.solana.com:8001
|
|
||||||
```
|
|
||||||
|
|
||||||
Congratulations, you're now participating in the testnet cluster!
|
|
||||||
|
|
||||||
#### Controlling local network port allocation
|
#### Controlling local network port allocation
|
||||||
By default the validator will dynamically select available network ports in the
|
By default the validator will dynamically select available network ports in the
|
||||||
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
|
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
|
||||||
example, `fullnode-x.sh --dynamic-port-range 11000-11010 ...` will restrict the
|
example, `fullnode.sh --dynamic-port-range 11000-11010 ...` will restrict the
|
||||||
validator to ports 11000-11011.
|
validator to ports 11000-11011.
|
||||||
|
|
||||||

+### Validator Monitoring
+From another console, confirm the IP address of your validator is visible in the
+gossip network by running:
+```bash
+$ solana-gossip --network edge.testnet.solana.com:8001 spy
+```
+
+When `fullnode.sh` starts, it will output a fullnode configuration that looks
+similar to:
+```bash
+======================[ Fullnode configuration ]======================
+node id: 4ceWXsL3UJvn7NYZiRkw7NsryMpviaKBDYr8GK7J61Dm
+vote id: 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
+ledger: ...
+accounts: ...
+======================================================================
+```
+
+Provide the **vote id** pubkey to the `solana-wallet show-vote-account` command to view
+the recent voting activity from your validator:
+```bash
+$ solana-wallet -n testnet.solana.com show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
+```
+
+The vote id for the validator can also be found by running:
+```bash
+# If this is a `solana-install`-installation run:
+$ solana-keygen pubkey ~/.local/share/solana/install/active_release/config-local/fullnode-vote-id.json
+# Otherwise run:
+$ solana-keygen pubkey ./config-local/fullnode-vote-id.json
+```
+
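The two lookups can also be chained into one step; a minimal sketch, assuming a source build and the default `./config-local` path:

```bash
# Resolve the vote id from the keypair file, then query its voting activity
$ solana-wallet -n testnet.solana.com show-vote-account \
    "$(solana-keygen pubkey ./config-local/fullnode-vote-id.json)"
```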

### Sharing Metrics From Your Validator
If you'd like to share metrics, perform the following steps before starting the
validator node:
```bash
export u="username obtained from the Solana maintainers"
export p="password obtained from the Solana maintainers"
-export SOLANA_METRICS_CONFIG="db=testnet-beta,u=${u:?},p=${p:?}"
+export SOLANA_METRICS_CONFIG="db=testnet,u=${u:?},p=${p:?}"
source scripts/configure-metrics.sh
```
-Inspect for your contributions to our [metrics dashboard](https://metrics.solana.com:3000/d/U9-26Cqmk/testnet-monitor-cloud?refresh=60s&orgId=2&var-hostid=All).
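
A quick sanity check before launching (a sketch; the echoed value depends on the credentials you exported):

```bash
# The ${u:?} and ${p:?} expansions above abort the shell if either variable
# is unset, so a non-empty result here means both credentials were provided
$ echo "$SOLANA_METRICS_CONFIG"
db=testnet,u=<username>,p=<password>
```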

@@ -17,7 +17,7 @@ steps:
    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
-    timeout_in_minutes: 25
+    timeout_in_minutes: 30
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"

@@ -52,15 +52,6 @@ runTest() {

build

-runTest "Leader rotation off" \
-  "ci/localnet-sanity.sh -i 128 -b"
-
-runTest "Leader rotation off, restart" \
-  "ci/localnet-sanity.sh -i 128 -k 16 -b"
-
-runTest "Leader rotation off, incremental restart, extra node" \
-  "ci/localnet-sanity.sh -i 128 -k 16 -R -x -b"
-
runTest "Leader rotation on" \
  "ci/localnet-sanity.sh -i 128"


@@ -55,7 +55,7 @@ while getopts "ch?i:k:brxR" opt; do
    restartInterval=$OPTARG
    ;;
  b)
-    maybeNoLeaderRotation="--only-bootstrap-stake"
+    maybeNoLeaderRotation="--stake 0"
    ;;
  x)
    extraNodes=$((extraNodes + 1))

@@ -78,7 +78,6 @@ source scripts/configure-metrics.sh
nodes=(
  "multinode-demo/drone.sh"
  "multinode-demo/bootstrap-leader.sh \
-    $maybeNoLeaderRotation \
    --enable-rpc-exit \
    --init-complete-file init-complete-node1.log"
  "multinode-demo/fullnode.sh \

@@ -91,7 +90,7 @@ nodes=(
for i in $(seq 1 $extraNodes); do
  nodes+=(
    "multinode-demo/fullnode.sh \
-      -X dyn$i \
+      --label dyn$i \
      --init-complete-file init-complete-node$((2 + i)).log \
      $maybeNoLeaderRotation"
  )

@@ -307,8 +306,7 @@ while [[ $iteration -le $iterations ]]; do
    set -x
    client_id=/tmp/client-id.json-$$
    $solana_keygen -o $client_id || exit $?
-    $solana_gossip \
-      --num-nodes-exactly $numNodes || exit $?
+    $solana_gossip spy --num-nodes-exactly $numNodes || exit $?
    rm -rf $client_id
  ) || flag_error


@@ -19,9 +19,10 @@ CRATES=(
  metrics
  client
  drone
-  programs/{budget_api,config_api,storage_api,token_api,vote_api}
+  programs/{budget_api,config_api,stake_api,storage_api,token_api,vote_api,exchange_api}
+  programs/{vote_program,budget_program,bpf_loader,config_program,exchange_program,failure_program}
+  programs/{noop_program,stake_program,storage_program,token_program}
  runtime
-  programs/{budget,bpf_loader,config,vote,storage,token,vote}
  vote-signer
  core
  fullnode

@@ -32,7 +33,6 @@ CRATES=(
  install
)


# Only package/publish if this is a tagged release
[[ -n $TRIGGERED_BUILDKITE_TAG ]] || {
  echo TRIGGERED_BUILDKITE_TAG unset, skipped

@@ -55,7 +55,7 @@ for crate in "${CRATES[@]}"; do
    exit 1
  fi
  echo "-- $crate"
-  grep -q "^version = \"$expectedCrateVersion\"$" Cargo.toml || {
+  grep -q "^version = \"$expectedCrateVersion\"$" "$crate"/Cargo.toml || {
    echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
    exit 1
  }

@@ -77,16 +77,16 @@ exec multinode-demo/fullnode.sh "$@"
EOF
chmod +x solana-release/bin/fullnode.sh

-# Add a wrapper script for fullnode-x.sh
+# Add a wrapper script for clear-fullnode-config.sh
# TODO: Remove multinode/... from tarball
-cat > solana-release/bin/fullnode-x.sh <<'EOF'
+cat > solana-release/bin/clear-fullnode-config.sh <<'EOF'
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
-exec multinode-demo/fullnode-x.sh "$@"
+exec multinode-demo/clear-fullnode-config.sh "$@"
EOF
-chmod +x solana-release/bin/fullnode-x.sh
+chmod +x solana-release/bin/clear-fullnode-config.sh

tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
cp solana-release/bin/solana-install solana-install-$TARGET

@@ -111,10 +111,10 @@ for file in solana-release-$TARGET.tar.bz2 solana-install-$TARGET; do
      --env AWS_SECRET_ACCESS_KEY \
      --volume "$PWD:/solana" \
      eremite/aws-cli:2018.12.18 \
-      /usr/bin/s3cmd --acl-public put /solana/"$file" s3://solana-release/"$CHANNEL_OR_TAG"/"$file"
+      /usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"

    echo Published to:
-    $DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/"$file"
+    $DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
  )

if [[ -n $TAG ]]; then

@@ -41,7 +41,7 @@ test-stable-perf)
    ^sdk/ \
  || {
    annotate --style info \
-      "Skipped test-stable-perf as no relavant files were modified"
+      "Skipped test-stable-perf as no relevant files were modified"
    exit 0
  }


@@ -14,7 +14,7 @@ source ci/upload-ci-artifact.sh
[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300
[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100"
[[ -n $LEADER_CPU_MACHINE_TYPE ]] ||
-  LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
+  LEADER_CPU_MACHINE_TYPE="--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
[[ -n $TESTNET_ZONES ]] || TESTNET_ZONES="us-west1-b"

@@ -52,14 +52,14 @@ launchTestnet() {
  declare q_mean_tps='
    SELECT round(mean("sum_count")) AS "mean_tps" FROM (
      SELECT sum("count") AS "sum_count"
-      FROM "testnet-automation"."autogen"."counter-bank-process_transactions-txs"
+      FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
      WHERE time > now() - 300s GROUP BY time(1s)
    )'

  declare q_max_tps='
    SELECT round(max("sum_count")) AS "max_tps" FROM (
      SELECT sum("count") AS "sum_count"
-      FROM "testnet-automation"."autogen"."counter-bank-process_transactions-txs"
+      FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
      WHERE time > now() - 300s GROUP BY time(1s)
    )'


@@ -10,9 +10,12 @@ bootstrapFullNodeMachineType=
clientNodeCount=0
additionalFullNodeCount=10
publicNetwork=false
-skipSetup=false
+stopNetwork=false
+reuseLedger=false
+skipCreate=false
skipStart=false
externalNode=false
+failOnValidatorBootupFailure=true
tarChannelOrTag=edge
delete=false
enableGpu=false

@@ -53,6 +56,11 @@ Deploys a CD testnet
   -D - Delete the network
   -r - Reuse existing node/ledger configuration from a
        previous |start| (ie, don't run ./multinode-demo/setup.sh).
+  -x - External node. Default: false
+  -e - Skip create. Assume the nodes have already been created
+  -s - Skip start. Nodes will still be created or configured, but network software will not be started.
+  -S - Stop network software without tearing down nodes.
+  -f - Discard validator nodes that didn't bootup successfully

Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics

@@ -62,7 +70,7 @@ EOF

zone=()

-while getopts "h?p:Pn:c:t:gG:a:Dbd:rusxz:p:C:" opt; do
+while getopts "h?p:Pn:c:t:gG:a:Dbd:rusxz:p:C:Sfe" opt; do
  case $opt in
  h | \?)
    usage

@@ -115,7 +123,10 @@ while getopts "h?p:Pn:c:t:gG:a:Dbd:rusxz:p:C:" opt; do
    delete=true
    ;;
  r)
-    skipSetup=true
+    reuseLedger=true
+    ;;
+  e)
+    skipCreate=true
    ;;
  s)
    skipStart=true

@@ -123,9 +134,15 @@ while getopts "h?p:Pn:c:t:gG:a:Dbd:rusxz:p:C:" opt; do
  x)
    externalNode=true
    ;;
+  f)
+    failOnValidatorBootupFailure=false
+    ;;
  u)
    blockstreamer=true
    ;;
+  S)
+    stopNetwork=true
+    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;

@@ -162,7 +179,16 @@ for val in "${zone[@]}"; do
  zone_args+=("-z $val")
done

-if ! $skipSetup; then
+if $stopNetwork; then
+  skipCreate=true
+fi
+
+if $delete; then
+  skipCreate=false
+fi
+
+# Create the network
+if ! $skipCreate; then
  echo "--- $cloudProvider.sh delete"
  # shellcheck disable=SC2068
  time net/"$cloudProvider".sh delete ${zone_args[@]} -p "$netName" ${externalNode:+-x}

@@ -208,6 +234,10 @@ if ! $skipSetup; then
    create_args+=(-x)
  fi

+  if ! $failOnValidatorBootupFailure; then
+    create_args+=(-f)
+  fi
+
  time net/"$cloudProvider".sh create "${create_args[@]}"
else
  echo "--- $cloudProvider.sh config"

@@ -220,6 +250,14 @@ else
    config_args+=(-P)
  fi

+  if $externalNode; then
+    config_args+=(-x)
+  fi
+
+  if ! $failOnValidatorBootupFailure; then
+    config_args+=(-f)
+  fi
+
  time net/"$cloudProvider".sh config "${config_args[@]}"
fi
net/init-metrics.sh -e

@@ -227,7 +265,24 @@ net/init-metrics.sh -e
echo "+++ $cloudProvider.sh info"
net/"$cloudProvider".sh info

-echo --- net.sh start
+if $stopNetwork; then
+  echo --- net.sh stop
+  time net/net.sh stop
+  exit 0
+fi
+
+ok=true
+if ! $skipStart; then
+  (
+    if $skipCreate; then
+      # TODO: Enable rolling updates
+      #op=update
+      op=restart
+    else
+      op=start
+    fi
+    echo "--- net.sh $op"
+
maybeRejectExtraNodes=
if ! $publicNetwork; then
  maybeRejectExtraNodes="-o rejectExtraNodes"

@@ -241,20 +296,9 @@ if [[ -n $NO_LEDGER_VERIFY ]]; then
  maybeNoLedgerVerify="-o noLedgerVerify"
fi

-maybeSkipSetup=
-if $skipSetup; then
-  maybeSkipSetup="-r"
-fi
-
-ok=true
-if ! $skipStart; then
-  (
-    if $skipSetup; then
-      # TODO: Enable rolling updates
-      #op=update
-      op=restart
-    else
-      op=start
-    fi
+maybeReuseLedger=
+if $reuseLedger; then
+  maybeReuseLedger="-r"
+fi

maybeUpdateManifestKeypairFile=

@@ -267,7 +311,7 @@ if ! $skipStart; then
    # shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
    time net/net.sh $op -t "$tarChannelOrTag" \
      $maybeUpdateManifestKeypairFile \
-      $maybeSkipSetup \
+      $maybeReuseLedger \
      $maybeRejectExtraNodes \
      $maybeNoValidatorSanity \
      $maybeNoLedgerVerify

@@ -42,20 +42,32 @@ steps:
        value: "testnet-beta"
      - label: "testnet-beta-perf"
        value: "testnet-beta-perf"
+      - label: "testnet-demo"
+        value: "testnet-demo"
  - select: "Operation"
    key: "testnet-operation"
    default: "sanity-or-restart"
    options:
-      - label: "Sanity check. Restart network on failure"
-        value: "sanity-or-restart"
-      - label: "Start (or restart) the network"
+      - label: "Create testnet and then start software. If the testnet already exists it will be deleted and re-created"
+        value: "create-and-start"
+      - label: "Create testnet, but do not start software. If the testnet already exists it will be deleted and re-created"
+        value: "create"
+      - label: "Start network software on an existing testnet. If software is already running it will be restarted"
        value: "start"
-      - label: "Update the network software. Restart network on failure"
-        value: "update-or-restart"
-      - label: "Stop the network"
+      - label: "Stop network software without deleting testnet nodes"
        value: "stop"
+      - label: "Update the network software. Restart network software on failure"
+        value: "update-or-restart"
+      - label: "Sanity check. Restart network software on failure"
+        value: "sanity-or-restart"
      - label: "Sanity check only"
        value: "sanity"
+      - label: "Delete the testnet"
+        value: "delete"
+      - label: "Enable/unlock the testnet"
+        value: "enable"
+      - label: "Delete and then lock the testnet from further operation until it is re-enabled"
+        value: "disable"
  - command: "ci/$(basename "$0")"
    agents:
      - "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"

@@ -64,6 +76,93 @@ EOF
  exit 0
fi

+ci/channel-info.sh
+eval "$(ci/channel-info.sh)"
+
+EC2_ZONES=(us-west-1a sa-east-1a ap-northeast-2a eu-central-1a ca-central-1a)
+
+# GCE zones with _lots_ of quota
+GCE_ZONES=(
+  us-west1-a
+  us-central1-a
+  us-east1-b
+  europe-west4-a
+
+  us-west1-b
+  us-central1-b
+  us-east1-c
+  europe-west4-b
+
+  us-west1-c
+  us-east1-d
+  europe-west4-c
+)
+
+# GCE zones with enough quota for one CPU-only fullnode
+GCE_LOW_QUOTA_ZONES=(
+  asia-east2-a
+  asia-northeast1-b
+  asia-northeast2-b
+  asia-south1-c
+  asia-southeast1-b
+  australia-southeast1-b
+  europe-north1-a
+  europe-west2-b
+  europe-west3-c
+  europe-west6-a
+  northamerica-northeast1-a
+  southamerica-east1-b
+)
+
+case $TESTNET in
+testnet-edge|testnet-edge-perf)
+  CHANNEL_OR_TAG=edge
+  CHANNEL_BRANCH=$EDGE_CHANNEL
+  : "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
+  ;;
+testnet-beta|testnet-beta-perf)
+  CHANNEL_OR_TAG=beta
+  CHANNEL_BRANCH=$BETA_CHANNEL
+  : "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
+  ;;
+testnet)
+  CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
+  CHANNEL_BRANCH=$STABLE_CHANNEL
+  : "${EC2_NODE_COUNT:=10}"
+  : "${GCE_NODE_COUNT:=}"
+  : "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
+  ;;
+testnet-perf)
+  CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
+  CHANNEL_BRANCH=$STABLE_CHANNEL
+  ;;
+testnet-demo)
+  CHANNEL_OR_TAG=beta
+  CHANNEL_BRANCH=$BETA_CHANNEL
+  : "${GCE_NODE_COUNT:=150}"
+  : "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
+  : "${TESTNET_DB_HOST:=https://clocktower-f1d56615.influxcloud.net:8086}"
+  ;;
+*)
+  echo "Error: Invalid TESTNET=$TESTNET"
+  exit 1
+  ;;
+esac
+
+EC2_ZONE_ARGS=()
+for val in "${EC2_ZONES[@]}"; do
+  EC2_ZONE_ARGS+=("-z $val")
+done
+GCE_ZONE_ARGS=()
+for val in "${GCE_ZONES[@]}"; do
+  GCE_ZONE_ARGS+=("-z $val")
+done
+GCE_LOW_QUOTA_ZONE_ARGS=()
+for val in "${GCE_LOW_QUOTA_ZONES[@]}"; do
+  GCE_LOW_QUOTA_ZONE_ARGS+=("-z $val")
+done
+
if [[ -n $TESTNET_DB_HOST ]]; then
  SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
fi

@@ -72,30 +171,9 @@ export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh

-ci/channel-info.sh
-eval "$(ci/channel-info.sh)"
-
if [[ -n $TESTNET_TAG ]]; then
  CHANNEL_OR_TAG=$TESTNET_TAG
else
-  case $TESTNET in
-  testnet-edge|testnet-edge-perf)
-    CHANNEL_OR_TAG=edge
-    CHANNEL_BRANCH=$EDGE_CHANNEL
-    ;;
-  testnet-beta|testnet-beta-perf)
-    CHANNEL_OR_TAG=beta
-    CHANNEL_BRANCH=$BETA_CHANNEL
-    ;;
-  testnet|testnet-perf)
-    CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
-    CHANNEL_BRANCH=$STABLE_CHANNEL
-    ;;
-  *)
-    echo "Error: Invalid TESTNET=$TESTNET"
-    exit 1
-    ;;
-  esac
-
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (

@@ -112,6 +190,7 @@ steps:
      TESTNET_DB_HOST: "$TESTNET_DB_HOST"
      EC2_NODE_COUNT: "$EC2_NODE_COUNT"
      GCE_NODE_COUNT: "$GCE_NODE_COUNT"
+      GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
EOF
  ) | buildkite-agent pipeline upload
  exit 0

@@ -124,6 +203,7 @@ sanity() {
  testnet-edge)
    (
      set -x
+      NO_LEDGER_VERIFY=1 \
      ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
    )
    ;;

@@ -139,23 +219,8 @@ sanity() {
  testnet-beta)
    (
      set -x
-      EC2_ZONES=(us-west-1a sa-east-1a ap-northeast-2a eu-central-1a ca-central-1a)
-      ok=true
-      for zone in "${EC2_ZONES[@]}"; do
-        if ! $ok; then
-          break
-        fi
-        ci/testnet-sanity.sh beta-testnet-solana-com ec2 "$zone" || ok=false
-      done
-
-      GCE_ZONES=(us-west1-b asia-east2-a europe-west4-a southamerica-east1-b us-east4-c)
-      for zone in "${GCE_ZONES[@]}"; do
-        if ! $ok; then
-          break
-        fi
-        ci/testnet-sanity.sh beta-testnet-solana-com gce "$zone" || ok=false
-      done
-      $ok
+      NO_LEDGER_VERIFY=1 \
+      ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-beta-perf)

@@ -170,8 +235,19 @@ sanity() {
  testnet)
    (
      set -x
-      ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
-      #ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
+      ok=true
+      if [[ -n $EC2_NODE_COUNT ]]; then
+        NO_LEDGER_VERIFY=1 \
+        ci/testnet-sanity.sh testnet-solana-com ec2 "${EC2_ZONES[0]}" || ok=false
+      elif [[ -n $GCE_NODE_COUNT ]]; then
+        NO_LEDGER_VERIFY=1 \
+        ci/testnet-sanity.sh testnet-solana-com gce "${GCE_ZONES[0]}" || ok=false
+      else
+        echo "Error: no EC2 or GCE nodes"
+        ok=false
+      fi
+      $ok
    )
    ;;
  testnet-perf)

@@ -184,6 +260,21 @@ sanity() {
      #ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
    )
    ;;
+  testnet-demo)
+    (
+      set -x
+
+      ok=true
+      if [[ -n $GCE_NODE_COUNT ]]; then
+        NO_LEDGER_VERIFY=1 \
+        ci/testnet-sanity.sh demo-testnet-solana-com gce "${GCE_ZONES[0]}" -f || ok=false
+      else
+        echo "Error: no GCE nodes"
+        ok=false
+      fi
+      $ok
+    )
+    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1

@@ -191,15 +282,27 @@ sanity() {
  esac
}

-start() {
-  declare maybeDelete=$1
-  if [[ -z $maybeDelete ]]; then
-    echo "--- start $TESTNET"
-  else
-    echo "--- stop $TESTNET"
-  fi
-  declare maybeReuseLedger=$2
+deploy() {
+  declare maybeCreate=$1
+  declare maybeStart=$2
+  declare maybeStop=$3
+  declare maybeDelete=$4
+
+  echo "--- deploy \"$maybeCreate\" \"$maybeStart\" \"$maybeStop\" \"$maybeDelete\""
+
+  # Create or recreate the nodes
+  if [[ -z $maybeCreate ]]; then
+    skipCreate=skip
+  else
+    skipCreate=""
+  fi
+
+  # Start or restart the network software on the nodes
+  if [[ -z $maybeStart ]]; then
+    skipStart=skip
+  else
+    skipStart=""
+  fi

  case $TESTNET in
  testnet-edge)

@@ -207,7 +310,9 @@ start() {
      set -x
      ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
-        ${maybeReuseLedger:+-r} \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;

@@ -220,39 +325,22 @@ start() {
      ci/testnet-deploy.sh -p edge-perf-testnet-solana-com -C ec2 -z us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        -b \
-        ${maybeReuseLedger:+-r} \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;
  testnet-beta)
    (
      set -x
-      EC2_ZONES=(us-west-1a sa-east-1a ap-northeast-2a eu-central-1a ca-central-1a)
-      GCE_ZONES=(us-west1-b asia-east2-a europe-west4-a southamerica-east1-b us-east4-c)
-
-      # Build an array to pass as opts to testnet-deploy.sh: "-z zone1 -z zone2 ..."
-      GCE_ZONE_ARGS=()
-      for val in "${GCE_ZONES[@]}"; do
-        GCE_ZONE_ARGS+=("-z $val")
-      done
-
-      EC2_ZONE_ARGS=()
-      for val in "${EC2_ZONES[@]}"; do
-        EC2_ZONE_ARGS+=("-z $val")
-      done
-
-      [[ -n $EC2_NODE_COUNT ]] || EC2_NODE_COUNT=60
-      [[ -n $GCE_NODE_COUNT ]] || GCE_NODE_COUNT=40
-
-      # shellcheck disable=SC2068
-      ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -s -u -P -a eipalloc-0f286cf8a0771ce35 \
-        ${maybeReuseLedger:+-r} \
-        ${maybeDelete:+-D}
-      # shellcheck disable=SC2068
-      ci/testnet-deploy.sh -p beta-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -x -P \
-        ${maybeReuseLedger:+-r} \
+      NO_VALIDATOR_SANITY=1 \
+      ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
+        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
+        -b \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;

@@ -265,23 +353,38 @@ start() {
      ci/testnet-deploy.sh -p beta-perf-testnet-solana-com -C ec2 -z us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        -b \
-        ${maybeReuseLedger:+-r} \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
    )
    ;;
  testnet)
    (
      set -x
-      NO_VALIDATOR_SANITY=1 \
-      ci/testnet-deploy.sh -p testnet-solana-com -C ec2 -z us-west-1a \
-        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0fa502bf95f6f18b2 \
-        -b \
-        ${maybeReuseLedger:+-r} \
-        ${maybeDelete:+-D}
-      #ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-east1-c \
-      #  -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
-      #  ${maybeReuseLedger:+-r} \
-      #  ${maybeDelete:+-D}
+      if [[ -n $GCE_NODE_COUNT ]] || [[ -n $skipStart ]]; then
+        maybeSkipStart="skip"
+      fi
+
+      # shellcheck disable=SC2068
+      ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
+        -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -a eipalloc-0fa502bf95f6f18b2 \
+        ${skipCreate:+-e} \
+        ${maybeSkipStart:+-s} \
+        ${maybeStop:+-S} \
+        ${maybeDelete:+-D}
+
+      if [[ -n $GCE_NODE_COUNT ]]; then
+        # shellcheck disable=SC2068
+        ci/testnet-deploy.sh -p testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
+          -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P \
+          ${skipCreate:+-e} \
+          ${skipStart:+-s} \
+          ${maybeStop:+-S} \
+          ${maybeDelete:+-D} \
+          -x
+      fi
    )
    ;;
  testnet-perf)

@@ -291,17 +394,42 @@ start() {
      NO_VALIDATOR_SANITY=1 \
      RUST_LOG=solana=warn \
      ci/testnet-deploy.sh -p perf-testnet-solana-com -C gce -z us-west1-b \
-        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
+        -G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        -b \
        -d pd-ssd \
-        ${maybeReuseLedger:+-r} \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
        ${maybeDelete:+-D}
-      #ci/testnet-deploy.sh -p perf-testnet-solana-com -C ec2 -z us-east-1a \
-      #  -g \
-      #  -t "$CHANNEL_OR_TAG" -c 2 \
-      #  ${maybeReuseLedger:+-r} \
-      #  ${maybeDelete:+-D}
    )
    ;;
+  testnet-demo)
+    (
+      set -x
+
+      if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]] || [[ -n $skipStart ]]; then
+        maybeSkipStart="skip"
+      fi
+
+      # shellcheck disable=SC2068
+      ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
+        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
+        -a demo-testnet-solana-com \
+        ${skipCreate:+-e} \
+        ${maybeSkipStart:+-s} \
+        ${maybeStop:+-S} \
+        ${maybeDelete:+-D}
+
+      if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]]; then
+        # shellcheck disable=SC2068
+        ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
+          -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
+          ${skipCreate:+-e} \
+          ${skipStart:+-s} \
+          ${maybeStop:+-S} \
+          ${maybeDelete:+-D}
+      fi
+    )
+    ;;
  *)

@@ -311,30 +439,82 @@ start() {
  esac
}

+ENABLED_LOCKFILE="${HOME}/${TESTNET}.is_enabled"
+
+create-and-start() {
+  deploy create start
+}
+create() {
+  deploy create
+}
+start() {
+  deploy "" start
+}
stop() {
-  start delete
+  deploy "" ""
+}
+delete() {
+  deploy "" "" "" delete
+}
+enable_testnet() {
+  touch "${ENABLED_LOCKFILE}"
+  echo "+++ $TESTNET now enabled"
+}
+disable_testnet() {
+  rm -f "${ENABLED_LOCKFILE}"
+  echo "+++ $TESTNET now disabled"
+}
+is_testnet_enabled() {
+  if [[ ! -f ${ENABLED_LOCKFILE} ]]; then
+    echo "+++ ${TESTNET} is currently disabled. Enable ${TESTNET} by running ci/testnet-manager.sh with \$TESTNET_OP=enable, then re-run with current settings."
+    exit 0
+  fi
}

case $TESTNET_OP in
-sanity)
-  sanity
+enable)
+  enable_testnet
+  ;;
+disable)
+  disable_testnet
+  delete
+  ;;
+create-and-start)
+  is_testnet_enabled
+  create-and-start
+  ;;
+create)
+  is_testnet_enabled
+  create
  ;;
start)
+  is_testnet_enabled
  start
  ;;
stop)
+  is_testnet_enabled
  stop
  ;;
+sanity)
+  is_testnet_enabled
+  sanity
+  ;;
+delete)
+  is_testnet_enabled
+  delete
+  ;;
update-or-restart)
-  if start "" update; then
+  is_testnet_enabled
+  if start; then
    echo Update successful
  else
    echo "+++ Update failed, restarting the network"
    $metricsWriteDatapoint "testnet-manager update-failure=1"
-    start
+    create-and-start
  fi
  ;;
sanity-or-restart)
+  is_testnet_enabled
  if sanity; then
    echo Pass
  else
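
The wrapper functions above drive `deploy()` purely through the presence or absence of positional arguments; a standalone sketch of the same idiom (names hypothetical):

```bash
#!/usr/bin/env bash
# Each positional slot is an on/off switch: any non-empty string enables the
# step, while an empty string ("") skips it yet preserves later positions.
demo_deploy() {
  declare maybeCreate=$1
  declare maybeStart=$2
  [[ -n $maybeCreate ]] && echo "would create nodes"
  [[ -n $maybeStart ]] && echo "would start software"
  return 0
}
demo_deploy create start   # both steps run
demo_deploy "" start       # start only; "" holds the first position open
```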

@@ -344,18 +524,22 @@ sanity-or-restart)
    # TODO: Restore attempt to restart the cluster before recreating it
    # See https://github.com/solana-labs/solana/issues/3774
    if false; then
-      if start "" update; then
+      if start; then
        echo Update successful
      else
        echo "+++ Update failed, restarting the network"
        $metricsWriteDatapoint "testnet-manager update-failure=1"
-        start
+        create-and-start
      fi
    else
-      start
+      create-and-start
    fi
  fi
  ;;
+*)
+  echo "Error: Invalid TESTNET_OP=$TESTNET_OP"
+  exit 1
+  ;;
esac

echo --- fin

@@ -11,13 +11,13 @@ usage() {
    echo "Error: $*"
  fi
  cat <<EOF
-usage: $0 [name] [cloud] [zone]
+usage: $0 [name] [cloud] [zone1] ... [zoneN]

-Sanity check a CD testnet
+Sanity check a testnet

 name  - name of the network
 cloud - cloud provider to use (gce, ec2)
-zone  - cloud provider zone of the network
+zone1 .. zoneN - cloud provider zones to check

Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics

@@ -27,10 +27,10 @@ EOF

netName=$1
cloudProvider=$2
-zone=$3
[[ -n $netName ]] || usage ""
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
-[[ -n $zone ]] || usage "Zone not specified"
+shift 2
+[[ -n $1 ]] || usage "zone1 not specified"

shutdown() {
  exitcode=$?

@@ -52,17 +52,20 @@ rm -f net/config/config
trap shutdown EXIT INT

set -x
-echo "--- $cloudProvider.sh config"
+for zone in "$@"; do
+  echo "--- $cloudProvider config [$zone]"
  timeout 5m net/"$cloudProvider".sh config -p "$netName" -z "$zone"
  net/init-metrics.sh -e
  echo "+++ $cloudProvider.sh info"
  net/"$cloudProvider".sh info
-  echo --- net.sh sanity
+  echo "--- net.sh sanity [$cloudProvider:$zone]"
  ok=true
  timeout 5m net/net.sh sanity \
    ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
    ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
-    ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} || ok=false
+    ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
+    $zone || ok=false

  net/net.sh logs
  $ok
+done

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
-version = "0.13.0"
+version = "0.14.0"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -17,10 +17,10 @@ reqwest = "0.9.11"
serde = "1.0.89"
serde_derive = "1.0.88"
serde_json = "1.0.39"
-solana-netutil = { path = "../netutil", version = "0.13.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }

[dev-dependencies]
jsonrpc-core = "10.1.0"
jsonrpc-http-server = "10.1.0"
-solana-logger = { path = "../logger", version = "0.13.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }

client/src/client_error.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
+use crate::rpc_request;
+use solana_sdk::transaction::TransactionError;
+use std::{fmt, io};
+
+#[derive(Debug)]
+pub enum ClientError {
+    Io(io::Error),
+    Reqwest(reqwest::Error),
+    RpcError(rpc_request::RpcError),
+    SerdeJson(serde_json::error::Error),
+    TransactionError(TransactionError),
+}
+
+impl fmt::Display for ClientError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "solana client error")
+    }
+}
+
+impl std::error::Error for ClientError {}
+
+impl From<io::Error> for ClientError {
+    fn from(err: io::Error) -> ClientError {
+        ClientError::Io(err)
+    }
+}
+
+impl From<reqwest::Error> for ClientError {
+    fn from(err: reqwest::Error) -> ClientError {
+        ClientError::Reqwest(err)
+    }
+}
+
+impl From<rpc_request::RpcError> for ClientError {
+    fn from(err: rpc_request::RpcError) -> ClientError {
+        ClientError::RpcError(err)
+    }
+}
+
+impl From<serde_json::error::Error> for ClientError {
+    fn from(err: serde_json::error::Error) -> ClientError {
+        ClientError::SerdeJson(err)
+    }
+}
+
+impl From<TransactionError> for ClientError {
+    fn from(err: TransactionError) -> ClientError {
+        ClientError::TransactionError(err)
+    }
+}

@@ -1,3 +1,4 @@
+use crate::client_error::ClientError;
use crate::rpc_request::RpcRequest;

pub(crate) trait GenericRpcClientRequest {

@@ -6,5 +7,5 @@ pub(crate) trait GenericRpcClientRequest {
        request: &RpcRequest,
        params: Option<serde_json::Value>,
        retries: usize,
-    ) -> Result<serde_json::Value, Box<dyn std::error::Error>>;
+    ) -> Result<serde_json::Value, ClientError>;
}

@@ -1,3 +1,4 @@
+pub mod client_error;
mod generic_rpc_client_request;
pub mod mock_rpc_client_request;
pub mod rpc_client;

@@ -1,3 +1,4 @@
+use crate::client_error::ClientError;
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::rpc_request::RpcRequest;
use serde_json::{Number, Value};

@@ -23,7 +24,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
        request: &RpcRequest,
        params: Option<serde_json::Value>,
        _retries: usize,
-    ) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
+    ) -> Result<serde_json::Value, ClientError> {
        if self.url == "fails" {
            return Ok(Value::Null);
        }

@@ -1,3 +1,4 @@
+use crate::client_error::ClientError;
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::mock_rpc_client_request::MockRpcClientRequest;
use crate::rpc_client_request::RpcClientRequest;

@@ -19,7 +20,7 @@ use std::thread::sleep;
use std::time::{Duration, Instant};

pub struct RpcClient {
-    client: Box<GenericRpcClientRequest>,
+    client: Box<GenericRpcClientRequest + Send + Sync>,
}

impl RpcClient {

@@ -46,10 +47,7 @@ impl RpcClient {
        }
    }

-    pub fn send_transaction(
-        &self,
-        transaction: &Transaction,
-    ) -> Result<String, Box<dyn error::Error>> {
+    pub fn send_transaction(&self, transaction: &Transaction) -> Result<String, ClientError> {
        let serialized = serialize(transaction).unwrap();
        let params = json!([serialized]);
        let signature = self

@@ -67,7 +65,7 @@ impl RpcClient {
    pub fn get_signature_status(
        &self,
        signature: &str,
-    ) -> Result<Option<transaction::Result<()>>, Box<dyn error::Error>> {
+    ) -> Result<Option<transaction::Result<()>>, ClientError> {
        let params = json!([signature.to_string()]);
        let signature_status =
            self.client

@@ -81,7 +79,7 @@ impl RpcClient {
        &self,
        transaction: &mut Transaction,
        signer: &T,
-    ) -> Result<String, Box<dyn error::Error>> {
+    ) -> Result<String, ClientError> {
        let mut send_retries = 5;
        loop {
            let mut status_retries = 4;

@@ -117,6 +115,9 @@ impl RpcClient {
                send_retries - 1
            };
            if send_retries == 0 {
+                if status.is_some() {
+                    status.unwrap()?
+                } else {
                    Err(io::Error::new(
                        io::ErrorKind::Other,
                        format!("Transaction {:?} failed: {:?}", signature_str, status),

@@ -124,6 +125,7 @@ impl RpcClient {
                }
            }
        }
+    }

    pub fn send_and_confirm_transactions(
        &self,

@@ -201,7 +203,7 @@ impl RpcClient {
        &self,
        tx: &mut Transaction,
        signer_key: &T,
-    ) -> Result<(), Box<dyn error::Error>> {
+    ) -> Result<(), ClientError> {
        let blockhash = self.get_new_blockhash(&tx.message().recent_blockhash)?;
        tx.sign(&[signer_key], blockhash);
        Ok(())

@@ -482,7 +484,7 @@ impl RpcClient {
            )
            .map_err(|error| {
                debug!(
-                    "Response get_num_blocks_since_signature_confirmation: {}",
+                    "Response get_num_blocks_since_signature_confirmation: {:?}",
                    error
                );
                io::Error::new(

@@ -526,7 +528,7 @@ impl RpcClient {
        request: &RpcRequest,
        params: Option<Value>,
        retries: usize,
-    ) -> Result<Value, Box<dyn error::Error>> {
+    ) -> Result<Value, ClientError> {
        self.client.send(request, params, retries)
    }
}

@@ -743,4 +745,9 @@ mod tests {
        );
    }

+    #[test]
+    fn test_rpc_client_thread() {
+        let rpc_client = RpcClient::new_mock("succeeds".to_string());
+        thread::spawn(move || rpc_client);
+    }
}

@@ -1,3 +1,4 @@
+use crate::client_error::ClientError;
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::rpc_request::{RpcError, RpcRequest};
use log::*;

@@ -36,7 +37,7 @@ impl GenericRpcClientRequest for RpcClientRequest {
        request: &RpcRequest,
        params: Option<serde_json::Value>,
        mut retries: usize,
-    ) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
+    ) -> Result<serde_json::Value, ClientError> {
        // Concurrent requests are not supported so reuse the same request id for all requests
        let request_id = 1;


@@ -62,8 +63,8 @@ impl GenericRpcClientRequest for RpcClientRequest {
            }
            Err(e) => {
                info!(
-                    "make_rpc_request() failed, {} retries left: {:?}",
-                    retries, e
+                    "make_rpc_request({:?}) failed, {} retries left: {:?}",
+                    request, retries, e
                );
                if retries == 0 {
                    Err(e)?;

@@ -4,21 +4,23 @@ use std::{error, fmt};
#[derive(Debug, PartialEq)]
pub enum RpcRequest {
    ConfirmTransaction,
+    DeregisterNode,
+    FullnodeExit,
    GetAccountInfo,
    GetBalance,
+    GetClusterNodes,
+    GetNumBlocksSinceSignatureConfirmation,
    GetRecentBlockhash,
    GetSignatureStatus,
-    GetTransactionCount,
-    RequestAirdrop,
-    SendTransaction,
-    RegisterNode,
-    SignVote,
-    DeregisterNode,
+    GetSlotLeader,
    GetStorageBlockhash,
    GetStorageEntryHeight,
    GetStoragePubkeysForEntryHeight,
-    FullnodeExit,
-    GetNumBlocksSinceSignatureConfirmation,
+    GetTransactionCount,
+    RegisterNode,
+    RequestAirdrop,
+    SendTransaction,
+    SignVote,
}

impl RpcRequest {

@@ -26,23 +28,25 @@ impl RpcRequest {
        let jsonrpc = "2.0";
        let method = match self {
            RpcRequest::ConfirmTransaction => "confirmTransaction",
+            RpcRequest::DeregisterNode => "deregisterNode",
+            RpcRequest::FullnodeExit => "fullnodeExit",
            RpcRequest::GetAccountInfo => "getAccountInfo",
            RpcRequest::GetBalance => "getBalance",
-            RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
-            RpcRequest::GetSignatureStatus => "getSignatureStatus",
-            RpcRequest::GetTransactionCount => "getTransactionCount",
-            RpcRequest::RequestAirdrop => "requestAirdrop",
-            RpcRequest::SendTransaction => "sendTransaction",
-            RpcRequest::RegisterNode => "registerNode",
-            RpcRequest::SignVote => "signVote",
-            RpcRequest::DeregisterNode => "deregisterNode",
-            RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
-            RpcRequest::GetStorageEntryHeight => "getStorageEntryHeight",
-            RpcRequest::GetStoragePubkeysForEntryHeight => "getStoragePubkeysForEntryHeight",
-            RpcRequest::FullnodeExit => "fullnodeExit",
+            RpcRequest::GetClusterNodes => "getClusterNodes",
            RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
                "getNumBlocksSinceSignatureConfirmation"
            }
+            RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
+            RpcRequest::GetSignatureStatus => "getSignatureStatus",
+            RpcRequest::GetSlotLeader => "getSlotLeader",
+            RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
+            RpcRequest::GetStorageEntryHeight => "getStorageEntryHeight",
+            RpcRequest::GetStoragePubkeysForEntryHeight => "getStoragePubkeysForEntryHeight",
+            RpcRequest::GetTransactionCount => "getTransactionCount",
+            RpcRequest::RegisterNode => "registerNode",
+            RpcRequest::RequestAirdrop => "requestAirdrop",
+            RpcRequest::SendTransaction => "sendTransaction",
+            RpcRequest::SignVote => "signVote",
        };
        let mut request = json!({
            "jsonrpc": jsonrpc,

@@ -137,19 +137,6 @@ impl ThinClient {
        self.rpc_client.wait_for_balance(pubkey, expected_balance)
    }

-    pub fn poll_for_signature(&self, signature: &Signature) -> io::Result<()> {
-        self.rpc_client.poll_for_signature(signature)
-    }
-
-    /// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
-    pub fn poll_for_signature_confirmation(
-        &self,
-        signature: &Signature,
-        min_confirmed_blocks: usize,
-    ) -> io::Result<()> {
-        self.rpc_client
-            .poll_for_signature_confirmation(signature, min_confirmed_blocks)
-    }
-
    /// Check a signature in the bank. This method blocks
    /// until the server sends a response.
    pub fn check_signature(&self, signature: &Signature) -> bool {

@@ -168,7 +155,11 @@ impl ThinClient {
        }
    }

-impl Client for ThinClient {}
+impl Client for ThinClient {
+    fn transactions_addr(&self) -> String {
+        self.transactions_addr.to_string()
+    }
+}

impl SyncClient for ThinClient {
    fn send_message(&self, keypairs: &[&Keypair], message: Message) -> TransportResult<Signature> {
@ -217,7 +208,7 @@ impl SyncClient for ThinClient {
|
|||||||
.map_err(|err| {
|
.map_err(|err| {
|
||||||
io::Error::new(
|
io::Error::new(
|
||||||
io::ErrorKind::Other,
|
io::ErrorKind::Other,
|
||||||
format!("send_transaction failed with error {}", err),
|
format!("send_transaction failed with error {:?}", err),
|
||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
Ok(status)
|
Ok(status)
|
||||||
@ -232,6 +223,21 @@ impl SyncClient for ThinClient {
|
|||||||
let transaction_count = self.rpc_client.get_transaction_count()?;
|
let transaction_count = self.rpc_client.get_transaction_count()?;
|
||||||
Ok(transaction_count)
|
Ok(transaction_count)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
|
||||||
|
fn poll_for_signature_confirmation(
|
||||||
|
&self,
|
||||||
|
signature: &Signature,
|
||||||
|
min_confirmed_blocks: usize,
|
||||||
|
) -> TransportResult<()> {
|
||||||
|
Ok(self
|
||||||
|
.rpc_client
|
||||||
|
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
|
||||||
|
Ok(self.rpc_client.poll_for_signature(signature)?)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsyncClient for ThinClient {
|
impl AsyncClient for ThinClient {
|
||||||
|
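A side note on the `{}` -> `{:?}` change in the `send_transaction` error path above: transport-level errors commonly derive `Debug` but do not implement `Display`, so formatting them with `{}` would not compile. A self-contained illustration with a stand-in error type (the `TransportError` variants here are hypothetical):

use std::io;

#[derive(Debug)]
enum TransportError {
    IoError(io::ErrorKind),
    TransactionError(&'static str),
}

fn to_io_error(err: TransportError) -> io::Error {
    io::Error::new(
        io::ErrorKind::Other,
        // `{:?}` uses the derived Debug impl; `{}` would need a Display impl
        format!("send_transaction failed with error {:?}", err),
    )
}

fn main() {
    let err = to_io_error(TransportError::TransactionError("blockhash not found"));
    println!("{}", err);
}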
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.13.0"
+version = "0.14.0"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -25,6 +25,7 @@ bs58 = "0.2.0"
 byteorder = "1.3.1"
 chrono = { version = "0.4.0", features = ["serde"] }
 crc = { version = "1.8.1", optional = true }
+ed25519-dalek = "1.0.0-pre.0"
 hashbrown = "0.2.0"
 indexmap = "1.0"
 itertools = "0.8.0"
@@ -40,24 +41,24 @@ nix = "0.13.0"
 rand = "0.6.5"
 rand_chacha = "0.1.1"
 rayon = "1.0.0"
+reed-solomon-erasure = "3.1.1"
 reqwest = "0.9.11"
-ring = "0.13.2"
 rocksdb = "0.11.0"
 serde = "1.0.89"
 serde_derive = "1.0.88"
 serde_json = "1.0.39"
-solana-budget-api = { path = "../programs/budget_api", version = "0.13.0" }
-solana-client = { path = "../client", version = "0.13.0" }
-solana-drone = { path = "../drone", version = "0.13.0" }
-solana-kvstore = { path = "../kvstore", version = "0.13.0", optional = true }
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-metrics = { path = "../metrics", version = "0.13.0" }
-solana-netutil = { path = "../netutil", version = "0.13.0" }
-solana-runtime = { path = "../runtime", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.13.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.13.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.13.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.14.0" }
+solana-client = { path = "../client", version = "0.14.0" }
+solana-drone = { path = "../drone", version = "0.14.0" }
+solana-kvstore = { path = "../kvstore", version = "0.14.0", optional = true }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-metrics = { path = "../metrics", version = "0.14.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }
+solana-runtime = { path = "../runtime", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.14.0" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.14.0" }
+solana-vote-signer = { path = "../vote-signer", version = "0.14.0" }
 sys-info = "0.5.6"
 tokio = "0.1"
 tokio-codec = "0.1"
@@ -66,8 +67,8 @@ untrusted = "0.6.2"
 [dev-dependencies]
 hex-literal = "0.1.4"
 matches = "0.1.6"
-solana-vote-program = { path = "../programs/vote_program", version = "0.13.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.13.0" }
+solana-vote-program = { path = "../programs/vote_program", version = "0.14.0" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.14.0" }
 
 [[bench]]
 name = "banking_stage"
@@ -10,6 +10,7 @@ use solana::banking_stage::{create_test_recorder, BankingStage};
 use solana::blocktree::{get_tmp_ledger_path, Blocktree};
 use solana::cluster_info::ClusterInfo;
 use solana::cluster_info::Node;
+use solana::leader_schedule_cache::LeaderScheduleCache;
 use solana::packet::to_packets_chunked;
 use solana::poh_recorder::WorkingBankEntries;
 use solana::service::Service;
@@ -55,7 +56,9 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
     let (genesis_block, mint_keypair) = GenesisBlock::new(mint_total);
 
     let (verified_sender, verified_receiver) = channel();
+    let (vote_sender, vote_receiver) = channel();
     let bank = Arc::new(Bank::new(&genesis_block));
+    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
     let dummy = system_transaction::transfer(
         &mint_keypair,
         &mint_keypair.pubkey(),
@@ -103,7 +106,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
     let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
         .into_iter()
         .map(|x| {
-            let len = x.read().unwrap().packets.len();
+            let len = x.packets.len();
             (x, iter::repeat(1).take(len).collect())
         })
         .collect();
@@ -116,7 +119,13 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
         create_test_recorder(&bank, &blocktree);
     let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
     let cluster_info = Arc::new(RwLock::new(cluster_info));
-    let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
+    let _banking_stage = BankingStage::new(
+        &cluster_info,
+        &poh_recorder,
+        verified_receiver,
+        vote_receiver,
+        &leader_schedule_cache,
+    );
     poh_recorder.lock().unwrap().set_bank(&bank);
 
     let mut id = genesis_block.hash();
@@ -138,6 +147,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
         start += half_len;
         start %= verified.len();
     });
+    drop(vote_sender);
     exit.store(true, Ordering::Relaxed);
     poh_service.join().unwrap();
 }
@@ -155,7 +165,9 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
     let (genesis_block, mint_keypair) = GenesisBlock::new(mint_total);
 
     let (verified_sender, verified_receiver) = channel();
+    let (vote_sender, vote_receiver) = channel();
     let bank = Arc::new(Bank::new(&genesis_block));
+    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
     let dummy = system_transaction::transfer(
         &mint_keypair,
         &mint_keypair.pubkey(),
@@ -218,7 +230,7 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
     let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
         .into_iter()
         .map(|x| {
-            let len = x.read().unwrap().packets.len();
+            let len = x.packets.len();
             (x, iter::repeat(1).take(len).collect())
         })
         .collect();
@@ -232,7 +244,13 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
         create_test_recorder(&bank, &blocktree);
     let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
     let cluster_info = Arc::new(RwLock::new(cluster_info));
-    let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
+    let _banking_stage = BankingStage::new(
+        &cluster_info,
+        &poh_recorder,
+        verified_receiver,
+        vote_receiver,
+        &leader_schedule_cache,
+    );
     poh_recorder.lock().unwrap().set_bank(&bank);
 
     let mut id = genesis_block.hash();
@@ -254,6 +272,7 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
         start += half_len;
         start %= verified.len();
     });
+    drop(vote_sender);
     exit.store(true, Ordering::Relaxed);
     poh_service.join().unwrap();
 }
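The benches above now create a second `(vote_sender, vote_receiver)` channel and `drop(vote_sender)` before joining. A minimal sketch of that shutdown idiom, using only the standard library:

use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (vote_sender, vote_receiver) = channel::<u64>();
    let worker = thread::spawn(move || {
        // recv() returns Err once every sender has been dropped.
        while let Ok(batch) = vote_receiver.recv() {
            println!("processing vote batch {}", batch);
        }
        println!("channel closed, worker exiting");
    });
    vote_sender.send(1).unwrap();
    drop(vote_sender); // mirrors the `drop(vote_sender)` before the joins above
    worker.join().unwrap();
}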
@@ -24,9 +24,8 @@ fn main() {
 
     let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
     let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
-    let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
 
-    if chacha || cuda || erasure {
+    if chacha || cuda {
         println!("cargo:rerun-if-changed={}", perf_libs_dir);
         println!("cargo:rustc-link-search=native={}", perf_libs_dir);
     }
@@ -46,30 +45,4 @@ fn main() {
         println!("cargo:rustc-link-lib=dylib=cuda");
         println!("cargo:rustc-link-lib=dylib=cudadevrt");
     }
-    if erasure {
-        #[cfg(any(target_os = "macos", target_os = "ios"))]
-        {
-            println!(
-                "cargo:rerun-if-changed={}/libgf_complete.dylib",
-                perf_libs_dir
-            );
-            println!("cargo:rerun-if-changed={}/libJerasure.dylib", perf_libs_dir);
-        }
-        #[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
-        {
-            println!("cargo:rerun-if-changed={}/libgf_complete.so", perf_libs_dir);
-            println!("cargo:rerun-if-changed={}/libJerasure.so", perf_libs_dir);
-        }
-        #[cfg(windows)]
-        {
-            println!(
-                "cargo:rerun-if-changed={}/libgf_complete.dll",
-                perf_libs_dir
-            );
-            println!("cargo:rerun-if-changed={}/libJerasure.dll", perf_libs_dir);
-        }
-
-        println!("cargo:rustc-link-lib=dylib=Jerasure");
-        println!("cargo:rustc-link-lib=dylib=gf_complete");
-    }
 }
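The build-script hunks above rely on Cargo exposing each enabled feature to `build.rs` as a `CARGO_FEATURE_<NAME>` environment variable. A minimal sketch of the detection pattern; the `target/perf-libs` path is an assumed placeholder, not the repository's actual `perf_libs_dir`:

use std::env;

fn main() {
    // Cargo sets CARGO_FEATURE_<NAME> for every enabled feature.
    let chacha = env::var("CARGO_FEATURE_CHACHA").is_ok();
    let cuda = env::var("CARGO_FEATURE_CUDA").is_ok();

    if chacha || cuda {
        // Rebuild when the native library directory changes, and let the
        // linker search it. The path is a placeholder for this sketch.
        println!("cargo:rerun-if-changed=target/perf-libs");
        println!("cargo:rustc-link-search=native=target/perf-libs");
    }
}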
@@ -6,9 +6,8 @@ use crate::cluster_info::ClusterInfo;
 use crate::contact_info::ContactInfo;
 use crate::entry;
 use crate::entry::{hash_transactions, Entry};
-use crate::leader_schedule_utils;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::packet;
-use crate::packet::SharedPackets;
 use crate::packet::{Packet, Packets};
 use crate::poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntries};
 use crate::poh_service::{PohService, PohServiceConfig};
@@ -20,8 +19,9 @@ use solana_metrics::counter::Counter;
 use solana_runtime::bank::Bank;
 use solana_runtime::locked_accounts_results::LockedAccountsResults;
 use solana_sdk::pubkey::Pubkey;
-use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_BLOCKHASHES};
-use solana_sdk::transaction::{self, Transaction, TransactionError};
+use solana_sdk::timing::{self, duration_as_us, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES};
+use solana_sdk::transaction::{self, Transaction};
+use std::cmp;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{Receiver, RecvTimeoutError};
@@ -31,7 +31,7 @@ use std::time::Duration;
 use std::time::Instant;
 use sys_info;
 
-pub type UnprocessedPackets = Vec<(SharedPackets, usize, Vec<u8>)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
+pub type UnprocessedPackets = Vec<(Packets, usize, Vec<u8>)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
 
 // number of threads is 1 until mt bank is ready
 pub const NUM_THREADS: u32 = 10;
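The removed `SharedPackets` alias wrapped packet batches in `Arc<RwLock<...>>`, which forced a `.read().unwrap()` at every access; the hunks below drop that locking. A self-contained sketch of the before/after, with simplified stand-in types rather than the real crate definitions:

use std::sync::{Arc, RwLock};

#[derive(Clone, Default)]
struct Packets {
    packets: Vec<u8>, // stand-in for the real Vec<Packet>
}

type SharedPackets = Arc<RwLock<Packets>>; // the removed alias

fn len_old(batch: &SharedPackets) -> usize {
    batch.read().unwrap().packets.len() // old style: lock on every access
}

fn len_new(batch: &Packets) -> usize {
    batch.packets.len() // new style: plain field access
}

fn main() {
    let shared: SharedPackets = Arc::new(RwLock::new(Packets::default()));
    assert_eq!(len_old(&shared), len_new(&shared.read().unwrap()));
}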
@@ -55,12 +55,16 @@ impl BankingStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         verified_receiver: Receiver<VerifiedPackets>,
+        verified_vote_receiver: Receiver<VerifiedPackets>,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> Self {
         Self::new_num_threads(
             cluster_info,
             poh_recorder,
             verified_receiver,
-            Self::num_threads(),
+            verified_vote_receiver,
+            cmp::min(2, Self::num_threads()),
+            leader_schedule_cache,
         )
     }
 
@@ -68,9 +72,12 @@ impl BankingStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         verified_receiver: Receiver<VerifiedPackets>,
+        verified_vote_receiver: Receiver<VerifiedPackets>,
         num_threads: u32,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> Self {
         let verified_receiver = Arc::new(Mutex::new(verified_receiver));
+        let verified_vote_receiver = Arc::new(Mutex::new(verified_vote_receiver));
 
         // Single thread to generate entries from many banks.
         // This thread talks to poh_service and broadcasts the entries once they have been recorded.
@@ -79,12 +86,19 @@ impl BankingStage {
 
         // Many banks that process transactions in parallel.
         let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
-            .map(|_| {
-                let verified_receiver = verified_receiver.clone();
+            .map(|i| {
+                let (verified_receiver, enable_forwarding) = if i < num_threads - 1 {
+                    (verified_receiver.clone(), true)
+                } else {
+                    // Disable forwarding of vote transactions, as votes are gossiped
+                    (verified_vote_receiver.clone(), false)
+                };
+
                 let poh_recorder = poh_recorder.clone();
                 let cluster_info = cluster_info.clone();
                 let exit = exit.clone();
                 let mut recv_start = Instant::now();
+                let leader_schedule_cache = leader_schedule_cache.clone();
                 Builder::new()
                     .name("solana-banking-stage-tx".to_string())
                     .spawn(move || {
@@ -93,6 +107,8 @@ impl BankingStage {
                             &poh_recorder,
                             &cluster_info,
                             &mut recv_start,
+                            enable_forwarding,
+                            leader_schedule_cache,
                         );
                         exit.store(true, Ordering::Relaxed);
                     })
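A runnable reduction of the thread split introduced in `new_num_threads` above: the last worker is dedicated to the vote channel with forwarding disabled, and the rest consume the regular transaction channel. Names here are illustrative stand-ins:

fn spawn_roles(num_threads: u32) -> Vec<(&'static str, bool)> {
    (0..num_threads)
        .map(|i| {
            if i < num_threads - 1 {
                // regular workers: transaction channel, forwarding enabled
                ("transactions", true)
            } else {
                // last worker: vote channel; votes are gossiped, never forwarded
                ("votes", false)
            }
        })
        .collect()
}

fn main() {
    assert_eq!(
        spawn_roles(3),
        vec![("transactions", true), ("transactions", true), ("votes", false)]
    );
}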
@@ -105,16 +121,17 @@ impl BankingStage {
     fn forward_unprocessed_packets(
         socket: &std::net::UdpSocket,
         tpu_via_blobs: &std::net::SocketAddr,
-        unprocessed_packets: &[(SharedPackets, usize, Vec<u8>)],
+        unprocessed_packets: &[(Packets, usize, Vec<u8>)],
     ) -> std::io::Result<()> {
         let locked_packets: Vec<_> = unprocessed_packets
             .iter()
-            .map(|(p, start_index, _)| (p.read().unwrap(), start_index))
+            .map(|(p, start_index, _)| (p, start_index))
             .collect();
         let packets: Vec<&Packet> = locked_packets
             .iter()
             .flat_map(|(p, start_index)| &p.packets[**start_index..])
             .collect();
+        inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
         let blobs = packet::packets_to_blobs(&packets);
 
         for blob in blobs {
@@ -126,18 +143,22 @@ impl BankingStage {
 
     fn process_buffered_packets(
         poh_recorder: &Arc<Mutex<PohRecorder>>,
-        buffered_packets: &[(SharedPackets, usize, Vec<u8>)],
+        buffered_packets: &[(Packets, usize, Vec<u8>)],
     ) -> Result<UnprocessedPackets> {
         let mut unprocessed_packets = vec![];
         let mut bank_shutdown = false;
+        let mut rebuffered_packets = 0;
+        let mut new_tx_count = 0;
         for (msgs, offset, vers) in buffered_packets {
             if bank_shutdown {
+                rebuffered_packets += vers.len() - *offset;
                 unprocessed_packets.push((msgs.to_owned(), *offset, vers.to_owned()));
                 continue;
             }
 
             let bank = poh_recorder.lock().unwrap().bank();
             if bank.is_none() {
+                rebuffered_packets += vers.len() - *offset;
                 unprocessed_packets.push((msgs.to_owned(), *offset, vers.to_owned()));
                 continue;
             }
@@ -146,7 +167,10 @@ impl BankingStage {
             let (processed, verified_txs, verified_indexes) =
                 Self::process_received_packets(&bank, &poh_recorder, &msgs, &vers, *offset)?;
 
+            new_tx_count += processed;
+
             if processed < verified_txs.len() {
+                rebuffered_packets += verified_txs.len() - processed;
                 bank_shutdown = true;
                 // Collect any unprocessed transactions in this batch for forwarding
                 unprocessed_packets.push((
@@ -156,12 +180,18 @@ impl BankingStage {
                 ));
             }
         }
+
+        inc_new_counter_info!("banking_stage-rebuffered_packets", rebuffered_packets);
+        inc_new_counter_info!("banking_stage-consumed_buffered_packets", new_tx_count);
+        inc_new_counter_info!("banking_stage-process_transactions", new_tx_count);
+
         Ok(unprocessed_packets)
     }
 
     fn process_or_forward_packets(
         leader_data: Option<&ContactInfo>,
         bank_is_available: bool,
+        would_be_leader: bool,
         my_id: &Pubkey,
     ) -> BufferedPacketsDecision {
         leader_data.map_or(
@@ -172,6 +202,9 @@ impl BankingStage {
                 if bank_is_available {
                     // If the bank is available, this node is the leader
                     BufferedPacketsDecision::Consume
+                } else if would_be_leader {
+                    // If the node will be the leader soon, hold the packets for now
+                    BufferedPacketsDecision::Hold
                 } else if x.id != *my_id {
                     // If the current node is not the leader, forward the buffered packets
                     BufferedPacketsDecision::Forward
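The decision logic above can be exercised standalone. This sketch mirrors `process_or_forward_packets` with plain integers in place of `Pubkey`/`ContactInfo`; the asserts follow the updated unit tests further down:

#[derive(Debug, PartialEq)]
enum Decision {
    Consume,
    Forward,
    Hold,
}

fn decide(leader: Option<u64>, bank_available: bool, would_be_leader: bool, my_id: u64) -> Decision {
    match leader {
        // No leader known: hold on to the packets.
        None => Decision::Hold,
        Some(leader_id) => {
            if bank_available {
                Decision::Consume // a bank is available, this node is the leader
            } else if would_be_leader {
                Decision::Hold // this node leads soon, keep the packets
            } else if leader_id != my_id {
                Decision::Forward // someone else is the leader, send the packets on
            } else {
                Decision::Hold
            }
        }
    }
}

fn main() {
    assert_eq!(decide(Some(2), false, false, 1), Decision::Forward);
    assert_eq!(decide(Some(2), false, true, 1), Decision::Hold);
    assert_eq!(decide(Some(2), true, false, 2), Decision::Consume);
}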
@@ -187,26 +220,33 @@ impl BankingStage {
         socket: &std::net::UdpSocket,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
-        buffered_packets: &[(SharedPackets, usize, Vec<u8>)],
+        buffered_packets: &[(Packets, usize, Vec<u8>)],
+        enable_forwarding: bool,
     ) -> Result<UnprocessedPackets> {
         let rcluster_info = cluster_info.read().unwrap();
 
-        let decision = Self::process_or_forward_packets(
-            rcluster_info.leader_data(),
-            poh_recorder.lock().unwrap().bank().is_some(),
-            &rcluster_info.id(),
-        );
+        let decision = {
+            let poh = poh_recorder.lock().unwrap();
+            Self::process_or_forward_packets(
+                rcluster_info.leader_data(),
+                poh.bank().is_some(),
+                poh.would_be_leader(DEFAULT_TICKS_PER_SLOT),
+                &rcluster_info.id(),
+            )
+        };
 
         match decision {
             BufferedPacketsDecision::Consume => {
                 Self::process_buffered_packets(poh_recorder, buffered_packets)
             }
             BufferedPacketsDecision::Forward => {
+                if enable_forwarding {
                     let _ = Self::forward_unprocessed_packets(
                         &socket,
                         &rcluster_info.leader_data().unwrap().tpu_via_blobs,
                         &buffered_packets,
                     );
+                }
                 Ok(vec![])
             }
             _ => Ok(buffered_packets.to_vec()),
@@ -216,19 +256,27 @@ impl BankingStage {
     fn should_buffer_packets(
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> bool {
         let rcluster_info = cluster_info.read().unwrap();
 
         // Buffer the packets if I am the next leader
         // or, if it was getting sent to me
-        let leader_id = match poh_recorder.lock().unwrap().bank() {
-            Some(bank) => {
-                leader_schedule_utils::slot_leader_at(bank.slot() + 1, &bank).unwrap_or_default()
-            }
-            None => rcluster_info
-                .leader_data()
-                .map(|x| x.id)
+        // or, the next leader is unknown
+        let poh = poh_recorder.lock().unwrap();
+        let leader_id = match poh.bank() {
+            Some(bank) => leader_schedule_cache
+                .slot_leader_at_else_compute(bank.slot() + 1, &bank)
                 .unwrap_or_default(),
+            None => {
+                if poh.would_be_leader(DEFAULT_TICKS_PER_SLOT) {
+                    rcluster_info.id()
+                } else {
+                    rcluster_info
+                        .leader_data()
+                        .map_or(rcluster_info.id(), |x| x.id)
+                }
+            }
         };
 
         leader_id == rcluster_info.id()
@@ -239,6 +287,8 @@ impl BankingStage {
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         recv_start: &mut Instant,
+        enable_forwarding: bool,
+        leader_schedule_cache: Arc<LeaderScheduleCache>,
     ) {
         let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut buffered_packets = vec![];
@@ -249,6 +299,7 @@ impl BankingStage {
                     poh_recorder,
                     cluster_info,
                     &buffered_packets,
+                    enable_forwarding,
                 )
                 .map(|packets| buffered_packets = packets)
                 .unwrap_or_else(|_| buffered_packets.clear());
@@ -268,11 +319,24 @@ impl BankingStage {
             {
                 Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
                 Ok(unprocessed_packets) => {
-                    if Self::should_buffer_packets(poh_recorder, cluster_info) {
+                    if unprocessed_packets.is_empty() {
+                        continue;
+                    }
+                    if Self::should_buffer_packets(
+                        poh_recorder,
+                        cluster_info,
+                        &leader_schedule_cache,
+                    ) {
+                        let num = unprocessed_packets
+                            .iter()
+                            .map(|(x, start, _)| x.packets.len().saturating_sub(*start))
+                            .sum();
+                        inc_new_counter_info!("banking_stage-buffered_packets", num);
                         buffered_packets.extend_from_slice(&unprocessed_packets);
                         continue;
                     }
 
+                    if enable_forwarding {
                     if let Some(leader) = cluster_info.read().unwrap().leader_data() {
                         let _ = Self::forward_unprocessed_packets(
                             &socket,
@@ -281,11 +345,18 @@ impl BankingStage {
                         );
                     }
                 }
+                }
                 Err(err) => {
                     debug!("solana-banking-stage-tx: exit due to {:?}", err);
                     break;
                 }
             }
 
+
+            let num = buffered_packets
+                .iter()
+                .map(|(x, start, _)| x.packets.len().saturating_sub(*start))
+                .sum();
+            inc_new_counter_info!("banking_stage-total_buffered_packets", num);
         }
     }
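The buffered-packet counters above sum only the packets past each batch's start offset, with `saturating_sub` guarding against an offset at or beyond the batch length. A standalone version, with byte vectors standing in for packet batches:

fn count_unprocessed(batches: &[(Vec<u8>, usize)]) -> usize {
    batches
        .iter()
        .map(|(packets, start)| packets.len().saturating_sub(*start))
        .sum()
}

fn main() {
    // 10 packets with 4 consumed, 3 fully consumed, and an offset past the end.
    let batches = vec![(vec![0; 10], 4), (vec![0; 3], 3), (vec![0; 2], 5)];
    assert_eq!(count_unprocessed(&batches), 6);
}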
@@ -310,14 +381,10 @@ impl BankingStage {
         let processed_transactions: Vec<_> = results
             .iter()
             .zip(txs.iter())
-            .filter_map(|(r, x)| match r {
-                Ok(_) => Some(x.clone()),
-                Err(TransactionError::InstructionError(index, err)) => {
-                    debug!("instruction error {:?}, {:?}", index, err);
+            .filter_map(|(r, x)| {
+                if Bank::can_commit(r) {
                     Some(x.clone())
-                }
-                Err(ref e) => {
-                    debug!("process transaction failed {:?}", e);
+                } else {
                     None
                 }
             })
@@ -417,7 +484,7 @@ impl BankingStage {
                 + entry::num_will_fit(
                     &transactions[chunk_start..],
                     packet::BLOB_DATA_SIZE as u64,
-                    &Entry::serialized_size,
+                    &Entry::serialized_to_blob_size,
                 );
 
             let result = Self::process_and_record_transactions(
@@ -425,7 +492,8 @@ impl BankingStage {
                 &transactions[chunk_start..chunk_end],
                 poh,
             );
-            trace!("process_transcations: {:?}", result);
+            trace!("process_transactions: {:?}", result);
+            chunk_start = chunk_end;
             if let Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)) = result {
                 info!(
                     "process transactions: max height reached slot: {} height: {}",
@@ -435,7 +503,6 @@ impl BankingStage {
                 break;
             }
             result?;
-            chunk_start = chunk_end;
         }
         Ok(chunk_start)
     }
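On the `Bank::can_commit` filter above: as of this change a transaction is cloned into the recorded entry if it executed, or if it failed inside an instruction (such a transaction is still committed, since it was processed by the bank); other errors drop it. A hedged, self-contained sketch with simplified stand-in error variants, not the SDK's actual `TransactionError`:

#[derive(Debug)]
enum TxError {
    InstructionError(u8, &'static str),
    BlockhashNotFound,
}

fn can_commit(result: &Result<(), TxError>) -> bool {
    match result {
        Ok(()) => true,
        // Failed inside an instruction: still recorded/committed.
        Err(TxError::InstructionError(..)) => true,
        // Never made it into the bank: dropped.
        Err(_) => false,
    }
}

fn main() {
    let results: Vec<Result<(), TxError>> = vec![
        Ok(()),
        Err(TxError::InstructionError(0, "custom error")),
        Err(TxError::BlockhashNotFound),
    ];
    let committed = results.iter().filter(|r| can_commit(r)).count();
    assert_eq!(committed, 2);
}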
@@ -443,14 +510,13 @@ impl BankingStage {
     fn process_received_packets(
         bank: &Arc<Bank>,
         poh: &Arc<Mutex<PohRecorder>>,
-        msgs: &Arc<RwLock<Packets>>,
+        msgs: &Packets,
         vers: &[u8],
         offset: usize,
     ) -> Result<(usize, Vec<Transaction>, Vec<usize>)> {
         debug!("banking-stage-tx bank {}", bank.slot());
-        let transactions = Self::deserialize_transactions(&Packets::new(
-            msgs.read().unwrap().packets[offset..].to_owned(),
-        ));
+        let transactions =
+            Self::deserialize_transactions(&Packets::new(msgs.packets[offset..].to_owned()));
 
         let vers = vers[offset..].to_owned();
 
@@ -506,7 +572,7 @@ impl BankingStage {
             timing::duration_as_ms(&recv_start.elapsed()),
             count,
         );
-        inc_new_counter_info!("banking_stage-entries_received", mms_len);
+        inc_new_counter_info!("banking_stage-transactions_received", count);
         let proc_start = Instant::now();
         let mut new_tx_count = 0;
 
@@ -588,6 +654,7 @@ pub fn create_test_recorder(
         bank.ticks_per_slot(),
         &Pubkey::default(),
         blocktree,
+        &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
     );
     poh_recorder.set_bank(&bank);
 
@@ -610,6 +677,7 @@ mod tests {
    use solana_sdk::instruction::InstructionError;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_transaction;
+    use solana_sdk::transaction::TransactionError;
    use std::sync::mpsc::channel;
    use std::thread::sleep;
 
@@ -617,7 +685,9 @@ mod tests {
     fn test_banking_stage_shutdown1() {
         let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
         let bank = Arc::new(Bank::new(&genesis_block));
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
         let (verified_sender, verified_receiver) = channel();
+        let (vote_sender, vote_receiver) = channel();
         let ledger_path = get_tmp_ledger_path!();
         {
             let blocktree = Arc::new(
@@ -627,8 +697,15 @@ mod tests {
                 create_test_recorder(&bank, &blocktree);
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
-            let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
+            let banking_stage = BankingStage::new(
+                &cluster_info,
+                &poh_recorder,
+                verified_receiver,
+                vote_receiver,
+                &leader_schedule_cache,
+            );
             drop(verified_sender);
+            drop(vote_sender);
             exit.store(true, Ordering::Relaxed);
             banking_stage.join().unwrap();
             poh_service.join().unwrap();
@@ -642,8 +719,10 @@ mod tests {
         let (mut genesis_block, _mint_keypair) = GenesisBlock::new(2);
         genesis_block.ticks_per_slot = 4;
         let bank = Arc::new(Bank::new(&genesis_block));
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
         let start_hash = bank.last_blockhash();
         let (verified_sender, verified_receiver) = channel();
+        let (vote_sender, vote_receiver) = channel();
         let ledger_path = get_tmp_ledger_path!();
         {
             let blocktree = Arc::new(
@@ -653,10 +732,17 @@ mod tests {
                 create_test_recorder(&bank, &blocktree);
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
-            let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
+            let banking_stage = BankingStage::new(
+                &cluster_info,
+                &poh_recorder,
+                verified_receiver,
+                vote_receiver,
+                &leader_schedule_cache,
+            );
             trace!("sending bank");
             sleep(Duration::from_millis(600));
             drop(verified_sender);
+            drop(vote_sender);
             exit.store(true, Ordering::Relaxed);
             poh_service.join().unwrap();
             drop(poh_recorder);
@@ -680,8 +766,10 @@ mod tests {
         solana_logger::setup();
         let (genesis_block, mint_keypair) = GenesisBlock::new(10);
         let bank = Arc::new(Bank::new(&genesis_block));
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
         let start_hash = bank.last_blockhash();
         let (verified_sender, verified_receiver) = channel();
+        let (vote_sender, vote_receiver) = channel();
         let ledger_path = get_tmp_ledger_path!();
         {
             let blocktree = Arc::new(
@@ -691,7 +779,13 @@ mod tests {
                 create_test_recorder(&bank, &blocktree);
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
-            let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
+            let banking_stage = BankingStage::new(
+                &cluster_info,
+                &poh_recorder,
+                verified_receiver,
+                vote_receiver,
+                &leader_schedule_cache,
+            );
 
             // fund another account so we can send 2 good transactions in a single batch.
             let keypair = Keypair::new();
@@ -728,6 +822,7 @@ mod tests {
             .unwrap();
 
         drop(verified_sender);
+        drop(vote_sender);
         exit.store(true, Ordering::Relaxed);
         poh_service.join().unwrap();
         drop(poh_recorder);
@@ -805,11 +900,13 @@ mod tests {
             .send(vec![(packets[0].clone(), vec![1u8])])
             .unwrap();
 
+        let (vote_sender, vote_receiver) = channel();
         let ledger_path = get_tmp_ledger_path!();
         {
             let entry_receiver = {
                 // start a banking_stage to eat verified receiver
                 let bank = Arc::new(Bank::new(&genesis_block));
+                let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
                 let blocktree = Arc::new(
                     Blocktree::open(&ledger_path)
                         .expect("Expected to be able to open database ledger"),
@@ -823,7 +920,9 @@ mod tests {
                     &cluster_info,
                     &poh_recorder,
                     verified_receiver,
-                    1,
+                    vote_receiver,
+                    2,
+                    &leader_schedule_cache,
                 );
 
                 // wait for banking_stage to eat the packets
@@ -835,6 +934,7 @@ mod tests {
                 entry_receiver
             };
             drop(verified_sender);
+            drop(vote_sender);
 
             // consume the entire entry_receiver, feed it into a new bank
             // check that the balance is what we expect.
@@ -879,6 +979,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );
         let poh_recorder = Arc::new(Mutex::new(poh_recorder));
 
@@ -922,34 +1023,38 @@ mod tests {
         let my_id1 = Pubkey::new_rand();
 
         assert_eq!(
-            BankingStage::process_or_forward_packets(None, true, &my_id),
+            BankingStage::process_or_forward_packets(None, true, false, &my_id),
             BufferedPacketsDecision::Hold
         );
         assert_eq!(
-            BankingStage::process_or_forward_packets(None, false, &my_id),
+            BankingStage::process_or_forward_packets(None, false, false, &my_id),
             BufferedPacketsDecision::Hold
         );
         assert_eq!(
-            BankingStage::process_or_forward_packets(None, false, &my_id1),
+            BankingStage::process_or_forward_packets(None, false, false, &my_id1),
             BufferedPacketsDecision::Hold
        );
 
         let mut contact_info = ContactInfo::default();
         contact_info.id = my_id1;
         assert_eq!(
-            BankingStage::process_or_forward_packets(Some(&contact_info), false, &my_id),
+            BankingStage::process_or_forward_packets(Some(&contact_info), false, false, &my_id),
             BufferedPacketsDecision::Forward
         );
         assert_eq!(
-            BankingStage::process_or_forward_packets(Some(&contact_info), true, &my_id),
-            BufferedPacketsDecision::Consume
-        );
-        assert_eq!(
-            BankingStage::process_or_forward_packets(Some(&contact_info), false, &my_id1),
+            BankingStage::process_or_forward_packets(Some(&contact_info), false, true, &my_id),
             BufferedPacketsDecision::Hold
         );
         assert_eq!(
-            BankingStage::process_or_forward_packets(Some(&contact_info), true, &my_id1),
+            BankingStage::process_or_forward_packets(Some(&contact_info), true, false, &my_id),
+            BufferedPacketsDecision::Consume
+        );
+        assert_eq!(
+            BankingStage::process_or_forward_packets(Some(&contact_info), false, false, &my_id1),
+            BufferedPacketsDecision::Hold
+        );
+        assert_eq!(
+            BankingStage::process_or_forward_packets(Some(&contact_info), true, false, &my_id1),
             BufferedPacketsDecision::Consume
         );
     }
@@ -986,6 +1091,7 @@ mod tests {
             bank.ticks_per_slot(),
             &pubkey,
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );
         let poh_recorder = Arc::new(Mutex::new(poh_recorder));
(File diff suppressed because it is too large.)
@@ -9,7 +9,6 @@ use std::borrow::Borrow;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::Path;
-use std::sync::Arc;
 
 pub mod columns {
     #[derive(Debug)]
@@ -28,7 +27,6 @@ pub mod columns {
     /// Data Column
     pub struct Data;
 
-    #[cfg(feature = "erasure")]
     #[derive(Debug)]
     /// The erasure meta column
     pub struct ErasureMeta;
@@ -134,7 +132,7 @@ where
     B: Backend,
     C: Column<B>,
 {
-    pub db: Arc<Database<B>>,
+    backend: PhantomData<B>,
     column: PhantomData<C>,
 }
 
@@ -172,7 +170,7 @@ where
             .get_cf(self.cf_handle::<C>(), C::key(key).borrow())
     }
 
-    pub fn put_bytes<C>(&self, key: C::Index, data: &[u8]) -> Result<()>
+    pub fn put_bytes<C>(&mut self, key: C::Index, data: &[u8]) -> Result<()>
     where
         C: Column<B>,
     {
@@ -180,7 +178,7 @@ where
             .put_cf(self.cf_handle::<C>(), C::key(key).borrow(), data)
     }
 
-    pub fn delete<C>(&self, key: C::Index) -> Result<()>
+    pub fn delete<C>(&mut self, key: C::Index) -> Result<()>
     where
         C: Column<B>,
     {
@@ -204,7 +202,7 @@ where
         }
     }
 
-    pub fn put<C>(&self, key: C::Index, value: &C::Type) -> Result<()>
+    pub fn put<C>(&mut self, key: C::Index, value: &C::Type) -> Result<()>
     where
         C: TypedColumn<B>,
     {
@@ -242,7 +240,7 @@ where
         Ok(iter)
     }
 
-    pub fn batch(&self) -> Result<WriteBatch<B>> {
+    pub fn batch(&mut self) -> Result<WriteBatch<B>> {
         let db_write_batch = self.backend.batch()?;
         let map = self
             .backend
@@ -258,7 +256,7 @@ where
         })
     }
 
-    pub fn write(&self, batch: WriteBatch<B>) -> Result<()> {
+    pub fn write(&mut self, batch: WriteBatch<B>) -> Result<()> {
         self.backend.write(batch.write_batch)
     }
 
@@ -269,6 +267,16 @@ where
     {
         self.backend.cf_handle(C::NAME).clone()
     }
+
+    pub fn column<C>(&self) -> LedgerColumn<B, C>
+    where
+        C: Column<B>,
+    {
+        LedgerColumn {
+            backend: PhantomData,
+            column: PhantomData,
+        }
+    }
 }
 
 impl<B, C> Cursor<B, C>
@@ -325,43 +333,24 @@ where
     B: Backend,
     C: Column<B>,
 {
-    pub fn new(db: &Arc<Database<B>>) -> Self {
-        LedgerColumn {
-            db: Arc::clone(db),
-            column: PhantomData,
-        }
+    pub fn get_bytes(&self, db: &Database<B>, key: C::Index) -> Result<Option<Vec<u8>>> {
+        db.backend.get_cf(self.handle(db), C::key(key).borrow())
     }
 
-    pub fn put_bytes(&self, key: C::Index, value: &[u8]) -> Result<()> {
-        self.db
-            .backend
-            .put_cf(self.handle(), C::key(key).borrow(), value)
+    pub fn cursor(&self, db: &Database<B>) -> Result<Cursor<B, C>> {
+        db.cursor()
    }
 
-    pub fn get_bytes(&self, key: C::Index) -> Result<Option<Vec<u8>>> {
-        self.db.backend.get_cf(self.handle(), C::key(key).borrow())
+    pub fn iter(&self, db: &Database<B>) -> Result<impl Iterator<Item = (C::Index, Vec<u8>)>> {
+        db.iter::<C>()
     }
 
-    pub fn delete(&self, key: C::Index) -> Result<()> {
-        self.db
-            .backend
-            .delete_cf(self.handle(), C::key(key).borrow())
+    pub fn handle(&self, db: &Database<B>) -> B::ColumnFamily {
+        db.cf_handle::<C>()
     }
 
-    pub fn cursor(&self) -> Result<Cursor<B, C>> {
-        self.db.cursor()
-    }
-
-    pub fn iter(&self) -> Result<impl Iterator<Item = (C::Index, Vec<u8>)>> {
-        self.db.iter::<C>()
-    }
-
-    pub fn handle(&self) -> B::ColumnFamily {
-        self.db.cf_handle::<C>()
-    }
-
-    pub fn is_empty(&self) -> Result<bool> {
-        let mut cursor = self.cursor()?;
+    pub fn is_empty(&self, db: &Database<B>) -> Result<bool> {
+        let mut cursor = self.cursor(db)?;
         cursor.seek_to_first();
         Ok(!cursor.valid())
     }
@@ -370,14 +359,35 @@ where
 impl<B, C> LedgerColumn<B, C>
 where
     B: Backend,
-    C: TypedColumn<B>,
+    C: Column<B>,
 {
-    pub fn put(&self, key: C::Index, value: &C::Type) -> Result<()> {
-        self.db.put::<C>(key, value)
+    pub fn put_bytes(&self, db: &mut Database<B>, key: C::Index, value: &[u8]) -> Result<()> {
+        db.backend
+            .put_cf(self.handle(db), C::key(key).borrow(), value)
     }
 
-    pub fn get(&self, key: C::Index) -> Result<Option<C::Type>> {
-        self.db.get::<C>(key)
+    pub fn delete(&self, db: &mut Database<B>, key: C::Index) -> Result<()> {
+        db.backend.delete_cf(self.handle(db), C::key(key).borrow())
+    }
+}
+
+impl<B, C> LedgerColumn<B, C>
+where
+    B: Backend,
+    C: TypedColumn<B>,
+{
+    pub fn get(&self, db: &Database<B>, key: C::Index) -> Result<Option<C::Type>> {
+        db.get::<C>(key)
+    }
+}
+
+impl<B, C> LedgerColumn<B, C>
+where
+    B: Backend,
+    C: TypedColumn<B>,
+{
+    pub fn put(&self, db: &mut Database<B>, key: C::Index, value: &C::Type) -> Result<()> {
+        db.put::<C>(key, value)
     }
 }
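The refactor above turns `LedgerColumn` from an owner of `Arc<Database>` into a zero-sized typed handle: `PhantomData` carries the column type, and every accessor borrows the database explicitly. Below is a self-contained sketch of the shape of that API, with a `HashMap` standing in for the storage backend; all names are simplified stand-ins:

use std::collections::HashMap;
use std::marker::PhantomData;

trait Column {
    const NAME: &'static str;
}

struct Database {
    tables: HashMap<&'static str, Vec<(u64, Vec<u8>)>>,
}

// Zero-sized handle: no reference to the database, just the column type.
struct LedgerColumn<C: Column> {
    column: PhantomData<C>,
}

impl Database {
    fn column<C: Column>(&self) -> LedgerColumn<C> {
        LedgerColumn { column: PhantomData }
    }
}

impl<C: Column> LedgerColumn<C> {
    // All methods borrow the database instead of holding it.
    fn put_bytes(&self, db: &mut Database, key: u64, value: &[u8]) {
        db.tables.entry(C::NAME).or_default().push((key, value.to_vec()));
    }

    fn get_bytes(&self, db: &Database, key: u64) -> Option<Vec<u8>> {
        db.tables
            .get(C::NAME)?
            .iter()
            .find(|(k, _)| *k == key)
            .map(|(_, v)| v.clone())
    }
}

struct SlotMeta;
impl Column for SlotMeta {
    const NAME: &'static str = "slot_meta";
}

fn main() {
    let mut db = Database { tables: HashMap::new() };
    let cf = db.column::<SlotMeta>();
    cf.put_bytes(&mut db, 7, b"meta");
    assert_eq!(cf.get_bytes(&db, 7).as_deref(), Some(&b"meta"[..]));
}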
@@ -138,7 +138,6 @@ impl TypedColumn<Kvs> for cf::SlotMeta {
     type Type = super::SlotMeta;
 }
 
-#[cfg(feature = "erasure")]
 impl Column<Kvs> for cf::ErasureMeta {
     const NAME: &'static str = super::ERASURE_META_CF;
     type Index = (u64, u64);
@@ -157,7 +156,6 @@ impl Column<Kvs> for cf::ErasureMeta {
     }
 }
 
-#[cfg(feature = "erasure")]
 impl TypedColumn<Kvs> for cf::ErasureMeta {
     type Type = super::ErasureMeta;
 }
@@ -1,4 +1,3 @@
-#[cfg(feature = "erasure")]
 use crate::erasure::{NUM_CODING, NUM_DATA};
 
 #[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
@@ -36,7 +35,22 @@ impl SlotMeta {
         if self.last_index == std::u64::MAX {
             return false;
         }
-        assert!(self.consumed <= self.last_index + 1);
+        // Should never happen
+        if self.consumed > self.last_index + 1 {
+            solana_metrics::submit(
+                solana_metrics::influxdb::Point::new("blocktree_error")
+                    .add_field(
+                        "error",
+                        solana_metrics::influxdb::Value::String(format!(
+                            "Observed a slot meta with consumed: {} > meta.last_index + 1: {}",
+                            self.consumed,
+                            self.last_index + 1
+                        )),
+                    )
+                    .to_owned(),
+            );
+        }
+
         self.consumed == self.last_index + 1
     }
@@ -59,68 +73,92 @@ impl SlotMeta {
     }
 }
 
-#[cfg(feature = "erasure")]
-#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
+#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
 /// Erasure coding information
 pub struct ErasureMeta {
     /// Which erasure set in the slot this is
     pub set_index: u64,
+    /// Size of shards in this erasure set
+    pub size: usize,
     /// Bitfield representing presence/absence of data blobs
     pub data: u64,
     /// Bitfield representing presence/absence of coding blobs
     pub coding: u64,
 }
 
-#[cfg(feature = "erasure")]
+#[derive(Debug, PartialEq)]
+pub enum ErasureMetaStatus {
+    CanRecover,
+    DataFull,
+    StillNeed(usize),
+}
+
 impl ErasureMeta {
     pub fn new(set_index: u64) -> ErasureMeta {
         ErasureMeta {
             set_index,
+            size: 0,
             data: 0,
             coding: 0,
         }
     }
 
-    pub fn can_recover(&self) -> bool {
+    pub fn status(&self) -> ErasureMetaStatus {
         let (data_missing, coding_missing) = (
             NUM_DATA - self.data.count_ones() as usize,
             NUM_CODING - self.coding.count_ones() as usize,
         );
-        data_missing > 0 && data_missing + coding_missing <= NUM_CODING
+        if data_missing > 0 && data_missing + coding_missing <= NUM_CODING {
+            assert!(self.size != 0);
+            ErasureMetaStatus::CanRecover
+        } else if data_missing == 0 {
+            ErasureMetaStatus::DataFull
+        } else {
+            ErasureMetaStatus::StillNeed(data_missing + coding_missing - NUM_CODING)
+        }
     }
 
     pub fn is_coding_present(&self, index: u64) -> bool {
-        let set_index = Self::set_index_for(index);
-        let position = index - self.start_index();
-        set_index == self.set_index && self.coding & (1 << position) != 0
+        if let Some(position) = self.data_index_in_set(index) {
+            self.coding & (1 << position) != 0
+        } else {
+            false
+        }
     }
 
-    pub fn set_coding_present(&mut self, index: u64) {
-        let set_index = Self::set_index_for(index);
-
-        if set_index as u64 == self.set_index {
-            let position = index - self.start_index();
-            self.coding |= 1 << position;
+    pub fn set_size(&mut self, size: usize) {
+        self.size = size;
+    }
+
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    pub fn set_coding_present(&mut self, index: u64, present: bool) {
+        if let Some(position) = self.data_index_in_set(index) {
+            if present {
+                self.coding |= 1 << position;
+            } else {
+                self.coding &= !(1 << position);
+            }
         }
     }
 
     pub fn is_data_present(&self, index: u64) -> bool {
-        let set_index = Self::set_index_for(index);
-        let position = index - self.start_index();
-        set_index == self.set_index && self.data & (1 << position) != 0
+        if let Some(position) = self.data_index_in_set(index) {
+            self.data & (1 << position) != 0
+        } else {
+            false
+        }
     }
 
-    pub fn set_data_present(&mut self, index: u64) {
-        let set_index = Self::set_index_for(index);
-
-        if set_index as u64 == self.set_index {
-            let position = index - self.start_index();
-
-            self.data |= 1 << position;
+    pub fn set_data_present(&mut self, index: u64, present: bool) {
+        if let Some(position) = self.data_index_in_set(index) {
+            if present {
+                self.data |= 1 << position;
+            } else {
+                self.data &= !(1 << position);
+            }
         }
     }
 
@@ -128,6 +166,20 @@ impl ErasureMeta {
         index / NUM_DATA as u64
     }
 
+    pub fn data_index_in_set(&self, index: u64) -> Option<u64> {
+        let set_index = Self::set_index_for(index);
+
+        if set_index == self.set_index {
+            Some(index - self.start_index())
+        } else {
+            None
+        }
+    }
+
+    pub fn coding_index_in_set(&self, index: u64) -> Option<u64> {
+        self.data_index_in_set(index).map(|i| i + NUM_DATA as u64)
+    }
+
     pub fn start_index(&self) -> u64 {
         self.set_index * NUM_DATA as u64
     }
@@ -139,18 +191,53 @@ impl ErasureMeta {
     }
 }
 
-#[cfg(feature = "erasure")]
+#[test]
+fn test_meta_indexes() {
+    use rand::{thread_rng, Rng};
+    // to avoid casts everywhere
+    const NUM_DATA: u64 = crate::erasure::NUM_DATA as u64;
+
+    let mut rng = thread_rng();
+
+    for _ in 0..100 {
+        let set_index = rng.gen_range(0, 1_000);
+        let blob_index = (set_index * NUM_DATA) + rng.gen_range(0, 16);
+
+        assert_eq!(set_index, ErasureMeta::set_index_for(blob_index));
+        let e_meta = ErasureMeta::new(set_index);
+
+        assert_eq!(e_meta.start_index(), set_index * NUM_DATA);
+        let (data_end_idx, coding_end_idx) = e_meta.end_indexes();
+        assert_eq!(data_end_idx, (set_index + 1) * NUM_DATA);
+        assert_eq!(coding_end_idx, set_index * NUM_DATA + NUM_CODING as u64);
+    }
+
+    let mut e_meta = ErasureMeta::new(0);
+
+    assert_eq!(e_meta.data_index_in_set(0), Some(0));
+    assert_eq!(e_meta.data_index_in_set(NUM_DATA / 2), Some(NUM_DATA / 2));
+    assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), Some(NUM_DATA - 1));
+    assert_eq!(e_meta.data_index_in_set(NUM_DATA), None);
+    assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);
+
+    e_meta.set_index = 1;
+
+    assert_eq!(e_meta.data_index_in_set(0), None);
+    assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), None);
+    assert_eq!(e_meta.data_index_in_set(NUM_DATA), Some(0));
+    assert_eq!(
+        e_meta.data_index_in_set(NUM_DATA * 2 - 1),
+        Some(NUM_DATA - 1)
+    );
+    assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);
+}
+
 #[test]
 fn test_meta_coding_present() {
-    let set_index = 0;
-    let mut e_meta = ErasureMeta {
-        set_index,
-        data: 0,
-        coding: 0,
-    };
+    let mut e_meta = ErasureMeta::default();
 
     for i in 0..NUM_CODING as u64 {
-        e_meta.set_coding_present(i);
+        e_meta.set_coding_present(i, true);
         assert_eq!(e_meta.is_coding_present(i), true);
     }
     for i in NUM_CODING as u64..NUM_DATA as u64 {
@@ -160,7 +247,7 @@ fn test_meta_coding_present() {
     e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 17) as u64);
 
     for i in (NUM_DATA * 17) as u64..((NUM_DATA * 17) + NUM_CODING) as u64 {
-        e_meta.set_coding_present(i);
+        e_meta.set_coding_present(i, true);
         assert_eq!(e_meta.is_coding_present(i), true);
     }
     for i in (NUM_DATA * 17 + NUM_CODING) as u64..((NUM_DATA * 17) + NUM_DATA) as u64 {
@@ -168,46 +255,63 @@ fn test_meta_coding_present() {
     }
 }
 
-#[cfg(feature = "erasure")]
 #[test]
-fn test_can_recover() {
-    let set_index = 0;
-    let mut e_meta = ErasureMeta {
-        set_index,
-        data: 0,
-        coding: 0,
-    };
-
-    assert!(!e_meta.can_recover());
+fn test_erasure_meta_status() {
+    let mut e_meta = ErasureMeta::default();
+
+    assert_eq!(e_meta.status(), ErasureMetaStatus::StillNeed(NUM_DATA));
 
     e_meta.data = 0b1111_1111_1111_1111;
     e_meta.coding = 0x00;
 
-    assert!(!e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
 
     e_meta.coding = 0x0e;
-    assert_eq!(0x0fu8, 0b0000_1111u8);
-    assert!(!e_meta.can_recover());
+    e_meta.size = 1;
+    assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
 
     e_meta.data = 0b0111_1111_1111_1111;
-    assert!(e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
 
     e_meta.data = 0b0111_1111_1111_1110;
-    assert!(e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
 
     e_meta.data = 0b0111_1111_1011_1110;
-    assert!(e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
 
     e_meta.data = 0b0111_1011_1011_1110;
-    assert!(!e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::StillNeed(1));
 
     e_meta.data = 0b0111_1011_1011_1110;
-    assert!(!e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::StillNeed(1));
 
     e_meta.coding = 0b0000_1110;
     e_meta.data = 0b1111_1111_1111_1100;
-    assert!(e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
 
     e_meta.data = 0b1111_1111_1111_1000;
-    assert!(e_meta.can_recover());
+    assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
+}
+
+#[test]
+fn test_meta_data_present() {
+    let mut e_meta = ErasureMeta::default();
+
+    for i in 0..NUM_DATA as u64 {
+        e_meta.set_data_present(i, true);
+        assert_eq!(e_meta.is_data_present(i), true);
+    }
+    for i in NUM_DATA as u64..2 * NUM_DATA as u64 {
+        assert_eq!(e_meta.is_data_present(i), false);
+    }
+
+    e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 23) as u64);
+
+    for i in (NUM_DATA * 23) as u64..(NUM_DATA * 24) as u64 {
+        e_meta.set_data_present(i, true);
+        assert_eq!(e_meta.is_data_present(i), true);
+    }
+    for i in (NUM_DATA * 22) as u64..(NUM_DATA * 23) as u64 {
+        assert_eq!(e_meta.is_data_present(i), false);
+    }
 }
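Note: the hunks above replace the boolean can_recover() with a three-state ErasureMetaStatus plus index-translation helpers. A minimal sketch of driving the new API, using only methods introduced in this diff (the function name and shard size are illustrative):

    fn erasure_set_progress() {
        let mut meta = ErasureMeta::new(0);
        meta.set_size(1024); // status() asserts a nonzero size before reporting CanRecover

        // Every coding blob arrives, plus every data blob except index 0.
        for i in 0..NUM_CODING as u64 {
            meta.set_coding_present(i, true);
        }
        for i in 1..NUM_DATA as u64 {
            meta.set_data_present(i, true);
        }

        // One data blob missing, enough coding present: recovery is possible.
        assert_eq!(meta.status(), ErasureMetaStatus::CanRecover);

        // Once the last data blob lands, the set no longer needs recovery.
        meta.set_data_present(0, true);
        assert_eq!(meta.status(), ErasureMetaStatus::DataFull);
    }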
@@ -30,9 +30,7 @@ impl Backend for Rocks {
     type Error = rocksdb::Error;
 
     fn open(path: &Path) -> Result<Rocks> {
-        #[cfg(feature = "erasure")]
-        use crate::blocktree::db::columns::ErasureMeta;
-        use crate::blocktree::db::columns::{Coding, Data, Orphans, SlotMeta};
+        use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, SlotMeta};
 
         fs::create_dir_all(&path)?;
 
@@ -43,7 +41,6 @@ impl Backend for Rocks {
         let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
         let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
         let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
-        #[cfg(feature = "erasure")]
         let erasure_meta_cf_descriptor =
             ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
         let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
@@ -52,7 +49,6 @@ impl Backend for Rocks {
             meta_cf_descriptor,
             data_cf_descriptor,
             erasure_cf_descriptor,
-            #[cfg(feature = "erasure")]
             erasure_meta_cf_descriptor,
             orphans_cf_descriptor,
         ];
@@ -64,13 +60,10 @@ impl Backend for Rocks {
     }
 
     fn columns(&self) -> Vec<&'static str> {
-        #[cfg(feature = "erasure")]
-        use crate::blocktree::db::columns::ErasureMeta;
-        use crate::blocktree::db::columns::{Coding, Data, Orphans, SlotMeta};
+        use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, SlotMeta};
 
         vec![
             Coding::NAME,
-            #[cfg(feature = "erasure")]
             ErasureMeta::NAME,
             Data::NAME,
             Orphans::NAME,
@@ -196,7 +189,6 @@ impl TypedColumn<Rocks> for cf::SlotMeta {
     type Type = super::SlotMeta;
 }
 
-#[cfg(feature = "erasure")]
 impl Column<Rocks> for cf::ErasureMeta {
     const NAME: &'static str = super::ERASURE_META_CF;
     type Index = (u64, u64);
@@ -216,7 +208,6 @@ impl Column<Rocks> for cf::ErasureMeta {
     }
 }
 
-#[cfg(feature = "erasure")]
 impl TypedColumn<Rocks> for cf::ErasureMeta {
     type Type = super::ErasureMeta;
 }
@@ -1,7 +1,7 @@
 use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 use crate::entry::{Entry, EntrySlice};
-use crate::leader_schedule_utils;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use rayon::prelude::*;
 use solana_metrics::counter::Counter;
 use solana_runtime::bank::Bank;
@@ -9,10 +9,10 @@ use solana_runtime::locked_accounts_results::LockedAccountsResults;
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::timing::duration_as_ms;
 use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
-use solana_sdk::transaction::{Result, TransactionError};
+use solana_sdk::transaction::Result;
 use std::result;
 use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
 
 fn first_err(results: &[Result<()>]) -> Result<()> {
     for r in results {
@@ -21,13 +21,6 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
     Ok(())
 }
 
-fn is_unexpected_validator_error(r: &Result<()>) -> bool {
-    match r {
-        Err(TransactionError::DuplicateSignature) => true,
-        _ => false,
-    }
-}
-
 fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
     inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
     let results: Vec<Result<()>> = entries
@@ -44,7 +37,7 @@ fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
                 if first_err.is_none() {
                     first_err = Some(r.clone());
                 }
-                if is_unexpected_validator_error(&r) {
+                if !Bank::can_commit(&r) {
                     warn!("Unexpected validator error: {:?}", e);
                     solana_metrics::submit(
                         solana_metrics::influxdb::Point::new("validator_process_entry_error")
@@ -116,10 +109,9 @@ pub fn process_blocktree(
     genesis_block: &GenesisBlock,
     blocktree: &Blocktree,
     account_paths: Option<String>,
-) -> result::Result<(BankForks, Vec<BankForksInfo>), BlocktreeProcessorError> {
+) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
     let now = Instant::now();
     info!("processing ledger...");
 
     // Setup bank for slot 0
     let mut pending_slots = {
         let slot = 0;
@@ -139,11 +131,19 @@ pub fn process_blocktree(
         vec![(slot, meta, bank, entry_height, last_entry_hash)]
     };
 
+    let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule());
+
     let mut fork_info = vec![];
+    let mut last_status_report = Instant::now();
     while !pending_slots.is_empty() {
         let (slot, meta, bank, mut entry_height, mut last_entry_hash) =
             pending_slots.pop().unwrap();
 
+        if last_status_report.elapsed() > Duration::from_secs(2) {
+            info!("processing ledger...block {}", slot);
+            last_status_report = Instant::now();
+        }
+
         // Fetch all entries for this slot
         let mut entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
             warn!("Failed to load entries for slot {}: {:?}", slot, err);
@@ -216,7 +216,9 @@ pub fn process_blocktree(
             if next_meta.is_full() {
                 let next_bank = Arc::new(Bank::new_from_parent(
                     &bank,
-                    &leader_schedule_utils::slot_leader_at(next_slot, &bank).unwrap(),
+                    &leader_schedule_cache
+                        .slot_leader_at_else_compute(next_slot, &bank)
+                        .unwrap(),
                     next_slot,
                 ));
                 trace!("Add child bank for slot={}", next_slot);
@@ -245,12 +247,12 @@ pub fn process_blocktree(
     let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
     let bank_forks = BankForks::new_from_banks(&banks);
     info!(
-        "processed ledger in {}ms, forks={}...",
+        "processing ledger...complete in {}ms, forks={}...",
        duration_as_ms(&now.elapsed()),
        bank_forks_info.len(),
    );
 
-    Ok((bank_forks, bank_forks_info))
+    Ok((bank_forks, bank_forks_info, leader_schedule_cache))
 }
 
 #[cfg(test)]
@@ -327,7 +329,7 @@ mod tests {
     // slot 2, points at slot 1
     fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
 
-    let (mut _bank_forks, bank_forks_info) =
+    let (mut _bank_forks, bank_forks_info, _) =
         process_blocktree(&genesis_block, &blocktree, None).unwrap();
 
     assert_eq!(bank_forks_info.len(), 1);
@@ -387,7 +389,7 @@ mod tests {
     blocktree.set_root(0).unwrap();
     blocktree.set_root(1).unwrap();
 
-    let (bank_forks, bank_forks_info) =
+    let (bank_forks, bank_forks_info, _) =
         process_blocktree(&genesis_block, &blocktree, None).unwrap();
 
     assert_eq!(bank_forks_info.len(), 2); // There are two forks
@@ -537,7 +539,7 @@ mod tests {
         .write_entries(1, 0, 0, genesis_block.ticks_per_slot, &entries)
         .unwrap();
     let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
-    let (bank_forks, bank_forks_info) =
+    let (bank_forks, bank_forks_info, _) =
         process_blocktree(&genesis_block, &blocktree, None).unwrap();
 
     assert_eq!(bank_forks_info.len(), 1);
@@ -562,7 +564,7 @@ mod tests {
     let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
 
     let blocktree = Blocktree::open(&ledger_path).unwrap();
-    let (bank_forks, bank_forks_info) =
+    let (bank_forks, bank_forks_info, _) =
         process_blocktree(&genesis_block, &blocktree, None).unwrap();
 
     assert_eq!(bank_forks_info.len(), 1);
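Note: process_blocktree now returns the LeaderScheduleCache it builds as a third tuple element, as the updated tests above show. A sketch of the new call shape, with the surrounding setup assumed from those tests:

    // `genesis_block` and `blocktree` are assumed to exist as in the tests above.
    let (bank_forks, bank_forks_info, leader_schedule_cache) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    // The cache can be handed to consumers that previously recomputed the
    // schedule through leader_schedule_utils on every lookup.
    assert!(!bank_forks_info.is_empty());
    let _ = (bank_forks, leader_schedule_cache);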
@@ -1,11 +1,10 @@
 //! A stage to broadcast data from a leader node to validators
 //!
 use crate::blocktree::Blocktree;
-use crate::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT};
+use crate::cluster_info::{ClusterInfo, ClusterInfoError, NEIGHBORHOOD_SIZE};
 use crate::entry::{EntrySender, EntrySlice};
-#[cfg(feature = "erasure")]
 use crate::erasure::CodingGenerator;
-use crate::packet::index_blobs;
+use crate::packet::index_blobs_with_genesis;
 use crate::poh_recorder::WorkingBankEntries;
 use crate::result::{Error, Result};
 use crate::service::Service;
@@ -13,6 +12,7 @@ use crate::staking_utils;
 use rayon::prelude::*;
 use solana_metrics::counter::Counter;
 use solana_metrics::{influxdb, submit};
+use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::duration_as_ms;
 use std::net::UdpSocket;
@@ -29,8 +29,6 @@ pub enum BroadcastStageReturnType {
 
 struct Broadcast {
     id: Pubkey,
-
-    #[cfg(feature = "erasure")]
     coding_generator: CodingGenerator,
 }
 
@@ -42,6 +40,7 @@ impl Broadcast {
         sock: &UdpSocket,
         blocktree: &Arc<Blocktree>,
         storage_entry_sender: &EntrySender,
+        genesis_blockhash: &Hash,
     ) -> Result<()> {
         let timer = Duration::new(1, 0);
         let (mut bank, entries) = receiver.recv_timeout(timer)?;
@@ -74,14 +73,14 @@ impl Broadcast {
             }
         }
 
-        let mut broadcast_table = cluster_info
-            .read()
-            .unwrap()
-            .sorted_tvu_peers(&staking_utils::delegated_stakes(&bank));
-        // Layer 1, leader nodes are limited to the fanout size.
-        broadcast_table.truncate(DATA_PLANE_FANOUT);
+        let bank_epoch = bank.get_stakers_epoch(bank.slot());
+        let mut broadcast_table = cluster_info.read().unwrap().sorted_tvu_peers(
+            &staking_utils::delegated_stakes_at_epoch(&bank, bank_epoch).unwrap(),
+        );
 
         inc_new_counter_info!("broadcast_service-num_peers", broadcast_table.len() + 1);
+        // Layer 1, leader nodes are limited to the fanout size.
+        broadcast_table.truncate(NEIGHBORHOOD_SIZE);
+
         inc_new_counter_info!("broadcast_service-entries_received", num_entries);
 
         let to_blobs_start = Instant::now();
@@ -103,9 +102,10 @@ impl Broadcast {
             .map(|meta| meta.consumed)
             .unwrap_or(0);
 
-        index_blobs(
+        index_blobs_with_genesis(
             &blobs,
             &self.id,
+            genesis_blockhash,
             blob_index,
             bank.slot(),
             bank.parent().map_or(0, |parent| parent.slot()),
@@ -119,7 +119,6 @@ impl Broadcast {
 
         blocktree.write_shared_blobs(&blobs)?;
 
-        #[cfg(feature = "erasure")]
         let coding = self.coding_generator.next(&blobs);
 
         let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
@@ -129,14 +128,10 @@ impl Broadcast {
         // Send out data
         ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
 
-        #[cfg(feature = "erasure")]
-        ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
-
         inc_new_counter_info!("streamer-broadcast-sent", blobs.len());
 
-        // generate and transmit any erasure coding blobs. if erasure isn't supported, just send everything again
-        #[cfg(not(feature = "erasure"))]
-        ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
+        // send out erasures
+        ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
 
         let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
@@ -192,13 +187,14 @@ impl BroadcastStage {
         receiver: &Receiver<WorkingBankEntries>,
         blocktree: &Arc<Blocktree>,
         storage_entry_sender: EntrySender,
+        genesis_blockhash: &Hash,
     ) -> BroadcastStageReturnType {
         let me = cluster_info.read().unwrap().my_data().clone();
+        let coding_generator = CodingGenerator::default();
 
         let mut broadcast = Broadcast {
             id: me.id,
-            #[cfg(feature = "erasure")]
-            coding_generator: CodingGenerator::new(),
+            coding_generator,
         };
 
         loop {
@@ -208,6 +204,7 @@ impl BroadcastStage {
                 sock,
                 blocktree,
                 &storage_entry_sender,
+                genesis_blockhash,
             ) {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => {
@@ -247,9 +244,11 @@ impl BroadcastStage {
         exit_sender: &Arc<AtomicBool>,
         blocktree: &Arc<Blocktree>,
         storage_entry_sender: EntrySender,
+        genesis_blockhash: &Hash,
     ) -> Self {
         let blocktree = blocktree.clone();
         let exit_sender = exit_sender.clone();
+        let genesis_blockhash = *genesis_blockhash;
         let thread_hdl = Builder::new()
             .name("solana-broadcaster".to_string())
             .spawn(move || {
@@ -260,6 +259,7 @@ impl BroadcastStage {
                     &receiver,
                     &blocktree,
                     storage_entry_sender,
+                    &genesis_blockhash,
                 )
             })
             .unwrap();
@@ -284,9 +284,9 @@ mod test {
     use crate::entry::create_ticks;
     use crate::service::Service;
     use solana_runtime::bank::Bank;
+    use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::Hash;
     use solana_sdk::signature::{Keypair, KeypairUtil};
-    use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
     use std::sync::atomic::AtomicBool;
     use std::sync::mpsc::channel;
     use std::sync::{Arc, RwLock};
@@ -321,7 +321,9 @@ mod test {
 
         let exit_sender = Arc::new(AtomicBool::new(false));
         let (storage_sender, _receiver) = channel();
-        let bank = Arc::new(Bank::default());
+
+        let (genesis_block, _) = GenesisBlock::new(10_000);
+        let bank = Arc::new(Bank::new(&genesis_block));
 
         // Start up the broadcast stage
         let broadcast_service = BroadcastStage::new(
@@ -331,6 +333,7 @@ mod test {
             &exit_sender,
             &blocktree,
             storage_sender,
+            &Hash::default(),
         );
 
         MockBroadcastStage {
@@ -341,15 +344,13 @@ mod test {
     }
 
     #[test]
-    #[ignore]
-    //TODO this test won't work since broadcast stage no longer edits the ledger
     fn test_broadcast_ledger() {
+        solana_logger::setup();
         let ledger_path = get_tmp_ledger_path("test_broadcast_ledger");
 
         {
             // Create the leader scheduler
             let leader_keypair = Keypair::new();
-            let start_tick_height = 0;
-            let max_tick_height = start_tick_height + DEFAULT_TICKS_PER_SLOT;
 
             let (entry_sender, entry_receiver) = channel();
             let broadcast_service = setup_dummy_broadcast_service(
@@ -358,6 +359,9 @@ mod test {
                 entry_receiver,
             );
             let bank = broadcast_service.bank.clone();
+            let start_tick_height = bank.tick_height();
+            let max_tick_height = bank.max_tick_height();
+            let ticks_per_slot = bank.ticks_per_slot();
 
             let ticks = create_ticks(max_tick_height - start_tick_height, Hash::default());
             for (i, tick) in ticks.into_iter().enumerate() {
@@ -367,15 +371,23 @@ mod test {
             }
 
             sleep(Duration::from_millis(2000));
+
+            trace!(
+                "[broadcast_ledger] max_tick_height: {}, start_tick_height: {}, ticks_per_slot: {}",
+                max_tick_height,
+                start_tick_height,
+                ticks_per_slot,
+            );
+
             let blocktree = broadcast_service.blocktree;
             let mut blob_index = 0;
             for i in 0..max_tick_height - start_tick_height {
-                let slot = (start_tick_height + i + 1) / DEFAULT_TICKS_PER_SLOT;
+                let slot = (start_tick_height + i + 1) / ticks_per_slot;
 
                 let result = blocktree.get_data_blob(slot, blob_index).unwrap();
 
                 blob_index += 1;
-                assert!(result.is_some());
+                result.expect("expect blob presence");
             }
 
             drop(entry_sender);
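Note: BroadcastStage::new gains a trailing genesis-blockhash parameter, which run() threads down to index_blobs_with_genesis() when tagging outgoing blobs. A sketch of the updated constructor call; the leading arguments are assumed unchanged from the test above:

    // All values besides the trailing hash are assumed from the surrounding test.
    let broadcast_service = BroadcastStage::new(
        sock,
        cluster_info,
        entry_receiver,
        &exit_sender,
        &blocktree,
        storage_sender,
        &Hash::default(), // a live node passes the cluster's real genesis blockhash
    );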
@@ -94,7 +94,7 @@ mod tests {
     use crate::blocktree::Blocktree;
     use crate::chacha::chacha_cbc_encrypt_ledger;
     use crate::entry::Entry;
-    use ring::signature::Ed25519KeyPair;
+    use crate::gen_keys::GenKeys;
     use solana_sdk::hash::{hash, Hash, Hasher};
     use solana_sdk::signature::KeypairUtil;
     use solana_sdk::system_transaction;
@@ -103,19 +103,14 @@ mod tests {
     use std::io::Read;
     use std::path::Path;
     use std::sync::Arc;
-    use untrusted::Input;
 
     fn make_tiny_deterministic_test_entries(num: usize) -> Vec<Entry> {
         let zero = Hash::default();
         let one = hash(&zero.as_ref());
-        let pkcs = [
-            48, 83, 2, 1, 1, 48, 5, 6, 3, 43, 101, 112, 4, 34, 4, 32, 109, 148, 235, 20, 97, 127,
-            43, 194, 109, 43, 121, 76, 54, 38, 234, 14, 108, 68, 209, 227, 137, 191, 167, 144, 177,
-            174, 57, 182, 79, 198, 196, 93, 161, 35, 3, 33, 0, 116, 121, 255, 78, 31, 95, 179, 172,
-            30, 125, 206, 87, 88, 78, 46, 145, 25, 154, 161, 252, 3, 58, 235, 116, 39, 148, 193,
-            150, 111, 61, 20, 226,
-        ];
-        let keypair = Ed25519KeyPair::from_pkcs8(Input::from(&pkcs)).unwrap();
+        let seed = [2u8; 32];
+        let mut rnd = GenKeys::new(seed);
+        let keypair = rnd.gen_keypair();
 
         let mut id = one;
         let mut num_hashes = 0;
@@ -164,7 +159,7 @@ mod tests {
     use bs58;
     // golden needs to be updated if blob stuff changes....
     let golden = Hash::new(
-        &bs58::decode("5NBn4cBZmNZRftkjxj3um8W1eyYPzn2RgUJSA3SVbHaJ")
+        &bs58::decode("5Pz5KQyNht2nqkJhVd8F9zTFxzoDvbQSzaxQbtCPiyCo")
            .into_vec()
            .unwrap(),
    );
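Note: the deterministic test keypair is now derived from a fixed seed through the crate's GenKeys generator instead of a hard-coded PKCS#8 blob. The pattern, exactly as used above:

    use crate::gen_keys::GenKeys;

    // Any fixed seed reproduces the same keypair on every run, which keeps the
    // golden ledger hash stable without embedding key material in the source.
    let seed = [2u8; 32];
    let mut rnd = GenKeys::new(seed);
    let keypair = rnd.gen_keypair();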
@@ -51,9 +51,8 @@ use std::time::{Duration, Instant};
 
 pub const FULLNODE_PORT_RANGE: PortRange = (8000, 10_000);
 
-/// The fanout for Ledger Replication
-pub const DATA_PLANE_FANOUT: usize = 200;
-pub const NEIGHBORHOOD_SIZE: usize = DATA_PLANE_FANOUT;
+/// The Data plane "neighborhood" size
+pub const NEIGHBORHOOD_SIZE: usize = 200;
 /// Set whether node capacity should grow as layers are added
 pub const GROW_LAYER_CAPACITY: bool = false;
 
@@ -244,40 +243,50 @@ impl ClusterInfo {
     }
 
     pub fn contact_info_trace(&self) -> String {
-        let leader_id = self.gossip_leader_id;
+        let now = timestamp();
+        let mut spy_nodes = 0;
+        let my_id = self.my_data().id;
         let nodes: Vec<_> = self
-            .tvu_peers()
+            .all_peers()
             .into_iter()
-            .map(|node| {
-                let mut annotation = String::new();
-                if node.id == leader_id {
-                    annotation.push_str(" [leader]");
+            .map(|(node, last_updated)| {
+                if !ContactInfo::is_valid_address(&node.gossip) {
+                    spy_nodes += 1;
                 }
-                format!(
-                    "- gossip: {:20} | {}{}\n \
-                     tpu: {:20} |\n \
-                     rpc: {:20} |\n",
-                    node.gossip.to_string(),
-                    node.id,
-                    annotation,
-                    node.tpu.to_string(),
-                    if ContactInfo::is_valid_address(&node.rpc) {
-                        node.rpc.to_string()
+                fn addr_to_string(addr: &SocketAddr) -> String {
+                    if ContactInfo::is_valid_address(addr) {
+                        addr.to_string()
                     } else {
                         "none".to_string()
                     }
+                }
+
+                format!(
+                    "- gossip: {:20} | {:5}ms | {} {}\n \
+                     tpu: {:20} | |\n \
+                     rpc: {:20} | |\n",
+                    addr_to_string(&node.gossip),
+                    now.saturating_sub(last_updated),
+                    node.id,
+                    if node.id == my_id { "(me)" } else { "" }.to_string(),
+                    addr_to_string(&node.tpu),
+                    addr_to_string(&node.rpc),
                 )
             })
            .collect();
 
         format!(
-            " Node contact info | Node identifier\n\
-             -------------------------------+------------------\n\
+            " Node contact info | Age | Node identifier \n\
+             -------------------------------+---------+-----------------------------------\n\
             {}\
-            Nodes: {}",
+            Nodes: {}{}",
             nodes.join(""),
-            nodes.len()
+            nodes.len() - spy_nodes,
+            if spy_nodes > 0 {
+                format!("\nSpies: {}", spy_nodes)
+            } else {
+                "".to_string()
+            }
        )
    }
 
@@ -338,6 +347,20 @@ impl ClusterInfo {
         .collect()
     }
 
+    // All nodes in gossip (including spy nodes) and the last time we heard about them
+    pub(crate) fn all_peers(&self) -> Vec<(ContactInfo, u64)> {
+        self.gossip
+            .crds
+            .table
+            .values()
+            .filter_map(|x| {
+                x.value
+                    .contact_info()
+                    .map(|ci| (ci.clone(), x.local_timestamp))
+            })
+            .collect()
+    }
+
     pub fn gossip_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
@@ -408,16 +431,28 @@ impl ClusterInfo {
         peers_with_stakes
     }
 
-    fn sorted_retransmit_peers<S: std::hash::BuildHasher>(
+    /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list
+    fn sorted_peers_and_index<S: std::hash::BuildHasher>(
         &self,
         stakes: &HashMap<Pubkey, u64, S>,
-    ) -> Vec<ContactInfo> {
-        let peers = self.retransmit_peers();
-        let peers_with_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
-        peers_with_stakes
-            .iter()
-            .map(|(_, peer)| (*peer).clone())
-            .collect()
+    ) -> (usize, Vec<ContactInfo>) {
+        let mut peers = self.retransmit_peers();
+        peers.push(self.lookup(&self.id()).unwrap().clone());
+        let contacts_and_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
+        let mut index = 0;
+        let peers: Vec<_> = contacts_and_stakes
+            .into_iter()
+            .enumerate()
+            .filter_map(|(i, (_, peer))| {
+                if peer.id == self.id() {
+                    index = i;
+                    None
+                } else {
+                    Some(peer)
+                }
+            })
+            .collect();
+        (index, peers)
     }
 
     pub fn sorted_tvu_peers(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<ContactInfo> {
@@ -614,30 +649,38 @@ impl ClusterInfo {
         obj: &Arc<RwLock<Self>>,
         peers: &[ContactInfo],
         blob: &SharedBlob,
+        slot_leader_id: Option<Pubkey>,
         s: &UdpSocket,
+        forwarded: bool,
     ) -> Result<()> {
         let (me, orders): (ContactInfo, &[ContactInfo]) = {
             // copy to avoid locking during IO
             let s = obj.read().unwrap();
             (s.my_data().clone(), peers)
         };
-        let rblob = blob.read().unwrap();
+        // hold a write lock so no one modifies the blob until we send it
+        let mut wblob = blob.write().unwrap();
+        let was_forwarded = !wblob.should_forward();
+        wblob.set_forwarded(forwarded);
         trace!("retransmit orders {}", orders.len());
         let errs: Vec<_> = orders
             .par_iter()
+            .filter(|v| v.id != slot_leader_id.unwrap_or_default())
            .map(|v| {
                debug!(
                     "{}: retransmit blob {} to {} {}",
                     me.id,
-                    rblob.index(),
+                    wblob.index(),
                     v.id,
                     v.tvu,
                 );
                 //TODO profile this, may need multiple sockets for par_iter
-                assert!(rblob.meta.size <= BLOB_SIZE);
-                s.send_to(&rblob.data[..rblob.meta.size], &v.tvu)
+                assert!(wblob.meta.size <= BLOB_SIZE);
+                s.send_to(&wblob.data[..wblob.meta.size], &v.tvu)
             })
             .collect();
+        // reset the blob to its old state. This avoids us having to copy the blob to modify it
+        wblob.set_forwarded(was_forwarded);
         for e in errs {
             if let Err(e) = &e {
                 inc_new_counter_info!("cluster_info-retransmit-send_to_error", 1, 1);
@@ -648,14 +691,6 @@ impl ClusterInfo {
         Ok(())
     }
 
-    /// retransmit messages from the leader to layer 1 nodes
-    /// # Remarks
-    /// We need to avoid having obj locked while doing any io, such as the `send_to`
-    pub fn retransmit(obj: &Arc<RwLock<Self>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
-        let peers = obj.read().unwrap().retransmit_peers();
-        ClusterInfo::retransmit_to(obj, &peers, blob, s)
-    }
-
     fn send_orders(
         id: &Pubkey,
         s: &UdpSocket,
@@ -853,8 +888,6 @@ impl ClusterInfo {
             .collect();
         if pr.is_empty() {
             self.add_entrypoint(&mut pr);
-        } else {
-            self.entrypoint = None;
         }
         pr.into_iter()
             .map(|(peer, filter, gossip, self_info)| {
@@ -1071,13 +1104,9 @@ impl ClusterInfo {
             .process_pull_request(caller, filter, now);
         let len = data.len();
         trace!("get updates since response {}", len);
-        if data.is_empty() {
-            trace!("no updates me {}", self_id);
-            vec![]
-        } else {
         let rsp = Protocol::PullResponse(self_id, data);
-        // the remote side may not know his public IP:PORT, record what he looks like to us
-        // this may or may not be correct for everybody but it's better than leaving him with
+        // The remote node may not know its public IP:PORT. Record what it looks like to us.
+        // This may or may not be correct for everybody, but it's better than leaving the remote with
         // an unspecified address in our table
         if from.gossip.ip().is_unspecified() {
             inc_new_counter_info!("cluster_info-window-request-updates-unspec-gossip", 1);
@@ -1086,7 +1115,6 @@ impl ClusterInfo {
         inc_new_counter_info!("cluster_info-pull_request-rsp", len);
         to_shared_blob(rsp, from.gossip).ok().into_iter().collect()
     }
-    }
     fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
         let len = data.len();
         let now = Instant::now();
@@ -1107,7 +1135,7 @@ impl ClusterInfo {
         data: &[CrdsValue],
     ) -> Vec<SharedBlob> {
         let self_id = me.read().unwrap().gossip.id;
-        inc_new_counter_info!("cluster_info-push_message", 1);
+        inc_new_counter_info!("cluster_info-push_message", 1, 0, 1000);
         let prunes: Vec<_> = me
             .write()
             .unwrap()
@@ -1383,8 +1411,7 @@ pub fn compute_retransmit_peers<S: std::hash::BuildHasher>(
     hood_size: usize,
     grow: bool,
 ) -> (Vec<ContactInfo>, Vec<ContactInfo>) {
-    let peers = cluster_info.read().unwrap().sorted_retransmit_peers(stakes);
-    let my_id = cluster_info.read().unwrap().id();
+    let (my_index, peers) = cluster_info.read().unwrap().sorted_peers_and_index(stakes);
     //calc num_layers and num_neighborhoods using the total number of nodes
     let (num_layers, layer_indices) =
         ClusterInfo::describe_data_plane(peers.len(), fanout, hood_size, grow);
@@ -1393,16 +1420,8 @@
         /* single layer data plane */
         (peers, vec![])
     } else {
-        //find my index (my ix is the same as the first node with smaller stake)
-        let my_index = peers
-            .iter()
-            .position(|ci| *stakes.get(&ci.id).unwrap_or(&0) <= *stakes.get(&my_id).unwrap_or(&0));
         //find my layer
-        let locality = ClusterInfo::localize(
-            &layer_indices,
-            hood_size,
-            my_index.unwrap_or(peers.len() - 1),
-        );
+        let locality = ClusterInfo::localize(&layer_indices, hood_size, my_index);
         let upper_bound = cmp::min(locality.neighbor_bounds.1, peers.len());
         let neighbors = peers[locality.neighbor_bounds.0..upper_bound].to_vec();
         let mut children = Vec::new();
@@ -2200,5 +2219,5 @@ fn test_add_entrypoint() {
         .unwrap()
         .new_pull_requests(&HashMap::new());
     assert_eq!(1, pulls.len());
-    assert_eq!(cluster_info.read().unwrap().entrypoint, None);
+    assert_eq!(cluster_info.read().unwrap().entrypoint, Some(entrypoint));
 }
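Note: retransmit_to now skips the slot leader and temporarily stamps the blob's forwarded flag while it is on the wire, restoring the previous state afterwards. A sketch of a call site under the new signature; the surrounding values are assumed:

    // `cluster_info`, `peers`, `blob`, `leader_id`, and `sock` come from the caller.
    // The final `true` marks the blob as forwarded only for the duration of the send.
    ClusterInfo::retransmit_to(&cluster_info, &peers, &blob, Some(leader_id), &sock, true)?;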
@@ -58,6 +58,7 @@ impl ClusterInfoVoteListener {
                 last_ts = new_ts;
                 inc_new_counter_info!("cluster_info_vote_listener-recv_count", votes.len());
                 let msgs = packet::to_packets(&votes);
+                if !msgs.is_empty() {
                     let r = if sigverify_disabled {
                         sigverify::ed25519_verify_disabled(&msgs)
                     } else {
@@ -65,6 +66,7 @@ impl ClusterInfoVoteListener {
                     };
                     sender.send(msgs.into_iter().zip(r).collect())?;
                 }
+            }
             sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
         }
     }
@@ -17,7 +17,7 @@ use solana_sdk::system_transaction;
 use solana_sdk::timing::{
     duration_as_ms, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS, NUM_TICKS_PER_SECOND,
 };
-use std::io;
+use solana_sdk::transport::TransportError;
 use std::thread::sleep;
 use std::time::Duration;
 
@@ -202,7 +202,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
         );
         match sig {
             Err(e) => {
-                result = Err(e);
+                result = Err(TransportError::IoError(e));
                 continue;
             }
 
@@ -227,7 +227,7 @@ fn poll_all_nodes_for_signature(
     cluster_nodes: &[ContactInfo],
     sig: &Signature,
     confs: usize,
-) -> io::Result<()> {
+) -> Result<(), TransportError> {
     for validator in cluster_nodes {
         if validator.id == entry_point_info.id {
             continue;
@ -13,7 +13,6 @@ use solana_sdk::hash::{Hash, Hasher};
|
|||||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||||
use solana_sdk::transaction::Transaction;
|
use solana_sdk::transaction::Transaction;
|
||||||
use std::borrow::Borrow;
|
use std::borrow::Borrow;
|
||||||
use std::mem::size_of;
|
|
||||||
use std::sync::mpsc::{Receiver, Sender};
|
use std::sync::mpsc::{Receiver, Sender};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
@ -52,7 +51,8 @@ pub struct Entry {
|
|||||||
impl Entry {
|
impl Entry {
|
||||||
/// Creates the next Entry `num_hashes` after `start_hash`.
|
/// Creates the next Entry `num_hashes` after `start_hash`.
|
||||||
pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
|
pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
|
||||||
let entry = {
|
assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
|
||||||
|
|
||||||
if num_hashes == 0 && transactions.is_empty() {
|
if num_hashes == 0 && transactions.is_empty() {
|
||||||
Entry {
|
Entry {
|
||||||
num_hashes: 0,
|
num_hashes: 0,
|
||||||
@ -79,17 +79,6 @@ impl Entry {
|
|||||||
transactions,
|
transactions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
|
||||||
|
|
||||||
let size = Entry::serialized_size(&entry.transactions[..]);
|
|
||||||
if size > BLOB_DATA_SIZE as u64 {
|
|
||||||
panic!(
|
|
||||||
"Serialized entry size too large: {} ({} transactions):",
|
|
||||||
size,
|
|
||||||
entry.transactions.len()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
entry
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn to_shared_blob(&self) -> SharedBlob {
|
pub fn to_shared_blob(&self) -> SharedBlob {
|
||||||
@@ -101,32 +90,39 @@ impl Entry {
         Blob::from_serializable(&vec![&self])
     }

-    /// Estimate serialized_size of Entry without creating an Entry.
-    pub fn serialized_size(transactions: &[Transaction]) -> u64 {
+    /// return serialized_size of a vector with a single Entry for given TXs
+    /// since Blobs carry Vec<Entry>...
+    /// calculate the total without actually constructing the full Entry (which
+    /// would require a clone() of the transactions)
+    pub fn serialized_to_blob_size(transactions: &[Transaction]) -> u64 {
         let txs_size: u64 = transactions
             .iter()
             .map(|tx| serialized_size(tx).unwrap())
             .sum();
-        // num_hashes + hash + txs
-        (2 * size_of::<u64>() + size_of::<Hash>()) as u64 + txs_size
+        serialized_size(&vec![Entry {
+            num_hashes: 0,
+            hash: Hash::default(),
+            transactions: vec![],
+        }])
+        .unwrap()
+            + txs_size
     }

-    /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
         num_hashes: &mut u64,
         transactions: Vec<Transaction>,
     ) -> Self {
+        assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
+
         let entry = Self::new(start_hash, *num_hashes, transactions);
         *start_hash = entry.hash;
         *num_hashes = 0;
-        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }

-    /// Creates a Entry from the number of hashes `num_hashes`
-    /// since the previous transaction and that resulting `hash`.
-
     #[cfg(test)]
     pub fn new_tick(num_hashes: u64, hash: &Hash) -> Self {
         Entry {
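The new `serialized_to_blob_size` sizes against a `Vec<Entry>` wrapper because blobs carry `Vec<Entry>` on the wire, so the vector's length prefix must be counted along with the fixed `Entry` header. A self-contained sketch of the same idea with toy serde types (assumes bincode 1.x and serde derive; `ToyEntry` is hypothetical, standing in for `Entry`):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct ToyEntry {
    num_hashes: u64,
    hash: [u8; 32],
    payloads: Vec<Vec<u8>>,
}

fn main() {
    // Fixed overhead: Vec length prefix + num_hashes + hash + empty payload list.
    let base = bincode::serialized_size(&vec![ToyEntry {
        num_hashes: 0,
        hash: [0; 32],
        payloads: vec![],
    }])
    .unwrap();

    // Each payload then adds exactly its own serialized size on top.
    let payload = vec![0u8; 100];
    let payload_size = bincode::serialized_size(&payload).unwrap();

    let actual = bincode::serialized_size(&vec![ToyEntry {
        num_hashes: 0,
        hash: [0; 32],
        payloads: vec![payload],
    }])
    .unwrap();

    assert_eq!(base + payload_size, actual);
    println!("estimate matches: {} bytes", actual);
}
```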
@@ -285,28 +281,22 @@ where
     let mut num = serializables.len();
     let mut upper = serializables.len();
     let mut lower = 1; // if one won't fit, we have a lot of TODOs
-    let mut next = serializables.len(); // optimistic
     loop {
-        debug!(
-            "num {}, upper {} lower {} next {} serializables.len() {}",
-            num,
-            upper,
-            lower,
-            next,
-            serializables.len()
-        );
+        let next;
         if serialized_size(&serializables[..num]) <= max_size {
             next = (upper + num) / 2;
             lower = num;
-            debug!("num {} fits, maybe too well? trying {}", num, next);
         } else {
+            if num == 1 {
+                // if not even one will fit, bail
+                num = 0;
+                break;
+            }
             next = (lower + num) / 2;
             upper = num;
-            debug!("num {} doesn't fit! trying {}", num, next);
         }
         // same as last time
         if next == num {
-            debug!("converged on num {}", num);
             break;
         }
         num = next;
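The rewritten loop is a bisection over the number of items: `lower` tracks the largest count known to fit, `upper` the smallest known not to, and the new `num == 1` branch bails out with 0 when not even a single item fits. A standalone sketch of the same search (assumes only that the sizing function is monotonic in the number of items):

```rust
// Find how many leading items of `items` fit under `max_size`, given a
// sizing function. Mirrors the loop in this hunk, including the bail-out.
fn num_will_fit<T>(items: &[T], max_size: u64, size_fn: &dyn Fn(&[T]) -> u64) -> usize {
    if items.is_empty() {
        return 0;
    }
    let mut num = items.len();
    let mut upper = items.len();
    let mut lower = 1;
    loop {
        let next;
        if size_fn(&items[..num]) <= max_size {
            next = (upper + num) / 2; // fits: probe halfway toward upper
            lower = num;
        } else {
            if num == 1 {
                return 0; // not even one item fits
            }
            next = (lower + num) / 2; // too big: probe halfway toward lower
            upper = num;
        }
        if next == num {
            return num; // converged
        }
        num = next;
    }
}

fn main() {
    let bytes = [1u8; 10];
    let sum = |s: &[u8]| s.iter().map(|&b| b as u64).sum::<u64>();
    assert_eq!(num_will_fit(&bytes, 7, &sum), 7);
    assert_eq!(num_will_fit(&bytes, 0, &sum), 0);
    println!("ok");
}
```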
@@ -338,7 +328,7 @@ where

 /// Creates the next entries for given transactions, outputs
 /// updates start_hash to hash of last Entry, sets num_hashes to 0
-pub fn next_entries_mut(
+fn next_entries_mut(
     start_hash: &mut Hash,
     num_hashes: &mut u64,
     transactions: Vec<Transaction>,
@@ -346,7 +336,7 @@ pub fn next_entries_mut(
     split_serializable_chunks(
         &transactions[..],
         BLOB_DATA_SIZE as u64,
-        &Entry::serialized_size,
+        &Entry::serialized_to_blob_size,
         &mut |txs: &[Transaction]| Entry::new_mut(start_hash, num_hashes, txs.to_vec()),
     )
 }
@@ -425,7 +415,6 @@ pub fn make_consecutive_blobs(
         let mut blob = blob.write().unwrap();
         blob.set_index(index);
         blob.set_id(id);
-        blob.forward(true);
         blob.meta.set_addr(addr);
         index += 1;
     }
@@ -449,6 +438,8 @@ mod tests {
     use crate::entry::Entry;
     use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
     use solana_sdk::hash::hash;
+    use solana_sdk::instruction::Instruction;
+    use solana_sdk::pubkey::Pubkey;
     use solana_sdk::signature::{Keypair, KeypairUtil};
     use solana_sdk::system_transaction;
     use solana_vote_api::vote_instruction;
@@ -467,7 +458,7 @@ mod tests {
         Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
     }

-    fn create_sample_signature(keypair: &Keypair, hash: Hash) -> Transaction {
+    fn create_sample_apply_signature(keypair: &Keypair, hash: Hash) -> Transaction {
         let pubkey = keypair.pubkey();
         let ix = budget_instruction::apply_signature(&pubkey, &pubkey, &pubkey);
         Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
@@ -513,7 +504,7 @@ mod tests {
         // First, verify entries
         let keypair = Keypair::new();
         let tx0 = create_sample_timestamp(&keypair, zero);
-        let tx1 = create_sample_signature(&keypair, zero);
+        let tx1 = create_sample_apply_signature(&keypair, zero);
         let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
         assert!(e0.verify(&zero));

@@ -551,14 +542,14 @@ mod tests {
     }

     #[test]
-    fn test_serialized_size() {
+    fn test_serialized_to_blob_size() {
         let zero = Hash::default();
         let keypair = Keypair::new();
         let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero, 0);
         let entry = next_entry(&zero, 1, vec![tx.clone()]);
         assert_eq!(
-            Entry::serialized_size(&[tx]),
-            serialized_size(&entry).unwrap()
+            Entry::serialized_to_blob_size(&[tx]),
+            serialized_size(&vec![entry]).unwrap() // blobs are Vec<Entry>
         );
     }

@@ -577,36 +568,56 @@ mod tests {
         assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }

-    fn make_test_entries() -> Vec<Entry> {
-        let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        let keypair = Keypair::new();
-        let vote_account = Keypair::new();
-        let tx0 = create_sample_vote(&vote_account, one);
-        let tx1 = create_sample_timestamp(&keypair, one);
-        //
-        // TODO: this magic number and the mix of transaction types
-        //       is designed to fill up a Blob more or less exactly,
-        //       to get near enough the threshold that
-        //       deserialization falls over if it uses the wrong size()
-        //       parameter to index into blob.data()
-        //
-        // magic numbers -----------------+
-        //                                |
-        //                                V
-        let mut transactions = vec![tx0; 362];
-        transactions.extend(vec![tx1; 100]);
-        next_entries(&zero, 0, transactions)
+    fn blob_sized_entries(num_entries: usize) -> Vec<Entry> {
+        // rough guess
+        let mut magic_len = BLOB_DATA_SIZE
+            - serialized_size(&vec![Entry {
+                num_hashes: 0,
+                hash: Hash::default(),
+                transactions: vec![],
+            }])
+            .unwrap() as usize;
+
+        loop {
+            let entries = vec![Entry {
+                num_hashes: 0,
+                hash: Hash::default(),
+                transactions: vec![Transaction::new_unsigned_instructions(vec![
+                    Instruction::new(Pubkey::default(), &vec![0u8; magic_len as usize], vec![]),
+                ])],
+            }];
+            let size = serialized_size(&entries).unwrap() as usize;
+            if size < BLOB_DATA_SIZE {
+                magic_len += BLOB_DATA_SIZE - size;
+            } else if size > BLOB_DATA_SIZE {
+                magic_len -= size - BLOB_DATA_SIZE;
+            } else {
+                break;
+            }
+        }
+        vec![
+            Entry {
+                num_hashes: 0,
+                hash: Hash::default(),
+                transactions: vec![Transaction::new_unsigned_instructions(vec![
+                    Instruction::new(Pubkey::default(), &vec![0u8; magic_len], vec![]),
+                ])],
+            };
+            num_entries
+        ]
     }

     #[test]
     fn test_entries_to_blobs() {
         solana_logger::setup();
-        let entries = make_test_entries();
+        let entries = blob_sized_entries(10);
+
+        let blobs = entries.to_blobs();
+        for blob in &blobs {
+            assert_eq!(blob.size(), BLOB_DATA_SIZE);
+        }

-        let blob_q = entries.to_blobs();
-        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
+        assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
     }

     #[test]
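`blob_sized_entries` self-corrects `magic_len` by the exact byte difference, which works because bincode's encoding of a byte vector is an 8-byte length prefix plus the bytes themselves, making the total size affine in the payload length. A one-line check of that property (assumes bincode 1.x):

```rust
fn main() {
    let size = |n: usize| bincode::serialized_size(&vec![0u8; n]).unwrap();
    // 8-byte length prefix + payload bytes: growing the payload by one byte
    // grows the serialized form by exactly one byte, so one correction
    // normally lands exactly on the target size.
    assert_eq!(size(100) + 1, size(101));
    assert_eq!(size(0), 8);
    println!("serialized size is affine in payload length");
}
```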
@@ -689,7 +700,7 @@ mod tests {
     }

     #[test]
-    fn test_num_fit() {
+    fn test_num_will_fit() {
         let serializables_vec: Vec<u8> = (0..10).map(|_| 1).collect();
         let serializables = &serializables_vec[..];
         let sum = |i: &[u8]| (0..i.len()).into_iter().sum::<usize>() as u64;
@@ -716,5 +727,11 @@ mod tests {
         // sum[0..9] <= 46, but contains all items
         let result = num_will_fit(serializables, 46, &sum);
         assert_eq!(result, 10);
+
+        // too small to fit a single u64
+        let result = num_will_fit(&[0u64], (std::mem::size_of::<u64>() - 1) as u64, &|i| {
+            (std::mem::size_of::<u64>() * i.len()) as u64
+        });
+        assert_eq!(result, 0);
     }
 }
@@ -1,278 +1,218 @@
-// Support erasure coding
-use crate::packet::{Blob, SharedBlob};
-use crate::result::{Error, Result};
+//! # Erasure Coding and Recovery
+//!
+//! Blobs are logically grouped into erasure sets or blocks. Each set contains 16 sequential data
+//! blobs and 4 sequential coding blobs.
+//!
+//! Coding blobs in each set starting from `start_idx`:
+//!   For each erasure set:
+//!     generate `NUM_CODING` coding_blobs.
+//!     index the coding blobs from `start_idx` to `start_idx + NUM_CODING - 1`.
+//!
+//!  model of an erasure set, with top row being data blobs and second being coding
+//!  |<======================= NUM_DATA ==============================>|
+//!  |<==== NUM_CODING ===>|
+//!  +---+ +---+ +---+ +---+ +---+         +---+ +---+ +---+ +---+ +---+
+//!  | D | | D | | D | | D | | D |         | D | | D | | D | | D | | D |
+//!  +---+ +---+ +---+ +---+ +---+ . . .   +---+ +---+ +---+ +---+ +---+
+//!  | C | | C | | C | | C | |   |         |   | |   | |   | |   | |   |
+//!  +---+ +---+ +---+ +---+ +---+         +---+ +---+ +---+ +---+ +---+
+//!
+//!  blob structure for coding blobs
+//!
+//!   + ------- meta is set and used by transport, meta.size is actual length
+//!   |           of data in the byte array blob.data
+//!   |
+//!   |          + -- data is stuff shipped over the wire, and has an included
+//!   |          |        header
+//!   V          V
+//!  +----------+------------------------------------------------------------+
+//!  | meta     |  data                                                      |
+//!  |+---+--   |+---+---+---+---+------------------------------------------+|
+//!  || s | .   || i |   | f | s |                                           ||
+//!  || i | .   || n | i | l | i |                                           ||
+//!  || z | .   || d | d | a | z |     blob.data(), or blob.data_mut()       ||
+//!  || e |     || e |   | g | e |                                           ||
+//!  |+---+--   || x |   | s |   |                                           ||
+//!  |          |+---+---+---+---+------------------------------------------+|
+//!  +----------+------------------------------------------------------------+
+//!             |                |<=== coding blob part for "coding"  =======>|
+//!             |                                                            |
+//!             |<============== data blob part for "coding"  ==============>|
+//!
+//!
+use crate::packet::{Blob, SharedBlob, BLOB_HEADER_SIZE};
 use std::cmp;
+use std::convert::AsMut;
 use std::sync::{Arc, RwLock};

+use reed_solomon_erasure::ReedSolomon;
+
 //TODO(sakridge) pick these values
-pub const NUM_DATA: usize = 16; // number of data blobs
-pub const NUM_CODING: usize = 4; // number of coding blobs, also the maximum number that can go missing
-pub const ERASURE_SET_SIZE: usize = NUM_DATA + NUM_CODING; // total number of blobs in an erasure set, includes data and coding blobs
+/// Number of data blobs
+pub const NUM_DATA: usize = 16;
+/// Number of coding blobs; also the maximum number that can go missing.
+pub const NUM_CODING: usize = 4;
+/// Total number of blobs in an erasure set; includes data and coding blobs
+pub const ERASURE_SET_SIZE: usize = NUM_DATA + NUM_CODING;

-macro_rules! align {
-    ($x:expr, $align:expr) => {
-        $x + ($align - 1) & !($align - 1)
-    };
-}
+type Result<T> = std::result::Result<T, reed_solomon_erasure::Error>;

-#[derive(Debug, PartialEq, Eq)]
-pub enum ErasureError {
-    NotEnoughBlocksToDecode,
-    DecodeError,
-    EncodeError,
-    InvalidBlockSize,
-    InvalidBlobData,
-    CorruptCoding,
-}
+/// Represents an erasure "session" with a particular configuration and number of data and coding
+/// blobs
+#[derive(Debug, Clone)]
+pub struct Session(ReedSolomon);

-// k = number of data devices
-// m = number of coding devices
-// w = word size
-
-extern "C" {
-    fn jerasure_matrix_encode(
-        k: i32,
-        m: i32,
-        w: i32,
-        matrix: *const i32,
-        data_ptrs: *const *const u8,
-        coding_ptrs: *const *mut u8,
-        size: i32,
-    );
-    fn jerasure_matrix_decode(
-        k: i32,
-        m: i32,
-        w: i32,
-        matrix: *const i32,
-        row_k_ones: i32,
-        erasures: *const i32,
-        data_ptrs: *const *mut u8,
-        coding_ptrs: *const *mut u8,
-        size: i32,
-    ) -> i32;
-    fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
-    fn galois_init_default_field(w: i32) -> i32;
-}
-
-use std::sync::Once;
-static ERASURE_W_ONCE: Once = Once::new();
-
-// jerasure word size of 32
-fn w() -> i32 {
-    let w = 32;
-    unsafe {
-        ERASURE_W_ONCE.call_once(|| {
-            galois_init_default_field(w);
-            ()
-        });
-    }
-    w
-}
-
-// jerasure checks that arrays are a multiple of w()/8 in length
-fn wb() -> usize {
-    (w() / 8) as usize
-}
-
-fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
-    let mut matrix = vec![0; (m * k) as usize];
-    for i in 0..m {
-        for j in 0..k {
-            unsafe {
-                matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
-            }
-        }
-    }
-    matrix
-}
-
-// Generate coding blocks into coding
-// There are some alignment restrictions, blocks should be aligned by 16 bytes
-// which means their size should be >= 16 bytes
-fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
-    if data.is_empty() {
-        return Ok(());
-    }
-    let k = data.len() as i32;
-    let m = coding.len() as i32;
-    let block_len = data[0].len() as i32;
-    let matrix: Vec<i32> = get_matrix(m, k, w());
-    let mut data_arg = Vec::with_capacity(data.len());
-    for block in data {
-        if block_len != block.len() as i32 {
-            error!(
-                "data block size incorrect {} expected {}",
-                block.len(),
-                block_len
-            );
-            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
-        }
-        data_arg.push(block.as_ptr());
-    }
-    let mut coding_arg = Vec::with_capacity(coding.len());
-    for block in coding {
-        if block_len != block.len() as i32 {
-            error!(
-                "coding block size incorrect {} expected {}",
-                block.len(),
-                block_len
-            );
-            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
-        }
-        coding_arg.push(block.as_mut_ptr());
-    }
-
-    unsafe {
-        jerasure_matrix_encode(
-            k,
-            m,
-            w(),
-            matrix.as_ptr(),
-            data_arg.as_ptr(),
-            coding_arg.as_ptr(),
-            block_len,
-        );
-    }
-    Ok(())
-}
-
-// Recover data + coding blocks into data blocks
-// data: array of blocks to recover into
-// coding: arry of coding blocks
-// erasures: list of indices in data where blocks should be recovered
-pub fn decode_blocks(
-    data: &mut [&mut [u8]],
-    coding: &mut [&mut [u8]],
-    erasures: &[i32],
-) -> Result<()> {
-    if data.is_empty() {
-        return Ok(());
-    }
-    let block_len = data[0].len();
-    let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, w());
-
-    // generate coding pointers, blocks should be the same size
-    let mut coding_arg: Vec<*mut u8> = Vec::new();
-    for x in coding.iter_mut() {
-        if x.len() != block_len {
-            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
-        }
-        coding_arg.push(x.as_mut_ptr());
-    }
-
-    // generate data pointers, blocks should be the same size
-    let mut data_arg: Vec<*mut u8> = Vec::new();
-    for x in data.iter_mut() {
-        if x.len() != block_len {
-            return Err(Error::ErasureError(ErasureError::InvalidBlockSize));
-        }
-        data_arg.push(x.as_mut_ptr());
-    }
-    let ret = unsafe {
-        jerasure_matrix_decode(
-            data.len() as i32,
-            coding.len() as i32,
-            w(),
-            matrix.as_ptr(),
-            0,
-            erasures.as_ptr(),
-            data_arg.as_ptr(),
-            coding_arg.as_ptr(),
-            data[0].len() as i32,
-        )
-    };
-    trace!("jerasure_matrix_decode ret: {}", ret);
-    for x in data[erasures[0] as usize][0..8].iter() {
-        trace!("{} ", x)
-    }
-    trace!("");
-    if ret < 0 {
-        return Err(Error::ErasureError(ErasureError::DecodeError));
-    }
-    Ok(())
-}
-
-// Generate coding blocks in window starting from start_idx,
-// for num_blobs.. For each block place the coding blobs
-// at the start of the block like so:
-//
-//  model of an erasure set, with top row being data blobs and second being coding
-//  |<======================= NUM_DATA ==============================>|
-//  |<==== NUM_CODING ===>|
-//  +---+ +---+ +---+ +---+ +---+         +---+ +---+ +---+ +---+ +---+
-//  | D | | D | | D | | D | | D |         | D | | D | | D | | D | | D |
-//  +---+ +---+ +---+ +---+ +---+ . . .   +---+ +---+ +---+ +---+ +---+
-//  | C | | C | | C | | C | |   |         |   | |   | |   | |   | |   |
-//  +---+ +---+ +---+ +---+ +---+         +---+ +---+ +---+ +---+ +---+
-//
-//  blob structure for coding, recover
-//
-//   + ------- meta is set and used by transport, meta.size is actual length
-//   |           of data in the byte array blob.data
-//   |
-//   |          + -- data is stuff shipped over the wire, and has an included
-//   |          |        header
-//   V          V
-//  +----------+------------------------------------------------------------+
-//  | meta     |  data                                                      |
-//  |+---+--   |+---+---+---+---+------------------------------------------+|
-//  || s | .   || i |   | f | s |                                           ||
-//  || i | .   || n | i | l | i |                                           ||
-//  || z | .   || d | d | a | z |     blob.data(), or blob.data_mut()       ||
-//  || e |     || e |   | g | e |                                           ||
-//  |+---+--   || x |   | s |   |                                           ||
-//  |          |+---+---+---+---+------------------------------------------+|
-//  +----------+------------------------------------------------------------+
-//             |                |<=== coding blob part for "coding"  =======>|
-//             |                                                            |
-//             |<============== data blob part for "coding"  ==============>|
-//
-//
-//
+/// Generates coding blobs on demand given data blobs
+#[derive(Debug, Clone)]
 pub struct CodingGenerator {
-    leftover: Vec<SharedBlob>, // SharedBlobs that couldn't be used in last call to next()
+    /// SharedBlobs that couldn't be used in last call to next()
+    leftover: Vec<SharedBlob>,
+    session: Arc<Session>,
 }

-impl Default for CodingGenerator {
-    fn default() -> Self {
-        CodingGenerator {
-            leftover: Vec::with_capacity(NUM_DATA),
+impl Session {
+    pub fn new(data_count: usize, coding_count: usize) -> Result<Session> {
+        let rs = ReedSolomon::new(data_count, coding_count)?;
+
+        Ok(Session(rs))
     }

+    /// Create coding blocks by overwriting `parity`
+    pub fn encode(&self, data: &[&[u8]], parity: &mut [&mut [u8]]) -> Result<()> {
+        self.0.encode_sep(data, parity)?;
+
+        Ok(())
+    }
+
+    /// Recover data + coding blocks into data blocks
+    /// # Arguments
+    /// * `data` - array of data blocks to recover into
+    /// * `coding` - array of coding blocks
+    /// * `erasures` - list of indices in data where blocks should be recovered
+    pub fn decode_blocks(&self, blocks: &mut [&mut [u8]], present: &[bool]) -> Result<()> {
+        self.0.reconstruct(blocks, present)?;
+
+        Ok(())
+    }
+
+    /// Returns `(number_of_data_blobs, number_of_coding_blobs)`
+    pub fn dimensions(&self) -> (usize, usize) {
+        (self.0.data_shard_count(), self.0.parity_shard_count())
+    }
+
+    /// Reconstruct any missing blobs in this erasure set if possible
+    /// Re-indexes any coding blobs that have been reconstructed and fixes up size in metadata
+    /// Assumes that the user has sliced into the blobs appropriately already; else recovery will
+    /// return an error or garbage data
+    pub fn reconstruct_blobs<B>(
+        &self,
+        blobs: &mut [B],
+        present: &[bool],
+        size: usize,
+        block_start_idx: u64,
+        slot: u64,
+    ) -> Result<(Vec<Blob>, Vec<Blob>)>
+    where
+        B: AsMut<[u8]>,
+    {
+        let mut blocks: Vec<&mut [u8]> = blobs.iter_mut().map(AsMut::as_mut).collect();
+
+        trace!("[reconstruct_blobs] present: {:?}, size: {}", present, size,);
+
+        // Decode the blocks
+        self.decode_blocks(blocks.as_mut_slice(), &present)?;
+
+        let mut recovered_data = vec![];
+        let mut recovered_coding = vec![];
+
+        let erasures = present
+            .iter()
+            .enumerate()
+            .filter_map(|(i, present)| if *present { None } else { Some(i) });
+
+        // Create the missing blobs from the reconstructed data
+        for n in erasures {
+            let data_size;
+            let idx;
+            let first_byte;
+
+            if n < NUM_DATA {
+                let mut blob = Blob::new(&blocks[n]);
+
+                data_size = blob.data_size() as usize - BLOB_HEADER_SIZE;
+                idx = n as u64 + block_start_idx;
+                first_byte = blob.data[0];
+
+                blob.set_size(data_size);
+                recovered_data.push(blob);
+            } else {
+                let mut blob = Blob::default();
+                blob.data_mut()[..size].copy_from_slice(&blocks[n]);
+                data_size = size;
+                idx = (n as u64 + block_start_idx) - NUM_DATA as u64;
+                first_byte = blob.data[0];
+
+                blob.set_slot(slot);
+                blob.set_index(idx);
+                blob.set_size(data_size);
+                recovered_coding.push(blob);
+            }
+
+            trace!(
+                "[reconstruct_blobs] erasures[{}] ({}) data_size: {} data[0]: {}",
+                n,
+                idx,
+                data_size,
+                first_byte
+            );
+        }
+
+        Ok((recovered_data, recovered_coding))
     }
 }

 impl CodingGenerator {
-    pub fn new() -> Self {
-        Self::default()
+    pub fn new(session: Arc<Session>) -> Self {
+        CodingGenerator {
+            leftover: Vec::with_capacity(session.0.data_shard_count()),
+            session,
+        }
     }

-    // must be called with consecutive data blobs from previous invocation
-    // blobs from a new slot not start halfway through next_data
+    /// Yields next set of coding blobs, if any.
+    /// Must be called with consecutive data blobs within a slot.
+    ///
+    /// Passing in a slice with the first blob having a new slot will cause internal state to
+    /// reset, so the above concern does not apply to slot boundaries, only indexes within a slot
+    /// must be consecutive.
+    ///
+    /// If used improperly, it may return garbage coding blobs, but will not give an
+    /// error.
     pub fn next(&mut self, next_data: &[SharedBlob]) -> Vec<SharedBlob> {
+        let (num_data, num_coding) = self.session.dimensions();
         let mut next_coding =
-            Vec::with_capacity((self.leftover.len() + next_data.len()) / NUM_DATA * NUM_CODING);
+            Vec::with_capacity((self.leftover.len() + next_data.len()) / num_data * num_coding);

-        if self.leftover.len() > 0 && next_data.len() > 0 {
-            if self.leftover[0].read().unwrap().slot() != next_data[0].read().unwrap().slot() {
-                self.leftover.clear(); // reset on slot boundaries
-            }
+        if !self.leftover.is_empty()
+            && !next_data.is_empty()
+            && self.leftover[0].read().unwrap().slot() != next_data[0].read().unwrap().slot()
+        {
+            self.leftover.clear();
         }

         let next_data: Vec<_> = self.leftover.iter().chain(next_data).cloned().collect();

-        for data_blobs in next_data.chunks(NUM_DATA) {
-            if data_blobs.len() < NUM_DATA {
+        for data_blobs in next_data.chunks(num_data) {
+            if data_blobs.len() < num_data {
                 self.leftover = data_blobs.to_vec();
                 break;
             }
             self.leftover.clear();

-            // find max_data_size for the chunk, round length up to a multiple of wb()
-            let max_data_size = align!(
-                data_blobs
-                    .iter()
-                    .fold(0, |max, blob| cmp::max(blob.read().unwrap().meta.size, max)),
-                wb()
-            );
+            // find max_data_size for the erasure set
+            let max_data_size = data_blobs
+                .iter()
+                .fold(0, |max, blob| cmp::max(blob.read().unwrap().meta.size, max));

             let data_locks: Vec<_> = data_blobs.iter().map(|b| b.read().unwrap()).collect();
             let data_ptrs: Vec<_> = data_locks
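Taken together, the new `Session` hides the Reed-Solomon machinery behind three calls: `encode` writes parity into caller-provided buffers, `decode_blocks` rebuilds missing blocks in place given a presence map, and `dimensions` reports the data/parity split. A minimal usage sketch against the API introduced in this hunk, mirroring `test_coding` further down (four data and two coding shards; error handling via `?`):

```rust
fn demo() -> Result<()> {
    // Session over 4 data shards and 2 coding shards.
    let session = Session::new(4, 2)?;

    let data: Vec<Vec<u8>> = (0..4u8).map(|i| vec![i; 16]).collect();
    let mut parity = vec![vec![0u8; 16]; 2];
    {
        let data_refs: Vec<&[u8]> = data.iter().map(Vec::as_slice).collect();
        let mut parity_refs: Vec<&mut [u8]> =
            parity.iter_mut().map(Vec::as_mut_slice).collect();
        session.encode(&data_refs, parity_refs.as_mut_slice())?;
    }

    // "Lose" data shard 1, then hand all six blocks plus a presence map to
    // decode_blocks, which rewrites the missing block in place.
    let mut blocks: Vec<Vec<u8>> = data.clone();
    blocks.extend(parity);
    blocks[1] = vec![0u8; 16];
    let mut present = [true; 6];
    present[1] = false;
    {
        let mut block_refs: Vec<&mut [u8]> =
            blocks.iter_mut().map(Vec::as_mut_slice).collect();
        session.decode_blocks(block_refs.as_mut_slice(), &present)?;
    }
    assert_eq!(blocks[1], vec![1u8; 16]);

    Ok(())
}
```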
@@ -280,19 +220,17 @@ impl CodingGenerator {
                 .map(|l| &l.data[..max_data_size])
                 .collect();

-            let mut coding_blobs = Vec::with_capacity(NUM_CODING);
+            let mut coding_blobs = Vec::with_capacity(num_coding);

-            for data_blob in &data_locks[..NUM_CODING] {
+            for data_blob in &data_locks[..num_coding] {
                 let index = data_blob.index();
                 let slot = data_blob.slot();
                 let id = data_blob.id();
-                let should_forward = data_blob.should_forward();

                 let mut coding_blob = Blob::default();
                 coding_blob.set_index(index);
                 coding_blob.set_slot(slot);
                 coding_blob.set_id(&id);
-                coding_blob.forward(should_forward);
                 coding_blob.set_size(max_data_size);
                 coding_blob.set_coding();

@@ -305,7 +243,7 @@ impl CodingGenerator {
                     .map(|blob| &mut blob.data_mut()[..max_data_size])
                     .collect();

-                generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)
+                self.session.encode(&data_ptrs, coding_ptrs.as_mut_slice())
             }
             .is_ok()
             {
@@ -320,12 +258,27 @@ impl CodingGenerator {
         }
     }

+impl Default for Session {
+    fn default() -> Session {
+        Session::new(NUM_DATA, NUM_CODING).unwrap()
+    }
+}
+
+impl Default for CodingGenerator {
+    fn default() -> Self {
+        let session = Session::default();
+        CodingGenerator {
+            leftover: Vec::with_capacity(session.0.data_shard_count()),
+            session: Arc::new(session),
+        }
+    }
+}
+
 #[cfg(test)]
 pub mod test {
     use super::*;
     use crate::blocktree::get_tmp_ledger_path;
     use crate::blocktree::Blocktree;
-    use crate::entry::{make_tiny_test_entries, EntrySlice};
     use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE};
     use solana_sdk::pubkey::Pubkey;
     use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -368,63 +321,63 @@ pub mod test {

     #[test]
     fn test_coding() {
-        let zero_vec = vec![0; 16];
-        let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
+        const N_DATA: usize = 4;
+        const N_CODING: usize = 2;
+
+        let session = Session::new(N_DATA, N_CODING).unwrap();
+
+        let mut vs: Vec<Vec<u8>> = (0..N_DATA as u8).map(|i| (i..(16 + i)).collect()).collect();
         let v_orig: Vec<u8> = vs[0].clone();

-        let m = 2;
-        let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();
+        let mut coding_blocks: Vec<_> = (0..N_CODING).map(|_| vec![0u8; 16]).collect();

-        {
-            let mut coding_blocks_slices: Vec<_> =
-                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
-            let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
+        let mut coding_blocks_slices: Vec<_> =
+            coding_blocks.iter_mut().map(Vec::as_mut_slice).collect();
+        let v_slices: Vec<_> = vs.iter().map(Vec::as_slice).collect();

-            assert!(generate_coding_blocks(
-                coding_blocks_slices.as_mut_slice(),
-                v_slices.as_slice(),
-            )
-            .is_ok());
-        }
+        session
+            .encode(v_slices.as_slice(), coding_blocks_slices.as_mut_slice())
+            .expect("encoding must succeed");
+
         trace!("test_coding: coding blocks:");
         for b in &coding_blocks {
             trace!("test_coding: {:?}", b);
         }
-        let erasure: i32 = 1;
-        let erasures = vec![erasure, -1];
+        let erasure: usize = 1;
+        let present = &mut [true; N_DATA + N_CODING];
+        present[erasure] = false;
+        let erased = vs[erasure].clone();
+
         // clear an entry
-        vs[erasure as usize].copy_from_slice(zero_vec.as_slice());
+        vs[erasure as usize].copy_from_slice(&[0; 16]);

-        {
-            let mut coding_blocks_slices: Vec<_> =
-                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
-            let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
+        let mut blocks: Vec<_> = vs
+            .iter_mut()
+            .chain(coding_blocks.iter_mut())
+            .map(Vec::as_mut_slice)
+            .collect();

-            assert!(decode_blocks(
-                v_slices.as_mut_slice(),
-                coding_blocks_slices.as_mut_slice(),
-                erasures.as_slice(),
-            )
-            .is_ok());
-        }
+        session
+            .decode_blocks(blocks.as_mut_slice(), present)
+            .expect("decoding must succeed");

         trace!("test_coding: vs:");
         for v in &vs {
             trace!("test_coding: {:?}", v);
         }
         assert_eq!(v_orig, vs[0]);
+        assert_eq!(erased, vs[erasure]);
     }

     fn test_toss_and_recover(
+        session: &Session,
         data_blobs: &[SharedBlob],
         coding_blobs: &[SharedBlob],
         block_start_idx: usize,
     ) {
         let size = coding_blobs[0].read().unwrap().size();

-        // toss one data and one coding
-        let erasures: Vec<i32> = vec![0, NUM_DATA as i32, -1];
-
         let mut blobs: Vec<SharedBlob> = Vec::with_capacity(ERASURE_SET_SIZE);

         blobs.push(SharedBlob::default()); // empty data, erasure at zero
|
|||||||
// skip first blob
|
// skip first blob
|
||||||
blobs.push(blob.clone());
|
blobs.push(blob.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
blobs.push(SharedBlob::default()); // empty coding, erasure at zero
|
blobs.push(SharedBlob::default()); // empty coding, erasure at zero
|
||||||
for blob in &coding_blobs[1..NUM_CODING] {
|
for blob in &coding_blobs[1..NUM_CODING] {
|
||||||
blobs.push(blob.clone());
|
blobs.push(blob.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
let corrupt = decode_blobs(&blobs, &erasures, size, block_start_idx as u64, 0).unwrap();
|
// toss one data and one coding
|
||||||
|
let mut present = vec![true; blobs.len()];
|
||||||
|
present[0] = false;
|
||||||
|
present[NUM_DATA] = false;
|
||||||
|
|
||||||
assert!(!corrupt);
|
let (recovered_data, recovered_coding) = session
|
||||||
|
.reconstruct_shared_blobs(&mut blobs, &present, size, block_start_idx as u64, 0)
|
||||||
|
.expect("reconstruction must succeed");
|
||||||
|
|
||||||
|
assert_eq!(recovered_data.len(), 1);
|
||||||
|
assert_eq!(recovered_coding.len(), 1);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blobs[1].read().unwrap().meta,
|
blobs[1].read().unwrap().meta,
|
||||||
@ -450,15 +412,15 @@ pub mod test {
|
|||||||
data_blobs[block_start_idx + 1].read().unwrap().data()
|
data_blobs[block_start_idx + 1].read().unwrap().data()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blobs[0].read().unwrap().meta,
|
recovered_data[0].meta,
|
||||||
data_blobs[block_start_idx].read().unwrap().meta
|
data_blobs[block_start_idx].read().unwrap().meta
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blobs[0].read().unwrap().data(),
|
recovered_data[0].data(),
|
||||||
data_blobs[block_start_idx].read().unwrap().data()
|
data_blobs[block_start_idx].read().unwrap().data()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
blobs[NUM_DATA].read().unwrap().data(),
|
recovered_coding[0].data(),
|
||||||
coding_blobs[0].read().unwrap().data()
|
coding_blobs[0].read().unwrap().data()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@ -468,11 +430,11 @@ pub mod test {
|
|||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
|
|
||||||
// trivial case
|
// trivial case
|
||||||
let mut coding_generator = CodingGenerator::new();
|
let mut coding_generator = CodingGenerator::default();
|
||||||
let blobs = Vec::new();
|
let blobs = Vec::new();
|
||||||
for _ in 0..NUM_DATA * 2 {
|
for _ in 0..NUM_DATA * 2 {
|
||||||
let coding = coding_generator.next(&blobs);
|
let coding = coding_generator.next(&blobs);
|
||||||
assert_eq!(coding.len(), 0);
|
assert!(coding.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
// test coding by iterating one blob at a time
|
// test coding by iterating one blob at a time
|
||||||
@ -480,6 +442,7 @@ pub mod test {
|
|||||||
|
|
||||||
for (i, blob) in data_blobs.iter().cloned().enumerate() {
|
for (i, blob) in data_blobs.iter().cloned().enumerate() {
|
||||||
let coding_blobs = coding_generator.next(&[blob]);
|
let coding_blobs = coding_generator.next(&[blob]);
|
||||||
|
|
||||||
if !coding_blobs.is_empty() {
|
if !coding_blobs.is_empty() {
|
||||||
assert_eq!(i % NUM_DATA, NUM_DATA - 1);
|
assert_eq!(i % NUM_DATA, NUM_DATA - 1);
|
||||||
assert_eq!(coding_blobs.len(), NUM_CODING);
|
assert_eq!(coding_blobs.len(), NUM_CODING);
|
||||||
@ -490,7 +453,12 @@ pub mod test {
|
|||||||
((i / NUM_DATA) * NUM_DATA + j) as u64
|
((i / NUM_DATA) * NUM_DATA + j) as u64
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
test_toss_and_recover(&data_blobs, &coding_blobs, i - (i % NUM_DATA));
|
test_toss_and_recover(
|
||||||
|
&coding_generator.session,
|
||||||
|
&data_blobs,
|
||||||
|
&coding_blobs,
|
||||||
|
i - (i % NUM_DATA),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -499,7 +467,7 @@ pub mod test {
|
|||||||
fn test_erasure_generate_coding_reset_on_new_slot() {
|
fn test_erasure_generate_coding_reset_on_new_slot() {
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
|
|
||||||
let mut coding_generator = CodingGenerator::new();
|
let mut coding_generator = CodingGenerator::default();
|
||||||
|
|
||||||
// test coding by iterating one blob at a time
|
// test coding by iterating one blob at a time
|
||||||
let data_blobs = generate_test_blobs(0, NUM_DATA * 2);
|
let data_blobs = generate_test_blobs(0, NUM_DATA * 2);
|
||||||
@ -509,13 +477,18 @@ pub mod test {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let coding_blobs = coding_generator.next(&data_blobs[0..NUM_DATA - 1]);
|
let coding_blobs = coding_generator.next(&data_blobs[0..NUM_DATA - 1]);
|
||||||
assert_eq!(coding_blobs.len(), 0);
|
assert!(coding_blobs.is_empty());
|
||||||
|
|
||||||
let coding_blobs = coding_generator.next(&data_blobs[NUM_DATA..]);
|
let coding_blobs = coding_generator.next(&data_blobs[NUM_DATA..]);
|
||||||
|
|
||||||
assert_eq!(coding_blobs.len(), NUM_CODING);
|
assert_eq!(coding_blobs.len(), NUM_CODING);
|
||||||
|
|
||||||
test_toss_and_recover(&data_blobs, &coding_blobs, NUM_DATA);
|
test_toss_and_recover(
|
||||||
|
&coding_generator.session,
|
||||||
|
&data_blobs,
|
||||||
|
&coding_blobs,
|
||||||
|
NUM_DATA,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -571,24 +544,17 @@ pub mod test {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// This test is ignored because if successful, it never stops running. It is useful for
|
|
||||||
/// dicovering an initialization race-condition in the erasure FFI bindings. If this bug
|
|
||||||
/// re-emerges, running with `Z_THREADS = N` where `N > 1` should crash fairly rapidly.
|
|
||||||
#[ignore]
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_recovery_with_model() {
|
fn test_recovery_with_model() {
|
||||||
use std::env;
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::thread;
|
use std::thread;
|
||||||
|
|
||||||
const MAX_ERASURE_SETS: u64 = 16;
|
const MAX_ERASURE_SETS: u64 = 16;
|
||||||
solana_logger::setup();
|
const N_THREADS: usize = 2;
|
||||||
let n_threads: usize = env::var("Z_THREADS")
|
const N_SLOTS: u64 = 10;
|
||||||
.unwrap_or("1".to_string())
|
|
||||||
.parse()
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let specs = (0..).map(|slot| {
|
solana_logger::setup();
|
||||||
|
|
||||||
|
let specs = (0..N_SLOTS).map(|slot| {
|
||||||
let num_erasure_sets = slot % MAX_ERASURE_SETS;
|
let num_erasure_sets = slot % MAX_ERASURE_SETS;
|
||||||
|
|
||||||
let set_specs = (0..num_erasure_sets)
|
let set_specs = (0..num_erasure_sets)
|
||||||
@ -602,12 +568,12 @@ pub mod test {
|
|||||||
SlotSpec { slot, set_specs }
|
SlotSpec { slot, set_specs }
|
||||||
});
|
});
|
||||||
|
|
||||||
let decode_mutex = Arc::new(Mutex::new(()));
|
|
||||||
let mut handles = vec![];
|
let mut handles = vec![];
|
||||||
|
let session = Arc::new(Session::default());
|
||||||
|
|
||||||
for i in 0..n_threads {
|
for i in 0..N_THREADS {
|
||||||
let specs = specs.clone();
|
let specs = specs.clone();
|
||||||
let decode_mutex = Arc::clone(&decode_mutex);
|
let session = Arc::clone(&session);
|
||||||
|
|
||||||
let handle = thread::Builder::new()
|
let handle = thread::Builder::new()
|
||||||
.name(i.to_string())
|
.name(i.to_string())
|
||||||
@ -617,55 +583,39 @@ pub mod test {
|
|||||||
let erased_coding = erasure_set.coding[0].clone();
|
let erased_coding = erasure_set.coding[0].clone();
|
||||||
let erased_data = erasure_set.data[..3].to_vec();
|
let erased_data = erasure_set.data[..3].to_vec();
|
||||||
|
|
||||||
let mut data = Vec::with_capacity(NUM_DATA);
|
let mut blobs = Vec::with_capacity(ERASURE_SET_SIZE);
|
||||||
let mut coding = Vec::with_capacity(NUM_CODING);
|
|
||||||
let erasures = vec![0, 1, 2, NUM_DATA as i32, -1];
|
|
||||||
|
|
||||||
data.push(SharedBlob::default());
|
blobs.push(SharedBlob::default());
|
||||||
data.push(SharedBlob::default());
|
blobs.push(SharedBlob::default());
|
||||||
data.push(SharedBlob::default());
|
blobs.push(SharedBlob::default());
|
||||||
for blob in erasure_set.data.into_iter().skip(3) {
|
for blob in erasure_set.data.into_iter().skip(3) {
|
||||||
data.push(blob);
|
blobs.push(blob);
|
||||||
}
|
}
|
||||||
|
|
||||||
coding.push(SharedBlob::default());
|
blobs.push(SharedBlob::default());
|
||||||
for blob in erasure_set.coding.into_iter().skip(1) {
|
for blob in erasure_set.coding.into_iter().skip(1) {
|
||||||
coding.push(blob);
|
blobs.push(blob);
|
||||||
}
|
}
|
||||||
|
|
||||||
let size = erased_coding.read().unwrap().data_size() as usize;
|
let size = erased_coding.read().unwrap().size() as usize;
|
||||||
|
|
||||||
let mut data_locks: Vec<_> =
|
let mut present = vec![true; ERASURE_SET_SIZE];
|
||||||
data.iter().map(|shared| shared.write().unwrap()).collect();
|
present[0] = false;
|
||||||
let mut coding_locks: Vec<_> = coding
|
present[1] = false;
|
||||||
.iter()
|
present[2] = false;
|
||||||
.map(|shared| shared.write().unwrap())
|
present[NUM_DATA] = false;
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut data_ptrs: Vec<_> = data_locks
|
session
|
||||||
.iter_mut()
|
.reconstruct_shared_blobs(
|
||||||
.map(|blob| &mut blob.data[..size])
|
&mut blobs,
|
||||||
.collect();
|
&present,
|
||||||
let mut coding_ptrs: Vec<_> = coding_locks
|
size,
|
||||||
.iter_mut()
|
erasure_set.set_index * NUM_DATA as u64,
|
||||||
.map(|blob| &mut blob.data_mut()[..size])
|
slot_model.slot,
|
||||||
.collect();
|
|
||||||
|
|
||||||
{
|
|
||||||
let _lock = decode_mutex.lock();
|
|
||||||
|
|
||||||
decode_blocks(
|
|
||||||
data_ptrs.as_mut_slice(),
|
|
||||||
coding_ptrs.as_mut_slice(),
|
|
||||||
&erasures,
|
|
||||||
)
|
)
|
||||||
.expect("decoding must succeed");
|
.expect("reconstruction must succeed");
|
||||||
}
|
|
||||||
|
|
||||||
drop(coding_locks);
|
for (expected, recovered) in erased_data.iter().zip(blobs.iter()) {
|
||||||
drop(data_locks);
|
|
||||||
|
|
||||||
for (expected, recovered) in erased_data.iter().zip(data.iter()) {
|
|
||||||
let expected = expected.read().unwrap();
|
let expected = expected.read().unwrap();
|
||||||
let mut recovered = recovered.write().unwrap();
|
let mut recovered = recovered.write().unwrap();
|
||||||
let data_size = recovered.data_size() as usize - BLOB_HEADER_SIZE;
|
let data_size = recovered.data_size() as usize - BLOB_HEADER_SIZE;
|
||||||
@ -677,7 +627,7 @@ pub mod test {
|
|||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
erased_coding.read().unwrap().data(),
|
erased_coding.read().unwrap().data(),
|
||||||
coding[0].read().unwrap().data()
|
blobs[NUM_DATA].read().unwrap().data()
|
||||||
);
|
);
|
||||||
|
|
||||||
debug!("passed set: {}", erasure_set.set_index);
|
debug!("passed set: {}", erasure_set.set_index);
|
||||||
@ -702,7 +652,9 @@ pub mod test {
|
|||||||
IntoIt: Iterator<Item = S> + Clone + 'a,
|
IntoIt: Iterator<Item = S> + Clone + 'a,
|
||||||
S: Borrow<SlotSpec>,
|
S: Borrow<SlotSpec>,
|
||||||
{
|
{
|
||||||
specs.into_iter().map(|spec| {
|
let mut coding_generator = CodingGenerator::default();
|
||||||
|
|
||||||
|
specs.into_iter().map(move |spec| {
|
||||||
let spec = spec.borrow();
|
let spec = spec.borrow();
|
||||||
let slot = spec.slot;
|
let slot = spec.slot;
|
||||||
|
|
||||||
@ -713,7 +665,7 @@ pub mod test {
|
|||||||
let set_index = erasure_spec.set_index as usize;
|
let set_index = erasure_spec.set_index as usize;
|
||||||
let start_index = set_index * NUM_DATA;
|
let start_index = set_index * NUM_DATA;
|
||||||
|
|
||||||
let mut blobs = make_tiny_test_entries(NUM_DATA).to_single_entry_shared_blobs();
|
let mut blobs = generate_test_blobs(0, NUM_DATA);
|
||||||
index_blobs(
|
index_blobs(
|
||||||
&blobs,
|
&blobs,
|
||||||
&Keypair::new().pubkey(),
|
&Keypair::new().pubkey(),
|
||||||
@ -722,7 +674,6 @@ pub mod test {
|
|||||||
0,
|
0,
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut coding_generator = CodingGenerator::new();
|
|
||||||
let mut coding_blobs = coding_generator.next(&blobs);
|
let mut coding_blobs = coding_generator.next(&blobs);
|
||||||
|
|
||||||
blobs.drain(erasure_spec.num_data..);
|
blobs.drain(erasure_spec.num_data..);
|
||||||
@ -770,84 +721,60 @@ pub mod test {
|
|||||||
blocktree
|
blocktree
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fn verify_test_blobs(offset: usize, blobs: &[SharedBlob]) -> bool {
|
||||||
|
// let data: Vec<_> = (0..BLOB_DATA_SIZE).into_iter().map(|i| i as u8).collect();
|
||||||
|
//
|
||||||
|
// blobs.iter().enumerate().all(|(i, blob)| {
|
||||||
|
// let blob = blob.read().unwrap();
|
||||||
|
// blob.index() as usize == i + offset && blob.data() == &data[..]
|
||||||
|
// })
|
||||||
|
// }
|
||||||
|
//
|
||||||
fn generate_test_blobs(offset: usize, num_blobs: usize) -> Vec<SharedBlob> {
|
fn generate_test_blobs(offset: usize, num_blobs: usize) -> Vec<SharedBlob> {
|
||||||
let blobs = make_tiny_test_entries(num_blobs).to_single_entry_shared_blobs();
|
let data: Vec<_> = (0..BLOB_DATA_SIZE).into_iter().map(|i| i as u8).collect();
|
||||||
|
|
||||||
|
let blobs: Vec<_> = (0..num_blobs)
|
||||||
|
.into_iter()
|
||||||
|
.map(|_| {
|
||||||
|
let mut blob = Blob::default();
|
||||||
|
blob.data_mut()[..data.len()].copy_from_slice(&data);
|
||||||
|
blob.set_size(data.len());
|
||||||
|
Arc::new(RwLock::new(blob))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
index_blobs(&blobs, &Pubkey::new_rand(), offset as u64, 0, 0);
|
index_blobs(&blobs, &Pubkey::new_rand(), offset as u64, 0, 0);
|
||||||
|
|
||||||
blobs
|
blobs
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decode_blobs(
|
impl Session {
|
||||||
blobs: &[SharedBlob],
|
fn reconstruct_shared_blobs(
|
||||||
erasures: &[i32],
|
&self,
|
||||||
|
blobs: &mut [SharedBlob],
|
||||||
|
present: &[bool],
|
||||||
size: usize,
|
size: usize,
|
||||||
block_start_idx: u64,
|
block_start_idx: u64,
|
||||||
slot: u64,
|
slot: u64,
|
||||||
) -> Result<bool> {
|
) -> Result<(Vec<Blob>, Vec<Blob>)> {
|
||||||
let mut locks = Vec::with_capacity(ERASURE_SET_SIZE);
|
let mut locks: Vec<std::sync::RwLockWriteGuard<_>> = blobs
|
||||||
let mut coding_ptrs: Vec<&mut [u8]> = Vec::with_capacity(NUM_CODING);
|
.iter()
|
||||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::with_capacity(NUM_DATA);
|
.map(|shared_blob| shared_blob.write().unwrap())
|
||||||
|
.collect();
|
||||||
|
|
||||||
assert_eq!(blobs.len(), ERASURE_SET_SIZE);
|
let mut slices: Vec<_> = locks
|
||||||
for b in blobs {
|
.iter_mut()
|
||||||
locks.push(b.write().unwrap());
|
.enumerate()
|
||||||
}
|
.map(|(i, blob)| {
|
||||||
|
|
||||||
for (i, l) in locks.iter_mut().enumerate() {
|
|
||||||
if i < NUM_DATA {
|
if i < NUM_DATA {
|
||||||
data_ptrs.push(&mut l.data[..size]);
|
&mut blob.data[..size]
|
||||||
} else {
|
} else {
|
||||||
coding_ptrs.push(&mut l.data_mut()[..size]);
|
&mut blob.data_mut()[..size]
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
self.reconstruct_blobs(&mut slices, present, size, block_start_idx, slot)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode the blocks
|
|
||||||
decode_blocks(
|
|
||||||
data_ptrs.as_mut_slice(),
|
|
||||||
coding_ptrs.as_mut_slice(),
|
|
||||||
&erasures,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Create the missing blobs from the reconstructed data
|
|
||||||
let mut corrupt = false;
|
|
||||||
|
|
||||||
for i in &erasures[..erasures.len() - 1] {
|
|
||||||
let n = *i as usize;
|
|
||||||
let mut idx = n as u64 + block_start_idx;
|
|
||||||
|
|
||||||
let mut data_size;
|
|
||||||
if n < NUM_DATA {
|
|
||||||
data_size = locks[n].data_size() as usize;
|
|
||||||
data_size -= BLOB_HEADER_SIZE;
|
|
||||||
if data_size > BLOB_DATA_SIZE {
|
|
||||||
error!("corrupt data blob[{}] data_size: {}", idx, data_size);
|
|
||||||
corrupt = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
data_size = size;
|
|
||||||
idx -= NUM_DATA as u64;
|
|
||||||
locks[n].set_slot(slot);
|
|
||||||
locks[n].set_index(idx);
|
|
||||||
|
|
||||||
if data_size - BLOB_HEADER_SIZE > BLOB_DATA_SIZE {
|
|
||||||
error!("corrupt coding blob[{}] data_size: {}", idx, data_size);
|
|
||||||
corrupt = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
locks[n].set_size(data_size);
|
|
||||||
trace!(
|
|
||||||
"erasures[{}] ({}) size: {} data[0]: {}",
|
|
||||||
*i,
|
|
||||||
idx,
|
|
||||||
data_size,
|
|
||||||
locks[n].data()[0]
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(corrupt)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
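The `reconstruct_shared_blobs` test helper shows the general adapter pattern: acquire write guards on every `SharedBlob` first, then build a vector of `&mut [u8]` views over the guards so the slice-based `reconstruct_blobs` can mutate them in place. A self-contained sketch of that pattern with plain `RwLock<Vec<u8>>` buffers (names here are illustrative, not from the diff):

```rust
use std::sync::{Arc, RwLock};

// Run `f` over mutable slice views of every shared buffer.
fn with_slices<F: FnOnce(&mut [&mut [u8]])>(shared: &[Arc<RwLock<Vec<u8>>>], f: F) {
    // Guards must outlive the slice views, so collect them into a Vec first.
    let mut guards: Vec<_> = shared.iter().map(|b| b.write().unwrap()).collect();
    let mut views: Vec<&mut [u8]> = guards.iter_mut().map(|g| g.as_mut_slice()).collect();
    f(&mut views);
}

fn main() {
    let blobs: Vec<_> = (0..3).map(|_| Arc::new(RwLock::new(vec![0u8; 4]))).collect();
    with_slices(&blobs, |views| {
        for v in views.iter_mut() {
            v[0] = 0xAA; // a slice-based API can now write in place
        }
    });
    assert_eq!(blobs[0].read().unwrap()[0], 0xAA);
    println!("ok");
}
```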
@@ -1,12 +1,16 @@
 //! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

+use crate::poh_recorder::PohRecorder;
+use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::streamer::{self, PacketReceiver, PacketSender};
+use solana_metrics::counter::Counter;
+use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
-use std::sync::mpsc::channel;
-use std::sync::Arc;
-use std::thread::{self, JoinHandle};
+use std::sync::mpsc::{channel, RecvTimeoutError};
+use std::sync::{Arc, Mutex};
+use std::thread::{self, Builder, JoinHandle};

 pub struct FetchStage {
     thread_hdls: Vec<JoinHandle<()>>,
@@ -18,10 +22,11 @@ impl FetchStage {
         sockets: Vec<UdpSocket>,
         tpu_via_blobs_sockets: Vec<UdpSocket>,
         exit: &Arc<AtomicBool>,
+        poh_recorder: &Arc<Mutex<PohRecorder>>,
     ) -> (Self, PacketReceiver) {
         let (sender, receiver) = channel();
         (
-            Self::new_with_sender(sockets, tpu_via_blobs_sockets, exit, &sender),
+            Self::new_with_sender(sockets, tpu_via_blobs_sockets, exit, &sender, &poh_recorder),
             receiver,
         )
     }
@@ -30,10 +35,48 @@ impl FetchStage {
         tpu_via_blobs_sockets: Vec<UdpSocket>,
         exit: &Arc<AtomicBool>,
         sender: &PacketSender,
+        poh_recorder: &Arc<Mutex<PohRecorder>>,
     ) -> Self {
         let tx_sockets = sockets.into_iter().map(Arc::new).collect();
         let tpu_via_blobs_sockets = tpu_via_blobs_sockets.into_iter().map(Arc::new).collect();
-        Self::new_multi_socket(tx_sockets, tpu_via_blobs_sockets, exit, &sender)
+        Self::new_multi_socket(
+            tx_sockets,
+            tpu_via_blobs_sockets,
+            exit,
+            &sender,
+            &poh_recorder,
+        )
+    }
+
+    fn handle_forwarded_packets(
+        recvr: &PacketReceiver,
+        sendr: &PacketSender,
+        poh_recorder: &Arc<Mutex<PohRecorder>>,
+    ) -> Result<()> {
+        let msgs = recvr.recv()?;
+        let mut len = msgs.packets.len();
+        let mut batch = vec![msgs];
+        while let Ok(more) = recvr.try_recv() {
+            len += more.packets.len();
+            batch.push(more);
+        }
+
+        if poh_recorder
+            .lock()
+            .unwrap()
+            .would_be_leader(DEFAULT_TICKS_PER_SLOT)
+        {
+            inc_new_counter_info!("fetch_stage-honor_forwards", len);
+            for packets in batch {
+                if sendr.send(packets).is_err() {
+                    return Err(Error::SendError);
+                }
+            }
+        } else {
+            inc_new_counter_info!("fetch_stage-discard_forwards", len);
+        }
+
+        Ok(())
     }

     fn new_multi_socket(
@ -41,16 +84,39 @@ impl FetchStage {
|
|||||||
tpu_via_blobs_sockets: Vec<Arc<UdpSocket>>,
|
tpu_via_blobs_sockets: Vec<Arc<UdpSocket>>,
|
||||||
exit: &Arc<AtomicBool>,
|
exit: &Arc<AtomicBool>,
|
||||||
sender: &PacketSender,
|
sender: &PacketSender,
|
||||||
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let tpu_threads = sockets
|
let tpu_threads = sockets
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|socket| streamer::receiver(socket, &exit, sender.clone(), "fetch-stage"));
|
.map(|socket| streamer::receiver(socket, &exit, sender.clone()));
|
||||||
|
|
||||||
|
let (forward_sender, forward_receiver) = channel();
|
||||||
let tpu_via_blobs_threads = tpu_via_blobs_sockets
|
let tpu_via_blobs_threads = tpu_via_blobs_sockets
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|socket| streamer::blob_packet_receiver(socket, &exit, sender.clone()));
|
.map(|socket| streamer::blob_packet_receiver(socket, &exit, forward_sender.clone()));
|
||||||
|
|
||||||
let thread_hdls: Vec<_> = tpu_threads.chain(tpu_via_blobs_threads).collect();
|
let sender = sender.clone();
|
||||||
|
let poh_recorder = poh_recorder.clone();
|
||||||
|
|
||||||
|
let fwd_thread_hdl = Builder::new()
|
||||||
|
.name("solana-fetch-stage-fwd-rcvr".to_string())
|
||||||
|
.spawn(move || loop {
|
||||||
|
if let Err(e) =
|
||||||
|
Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder)
|
||||||
|
{
|
||||||
|
match e {
|
||||||
|
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||||
|
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||||
|
Error::RecvError(_) => break,
|
||||||
|
Error::SendError => break,
|
||||||
|
_ => error!("{:?}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut thread_hdls: Vec<_> = tpu_threads.chain(tpu_via_blobs_threads).collect();
|
||||||
|
thread_hdls.push(fwd_thread_hdl);
|
||||||
Self { thread_hdls }
|
Self { thread_hdls }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
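The forwarding path above batches everything pending on the channel before deciding, with a single would_be_leader check, whether to re-inject or drop the whole lot. Below is a minimal, standalone sketch of that drain-then-gate pattern; plain Vec<u8> batches stand in for the real Packets type, and the is_near_leader flag stands in for the PoH recorder query.

// Drain one blocking recv plus any already-queued batches, then gate them all at once.
use std::sync::mpsc::{channel, Receiver, Sender};

fn drain_and_gate(rx: &Receiver<Vec<u8>>, tx: &Sender<Vec<u8>>, is_near_leader: bool) -> bool {
    // Block for the first batch, then opportunistically drain the rest.
    let first = match rx.recv() {
        Ok(b) => b,
        Err(_) => return false, // channel disconnected
    };
    let mut batch = vec![first];
    while let Ok(more) = rx.try_recv() {
        batch.push(more);
    }
    if is_near_leader {
        // Honor forwards: re-inject every drained batch downstream.
        for b in batch {
            if tx.send(b).is_err() {
                return false;
            }
        }
    }
    // Otherwise the drained batches are simply dropped.
    true
}

fn main() {
    let (in_tx, in_rx) = channel();
    let (out_tx, out_rx) = channel();
    in_tx.send(vec![1, 2, 3]).unwrap();
    assert!(drain_and_gate(&in_rx, &out_tx, true));
    assert_eq!(out_rx.recv().unwrap(), vec![1, 2, 3]);
}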
core/src/fullnode.rs
@@ -8,8 +8,8 @@ use crate::contact_info::ContactInfo;
 use crate::entry::create_ticks;
 use crate::entry::next_entry_mut;
 use crate::entry::Entry;
-use crate::gossip_service::GossipService;
+use crate::gossip_service::{discover_nodes, GossipService};
-use crate::leader_schedule_utils;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::poh_recorder::PohRecorder;
 use crate::poh_service::{PohService, PohServiceConfig};
 use crate::rpc::JsonRpcConfig;
@@ -94,12 +94,14 @@ impl Fullnode {
         let id = keypair.pubkey();
         assert_eq!(id, node.info.id);

-        let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver) =
+        let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver, leader_schedule_cache) =
             new_banks_from_blocktree(ledger_path, config.account_paths.clone());

+        let leader_schedule_cache = Arc::new(leader_schedule_cache);
         let exit = Arc::new(AtomicBool::new(false));
         let bank_info = &bank_forks_info[0];
         let bank = bank_forks[bank_info.bank_slot].clone();
+        let genesis_blockhash = bank.last_blockhash();

         info!(
             "starting PoH... {} {}",
@@ -107,15 +109,17 @@ impl Fullnode {
             bank.last_blockhash(),
         );
         let blocktree = Arc::new(blocktree);

         let (poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
+            leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
             bank.ticks_per_slot(),
             &id,
             &blocktree,
             blocktree.new_blobs_signals.first().cloned(),
+            &leader_schedule_cache,
         );
         let poh_recorder = Arc::new(Mutex::new(poh_recorder));
         let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit);
@@ -231,8 +235,15 @@ impl Fullnode {
             &poh_recorder,
             sender.clone(),
             receiver,
+            &leader_schedule_cache,
             &exit,
+            &genesis_blockhash,
         );

+        if config.sigverify_disabled {
+            warn!("signature verification disabled");
+        }
+
         let tpu = Tpu::new(
             &id,
             &cluster_info,
@@ -244,7 +255,9 @@ impl Fullnode {
             config.sigverify_disabled,
             &blocktree,
             sender,
+            &leader_schedule_cache,
             &exit,
+            &genesis_blockhash,
         );

         inc_new_counter_info!("fullnode-new", 1);
@@ -275,14 +288,20 @@ impl Fullnode {
 pub fn new_banks_from_blocktree(
     blocktree_path: &str,
     account_paths: Option<String>,
-) -> (BankForks, Vec<BankForksInfo>, Blocktree, Receiver<bool>) {
+) -> (
+    BankForks,
+    Vec<BankForksInfo>,
+    Blocktree,
+    Receiver<bool>,
+    LeaderScheduleCache,
+) {
     let genesis_block =
         GenesisBlock::load(blocktree_path).expect("Expected to successfully open genesis block");

     let (blocktree, ledger_signal_receiver) = Blocktree::open_with_signal(blocktree_path)
         .expect("Expected to successfully open database ledger");

-    let (bank_forks, bank_forks_info) =
+    let (bank_forks, bank_forks_info, leader_schedule_cache) =
         blocktree_processor::process_blocktree(&genesis_block, &blocktree, account_paths)
             .expect("process_blocktree failed");

@@ -291,6 +310,7 @@ pub fn new_banks_from_blocktree(
         bank_forks_info,
         blocktree,
         ledger_signal_receiver,
+        leader_schedule_cache,
     )
 }
@@ -395,7 +415,7 @@ pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
         None,
         &FullnodeConfig::default(),
     );
+    discover_nodes(&contact_info.gossip, 1).expect("Node startup failed");
     (node, contact_info, mint_keypair, ledger_path)
 }
core/src/gen_keys.rs
@@ -4,7 +4,6 @@ use rand::{Rng, SeedableRng};
 use rand_chacha::ChaChaRng;
 use rayon::prelude::*;
 use solana_sdk::signature::Keypair;
-use untrusted::Input;

 pub struct GenKeys {
     generator: ChaChaRng,
@@ -26,10 +25,14 @@ impl GenKeys {
         (0..n).map(|_| self.gen_seed()).collect()
     }

+    pub fn gen_keypair(&mut self) -> Keypair {
+        Keypair::generate(&mut self.generator)
+    }
+
     pub fn gen_n_keypairs(&mut self, n: u64) -> Vec<Keypair> {
         self.gen_n_seeds(n)
             .into_par_iter()
-            .map(|seed| Keypair::from_seed_unchecked(Input::from(&seed)).unwrap())
+            .map(|seed| Keypair::generate(&mut ChaChaRng::from_seed(seed)))
             .collect()
     }
 }
@@ -52,6 +55,17 @@ mod tests {
         }
     }

+    #[test]
+    fn test_gen_keypair_is_deterministic() {
+        let seed = [0u8; 32];
+        let mut gen0 = GenKeys::new(seed);
+        let mut gen1 = GenKeys::new(seed);
+        assert_eq!(
+            gen0.gen_keypair().to_bytes().to_vec(),
+            gen1.gen_keypair().to_bytes().to_vec()
+        );
+    }
+
     fn gen_n_pubkeys(seed: [u8; 32], n: u64) -> HashSet<Pubkey> {
         GenKeys::new(seed)
             .gen_n_keypairs(n)
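The switch to Keypair::generate(&mut ChaChaRng::from_seed(seed)) stays deterministic because a ChaCha generator is a pure function of its seed, which is exactly what the new test_gen_keypair_is_deterministic asserts. A small sketch of that property using only the rand and rand_chacha crates; the 32 bytes drawn here stand in for keypair material.

// Two generators built from the same seed yield identical byte streams.
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaChaRng;

fn main() {
    let seed = [0u8; 32];
    let mut a = ChaChaRng::from_seed(seed);
    let mut b = ChaChaRng::from_seed(seed);
    let (mut ka, mut kb) = ([0u8; 32], [0u8; 32]);
    a.fill_bytes(&mut ka);
    b.fill_bytes(&mut kb);
    assert_eq!(ka, kb); // same seed, same "keypair" material
}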
core/src/gossip_service.rs
@@ -69,11 +69,7 @@ pub fn discover(
     let (gossip_service, spy_ref) = make_spy_node(gossip_addr, &exit);
     let id = spy_ref.read().unwrap().keypair.pubkey();
     info!("Gossip entry point: {:?}", gossip_addr);
-    trace!(
-        "discover: spy_node {} looking for at least {:?} nodes",
-        id,
-        num_nodes
-    );
+    info!("Spy node id: {:?}", id);

     let (met_criteria, secs, tvu_peers) = spy(spy_ref.clone(), num_nodes, timeout, find_node);
core/src/leader_schedule.rs
@@ -5,7 +5,7 @@ use solana_sdk::pubkey::Pubkey;
 use std::ops::Index;

 /// Stake-weighted leader schedule for one epoch.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, Default, PartialEq)]
 pub struct LeaderSchedule {
     slot_leaders: Vec<Pubkey>,
 }
core/src/leader_schedule_cache.rs (new file, 405 lines)
@@ -0,0 +1,405 @@
use crate::blocktree::Blocktree;
use crate::leader_schedule::LeaderSchedule;
use crate::leader_schedule_utils;
use solana_runtime::bank::{Bank, EpochSchedule};
use solana_sdk::pubkey::Pubkey;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, RwLock};

type CachedSchedules = (HashMap<u64, Arc<LeaderSchedule>>, VecDeque<u64>);
const MAX_SCHEDULES: usize = 10;

#[derive(Default)]
pub struct LeaderScheduleCache {
    // Map from an epoch to a leader schedule for that epoch
    pub cached_schedules: RwLock<CachedSchedules>,
    epoch_schedule: EpochSchedule,
}

impl LeaderScheduleCache {
    pub fn new_from_bank(bank: &Bank) -> Self {
        Self::new(*bank.epoch_schedule())
    }

    pub fn new(epoch_schedule: EpochSchedule) -> Self {
        Self {
            cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())),
            epoch_schedule,
        }
    }

    pub fn slot_leader_at(&self, slot: u64) -> Option<Pubkey> {
        let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot);
        self.cached_schedules
            .read()
            .unwrap()
            .0
            .get(&epoch)
            .map(|schedule| schedule[slot_index])
    }

    pub fn slot_leader_at_else_compute(&self, slot: u64, bank: &Bank) -> Option<Pubkey> {
        let cache_result = self.slot_leader_at(slot);
        if cache_result.is_some() {
            cache_result
        } else {
            let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
            if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) {
                Some(epoch_schedule[slot_index])
            } else {
                None
            }
        }
    }

    /// Return the next slot after the given current_slot that the given node will be leader
    pub fn next_leader_slot(
        &self,
        pubkey: &Pubkey,
        mut current_slot: u64,
        bank: &Bank,
        blocktree: Option<&Blocktree>,
    ) -> Option<u64> {
        let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
        while let Some(leader_schedule) = self.get_epoch_schedule_else_compute(epoch, bank) {
            // clippy thinks I should do this:
            // for (i, <item>) in leader_schedule
            //  .iter()
            //  .enumerate()
            //  .take(bank.get_slots_in_epoch(epoch))
            //  .skip(from_slot_index + 1) {
            //
            // but leader_schedule doesn't implement Iter...
            #[allow(clippy::needless_range_loop)]
            for i in start_index..bank.get_slots_in_epoch(epoch) {
                current_slot += 1;
                if *pubkey == leader_schedule[i] {
                    if let Some(blocktree) = blocktree {
                        if let Some(meta) = blocktree.meta(current_slot).unwrap() {
                            // We have already sent a blob for this slot, so skip it
                            if meta.received > 0 {
                                continue;
                            }
                        }
                    }

                    return Some(current_slot);
                }
            }

            epoch += 1;
            start_index = 0;
        }
        None
    }

    fn get_epoch_schedule_else_compute(
        &self,
        epoch: u64,
        bank: &Bank,
    ) -> Option<Arc<LeaderSchedule>> {
        let epoch_schedule = self.cached_schedules.read().unwrap().0.get(&epoch).cloned();

        if epoch_schedule.is_some() {
            epoch_schedule
        } else if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) {
            Some(epoch_schedule)
        } else {
            None
        }
    }

    fn compute_epoch_schedule(&self, epoch: u64, bank: &Bank) -> Option<Arc<LeaderSchedule>> {
        let leader_schedule = leader_schedule_utils::leader_schedule(epoch, bank);
        leader_schedule.map(|leader_schedule| {
            let leader_schedule = Arc::new(leader_schedule);
            let (ref mut cached_schedules, ref mut order) = *self.cached_schedules.write().unwrap();
            // Check to see if schedule exists in case somebody already inserted in the time we were
            // waiting for the lock
            let entry = cached_schedules.entry(epoch);
            if let Entry::Vacant(v) = entry {
                v.insert(leader_schedule.clone());
                order.push_back(epoch);
                Self::retain_latest(cached_schedules, order);
            }
            leader_schedule
        })
    }

    fn retain_latest(schedules: &mut HashMap<u64, Arc<LeaderSchedule>>, order: &mut VecDeque<u64>) {
        if schedules.len() > MAX_SCHEDULES {
            let first = order.pop_front().unwrap();
            schedules.remove(&first);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::blocktree::tests::make_slot_entries;
    use crate::voting_keypair::tests::new_vote_account;
    use solana_runtime::bank::{Bank, EpochSchedule};
    use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread::Builder;

    use crate::blocktree::get_tmp_ledger_path;

    #[test]
    fn test_slot_leader_at_else_compute() {
        let slots_per_epoch = 10;
        let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
        let cache = LeaderScheduleCache::new(epoch_schedule);
        let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
        let bank = Bank::new(&genesis_block);

        // Nothing in the cache, should return None
        assert!(cache.slot_leader_at(bank.slot()).is_none());

        // Add something to the cache
        assert!(cache
            .slot_leader_at_else_compute(bank.slot(), &bank)
            .is_some());
        assert!(cache.slot_leader_at(bank.slot()).is_some());
        assert_eq!(cache.cached_schedules.read().unwrap().0.len(), 1);
    }

    #[test]
    fn test_retain_latest() {
        let mut cached_schedules = HashMap::new();
        let mut order = VecDeque::new();
        for i in 0..=MAX_SCHEDULES {
            cached_schedules.insert(i as u64, Arc::new(LeaderSchedule::default()));
            order.push_back(i as u64);
        }
        LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order);
        assert_eq!(cached_schedules.len(), MAX_SCHEDULES);
        let mut keys: Vec<_> = cached_schedules.keys().cloned().collect();
        keys.sort();
        let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect();
        let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect();
        assert_eq!(expected, keys);
        assert_eq!(expected_order, order);
    }

    #[test]
    fn test_thread_race_leader_schedule_cache() {
        let num_runs = 10;
        for _ in 0..num_runs {
            run_thread_race()
        }
    }

    fn run_thread_race() {
        let slots_per_epoch = 10;
        let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
        let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule));
        let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
        let bank = Arc::new(Bank::new(&genesis_block));

        let num_threads = 10;
        let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads)
            .map(|_| {
                let cache = cache.clone();
                let bank = bank.clone();
                let (sender, receiver) = channel();
                (
                    Builder::new()
                        .name("test_thread_race_leader_schedule_cache".to_string())
                        .spawn(move || {
                            let _ = receiver.recv();
                            cache.slot_leader_at_else_compute(bank.slot(), &bank);
                        })
                        .unwrap(),
                    sender,
                )
            })
            .unzip();

        for sender in &senders {
            sender.send(true).unwrap();
        }

        for t in threads.into_iter() {
            t.join().unwrap();
        }

        let (ref cached_schedules, ref order) = *cache.cached_schedules.read().unwrap();
        assert_eq!(cached_schedules.len(), 1);
        assert_eq!(order.len(), 1);
    }

    #[test]
    fn test_next_leader_slot() {
        let pubkey = Pubkey::new_rand();
        let mut genesis_block = GenesisBlock::new_with_leader(
            BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        )
        .0;
        genesis_block.epoch_warmup = false;

        let bank = Bank::new(&genesis_block);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));

        assert_eq!(
            cache
                .slot_leader_at_else_compute(bank.slot(), &bank)
                .unwrap(),
            pubkey
        );
        assert_eq!(cache.next_leader_slot(&pubkey, 0, &bank, None), Some(1));
        assert_eq!(cache.next_leader_slot(&pubkey, 1, &bank, None), Some(2));
        assert_eq!(
            cache.next_leader_slot(
                &pubkey,
                2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
                &bank,
                None
            ),
            None
        );

        assert_eq!(
            cache.next_leader_slot(
                &Pubkey::new_rand(), // not in leader_schedule
                0,
                &bank,
                None
            ),
            None
        );
    }

    #[test]
    fn test_next_leader_slot_blocktree() {
        let pubkey = Pubkey::new_rand();
        let mut genesis_block = GenesisBlock::new_with_leader(
            BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        )
        .0;
        genesis_block.epoch_warmup = false;

        let bank = Bank::new(&genesis_block);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            );

            assert_eq!(
                cache
                    .slot_leader_at_else_compute(bank.slot(), &bank)
                    .unwrap(),
                pubkey
            );
            // Check that the next leader slot after 0 is slot 1
            assert_eq!(
                cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
                Some(1)
            );

            // Write a blob into slot 2 that chains to slot 1,
            // but slot 1 is empty so should not be skipped
            let (blobs, _) = make_slot_entries(2, 1, 1);
            blocktree.write_blobs(&blobs[..]).unwrap();
            assert_eq!(
                cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
                Some(1)
            );

            // Write a blob into slot 1
            let (blobs, _) = make_slot_entries(1, 0, 1);

            // Check that slot 1 and 2 are skipped
            blocktree.write_blobs(&blobs[..]).unwrap();
            assert_eq!(
                cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
                Some(3)
            );

            // Integrity checks
            assert_eq!(
                cache.next_leader_slot(
                    &pubkey,
                    2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
                    &bank,
                    Some(&blocktree)
                ),
                None
            );

            assert_eq!(
                cache.next_leader_slot(
                    &Pubkey::new_rand(), // not in leader_schedule
                    0,
                    &bank,
                    Some(&blocktree)
                ),
                None
            );
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }

    #[test]
    fn test_next_leader_slot_next_epoch() {
        let pubkey = Pubkey::new_rand();
        let (mut genesis_block, mint_keypair) = GenesisBlock::new_with_leader(
            2 * BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        );
        genesis_block.epoch_warmup = false;

        let bank = Bank::new(&genesis_block);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let delegate_id = Pubkey::new_rand();

        // Create new vote account
        let new_voting_keypair = Keypair::new();
        new_vote_account(
            &mint_keypair,
            &new_voting_keypair,
            &delegate_id,
            &bank,
            BOOTSTRAP_LEADER_LAMPORTS,
        );

        // Have to wait until the epoch at after the epoch stakes generated at genesis
        // for the new votes to take effect.
        let mut target_slot = 1;
        let epoch = bank.get_stakers_epoch(0);
        while bank.get_stakers_epoch(target_slot) == epoch {
            target_slot += 1;
        }

        let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
        let mut expected_slot = 0;
        let epoch = bank.get_stakers_epoch(target_slot);
        for i in 0..epoch {
            expected_slot += bank.get_slots_in_epoch(i);
        }

        let schedule = cache.compute_epoch_schedule(epoch, &bank).unwrap();
        let mut index = 0;
        while schedule[index] != delegate_id {
            index += 1
        }

        expected_slot += index;

        assert_eq!(
            cache.next_leader_slot(&delegate_id, 0, &bank, None),
            Some(expected_slot),
        );
    }
}
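LeaderScheduleCache bounds its memory by pairing the schedule map with a VecDeque of epochs in insertion order; retain_latest evicts the front (oldest) epoch once the map exceeds MAX_SCHEDULES. The same idea in isolation, where MAX and the String values are illustrative stand-ins for MAX_SCHEDULES and the cached schedules:

// A map bounded by an insertion-order queue: oldest key evicted on overflow.
use std::collections::{HashMap, VecDeque};

const MAX: usize = 3;

fn insert_bounded(map: &mut HashMap<u64, String>, order: &mut VecDeque<u64>, k: u64, v: String) {
    // Only record order for genuinely new keys, mirroring the Entry::Vacant check.
    if map.insert(k, v).is_none() {
        order.push_back(k);
    }
    if map.len() > MAX {
        if let Some(oldest) = order.pop_front() {
            map.remove(&oldest);
        }
    }
}

fn main() {
    let (mut map, mut order) = (HashMap::new(), VecDeque::new());
    for epoch in 0..5u64 {
        insert_bounded(&mut map, &mut order, epoch, format!("schedule {}", epoch));
    }
    assert_eq!(map.len(), MAX);
    assert!(!map.contains_key(&0) && !map.contains_key(&1)); // oldest epochs evicted
}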
core/src/leader_schedule_utils.rs
@@ -1,4 +1,3 @@
-use crate::blocktree::Blocktree;
 use crate::leader_schedule::LeaderSchedule;
 use crate::staking_utils;
 use solana_runtime::bank::Bank;
@@ -6,7 +5,7 @@ use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;

 /// Return the leader schedule for the given epoch.
-fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
+pub fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
     staking_utils::delegated_stakes_at_epoch(bank, epoch_height).map(|stakes| {
         let mut seed = [0u8; 32];
         seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
@@ -21,6 +20,23 @@ fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
     })
 }

+/// Return the leader for the given slot.
+pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
+    let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
+
+    leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index])
+}
+
+// Returns the number of ticks remaining from the specified tick_height to the end of the
+// slot implied by the tick_height
+pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
+    bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
+}
+
+pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
+    tick_height / ticks_per_slot
+}
+
 fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
     // Sort first by stake. If stakes are the same, sort by pubkey to ensure a
     // deterministic result.
@@ -37,229 +53,11 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
     stakes.dedup();
 }

-/// Return the leader for the given slot.
-pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
-    let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
-
-    leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index])
-}
-
-/// Return the next slot after the given current_slot that the given node will be leader
-pub fn next_leader_slot(
-    pubkey: &Pubkey,
-    mut current_slot: u64,
-    bank: &Bank,
-    blocktree: Option<&Blocktree>,
-) -> Option<u64> {
-    let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
-    while let Some(leader_schedule) = leader_schedule(epoch, bank) {
-        // clippy thinks I should do this:
-        // for (i, <item>) in leader_schedule
-        //  .iter()
-        //  .enumerate()
-        //  .take(bank.get_slots_in_epoch(epoch))
-        //  .skip(from_slot_index + 1) {
-        //
-        // but leader_schedule doesn't implement Iter...
-        #[allow(clippy::needless_range_loop)]
-        for i in start_index..bank.get_slots_in_epoch(epoch) {
-            current_slot += 1;
-            if *pubkey == leader_schedule[i] {
-                if let Some(blocktree) = blocktree {
-                    if let Some(meta) = blocktree.meta(current_slot).unwrap() {
-                        // We have already sent a blob for this slot, so skip it
-                        if meta.received > 0 {
-                            continue;
-                        }
-                    }
-                }
-
-                return Some(current_slot);
-            }
-        }
-
-        epoch += 1;
-        start_index = 0;
-    }
-    None
-}
-
-// Returns the number of ticks remaining from the specified tick_height to the end of the
-// slot implied by the tick_height
-pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
-    bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
-}
-
-pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
-    tick_height / ticks_per_slot
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::blocktree::get_tmp_ledger_path;
-    use crate::blocktree::tests::make_slot_entries;
     use crate::staking_utils;
-    use crate::voting_keypair::tests::new_vote_account;
     use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
-    use solana_sdk::signature::{Keypair, KeypairUtil};
-    use std::sync::Arc;
-
-    #[test]
-    fn test_next_leader_slot() {
-        let pubkey = Pubkey::new_rand();
-        let mut genesis_block = GenesisBlock::new_with_leader(
-            BOOTSTRAP_LEADER_LAMPORTS,
-            &pubkey,
-            BOOTSTRAP_LEADER_LAMPORTS,
-        )
-        .0;
-        genesis_block.epoch_warmup = false;
-
-        let bank = Bank::new(&genesis_block);
-        assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
-        assert_eq!(next_leader_slot(&pubkey, 0, &bank, None), Some(1));
-        assert_eq!(next_leader_slot(&pubkey, 1, &bank, None), Some(2));
-        assert_eq!(
-            next_leader_slot(
-                &pubkey,
-                2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
-                &bank,
-                None
-            ),
-            None
-        );
-
-        assert_eq!(
-            next_leader_slot(
-                &Pubkey::new_rand(), // not in leader_schedule
-                0,
-                &bank,
-                None
-            ),
-            None
-        );
-    }
-
-    #[test]
-    fn test_next_leader_slot_blocktree() {
-        let pubkey = Pubkey::new_rand();
-        let mut genesis_block = GenesisBlock::new_with_leader(
-            BOOTSTRAP_LEADER_LAMPORTS,
-            &pubkey,
-            BOOTSTRAP_LEADER_LAMPORTS,
-        )
-        .0;
-        genesis_block.epoch_warmup = false;
-
-        let bank = Bank::new(&genesis_block);
-        let ledger_path = get_tmp_ledger_path!();
-        {
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
-            );
-
-            assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
-            // Check that the next leader slot after 0 is slot 1
-            assert_eq!(
-                next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-                Some(1)
-            );
-
-            // Write a blob into slot 2 that chains to slot 1,
-            // but slot 1 is empty so should not be skipped
-            let (blobs, _) = make_slot_entries(2, 1, 1);
-            blocktree.write_blobs(&blobs[..]).unwrap();
-            assert_eq!(
-                next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-                Some(1)
-            );
-
-            // Write a blob into slot 1
-            let (blobs, _) = make_slot_entries(1, 0, 1);
-
-            // Check that slot 1 and 2 are skipped
-            blocktree.write_blobs(&blobs[..]).unwrap();
-            assert_eq!(
-                next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-                Some(3)
-            );
-
-            // Integrity checks
-            assert_eq!(
-                next_leader_slot(
-                    &pubkey,
-                    2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
-                    &bank,
-                    Some(&blocktree)
-                ),
-                None
-            );
-
-            assert_eq!(
-                next_leader_slot(
-                    &Pubkey::new_rand(), // not in leader_schedule
-                    0,
-                    &bank,
-                    Some(&blocktree)
-                ),
-                None
-            );
-        }
-        Blocktree::destroy(&ledger_path).unwrap();
-    }
-
-    #[test]
-    fn test_next_leader_slot_next_epoch() {
-        let pubkey = Pubkey::new_rand();
-        let (mut genesis_block, mint_keypair) = GenesisBlock::new_with_leader(
-            2 * BOOTSTRAP_LEADER_LAMPORTS,
-            &pubkey,
-            BOOTSTRAP_LEADER_LAMPORTS,
-        );
-        genesis_block.epoch_warmup = false;
-
-        let bank = Bank::new(&genesis_block);
-        let delegate_id = Pubkey::new_rand();
-
-        // Create new vote account
-        let new_voting_keypair = Keypair::new();
-        new_vote_account(
-            &mint_keypair,
-            &new_voting_keypair,
-            &delegate_id,
-            &bank,
-            BOOTSTRAP_LEADER_LAMPORTS,
-        );
-
-        // Have to wait until the epoch at after the epoch stakes generated at genesis
-        // for the new votes to take effect.
-        let mut target_slot = 1;
-        let epoch = bank.get_stakers_epoch(0);
-        while bank.get_stakers_epoch(target_slot) == epoch {
-            target_slot += 1;
-        }
-
-        let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
-        let mut expected_slot = 0;
-        let epoch = bank.get_stakers_epoch(target_slot);
-        for i in 0..epoch {
-            expected_slot += bank.get_slots_in_epoch(i);
-        }
-
-        let schedule = leader_schedule(epoch, &bank).unwrap();
-        let mut index = 0;
-        while schedule[index] != delegate_id {
-            index += 1
-        }
-
-        expected_slot += index;
-
-        assert_eq!(
-            next_leader_slot(&delegate_id, 0, &bank, None),
-            Some(expected_slot),
-        );
-    }
-
     #[test]
     fn test_leader_schedule_via_bank() {
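The two helpers moved above, num_ticks_left_in_slot and tick_height_to_slot, are plain modular arithmetic. A worked example with the Bank parameter replaced by a bare ticks_per_slot value, an assumption made only so the sketch is self-contained:

// With 8 ticks per slot, tick_height 10 sits in slot 1 (10 / 8),
// and 8 - 10 % 8 - 1 = 5 ticks remain before that slot ends.
fn num_ticks_left_in_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
    ticks_per_slot - tick_height % ticks_per_slot - 1
}

fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
    tick_height / ticks_per_slot
}

fn main() {
    assert_eq!(tick_height_to_slot(8, 10), 1);
    assert_eq!(num_ticks_left_in_slot(8, 10), 5);
    // The last tick of a slot has zero ticks left:
    assert_eq!(num_ticks_left_in_slot(8, 7), 0);
}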
core/src/lib.rs
@@ -31,13 +31,13 @@ pub mod cluster;
 pub mod cluster_info;
 pub mod cluster_tests;
 pub mod entry;
-#[cfg(feature = "erasure")]
 pub mod erasure;
 pub mod fetch_stage;
 pub mod fullnode;
 pub mod gen_keys;
 pub mod gossip_service;
 pub mod leader_schedule;
+pub mod leader_schedule_cache;
 pub mod leader_schedule_utils;
 pub mod local_cluster;
 pub mod local_vote_signer_service;
core/src/packet.rs
@@ -5,6 +5,7 @@ use bincode;
 use byteorder::{ByteOrder, LittleEndian};
 use serde::Serialize;
 use solana_metrics::counter::Counter;
+use solana_sdk::hash::Hash;
 pub use solana_sdk::packet::PACKET_DATA_SIZE;
 use solana_sdk::pubkey::Pubkey;
 use std::borrow::Borrow;
@@ -18,7 +19,6 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
 use std::ops::{Deref, DerefMut};
 use std::sync::{Arc, RwLock};

-pub type SharedPackets = Arc<RwLock<Packets>>;
 pub type SharedBlob = Arc<RwLock<Blob>>;
 pub type SharedBlobs = Vec<SharedBlob>;

@@ -32,7 +32,7 @@ pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
 #[repr(C)]
 pub struct Meta {
     pub size: usize,
-    pub num_retransmits: u64,
+    pub forward: bool,
     pub addr: [u16; 8],
     pub port: u16,
     pub v6: bool,
@@ -65,7 +65,7 @@ impl fmt::Debug for Packet {
 impl Default for Packet {
     fn default() -> Packet {
         Packet {
-            data: [0u8; PACKET_DATA_SIZE],
+            data: unsafe { std::mem::uninitialized() },
             meta: Meta::default(),
         }
     }
@@ -117,7 +117,7 @@ impl Meta {
     }
 }

-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct Packets {
     pub packets: Vec<Packet>,
 }
@@ -126,7 +126,7 @@ pub struct Packets {
 impl Default for Packets {
     fn default() -> Packets {
         Packets {
-            packets: vec![Packet::default(); NUM_PACKETS],
+            packets: Vec::with_capacity(NUM_RCVMMSGS),
         }
     }
 }
@@ -208,8 +208,7 @@ pub enum BlobError {
 }

 impl Packets {
-    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
-        self.packets.resize(NUM_PACKETS, Packet::default());
+    pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<usize> {
         let mut i = 0;
         //DOCUMENTED SIDE-EFFECT
         //Performance out of the IO without poll
@@ -220,11 +219,10 @@ impl Packets {
         socket.set_nonblocking(false)?;
         trace!("receiving on {}", socket.local_addr().unwrap());
         loop {
+            self.packets.resize(i + NUM_RCVMMSGS, Packet::default());
             match recv_mmsg(socket, &mut self.packets[i..]) {
                 Err(_) if i > 0 => {
-                    inc_new_counter_info!("packets-recv_count", i);
-                    debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
-                    return Ok(i);
+                    break;
                 }
                 Err(e) => {
                     trace!("recv_from err {:?}", e);
@@ -237,19 +235,16 @@ impl Packets {
                     trace!("got {} packets", npkts);
                     i += npkts;
                     if npkts != NUM_RCVMMSGS || i >= 1024 {
+                        break;
+                    }
+                }
+            }
+        }
+        self.packets.truncate(i);
         inc_new_counter_info!("packets-recv_count", i);
-        return Ok(i);
+        Ok(i)
-                    }
-                }
-            }
-        }
-    }
-
-    pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
-        let sz = self.run_read_from(socket)?;
-        self.packets.resize(sz, Packet::default());
-        debug!("recv_from: {}", sz);
-        Ok(())
     }

     pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
         for p in &self.packets {
             let a = p.meta.addr();
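The reworked recv_from no longer pre-sizes the buffer to NUM_PACKETS; it grows the vector by one recv_mmsg batch per iteration and truncates to the count actually received. Here is that buffer discipline in isolation; fake_recv is a purely illustrative stand-in for recv_mmsg, and BATCH for NUM_RCVMMSGS.

// Grow ahead of each batched read, then truncate to what actually arrived.
const BATCH: usize = 4;

fn fake_recv(buf: &mut [u8], available: &mut usize) -> usize {
    let n = buf.len().min(*available);
    *available -= n;
    n
}

fn main() {
    let mut packets: Vec<u8> = Vec::with_capacity(BATCH);
    let mut available = 6; // pretend 6 packets are waiting on the socket
    let mut i = 0;
    loop {
        packets.resize(i + BATCH, 0);
        let n = fake_recv(&mut packets[i..], &mut available);
        i += n;
        // A short batch means the socket is drained (mirrors npkts != NUM_RCVMMSGS).
        if n != BATCH {
            break;
        }
    }
    packets.truncate(i);
    assert_eq!(packets.len(), 6);
}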
@@ -259,15 +254,12 @@ impl Packets {
     }
 }

-pub fn to_packets_chunked<T: Serialize>(xs: &[T], chunks: usize) -> Vec<SharedPackets> {
+pub fn to_packets_chunked<T: Serialize>(xs: &[T], chunks: usize) -> Vec<Packets> {
     let mut out = vec![];
     for x in xs.chunks(chunks) {
-        let p = SharedPackets::default();
-        p.write()
-            .unwrap()
-            .packets
-            .resize(x.len(), Packet::default());
-        for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
+        let mut p = Packets::default();
+        p.packets.resize(x.len(), Packet::default());
+        for (i, o) in x.iter().zip(p.packets.iter_mut()) {
             let mut wr = io::Cursor::new(&mut o.data[..]);
             bincode::serialize_into(&mut wr, &i).expect("serialize request");
             let len = wr.position() as usize;
@@ -278,7 +270,7 @@ pub fn to_packets_chunked<T: Serialize>(xs: &[T], chunks: usize) -> Vec<SharedPa
     out
 }

-pub fn to_packets<T: Serialize>(xs: &[T]) -> Vec<SharedPackets> {
+pub fn to_packets<T: Serialize>(xs: &[T]) -> Vec<Packets> {
     to_packets_chunked(xs, NUM_PACKETS)
 }
@@ -362,8 +354,9 @@ const PARENT_RANGE: std::ops::Range<usize> = range!(0, u64);
 const SLOT_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
 const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64);
 const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey);
-const FORWARD_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, bool);
-const FLAGS_RANGE: std::ops::Range<usize> = range!(FORWARD_RANGE.end, u32);
+const FORWARDED_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, bool);
+const GENESIS_RANGE: std::ops::Range<usize> = range!(FORWARDED_RANGE.end, Hash);
+const FLAGS_RANGE: std::ops::Range<usize> = range!(GENESIS_RANGE.end, u32);
 const SIZE_RANGE: std::ops::Range<usize> = range!(FLAGS_RANGE.end, u64);

 macro_rules! align {
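Assuming range!(start, T) expands to start..start + size_of::<T>(), inserting the 32-byte GENESIS_RANGE shifts FLAGS_RANGE and SIZE_RANGE back by 32 bytes. The implied offsets, computed explicitly; the 32-byte sizes for Pubkey and Hash, and the lack of any alignment padding, are stated here as assumptions:

// Byte offsets implied by the header layout above.
use std::mem::size_of;

fn main() {
    let parent = 0..size_of::<u64>(); // 0..8
    let slot = parent.end..parent.end + size_of::<u64>(); // 8..16
    let index = slot.end..slot.end + size_of::<u64>(); // 16..24
    let id = index.end..index.end + 32; // Pubkey assumed 32 bytes: 24..56
    let forwarded = id.end..id.end + size_of::<bool>(); // 56..57
    let genesis = forwarded.end..forwarded.end + 32; // Hash assumed 32 bytes: 57..89
    let flags = genesis.end..genesis.end + size_of::<u32>(); // 89..93
    let size = flags.end..flags.end + size_of::<u64>(); // 93..101
    assert_eq!((size.start, size.end), (93, 101));
}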
@@ -381,7 +374,11 @@ pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
 impl Blob {
     pub fn new(data: &[u8]) -> Self {
         let mut blob = Self::default();
+
+        assert!(data.len() <= blob.data.len());
+
         let data_len = cmp::min(data.len(), blob.data.len());
+
         let bytes = &data[..data_len];
         blob.data[..data_len].copy_from_slice(bytes);
         blob.meta.size = blob.data_size() as usize;
@@ -432,11 +429,20 @@ impl Blob {
     /// A bool is used here instead of a flag because this item is not intended to be signed when
     /// blob signatures are introduced
     pub fn should_forward(&self) -> bool {
-        self.data[FORWARD_RANGE][0] & 0x1 == 1
+        self.data[FORWARDED_RANGE][0] & 0x1 == 0
     }

-    pub fn forward(&mut self, forward: bool) {
-        self.data[FORWARD_RANGE][0] = u8::from(forward)
+    /// Mark this blob's forwarded status
+    pub fn set_forwarded(&mut self, forward: bool) {
+        self.data[FORWARDED_RANGE][0] = u8::from(forward)
+    }
+
+    pub fn set_genesis_blockhash(&mut self, blockhash: &Hash) {
+        self.data[GENESIS_RANGE].copy_from_slice(blockhash.as_ref())
+    }
+
+    pub fn genesis_blockhash(&self) -> Hash {
+        Hash::new(&self.data[GENESIS_RANGE])
     }

     pub fn flags(&self) -> u32 {
@@ -468,8 +474,8 @@ impl Blob {
         LittleEndian::read_u64(&self.data[SIZE_RANGE])
     }

-    pub fn set_data_size(&mut self, ix: u64) {
-        LittleEndian::write_u64(&mut self.data[SIZE_RANGE], ix);
+    pub fn set_data_size(&mut self, size: u64) {
+        LittleEndian::write_u64(&mut self.data[SIZE_RANGE], size);
     }

     pub fn data(&self) -> &[u8] {
@@ -577,16 +583,27 @@ impl Blob {
     }
 }

-pub fn index_blobs(blobs: &[SharedBlob], id: &Pubkey, mut blob_index: u64, slot: u64, parent: u64) {
+pub fn index_blobs(blobs: &[SharedBlob], id: &Pubkey, blob_index: u64, slot: u64, parent: u64) {
+    index_blobs_with_genesis(blobs, id, &Hash::default(), blob_index, slot, parent)
+}
+
+pub fn index_blobs_with_genesis(
+    blobs: &[SharedBlob],
+    id: &Pubkey,
+    genesis: &Hash,
+    mut blob_index: u64,
+    slot: u64,
+    parent: u64,
+) {
     // enumerate all the blobs, those are the indices
     for blob in blobs.iter() {
         let mut blob = blob.write().unwrap();

         blob.set_index(blob_index);
+        blob.set_genesis_blockhash(genesis);
         blob.set_slot(slot);
         blob.set_parent(parent);
         blob.set_id(id);
-        blob.forward(true);
         blob_index += 1;
     }
 }
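Note the inversion: the header byte now records "already forwarded" rather than "please forward", so a freshly zeroed blob reads should_forward() as true and index_blobs no longer has to set anything. A tiny model of that semantics:

// A zeroed flag byte means "not yet forwarded", hence still eligible.
struct MiniBlob {
    forwarded: u8,
}

impl MiniBlob {
    fn should_forward(&self) -> bool {
        self.forwarded & 0x1 == 0
    }
    fn set_forwarded(&mut self, forwarded: bool) {
        self.forwarded = u8::from(forwarded)
    }
}

fn main() {
    let mut b = MiniBlob { forwarded: 0 };
    assert!(b.should_forward()); // fresh blob: not yet forwarded
    b.set_forwarded(true);
    assert!(!b.should_forward()); // never forward the same blob twice
}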
@@ -615,19 +632,26 @@ mod tests {

     #[test]
     pub fn packet_send_recv() {
-        let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let addr = reader.local_addr().unwrap();
-        let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let saddr = sender.local_addr().unwrap();
-        let p = SharedPackets::default();
-        p.write().unwrap().packets.resize(10, Packet::default());
-        for m in p.write().unwrap().packets.iter_mut() {
+        solana_logger::setup();
+        let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let addr = recv_socket.local_addr().unwrap();
+        let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let saddr = send_socket.local_addr().unwrap();
+        let mut p = Packets::default();
+
+        p.packets.resize(10, Packet::default());
+
+        for m in p.packets.iter_mut() {
             m.meta.set_addr(&addr);
             m.meta.size = PACKET_DATA_SIZE;
         }
-        p.read().unwrap().send_to(&sender).unwrap();
-        p.write().unwrap().recv_from(&reader).unwrap();
-        for m in p.write().unwrap().packets.iter_mut() {
+        p.send_to(&send_socket).unwrap();
+
+        let recvd = p.recv_from(&recv_socket).unwrap();
+
+        assert_eq!(recvd, p.packets.len());
+
+        for m in p.packets {
             assert_eq!(m.meta.size, PACKET_DATA_SIZE);
             assert_eq!(m.meta.addr(), saddr);
         }
@@ -640,16 +664,16 @@ mod tests {
         let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 1, hash, 0);
         let rv = to_packets(&vec![tx.clone(); 1]);
         assert_eq!(rv.len(), 1);
-        assert_eq!(rv[0].read().unwrap().packets.len(), 1);
+        assert_eq!(rv[0].packets.len(), 1);

         let rv = to_packets(&vec![tx.clone(); NUM_PACKETS]);
         assert_eq!(rv.len(), 1);
-        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
+        assert_eq!(rv[0].packets.len(), NUM_PACKETS);

         let rv = to_packets(&vec![tx.clone(); NUM_PACKETS + 1]);
         assert_eq!(rv.len(), 2);
-        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
-        assert_eq!(rv[1].read().unwrap().packets.len(), 1);
+        assert_eq!(rv[0].packets.len(), NUM_PACKETS);
+        assert_eq!(rv[1].packets.len(), 1);
     }

     #[test]
@@ -706,9 +730,9 @@ mod tests {
     #[test]
     fn test_blob_forward() {
         let mut b = Blob::default();
-        assert!(!b.should_forward());
-        b.forward(true);
         assert!(b.should_forward());
+        b.set_forwarded(true);
+        assert!(!b.should_forward());
     }

     #[test]
@@ -835,4 +859,14 @@ mod tests {
         assert!(p1 != p2);
     }

+    #[test]
+    fn test_blob_genesis_blockhash() {
+        let mut blob = Blob::default();
+        assert_eq!(blob.genesis_blockhash(), Hash::default());
+
+        let hash = Hash::new(&Pubkey::new_rand().as_ref());
+        blob.set_genesis_blockhash(&hash);
+        assert_eq!(blob.genesis_blockhash(), hash);
+    }
+
 }
core/src/poh_recorder.rs
@@ -12,7 +12,7 @@
 //!
 use crate::blocktree::Blocktree;
 use crate::entry::Entry;
-use crate::leader_schedule_utils;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::poh::Poh;
 use crate::result::{Error, Result};
 use solana_runtime::bank::Bank;
@@ -53,13 +53,14 @@ pub struct PohRecorder {
     max_last_leader_grace_ticks: u64,
     id: Pubkey,
     blocktree: Arc<Blocktree>,
+    leader_schedule_cache: Arc<LeaderScheduleCache>,
 }

 impl PohRecorder {
     fn clear_bank(&mut self) {
         if let Some(working_bank) = self.working_bank.take() {
             let bank = working_bank.bank;
-            let next_leader_slot = leader_schedule_utils::next_leader_slot(
+            let next_leader_slot = self.leader_schedule_cache.next_leader_slot(
                 &self.id,
                 bank.slot(),
                 &bank,
@@ -78,6 +79,18 @@ impl PohRecorder {
         }
     }

+    pub fn would_be_leader(&self, within_next_n_ticks: u64) -> bool {
+        let close_to_leader_tick = self.start_leader_at_tick.map_or(false, |leader_tick| {
+            let leader_ideal_start_tick =
+                leader_tick.saturating_sub(self.max_last_leader_grace_ticks);
+
+            self.tick_height() <= self.last_leader_tick.unwrap_or(0)
+                && self.tick_height() >= leader_ideal_start_tick.saturating_sub(within_next_n_ticks)
+        });
+
+        self.working_bank.is_some() || close_to_leader_tick
+    }
+
     pub fn hash(&mut self) {
         // TODO: amortize the cost of this lock by doing the loop in here for
         // some min amount of hashes
@@ -110,7 +123,6 @@ impl PohRecorder {

         let leader_ideal_start_tick =
             target_tick.saturating_sub(self.max_last_leader_grace_ticks);
-
         // Is the current tick in the same slot as the target tick?
         // Check if either grace period has expired,
         // or target tick is = grace period (i.e. poh recorder was just reset)
@@ -278,6 +290,7 @@ impl PohRecorder {
         id: &Pubkey,
         blocktree: &Arc<Blocktree>,
         clear_bank_signal: Option<SyncSender<bool>>,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> (Self, Receiver<WorkingBankEntries>) {
         let poh = Poh::new(last_entry_hash, tick_height);
         let (sender, receiver) = channel();
@@ -301,6 +314,7 @@ impl PohRecorder {
                 max_last_leader_grace_ticks,
                 id: *id,
                 blocktree: blocktree.clone(),
+                leader_schedule_cache: leader_schedule_cache.clone(),
             },
             receiver,
         )
@@ -317,6 +331,7 @@ impl PohRecorder {
         ticks_per_slot: u64,
         id: &Pubkey,
         blocktree: &Arc<Blocktree>,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> (Self, Receiver<WorkingBankEntries>) {
         Self::new_with_clear_signal(
             tick_height,
@@ -327,6 +342,7 @@ impl PohRecorder {
             id,
             blocktree,
             None,
+            leader_schedule_cache,
         )
     }

@@ -376,6 +392,7 @@ impl PohRecorder {
 mod tests {
     use super::*;
     use crate::blocktree::{get_tmp_ledger_path, Blocktree};
+    use crate::leader_schedule_cache::LeaderScheduleCache;
     use crate::test_tx::test_tx;
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::hash;
@@ -399,6 +416,7 @@ mod tests {
             DEFAULT_TICKS_PER_SLOT,
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::default()),
         );
         poh_recorder.tick();
         assert_eq!(poh_recorder.tick_cache.len(), 1);
@@ -424,6 +442,7 @@ mod tests {
             DEFAULT_TICKS_PER_SLOT,
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::default()),
         );
         poh_recorder.tick();
         poh_recorder.tick();
@@ -448,6 +467,7 @@ mod tests {
             DEFAULT_TICKS_PER_SLOT,
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::default()),
         );
         poh_recorder.tick();
         assert_eq!(poh_recorder.tick_cache.len(), 1);
@@ -474,6 +494,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );

         let working_bank = WorkingBank {
@@ -506,6 +527,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );

         let working_bank = WorkingBank {
@@ -550,6 +572,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );

         poh_recorder.tick();
@@ -592,6 +615,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );

         let working_bank = WorkingBank {
@@ -628,6 +652,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
let working_bank = WorkingBank {
|
let working_bank = WorkingBank {
|
||||||
@ -666,6 +691,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
let working_bank = WorkingBank {
|
let working_bank = WorkingBank {
|
||||||
@ -711,6 +737,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
let working_bank = WorkingBank {
|
let working_bank = WorkingBank {
|
||||||
@ -753,6 +780,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
let working_bank = WorkingBank {
|
let working_bank = WorkingBank {
|
||||||
@ -786,6 +814,7 @@ mod tests {
|
|||||||
DEFAULT_TICKS_PER_SLOT,
|
DEFAULT_TICKS_PER_SLOT,
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::default()),
|
||||||
);
|
);
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
@ -816,6 +845,7 @@ mod tests {
|
|||||||
DEFAULT_TICKS_PER_SLOT,
|
DEFAULT_TICKS_PER_SLOT,
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::default()),
|
||||||
);
|
);
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
@ -846,6 +876,7 @@ mod tests {
|
|||||||
DEFAULT_TICKS_PER_SLOT,
|
DEFAULT_TICKS_PER_SLOT,
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::default()),
|
||||||
);
|
);
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
poh_recorder.tick();
|
poh_recorder.tick();
|
||||||
@ -876,6 +907,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
let ticks_per_slot = bank.ticks_per_slot();
|
let ticks_per_slot = bank.ticks_per_slot();
|
||||||
let working_bank = WorkingBank {
|
let working_bank = WorkingBank {
|
||||||
@ -908,6 +940,7 @@ mod tests {
|
|||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
Some(sender),
|
Some(sender),
|
||||||
|
&Arc::new(LeaderScheduleCache::default()),
|
||||||
);
|
);
|
||||||
poh_recorder.set_bank(&bank);
|
poh_recorder.set_bank(&bank);
|
||||||
poh_recorder.clear_bank();
|
poh_recorder.clear_bank();
|
||||||
@ -936,6 +969,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
let end_slot = 3;
|
let end_slot = 3;
|
||||||
@ -980,6 +1014,7 @@ mod tests {
|
|||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&Arc::new(blocktree),
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
);
|
);
|
||||||
|
|
||||||
// Test that with no leader slot, we don't reach the leader tick
|
// Test that with no leader slot, we don't reach the leader tick
|
||||||
@ -1132,4 +1167,88 @@ mod tests {
|
|||||||
}
|
}
|
||||||
Blocktree::destroy(&ledger_path).unwrap();
|
Blocktree::destroy(&ledger_path).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_would_be_leader_soon() {
|
||||||
|
let ledger_path = get_tmp_ledger_path!();
|
||||||
|
{
|
||||||
|
let blocktree =
|
||||||
|
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
|
||||||
|
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||||
|
let bank = Arc::new(Bank::new(&genesis_block));
|
||||||
|
let prev_hash = bank.last_blockhash();
|
||||||
|
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||||
|
0,
|
||||||
|
prev_hash,
|
||||||
|
0,
|
||||||
|
None,
|
||||||
|
bank.ticks_per_slot(),
|
||||||
|
&Pubkey::default(),
|
||||||
|
&Arc::new(blocktree),
|
||||||
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Test that with no leader slot, we don't reach the leader tick
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
for _ in 0..bank.ticks_per_slot() {
|
||||||
|
poh_recorder.tick();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that with no leader slot, we don't reach the leader tick after sending some ticks
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
poh_recorder.reset(
|
||||||
|
poh_recorder.tick_height(),
|
||||||
|
bank.last_blockhash(),
|
||||||
|
0,
|
||||||
|
None,
|
||||||
|
bank.ticks_per_slot(),
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
// We reset with leader slot after 3 slots
|
||||||
|
poh_recorder.reset(
|
||||||
|
poh_recorder.tick_height(),
|
||||||
|
bank.last_blockhash(),
|
||||||
|
0,
|
||||||
|
Some(bank.slot() + 3),
|
||||||
|
bank.ticks_per_slot(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Test that the node won't be leader in next 2 slots
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
// Test that the node will be leader in next 3 slots
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(3 * bank.ticks_per_slot()),
|
||||||
|
true
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
// If we set the working bank, the node should be leader within next 2 slots
|
||||||
|
poh_recorder.set_bank(&bank);
|
||||||
|
assert_eq!(
|
||||||
|
poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
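The `would_be_leader` method introduced above reduces to a window test on tick heights: the node reports itself as a near-term leader if it already holds a working bank, or if the current tick sits between the grace-adjusted ideal start of its scheduled slot (widened by the caller's horizon) and its last leader tick. A minimal standalone sketch of the same check, with the `PohRecorder` state passed in as plain arguments; only the names come from the diff, the rest is illustrative:

fn would_be_leader(
    tick_height: u64,
    start_leader_at_tick: Option<u64>,
    last_leader_tick: Option<u64>,
    max_last_leader_grace_ticks: u64,
    within_next_n_ticks: u64,
    has_working_bank: bool,
) -> bool {
    let close_to_leader_tick = start_leader_at_tick.map_or(false, |leader_tick| {
        // The slot ideally starts max_last_leader_grace_ticks before the leader tick.
        let leader_ideal_start_tick = leader_tick.saturating_sub(max_last_leader_grace_ticks);
        tick_height <= last_leader_tick.unwrap_or(0)
            && tick_height >= leader_ideal_start_tick.saturating_sub(within_next_n_ticks)
    });
    has_working_bank || close_to_leader_tick
}

fn main() {
    // Leader tick 64 with 4 grace ticks: the slot ideally starts at tick 60.
    assert!(would_be_leader(58, Some(64), Some(64), 4, 8, false)); // within horizon
    assert!(!would_be_leader(10, Some(64), Some(64), 4, 8, false)); // far too early
    assert!(would_be_leader(10, None, None, 4, 8, true)); // working bank wins
}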
@@ -98,6 +98,7 @@ impl Service for PohService {
 mod tests {
     use super::*;
     use crate::blocktree::{get_tmp_ledger_path, Blocktree};
+    use crate::leader_schedule_cache::LeaderScheduleCache;
     use crate::poh_recorder::WorkingBank;
     use crate::result::Result;
     use crate::test_tx::test_tx;
@@ -123,6 +124,7 @@ mod tests {
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         );
         let poh_recorder = Arc::new(Mutex::new(poh_recorder));
         let exit = Arc::new(AtomicBool::new(false));
@@ -5,6 +5,7 @@ use crate::blocktree::Blocktree;
 use crate::blocktree_processor;
 use crate::cluster_info::ClusterInfo;
 use crate::entry::{Entry, EntrySender, EntrySlice};
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::leader_schedule_utils;
 use crate::locktower::{Locktower, StakeLockout};
 use crate::packet::BlobError;
@@ -83,6 +84,7 @@ impl ReplayStage {
         subscriptions: &Arc<RpcSubscriptions>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         storage_entry_sender: EntrySender,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) -> (Self, Receiver<(u64, Pubkey)>)
     where
         T: 'static + KeypairUtil + Send + Sync,
@@ -103,6 +105,7 @@ impl ReplayStage {
                 .expect("blocktree.set_root() failed at replay_stage startup");
         }
         // Start the replay stage loop
+        let leader_schedule_cache = leader_schedule_cache.clone();
         let t_replay = Builder::new()
             .name("solana-replay-stage".to_string())
             .spawn(move || {
@@ -115,7 +118,11 @@ impl ReplayStage {
                         break;
                     }
 
-                    Self::generate_new_bank_forks(&blocktree, &mut bank_forks.write().unwrap());
+                    Self::generate_new_bank_forks(
+                        &blocktree,
+                        &mut bank_forks.write().unwrap(),
+                        &leader_schedule_cache,
+                    );
 
                     let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
 
@@ -158,6 +165,7 @@ impl ReplayStage {
                             &bank,
                             &poh_recorder,
                             ticks_per_slot,
+                            &leader_schedule_cache,
                         );
 
                         is_tpu_bank_active = false;
@@ -185,6 +193,7 @@ impl ReplayStage {
                             poh_slot,
                             reached_leader_tick,
                             grace_ticks,
+                            &leader_schedule_cache,
                         );
                     }
 
@@ -213,6 +222,7 @@ impl ReplayStage {
         poh_slot: u64,
         reached_leader_tick: bool,
         grace_ticks: u64,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) {
         trace!("{} checking poh slot {}", my_id, poh_slot);
         if bank_forks.read().unwrap().get(poh_slot).is_none() {
@@ -225,7 +235,7 @@ impl ReplayStage {
             };
             assert!(parent.is_frozen());
 
-            leader_schedule_utils::slot_leader_at(poh_slot, &parent)
+            leader_schedule_cache.slot_leader_at_else_compute(poh_slot, &parent)
                 .map(|next_leader| {
                     debug!(
                         "me: {} leader {} at poh slot {}",
@@ -327,9 +337,10 @@ impl ReplayStage {
         bank: &Arc<Bank>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         ticks_per_slot: u64,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
     ) {
         let next_leader_slot =
-            leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank, Some(blocktree));
+            leader_schedule_cache.next_leader_slot(&my_id, bank.slot(), &bank, Some(blocktree));
         poh_recorder.lock().unwrap().reset(
             bank.tick_height(),
             bank.last_blockhash(),
@@ -557,7 +568,11 @@ impl ReplayStage {
         }
     }
 
-    fn generate_new_bank_forks(blocktree: &Blocktree, forks: &mut BankForks) {
+    fn generate_new_bank_forks(
+        blocktree: &Blocktree,
+        forks: &mut BankForks,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
+    ) {
         // Find the next slot that chains to the old slot
         let frozen_banks = forks.frozen_banks();
         let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
@@ -577,7 +592,9 @@ impl ReplayStage {
                 trace!("child already active or frozen {}", child_id);
                 continue;
             }
-            let leader = leader_schedule_utils::slot_leader_at(child_id, &parent_bank).unwrap();
+            let leader = leader_schedule_cache
+                .slot_leader_at_else_compute(child_id, &parent_bank)
+                .unwrap();
             info!("new fork:{} parent:{}", child_id, parent_id);
             forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id));
         }
@@ -636,10 +653,10 @@ mod test {
         // Set up the replay stage
         {
             let voting_keypair = Arc::new(Keypair::new());
-            let (bank_forks, _bank_forks_info, blocktree, l_receiver) =
+            let (bank_forks, _bank_forks_info, blocktree, l_receiver, leader_schedule_cache) =
                 new_banks_from_blocktree(&my_ledger_path, None);
             let bank = bank_forks.working_bank();
+            let leader_schedule_cache = Arc::new(leader_schedule_cache);
             let blocktree = Arc::new(blocktree);
             let (exit, poh_recorder, poh_service, _entry_receiver) =
                 create_test_recorder(&bank, &blocktree);
@@ -656,6 +673,7 @@ mod test {
                 &Arc::new(RpcSubscriptions::default()),
                 &poh_recorder,
                 ledger_writer_sender,
+                &leader_schedule_cache,
             );
             let vote_ix = vote_instruction::vote(&voting_keypair.pubkey(), vec![Vote::new(0)]);
             let vote_tx = Transaction::new_signed_instructions(
@@ -754,6 +772,7 @@ mod test {
 
         let genesis_block = GenesisBlock::new(10_000).0;
         let bank0 = Bank::new(&genesis_block);
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
         let mut bank_forks = BankForks::new(0, bank0);
         bank_forks.working_bank().freeze();
 
@@ -763,7 +782,11 @@ mod test {
         blob_slot_1.set_parent(0);
         blocktree.insert_data_blobs(&vec![blob_slot_1]).unwrap();
         assert!(bank_forks.get(1).is_none());
-        ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
+        ReplayStage::generate_new_bank_forks(
+            &blocktree,
+            &mut bank_forks,
+            &leader_schedule_cache,
+        );
         assert!(bank_forks.get(1).is_some());
 
         // Insert blob for slot 3, generate new forks, check result
@@ -772,7 +795,11 @@ mod test {
         blob_slot_2.set_parent(0);
         blocktree.insert_data_blobs(&vec![blob_slot_2]).unwrap();
         assert!(bank_forks.get(2).is_none());
-        ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
+        ReplayStage::generate_new_bank_forks(
+            &blocktree,
+            &mut bank_forks,
+            &leader_schedule_cache,
+        );
         assert!(bank_forks.get(1).is_some());
         assert!(bank_forks.get(2).is_some());
     }
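The repeated `slot_leader_at_else_compute` calls above follow a compute-on-miss cache pattern: consult the `LeaderScheduleCache` first, and only recompute the expensive, stake-weighted schedule when the slot's entry is absent. A toy sketch of that shape, with `String` standing in for a leader `Pubkey` and a closure standing in for the schedule computation (everything here is illustrative, not the real cache):

use std::collections::HashMap;
use std::sync::RwLock;

struct LeaderCache {
    by_slot: RwLock<HashMap<u64, String>>,
}

impl LeaderCache {
    fn slot_leader_at_else_compute(
        &self,
        slot: u64,
        compute: impl Fn(u64) -> String,
    ) -> String {
        // Fast path: the schedule for this slot is already cached.
        if let Some(leader) = self.by_slot.read().unwrap().get(&slot) {
            return leader.clone();
        }
        // Slow path: compute once, then cache for later callers.
        let leader = compute(slot);
        self.by_slot.write().unwrap().insert(slot, leader.clone());
        leader
    }
}

fn main() {
    let cache = LeaderCache { by_slot: RwLock::new(HashMap::new()) };
    let leader = cache.slot_leader_at_else_compute(7, |s| format!("leader-for-slot-{}", s));
    assert_eq!(leader, "leader-for-slot-7");
    // Second lookup hits the cache; the closure is never consulted.
    let again = cache.slot_leader_at_else_compute(7, |_| unreachable!());
    assert_eq!(again, "leader-for-slot-7");
}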
@@ -14,17 +14,18 @@ use crate::streamer::receiver;
 use crate::streamer::responder;
 use crate::window_service::WindowService;
 use bincode::deserialize;
+use ed25519_dalek;
 use rand::thread_rng;
 use rand::Rng;
 use solana_client::rpc_client::RpcClient;
 use solana_client::rpc_request::RpcRequest;
 use solana_client::thin_client::{create_client, ThinClient};
 use solana_sdk::client::{AsyncClient, SyncClient};
 
 use solana_sdk::hash::{Hash, Hasher};
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
 use solana_sdk::system_transaction;
 use solana_sdk::transaction::Transaction;
+use solana_sdk::transport::TransportError;
 use solana_storage_api::storage_instruction;
 use std::fs::File;
 use std::io;
@@ -60,7 +61,7 @@ pub struct Replicator {
     slot: u64,
     ledger_path: String,
     storage_keypair: Arc<Keypair>,
-    signature: ring::signature::Signature,
+    signature: ed25519_dalek::Signature,
     cluster_entrypoint: ContactInfo,
     ledger_data_file_encrypted: PathBuf,
     sampling_offsets: Vec<u64>,
@@ -107,10 +108,10 @@ pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
 }
 
 fn get_entry_heights_from_blockhash(
-    signature: &ring::signature::Signature,
+    signature: &ed25519_dalek::Signature,
     storage_entry_height: u64,
 ) -> u64 {
-    let signature_vec = signature.as_ref();
+    let signature_vec = signature.to_bytes();
     let mut segment_index = u64::from(signature_vec[0])
         | (u64::from(signature_vec[1]) << 8)
         | (u64::from(signature_vec[1]) << 16)
@@ -129,12 +130,7 @@ fn create_request_processor(
     let (s_reader, r_reader) = channel();
     let (s_responder, r_responder) = channel();
     let storage_socket = Arc::new(socket);
-    let t_receiver = receiver(
-        storage_socket.clone(),
-        exit,
-        s_reader,
-        "replicator-receiver",
-    );
+    let t_receiver = receiver(storage_socket.clone(), exit, s_reader);
     thread_handles.push(t_receiver);
 
     let t_responder = responder("replicator-responder", storage_socket.clone(), r_responder);
@@ -144,7 +140,7 @@ fn create_request_processor(
     let t_processor = spawn(move || loop {
         let packets = r_reader.recv_timeout(Duration::from_secs(1));
         if let Ok(packets) = packets {
-            for packet in &packets.read().unwrap().packets {
+            for packet in &packets.packets {
                 let req: result::Result<ReplicatorRequest, Box<bincode::ErrorKind>> =
                     deserialize(&packet.data[..packet.meta.size]);
                 match req {
@@ -239,6 +235,8 @@ impl Replicator {
         let (retransmit_sender, retransmit_receiver) = channel();
 
         let window_service = WindowService::new(
+            None, //TODO: need a way to validate blobs... https://github.com/solana-labs/solana/issues/3924
+            None, //TODO: see above ^
            blocktree.clone(),
             cluster_info.clone(),
             blob_fetch_receiver,
@@ -246,6 +244,7 @@ impl Replicator {
             repair_socket,
             &exit,
             Some(repair_slot_range),
+            &Hash::default(),
         );
 
         let client = create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
@@ -359,7 +358,7 @@ impl Replicator {
         #[cfg(feature = "chacha")]
         {
             let mut ivec = [0u8; 64];
-            ivec.copy_from_slice(self.signature.as_ref());
+            ivec.copy_from_slice(&self.signature.to_bytes());
 
             let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
                 &self.blocktree,
@@ -388,7 +387,7 @@ impl Replicator {
         use rand_chacha::ChaChaRng;
 
         let mut rng_seed = [0u8; 32];
-        rng_seed.copy_from_slice(&self.signature.as_ref()[0..32]);
+        rng_seed.copy_from_slice(&self.signature.to_bytes()[0..32]);
         let mut rng = ChaChaRng::from_seed(rng_seed);
         for _ in 0..NUM_STORAGE_SAMPLES {
             self.sampling_offsets
@@ -407,7 +406,7 @@ impl Replicator {
         client: &ThinClient,
         keypair: &Keypair,
         storage_keypair: &Keypair,
-    ) -> io::Result<()> {
+    ) -> Result<()> {
         // make sure replicator has some balance
         if client.poll_get_balance(&keypair.pubkey())? == 0 {
             Err(io::Error::new(
@@ -431,7 +430,14 @@ impl Replicator {
                 0,
             );
             let signature = client.async_send_transaction(tx)?;
-            client.poll_for_signature(&signature)?;
+            client
+                .poll_for_signature(&signature)
+                .map_err(|err| match err {
+                    TransportError::IoError(e) => e,
+                    TransportError::TransactionError(_) => {
+                        io::Error::new(ErrorKind::Other, "signature not found")
+                    }
+                })?;
         }
         Ok(())
     }
@@ -453,7 +459,7 @@ impl Replicator {
             &self.storage_keypair.pubkey(),
             self.hash,
             self.slot,
-            Signature::new(self.signature.as_ref()),
+            Signature::new(&self.signature.to_bytes()),
         );
         let mut tx = Transaction::new_unsigned_instructions(vec![ix]);
         client
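The replicator changes above swap `ring` signatures for `ed25519-dalek` ones; the visible API difference is that `as_ref()` on a ring signature yielded a borrowed byte slice, while dalek's `to_bytes()` returns an owned `[u8; 64]`, hence the new `&` at call sites like `Signature::new(&self.signature.to_bytes())`. A small sketch of the dalek side (ed25519-dalek 1.x with rand 0.7 assumed; crate versions are not stated in the diff):

use ed25519_dalek::{Keypair, Signer, Verifier};
use rand::rngs::OsRng;

fn main() {
    let mut csprng = OsRng;
    let keypair = Keypair::generate(&mut csprng);

    let msg = b"storage mining proof";
    let signature = keypair.sign(msg);

    // to_bytes() hands back an owned 64-byte array, so callers that
    // previously passed signature.as_ref() now pass &signature.to_bytes().
    let bytes: [u8; 64] = signature.to_bytes();
    assert_eq!(bytes.len(), 64);

    assert!(keypair.public.verify(msg, &signature).is_ok());
}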
@@ -2,8 +2,6 @@
 
 use crate::blocktree;
 use crate::cluster_info;
-#[cfg(feature = "erasure")]
-use crate::erasure;
 use crate::packet;
 use crate::poh_recorder;
 use bincode;
@@ -25,8 +23,7 @@ pub enum Error {
     TransactionError(transaction::TransactionError),
     ClusterInfoError(cluster_info::ClusterInfoError),
     BlobError(packet::BlobError),
-    #[cfg(feature = "erasure")]
-    ErasureError(erasure::ErasureError),
+    ErasureError(reed_solomon_erasure::Error),
     SendError,
     PohRecorderError(poh_recorder::PohRecorderError),
     BlocktreeError(blocktree::BlocktreeError),
@@ -67,9 +64,8 @@ impl std::convert::From<cluster_info::ClusterInfoError> for Error {
         Error::ClusterInfoError(e)
     }
 }
-#[cfg(feature = "erasure")]
-impl std::convert::From<erasure::ErasureError> for Error {
-    fn from(e: erasure::ErasureError) -> Error {
+impl std::convert::From<reed_solomon_erasure::Error> for Error {
+    fn from(e: reed_solomon_erasure::Error) -> Error {
         Error::ErasureError(e)
     }
 }
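Dropping the `erasure` feature gate works because `Error` now wraps `reed_solomon_erasure::Error` directly, and the accompanying `From` impl is what lets `?` convert the library error implicitly. A minimal illustration of that mechanism with stand-in types (nothing here is from the diff except the pattern):

#[derive(Debug)]
enum Error {
    ErasureError(ExternalError),
}

#[derive(Debug)]
struct ExternalError;

impl From<ExternalError> for Error {
    fn from(e: ExternalError) -> Error {
        Error::ErasureError(e)
    }
}

fn external_op() -> Result<(), ExternalError> {
    Err(ExternalError)
}

fn crate_op() -> Result<(), Error> {
    // No manual map_err: `?` calls From::from on the error value.
    external_op()?;
    Ok(())
}

fn main() {
    assert!(matches!(crate_op(), Err(Error::ErasureError(_))));
}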
@@ -3,10 +3,9 @@
 use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 use crate::cluster_info::{
-    compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT, GROW_LAYER_CAPACITY,
-    NEIGHBORHOOD_SIZE,
+    compute_retransmit_peers, ClusterInfo, GROW_LAYER_CAPACITY, NEIGHBORHOOD_SIZE,
 };
-use crate::packet::SharedBlob;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::staking_utils;
@@ -14,6 +13,7 @@ use crate::streamer::BlobReceiver;
 use crate::window_service::WindowService;
 use solana_metrics::counter::Counter;
 use solana_metrics::{influxdb, submit};
+use solana_sdk::hash::Hash;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
@@ -24,47 +24,44 @@ use std::time::Duration;
 
 fn retransmit(
     bank_forks: &Arc<RwLock<BankForks>>,
+    leader_schedule_cache: &Arc<LeaderScheduleCache>,
     cluster_info: &Arc<RwLock<ClusterInfo>>,
     r: &BlobReceiver,
     sock: &UdpSocket,
 ) -> Result<()> {
     let timer = Duration::new(1, 0);
-    let mut dq = r.recv_timeout(timer)?;
+    let mut blobs = r.recv_timeout(timer)?;
     while let Ok(mut nq) = r.try_recv() {
-        dq.append(&mut nq);
+        blobs.append(&mut nq);
     }
 
     submit(
         influxdb::Point::new("retransmit-stage")
-            .add_field("count", influxdb::Value::Integer(dq.len() as i64))
+            .add_field("count", influxdb::Value::Integer(blobs.len() as i64))
             .to_owned(),
     );
+    let r_bank = bank_forks.read().unwrap().working_bank();
+    let bank_epoch = r_bank.get_stakers_epoch(r_bank.slot());
     let (neighbors, children) = compute_retransmit_peers(
-        &staking_utils::delegated_stakes(&bank_forks.read().unwrap().working_bank()),
+        &staking_utils::delegated_stakes_at_epoch(&r_bank, bank_epoch).unwrap(),
         cluster_info,
-        DATA_PLANE_FANOUT,
+        NEIGHBORHOOD_SIZE,
         NEIGHBORHOOD_SIZE,
         GROW_LAYER_CAPACITY,
     );
-    for b in &dq {
-        if b.read().unwrap().should_forward() {
-            ClusterInfo::retransmit_to(&cluster_info, &neighbors, &copy_for_neighbors(b), sock)?;
+    for blob in &blobs {
+        let leader = leader_schedule_cache
+            .slot_leader_at_else_compute(blob.read().unwrap().slot(), r_bank.as_ref());
+        if blob.read().unwrap().meta.forward {
+            ClusterInfo::retransmit_to(&cluster_info, &neighbors, blob, leader, sock, true)?;
+            ClusterInfo::retransmit_to(&cluster_info, &children, blob, leader, sock, false)?;
+        } else {
+            ClusterInfo::retransmit_to(&cluster_info, &children, blob, leader, sock, true)?;
         }
-        // Always send blobs to children
-        ClusterInfo::retransmit_to(&cluster_info, &children, b, sock)?;
     }
     Ok(())
 }
 
-/// Modifies a blob for neighbors nodes
-#[inline]
-fn copy_for_neighbors(b: &SharedBlob) -> SharedBlob {
-    let mut blob = b.read().unwrap().clone();
-    // Disable blob forwarding for neighbors
-    blob.forward(false);
-    Arc::new(RwLock::new(blob))
-}
-
 /// Service to retransmit messages from the leader or layer 1 to relevant peer nodes.
 /// See `cluster_info` for network layer definitions.
 /// # Arguments
@@ -76,15 +73,24 @@ fn copy_for_neighbors(b: &SharedBlob) -> SharedBlob {
 fn retransmitter(
     sock: Arc<UdpSocket>,
     bank_forks: Arc<RwLock<BankForks>>,
+    leader_schedule_cache: &Arc<LeaderScheduleCache>,
     cluster_info: Arc<RwLock<ClusterInfo>>,
     r: BlobReceiver,
 ) -> JoinHandle<()> {
+    let bank_forks = bank_forks.clone();
+    let leader_schedule_cache = leader_schedule_cache.clone();
     Builder::new()
         .name("solana-retransmitter".to_string())
         .spawn(move || {
            trace!("retransmitter started");
             loop {
-                if let Err(e) = retransmit(&bank_forks, &cluster_info, &r, &sock) {
+                if let Err(e) = retransmit(
+                    &bank_forks,
+                    &leader_schedule_cache,
+                    &cluster_info,
+                    &r,
+                    &sock,
+                ) {
                     match e {
                         Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                         Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@@ -107,23 +113,28 @@ pub struct RetransmitStage {
 impl RetransmitStage {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(
-        bank_forks: &Arc<RwLock<BankForks>>,
+        bank_forks: Arc<RwLock<BankForks>>,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
         blocktree: Arc<Blocktree>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         retransmit_socket: Arc<UdpSocket>,
         repair_socket: Arc<UdpSocket>,
         fetch_stage_receiver: BlobReceiver,
         exit: &Arc<AtomicBool>,
+        genesis_blockhash: &Hash,
     ) -> Self {
         let (retransmit_sender, retransmit_receiver) = channel();
 
         let t_retransmit = retransmitter(
             retransmit_socket,
             bank_forks.clone(),
+            leader_schedule_cache,
             cluster_info.clone(),
             retransmit_receiver,
         );
         let window_service = WindowService::new(
+            Some(bank_forks),
+            Some(leader_schedule_cache.clone()),
             blocktree,
             cluster_info.clone(),
             fetch_stage_receiver,
@@ -131,6 +142,7 @@ impl RetransmitStage {
             repair_socket,
             exit,
             None,
+            genesis_blockhash,
         );
 
         let thread_hdls = vec![t_retransmit];
@@ -152,17 +164,3 @@ impl Service for RetransmitStage {
         Ok(())
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    // Test that blobs always come out with forward unset for neighbors
-    #[test]
-    fn test_blob_for_neighbors() {
-        let blob = SharedBlob::default();
-        blob.write().unwrap().forward(true);
-        let for_hoodies = copy_for_neighbors(&blob);
-        assert!(!for_hoodies.read().unwrap().should_forward());
-    }
-}
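With `copy_for_neighbors` gone, the retransmit loop above encodes the forwarding policy in the extra arguments to `ClusterInfo::retransmit_to`. The exact semantics of the final boolean live in that method; the sketch below only mirrors which (destination, flag) pairs each kind of blob produces under the new branching, with stand-in types throughout:

#[derive(Debug, PartialEq)]
enum Dest {
    Neighbors,
    Children,
}

// Returns the (destination, flag) pairs for one blob, following the
// if/else shape of the new loop.
fn plan_retransmit(blob_marked_forward: bool) -> Vec<(Dest, bool)> {
    if blob_marked_forward {
        vec![(Dest::Neighbors, true), (Dest::Children, false)]
    } else {
        vec![(Dest::Children, true)]
    }
}

fn main() {
    assert_eq!(
        plan_retransmit(true),
        vec![(Dest::Neighbors, true), (Dest::Children, false)]
    );
    assert_eq!(plan_retransmit(false), vec![(Dest::Children, true)]);
}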
111
core/src/rpc.rs
@@ -2,6 +2,7 @@
 
 use crate::bank_forks::BankForks;
 use crate::cluster_info::ClusterInfo;
+use crate::contact_info::ContactInfo;
 use crate::packet::PACKET_DATA_SIZE;
 use crate::storage_stage::StorageState;
 use bincode::{deserialize, serialize};
@@ -171,6 +172,18 @@ pub struct Meta {
 }
 impl Metadata for Meta {}
 
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct RpcContactInfo {
+    /// Base58 id
+    pub id: String,
+    /// Gossip port
+    pub gossip: Option<SocketAddr>,
+    /// Tpu port
+    pub tpu: Option<SocketAddr>,
+    /// JSON RPC port
+    pub rpc: Option<SocketAddr>,
+}
+
 #[rpc(server)]
 pub trait RpcSol {
     type Metadata;
@@ -184,6 +197,9 @@ pub trait RpcSol {
     #[rpc(meta, name = "getBalance")]
     fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>;
 
+    #[rpc(meta, name = "getClusterNodes")]
+    fn get_cluster_nodes(&self, _: Self::Metadata) -> Result<Vec<RpcContactInfo>>;
+
     #[rpc(meta, name = "getRecentBlockhash")]
     fn get_recent_blockhash(&self, _: Self::Metadata) -> Result<String>;
 
@@ -203,6 +219,9 @@ pub trait RpcSol {
     #[rpc(meta, name = "sendTransaction")]
     fn send_transaction(&self, _: Self::Metadata, _: Vec<u8>) -> Result<String>;
 
+    #[rpc(meta, name = "getSlotLeader")]
+    fn get_slot_leader(&self, _: Self::Metadata) -> Result<String>;
+
     #[rpc(meta, name = "getStorageBlockhash")]
     fn get_storage_blockhash(&self, _: Self::Metadata) -> Result<String>;
 
@@ -263,6 +282,33 @@ impl RpcSol for RpcSolImpl {
         Ok(meta.request_processor.read().unwrap().get_balance(&pubkey))
     }
 
+    fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
+        let cluster_info = meta.cluster_info.read().unwrap();
+        fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> {
+            if ContactInfo::is_valid_address(addr) {
+                Some(*addr)
+            } else {
+                None
+            }
+        }
+        Ok(cluster_info
+            .all_peers()
+            .iter()
+            .filter_map(|(contact_info, _)| {
+                if ContactInfo::is_valid_address(&contact_info.gossip) {
+                    Some(RpcContactInfo {
+                        id: contact_info.id.to_string(),
+                        gossip: Some(contact_info.gossip),
+                        tpu: valid_address_or_none(&contact_info.tpu),
+                        rpc: valid_address_or_none(&contact_info.rpc),
+                    })
+                } else {
+                    None // Exclude spy nodes
+                }
+            })
+            .collect())
+    }
+
     fn get_recent_blockhash(&self, meta: Self::Metadata) -> Result<String> {
         debug!("get_recent_blockhash rpc request received");
         Ok(meta
@@ -402,6 +448,15 @@ impl RpcSol for RpcSolImpl {
         Ok(signature)
     }
 
+    fn get_slot_leader(&self, meta: Self::Metadata) -> Result<String> {
+        let cluster_info = meta.cluster_info.read().unwrap();
+        let leader_data_option = cluster_info.leader_data();
+        Ok(leader_data_option
+            .and_then(|leader_data| Some(leader_data.id))
+            .unwrap_or_default()
+            .to_string())
+    }
+
     fn get_storage_blockhash(&self, meta: Self::Metadata) -> Result<String> {
         meta.request_processor
             .read()
@@ -445,7 +500,9 @@ mod tests {
     use solana_sdk::transaction::TransactionError;
     use std::thread;
 
-    fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> (MetaIoHandler<Meta>, Meta, Hash, Keypair) {
+    fn start_rpc_handler_with_tx(
+        pubkey: &Pubkey,
+    ) -> (MetaIoHandler<Meta>, Meta, Hash, Keypair, Pubkey) {
         let (bank_forks, alice) = new_bank_forks();
         let bank = bank_forks.read().unwrap().working_bank();
         let exit = Arc::new(AtomicBool::new(false));
@@ -477,7 +534,7 @@ mod tests {
             request_processor,
             cluster_info,
         };
-        (io, meta, blockhash, alice)
+        (io, meta, blockhash, alice, leader.id)
     }
 
     #[test]
@@ -505,7 +562,7 @@ mod tests {
     #[test]
     fn test_rpc_get_balance() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, _blockhash, _alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, _blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
 
         let req = format!(
             r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
@@ -520,10 +577,46 @@ mod tests {
         assert_eq!(expected, result);
     }
 
+    #[test]
+    fn test_rpc_get_cluster_nodes() {
+        let bob_pubkey = Pubkey::new_rand();
+        let (io, meta, _blockhash, _alice, leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
+
+        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#);
+        let res = io.handle_request_sync(&req, meta);
+        let result: Response = serde_json::from_str(&res.expect("actual response"))
+            .expect("actual response deserialization");
+
+        let expected = format!(
+            r#"{{"jsonrpc":"2.0","result":[{{"id": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:8899"}}],"id":1}}"#,
+            leader_id,
+        );
+
+        let expected: Response =
+            serde_json::from_str(&expected).expect("expected response deserialization");
+        assert_eq!(expected, result);
+    }
+
+    #[test]
+    fn test_rpc_get_slot_leader() {
+        let bob_pubkey = Pubkey::new_rand();
+        let (io, meta, _blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
+
+        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#);
+        let res = io.handle_request_sync(&req, meta);
+        let expected =
+            format!(r#"{{"jsonrpc":"2.0","result":"11111111111111111111111111111111","id":1}}"#);
+        let expected: Response =
+            serde_json::from_str(&expected).expect("expected response deserialization");
+        let result: Response = serde_json::from_str(&res.expect("actual response"))
+            .expect("actual response deserialization");
+        assert_eq!(expected, result);
+    }
+
     #[test]
     fn test_rpc_get_tx_count() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, _blockhash, _alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, _blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
 
         let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
         let res = io.handle_request_sync(&req, meta);
@@ -538,7 +631,7 @@ mod tests {
     #[test]
     fn test_rpc_get_account_info() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, _blockhash, _alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, _blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
 
         let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#,
@@ -565,7 +658,7 @@ mod tests {
     #[test]
     fn test_rpc_confirm_tx() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, blockhash, alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, blockhash, alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
         let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash, 0);
 
         let req = format!(
@@ -584,7 +677,7 @@ mod tests {
     #[test]
     fn test_rpc_get_signature_status() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, blockhash, alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, blockhash, alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
         let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash, 0);
 
         let req = format!(
@@ -648,7 +741,7 @@ mod tests {
     #[test]
     fn test_rpc_get_recent_blockhash() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, blockhash, _alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
 
         let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#);
         let res = io.handle_request_sync(&req, meta);
@@ -663,7 +756,7 @@ mod tests {
     #[test]
     fn test_rpc_fail_request_airdrop() {
         let bob_pubkey = Pubkey::new_rand();
-        let (io, meta, _blockhash, _alice) = start_rpc_handler_with_tx(&bob_pubkey);
+        let (io, meta, _blockhash, _alice, _leader_id) = start_rpc_handler_with_tx(&bob_pubkey);
 
         // Expect internal error because no drone is available
         let req = format!(
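A client-side sketch of exercising the two endpoints added above, speaking raw JSON-RPC over HTTP against a locally running node; the `reqwest` crate (blocking feature) and the default 8899 RPC port are assumptions, not part of the diff:

use reqwest::blocking::Client;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = "http://127.0.0.1:8899"; // assumed default local RPC port
    let client = Client::new();

    for method in &["getClusterNodes", "getSlotLeader"] {
        // Same request shape the tests above build with format!().
        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"{}"}}"#, method);
        let body = client
            .post(url)
            .header("Content-Type", "application/json")
            .body(req)
            .send()?
            .text()?;
        println!("{}: {}", method, body);
    }
    Ok(())
}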
@@ -15,7 +15,9 @@ use std::time::Duration;
 
 pub struct JsonRpcService {
     thread_hdl: JoinHandle<()>,
-    pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by tests...
+
+    #[cfg(test)]
+    pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
 }
 
 impl JsonRpcService {
@@ -37,7 +39,7 @@ impl JsonRpcService {
         )));
         let request_processor_ = request_processor.clone();
 
-        let info = cluster_info.clone();
+        let cluster_info = cluster_info.clone();
         let exit_ = exit.clone();
 
         let thread_hdl = Builder::new()
@@ -50,7 +52,7 @@ impl JsonRpcService {
             let server =
                 ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
                     request_processor: request_processor_.clone(),
-                    cluster_info: info.clone(),
+                    cluster_info: cluster_info.clone(),
                 }).threads(4)
                 .cors(DomainsValidation::AllowOnly(vec![
                     AccessControlAllowOrigin::Any,
@@ -68,6 +70,7 @@ impl JsonRpcService {
             .unwrap();
         Self {
             thread_hdl,
+            #[cfg(test)]
             request_processor,
         }
     }
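The `#[cfg(test)]` attribute above gates both the struct field and its initializer in the struct literal, so the test-only handle disappears entirely from production builds rather than lingering behind a comment. A toy illustration of the same pattern (names here are invented for the example):

pub struct Service {
    name: String,
    // Present only when compiling tests; production code cannot touch it.
    #[cfg(test)]
    pub request_count: u64,
}

impl Service {
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            // The same attribute gates the initializer, mirroring the diff.
            #[cfg(test)]
            request_count: 0,
        }
    }

    pub fn name(&self) -> &str {
        &self.name
    }
}

fn main() {
    let service = Service::new("rpc");
    println!("{}", service.name());
}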
@@ -229,7 +229,7 @@ mod tests {
         subscriptions.check_account(&alice.pubkey(), &account);
         let string = transport_receiver.poll();
         if let Async::Ready(Some(response)) = string.unwrap() {
-            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
+            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0]}},"subscription":0}}}}"#);
             assert_eq!(expected, response);
         }
 
@@ -275,7 +275,7 @@ mod tests {
         subscriptions.check_program(&solana_budget_api::id(), &alice.pubkey(), &account);
         let string = transport_receiver.poll();
         if let Async::Ready(Some(response)) = string.unwrap() {
-            let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey());
+            let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey());
             assert_eq!(expected, response);
         }
 
@ -4,7 +4,7 @@
|
|||||||
//! offloaded to the GPU.
|
//! offloaded to the GPU.
|
||||||
//!
|
//!
|
||||||
|
|
||||||
use crate::packet::{Packet, SharedPackets};
|
use crate::packet::{Packet, Packets};
|
||||||
use crate::result::Result;
|
use crate::result::Result;
|
||||||
use solana_metrics::counter::Counter;
|
use solana_metrics::counter::Counter;
|
||||||
use solana_sdk::pubkey::Pubkey;
|
use solana_sdk::pubkey::Pubkey;
|
||||||
@ -67,10 +67,8 @@ pub fn init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn verify_packet(packet: &Packet) -> u8 {
|
fn verify_packet(packet: &Packet) -> u8 {
|
||||||
use ring::signature;
|
|
||||||
use solana_sdk::pubkey::Pubkey;
|
use solana_sdk::pubkey::Pubkey;
|
||||||
use solana_sdk::signature::Signature;
|
use solana_sdk::signature::Signature;
|
||||||
use untrusted;
|
|
||||||
|
|
||||||
let (sig_len, sig_start, msg_start, pubkey_start) = get_packet_offsets(packet, 0);
|
let (sig_len, sig_start, msg_start, pubkey_start) = get_packet_offsets(packet, 0);
|
||||||
let mut sig_start = sig_start as usize;
|
let mut sig_start = sig_start as usize;
|
||||||
@@ -90,14 +88,11 @@ fn verify_packet(packet: &Packet) -> u8 {
         return 0;
     }

-    if signature::verify(
-        &signature::ED25519,
-        untrusted::Input::from(&packet.data[pubkey_start..pubkey_end]),
-        untrusted::Input::from(&packet.data[msg_start..msg_end]),
-        untrusted::Input::from(&packet.data[sig_start..sig_end]),
-    )
-    .is_err()
-    {
+    let signature = Signature::new(&packet.data[sig_start..sig_end]);
+    if !signature.verify(
+        &packet.data[pubkey_start..pubkey_end],
+        &packet.data[msg_start..msg_end],
+    ) {
         return 0;
    }
    pubkey_start += size_of::<Pubkey>();
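
The hunk above swaps ring's free-function verify for the SDK's `Signature::verify(pubkey_bytes, message_bytes)` helper. As a rough standalone sketch of the same ed25519 check done directly against a crypto crate — the `ed25519-dalek` 1.x API and the helper name here are assumptions for illustration, not part of this diff:

use ed25519_dalek::{PublicKey, Signature, Verifier};

/// Hypothetical helper: verify an ed25519 signature given raw byte slices,
/// returning false (never panicking) on malformed inputs.
fn verify_slices(pubkey: &[u8], msg: &[u8], sig: &[u8]) -> bool {
    // Both constructors reject slices of the wrong length.
    let pubkey = match PublicKey::from_bytes(pubkey) {
        Ok(pk) => pk,
        Err(_) => return false,
    };
    let signature = match Signature::from_bytes(sig) {
        Ok(s) => s,
        Err(_) => return false,
    };
    pubkey.verify(msg, &signature).is_ok()
}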
@@ -106,20 +101,12 @@ fn verify_packet(packet: &Packet) -> u8 {
     1
 }

-fn verify_packet_disabled(_packet: &Packet) -> u8 {
-    warn!("signature verification is disabled");
-    1
-}
-
-fn batch_size(batches: &[SharedPackets]) -> usize {
-    batches
-        .iter()
-        .map(|p| p.read().unwrap().packets.len())
-        .sum()
+fn batch_size(batches: &[Packets]) -> usize {
+    batches.iter().map(|p| p.packets.len()).sum()
 }

 #[cfg(not(feature = "cuda"))]
-pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
+pub fn ed25519_verify(batches: &[Packets]) -> Vec<Vec<u8>> {
     ed25519_verify_cpu(batches)
 }

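Dropping `SharedPackets` (an `Arc<RwLock<Packets>>`) in favor of plain `Packets` is what lets `batch_size` collapse to a one-liner. A minimal sketch of the before/after counting, with a stub `Packets` type standing in for the real one (the stub and sizes are illustrative only):

use std::sync::{Arc, RwLock};

struct Packets {
    packets: Vec<u8>, // stub: one byte per "packet"
}

fn main() {
    // old shape: every count takes a read lock
    let shared: Vec<Arc<RwLock<Packets>>> =
        vec![Arc::new(RwLock::new(Packets { packets: vec![0; 3] }))];
    let n_old: usize = shared.iter().map(|p| p.read().unwrap().packets.len()).sum();

    // new shape: plain ownership, no locking at all
    let plain: Vec<Packets> = vec![Packets { packets: vec![0; 3] }];
    let n_new: usize = plain.iter().map(|p| p.packets.len()).sum();

    assert_eq!(n_old, n_new);
}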
@@ -141,7 +128,7 @@ pub fn get_packet_offsets(packet: &Packet, current_offset: u32) -> (u32, u32, u3
     )
 }

-pub fn generate_offsets(batches: &[SharedPackets]) -> Result<TxOffsets> {
+pub fn generate_offsets(batches: &[Packets]) -> Result<TxOffsets> {
     let mut signature_offsets: Vec<_> = Vec::new();
     let mut pubkey_offsets: Vec<_> = Vec::new();
     let mut msg_start_offsets: Vec<_> = Vec::new();

@@ -150,7 +137,7 @@ pub fn generate_offsets(batches: &[SharedPackets]) -> Result<TxOffsets> {
     let mut v_sig_lens = Vec::new();
     batches.iter().for_each(|p| {
         let mut sig_lens = Vec::new();
-        p.read().unwrap().packets.iter().for_each(|packet| {
+        p.packets.iter().for_each(|packet| {
             let current_offset = current_packet as u32 * size_of::<Packet>() as u32;

             let (sig_len, sig_start, msg_start_offset, pubkey_offset) =
@@ -185,39 +172,25 @@ pub fn generate_offsets(batches: &[SharedPackets]) -> Result<TxOffsets> {
     ))
 }

-pub fn ed25519_verify_cpu(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
+pub fn ed25519_verify_cpu(batches: &[Packets]) -> Vec<Vec<u8>> {
     use rayon::prelude::*;
     let count = batch_size(batches);
-    info!("CPU ECDSA for {}", batch_size(batches));
+    debug!("CPU ECDSA for {}", batch_size(batches));
     let rv = batches
         .into_par_iter()
-        .map(|p| {
-            p.read()
-                .unwrap()
-                .packets
-                .par_iter()
-                .map(verify_packet)
-                .collect()
-        })
+        .map(|p| p.packets.par_iter().map(verify_packet).collect())
         .collect();
     inc_new_counter_info!("ed25519_verify_cpu", count);
     rv
 }

-pub fn ed25519_verify_disabled(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
+pub fn ed25519_verify_disabled(batches: &[Packets]) -> Vec<Vec<u8>> {
     use rayon::prelude::*;
     let count = batch_size(batches);
-    info!("disabled ECDSA for {}", batch_size(batches));
+    debug!("disabled ECDSA for {}", batch_size(batches));
     let rv = batches
         .into_par_iter()
-        .map(|p| {
-            p.read()
-                .unwrap()
-                .packets
-                .par_iter()
-                .map(verify_packet_disabled)
-                .collect()
-        })
+        .map(|p| vec![1u8; p.packets.len()])
         .collect();
     inc_new_counter_info!("ed25519_verify_disabled", count);
     rv
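
`ed25519_verify_cpu` fans out twice with rayon: across batches, then across packets within each batch. A self-contained sketch of that nesting shape (stub data; only the iterator structure mirrors the diff):

use rayon::prelude::*;

fn main() {
    // Stub "batches": each inner Vec plays the role of Packets.packets.
    let batches: Vec<Vec<u32>> = vec![vec![1, 2, 3], vec![4, 5]];
    // Outer parallel iterator over batches, inner one over packets,
    // mirroring the .into_par_iter()/.par_iter() nesting above.
    let results: Vec<Vec<u32>> = batches
        .par_iter()
        .map(|batch| batch.par_iter().map(|x| x % 2).collect())
        .collect();
    assert_eq!(results, vec![vec![1, 0, 1], vec![0, 1]]);
}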
@@ -235,7 +208,7 @@ pub fn init() {
 }

 #[cfg(feature = "cuda")]
-pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
+pub fn ed25519_verify(batches: &[Packets]) -> Vec<Vec<u8>> {
     use crate::packet::PACKET_DATA_SIZE;
     let count = batch_size(batches);

@@ -251,17 +224,13 @@ pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
     let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) =
         generate_offsets(batches).unwrap();

-    info!("CUDA ECDSA for {}", batch_size(batches));
+    debug!("CUDA ECDSA for {}", batch_size(batches));
     let mut out = Vec::new();
     let mut elems = Vec::new();
-    let mut locks = Vec::new();
     let mut rvs = Vec::new();

-    for packets in batches {
-        locks.push(packets.read().unwrap());
-    }
     let mut num_packets = 0;
-    for p in locks {
+    for p in batches {
         elems.push(Elems {
             elems: p.packets.as_ptr(),
             num: p.packets.len() as u32,
@@ -327,7 +296,7 @@ pub fn make_packet_from_transaction(tx: Transaction) -> Packet {

 #[cfg(test)]
 mod tests {
-    use crate::packet::{Packet, SharedPackets};
+    use crate::packet::{Packet, Packets};
     use crate::sigverify;
     use crate::test_tx::{test_multisig_tx, test_tx};
     use bincode::{deserialize, serialize};
@@ -440,20 +409,16 @@ mod tests {
         packet: &Packet,
         num_packets_per_batch: usize,
         num_batches: usize,
-    ) -> Vec<SharedPackets> {
+    ) -> Vec<Packets> {
         // generate packet vector
         let batches: Vec<_> = (0..num_batches)
             .map(|_| {
-                let packets = SharedPackets::default();
-                packets
-                    .write()
-                    .unwrap()
-                    .packets
-                    .resize(0, Packet::default());
+                let mut packets = Packets::default();
+                packets.packets.resize(0, Packet::default());
                 for _ in 0..num_packets_per_batch {
-                    packets.write().unwrap().packets.push(packet.clone());
+                    packets.packets.push(packet.clone());
                 }
-                assert_eq!(packets.read().unwrap().packets.len(), num_packets_per_batch);
+                assert_eq!(packets.packets.len(), num_packets_per_batch);
                 packets
             })
             .collect();
@@ -505,11 +470,11 @@ mod tests {

         let n = 4;
         let num_batches = 3;
-        let batches = generate_packet_vec(&packet, n, num_batches);
+        let mut batches = generate_packet_vec(&packet, n, num_batches);

         packet.data[40] = packet.data[40].wrapping_add(8);

-        batches[0].write().unwrap().packets.push(packet);
+        batches[0].packets.push(packet);

         // verify packets
         let ans = sigverify::ed25519_verify(&batches);
@@ -5,7 +5,7 @@
 //! transaction. All processing is done on the CPU by default and on a GPU
 //! if the `cuda` feature is enabled with `--features=cuda`.

-use crate::packet::SharedPackets;
+use crate::packet::Packets;
 use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::sigverify;
@@ -18,7 +18,13 @@ use std::sync::{Arc, Mutex};
 use std::thread::{self, Builder, JoinHandle};
 use std::time::Instant;

-pub type VerifiedPackets = Vec<(SharedPackets, Vec<u8>)>;
+#[cfg(feature = "cuda")]
+const RECV_BATCH_MAX: usize = 60_000;
+
+#[cfg(not(feature = "cuda"))]
+const RECV_BATCH_MAX: usize = 1000;
+
+pub type VerifiedPackets = Vec<(Packets, Vec<u8>)>;

 pub struct SigVerifyStage {
     thread_hdls: Vec<JoinHandle<()>>,
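
The stage's batch ceiling is now chosen at compile time by feature flag: with `cuda`, the verifier is willing to accumulate far larger batches, since GPU verification amortizes better over big inputs. A tiny sketch of the cfg-gated-constant pattern the diff uses (the `main` is illustrative, not from the change):

#[cfg(feature = "cuda")]
const RECV_BATCH_MAX: usize = 60_000;

#[cfg(not(feature = "cuda"))]
const RECV_BATCH_MAX: usize = 1000;

fn main() {
    // Built with `cargo run --features cuda`, this prints 60000; otherwise 1000.
    println!("{}", RECV_BATCH_MAX);
}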
@@ -27,7 +33,7 @@ pub struct SigVerifyStage {
 impl SigVerifyStage {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(
-        packet_receiver: Receiver<SharedPackets>,
+        packet_receiver: Receiver<Packets>,
         sigverify_disabled: bool,
         verified_sender: Sender<VerifiedPackets>,
     ) -> Self {

@@ -37,7 +43,7 @@ impl SigVerifyStage {
         Self { thread_hdls }
     }

-    fn verify_batch(batch: Vec<SharedPackets>, sigverify_disabled: bool) -> VerifiedPackets {
+    fn verify_batch(batch: Vec<Packets>, sigverify_disabled: bool) -> VerifiedPackets {
         let r = if sigverify_disabled {
             sigverify::ed25519_verify_disabled(&batch)
         } else {
@@ -52,8 +58,10 @@ impl SigVerifyStage {
         sigverify_disabled: bool,
         id: usize,
     ) -> Result<()> {
-        let (batch, len, recv_time) =
-            streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
+        let (batch, len, recv_time) = streamer::recv_batch(
+            &recvr.lock().expect("'recvr' lock in fn verifier"),
+            RECV_BATCH_MAX,
+        )?;
         inc_new_counter_info!("sigverify_stage-packets_received", len);

         let now = Instant::now();
@@ -42,7 +42,7 @@ pub fn vote_account_balances_at_epoch(
     node_staked_accounts.map(|epoch_state| epoch_state.map(|(id, stake, _)| (*id, stake)).collect())
 }

-/// At the specified epoch, collect the delgate account balance and vote states for delegates
+/// At the specified epoch, collect the delegate account balance and vote states for delegates
 /// that have non-zero balance in any of their managed staking accounts
 pub fn delegated_stakes_at_epoch(bank: &Bank, epoch_height: u64) -> Option<HashMap<Pubkey, u64>> {
     let node_staked_accounts = node_staked_accounts_at_epoch(bank, epoch_height);
@@ -2,27 +2,28 @@
 // for storage mining. Replicators submit storage proofs, validator then bundles them
 // to submit its proof for mining to be rewarded.

+use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 #[cfg(all(feature = "chacha", feature = "cuda"))]
 use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
-use crate::cluster_info::{ClusterInfo, FULLNODE_PORT_RANGE};
+use crate::cluster_info::ClusterInfo;
 use crate::entry::{Entry, EntryReceiver};
 use crate::result::{Error, Result};
 use crate::service::Service;
 use bincode::deserialize;
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaChaRng;
-use solana_client::thin_client::{create_client_with_timeout, ThinClient};
-use solana_sdk::client::{AsyncClient, SyncClient};
 use solana_sdk::hash::Hash;
+use solana_sdk::instruction::Instruction;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
-use solana_sdk::system_transaction;
+use solana_sdk::system_instruction;
 use solana_sdk::transaction::Transaction;
 use solana_storage_api::storage_instruction::{self, StorageInstruction};
 use std::collections::HashSet;
 use std::io;
 use std::mem::size_of;
+use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, RecvTimeoutError, Sender};
 use std::sync::{Arc, RwLock};
@@ -67,7 +68,7 @@ pub const NUM_STORAGE_SAMPLES: usize = 4;
 pub const ENTRIES_PER_SEGMENT: u64 = 16;
 const KEY_SIZE: usize = 64;

-type TransactionSender = Sender<Transaction>;
+type InstructionSender = Sender<Instruction>;

 pub fn get_segment_from_entry(entry_height: u64) -> u64 {
     entry_height / ENTRIES_PER_SEGMENT
@@ -138,6 +139,7 @@ impl StorageState {
 }

 impl StorageStage {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         storage_state: &StorageState,
         storage_entry_receiver: EntryReceiver,

@@ -146,6 +148,7 @@ impl StorageStage {
         storage_keypair: &Arc<Keypair>,
         exit: &Arc<AtomicBool>,
         entry_height: u64,
+        bank_forks: &Arc<RwLock<BankForks>>,
         storage_rotate_count: u64,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
     ) -> Self {
@@ -155,7 +158,7 @@ impl StorageStage {
         let exit0 = exit.clone();
         let keypair0 = storage_keypair.clone();

-        let (tx_sender, tx_receiver) = channel();
+        let (instruction_sender, instruction_receiver) = channel();

         let t_storage_mining_verifier = Builder::new()
             .name("solana-storage-mining-verify-stage".to_string())

@@ -174,7 +177,7 @@ impl StorageStage {
                         &mut entry_height,
                         &mut current_key,
                         storage_rotate_count,
-                        &tx_sender,
+                        &instruction_sender,
                     ) {
                         match e {
                             Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@@ -194,22 +197,26 @@ impl StorageStage {
         let exit1 = exit.clone();
         let keypair1 = keypair.clone();
         let storage_keypair1 = storage_keypair.clone();
+        let bank_forks1 = bank_forks.clone();
         let t_storage_create_accounts = Builder::new()
             .name("solana-storage-create-accounts".to_string())
-            .spawn(move || loop {
-                match tx_receiver.recv_timeout(Duration::from_secs(1)) {
-                    Ok(mut tx) => {
+            .spawn(move || {
+                let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+                loop {
+                    match instruction_receiver.recv_timeout(Duration::from_secs(1)) {
+                        Ok(instruction) => {
                             if Self::send_transaction(
+                                &bank_forks1,
                                 &cluster_info0,
-                                &mut tx,
-                                &exit1,
+                                instruction,
                                 &keypair1,
                                 &storage_keypair1,
                                 Some(storage_keypair1.pubkey()),
+                                &transactions_socket,
                             )
-                            .is_ok()
+                            .is_err()
                             {
-                                debug!("sent transaction: {:?}", tx);
+                                debug!("Failed to send storage transaction");
                             }
                         }
                         Err(e) => match e {
@@ -222,6 +229,7 @@ impl StorageStage {
                             break;
                         }
                         sleep(Duration::from_millis(100));
+                    }
                 }
             })
             .unwrap();

@@ -231,81 +239,43 @@ impl StorageStage {
         }
     }

-    fn check_signature(
-        client: &ThinClient,
-        signature: &Signature,
-        exit: &Arc<AtomicBool>,
-    ) -> io::Result<()> {
-        for _ in 0..10 {
-            if client.check_signature(&signature) {
-                return Ok(());
-            }
-
-            if exit.load(Ordering::Relaxed) {
-                Err(io::Error::new(io::ErrorKind::Other, "exit signaled"))?;
-            }
-
-            sleep(Duration::from_millis(200));
-        }
-        Err(io::Error::new(io::ErrorKind::Other, "other failure"))
-    }
-
     fn send_transaction(
+        bank_forks: &Arc<RwLock<BankForks>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
-        transaction: &mut Transaction,
-        exit: &Arc<AtomicBool>,
+        instruction: Instruction,
         keypair: &Arc<Keypair>,
         storage_keypair: &Arc<Keypair>,
         account_to_create: Option<Pubkey>,
+        transactions_socket: &UdpSocket,
     ) -> io::Result<()> {
-        let contact_info = cluster_info.read().unwrap().my_data();
-        let client = create_client_with_timeout(
-            contact_info.client_facing_addr(),
-            FULLNODE_PORT_RANGE,
-            Duration::from_secs(5),
-        );
-
-        let mut blockhash = None;
-        for _ in 0..10 {
-            if let Ok(new_blockhash) = client.get_recent_blockhash() {
-                blockhash = Some(new_blockhash);
-                break;
-            }
-
-            if exit.load(Ordering::Relaxed) {
-                Err(io::Error::new(io::ErrorKind::Other, "exit signaled"))?;
-            }
-        }
-        if let Some(blockhash) = blockhash {
+        let working_bank = bank_forks.read().unwrap().working_bank();
+        let blockhash = working_bank.confirmed_last_blockhash();
+        let mut instructions = vec![];
+        let mut signing_keys = vec![];
         if let Some(account) = account_to_create {
-            if client.get_account_data(&account).is_err() {
+            if working_bank.get_account(&account).is_none() {
                 // TODO the account space needs to be well defined somewhere
-                let tx = system_transaction::create_account(
-                    keypair,
+                let create_instruction = system_instruction::create_account(
+                    &keypair.pubkey(),
                     &storage_keypair.pubkey(),
-                    blockhash,
                     1,
                     1024 * 4,
                     &solana_storage_api::id(),
-                    0,
                 );
-                let signature = client.async_send_transaction(tx).unwrap();
-                Self::check_signature(&client, &signature, &exit)?;
+                instructions.push(create_instruction);
+                signing_keys.push(keypair.as_ref());
+                info!("storage account requested");
             }
         }
-        transaction.sign(&[storage_keypair.as_ref()], blockhash);
-
-        if exit.load(Ordering::Relaxed) {
-            Err(io::Error::new(io::ErrorKind::Other, "exit signaled"))?;
-        }
-
-        if let Ok(signature) = client.async_send_transaction(transaction.clone()) {
-            Self::check_signature(&client, &signature, &exit)?;
-            return Ok(());
-        }
-        }
-
-        Err(io::Error::new(io::ErrorKind::Other, "other failure"))
+        instructions.push(instruction);
+        signing_keys.push(storage_keypair.as_ref());
+        let mut transaction = Transaction::new_unsigned_instructions(instructions);
+        transaction.sign(&signing_keys, blockhash);
+        transactions_socket.send_to(
+            &bincode::serialize(&transaction).unwrap(),
+            cluster_info.read().unwrap().my_data().tpu,
+        )?;
+        Ok(())
     }

     fn process_entry_crossing(
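
`send_transaction` no longer round-trips through a `ThinClient`; it signs locally and fires the bincode-serialized transaction at the node's own TPU port over UDP. A minimal standalone sketch of that serialize-and-send path — the `Msg` type and loopback address are stand-ins, not from the diff:

use serde::Serialize;
use std::net::UdpSocket;

#[derive(Serialize)]
struct Msg {
    // stand-in for a signed Transaction
    payload: Vec<u8>,
}

fn main() -> std::io::Result<()> {
    // Bind an ephemeral local port, as the spawn closure above does.
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    let msg = Msg { payload: vec![1, 2, 3] };
    // Fire-and-forget: serialize with bincode and send one datagram.
    let bytes = bincode::serialize(&msg).expect("serialize");
    socket.send_to(&bytes, "127.0.0.1:8003")?;
    Ok(())
}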
@@ -314,7 +284,7 @@ impl StorageStage {
         _blocktree: &Arc<Blocktree>,
         entry_id: Hash,
         entry_height: u64,
-        tx_sender: &TransactionSender,
+        instruction_sender: &InstructionSender,
     ) -> Result<()> {
         let mut seed = [0u8; 32];
         let signature = keypair.sign(&entry_id.as_ref());

@@ -324,10 +294,9 @@ impl StorageStage {
             entry_id,
             entry_height,
         );
-        let tx = Transaction::new_unsigned_instructions(vec![ix]);
-        tx_sender.send(tx)?;
+        instruction_sender.send(ix)?;

-        seed.copy_from_slice(&signature.as_ref()[..32]);
+        seed.copy_from_slice(&signature.to_bytes()[..32]);

         let mut rng = ChaChaRng::from_seed(seed);

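The sampling RNG is seeded deterministically from the first 32 bytes of the keypair's signature over the entry hash, so the same validator re-derives the same segment samples on every run. A self-contained sketch of that seeding (byte values illustrative; assumes the `rand`/`rand_chacha` crates this file already uses):

use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;

fn main() {
    // Pretend these are the 64 bytes of an ed25519 signature.
    let sig_bytes = [7u8; 64];
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&sig_bytes[..32]);

    // Same seed -> same sample sequence, which is the point of seeding
    // from the signature rather than from an OS RNG.
    let mut a = ChaChaRng::from_seed(seed);
    let mut b = ChaChaRng::from_seed(seed);
    assert_eq!(a.gen::<u64>(), b.gen::<u64>());
}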
@@ -340,7 +309,7 @@ impl StorageStage {
             return Ok(());
         }
         // TODO: what if the validator does not have this segment
-        let segment = signature.as_ref()[0] as usize % num_segments;
+        let segment = signature.to_bytes()[0] as usize % num_segments;

         debug!(
             "storage verifying: segment: {} identities: {}",

@@ -394,7 +363,7 @@ impl StorageStage {
         entry_height: &mut u64,
         current_key_idx: &mut usize,
         storage_rotate_count: u64,
-        tx_sender: &TransactionSender,
+        instruction_sender: &InstructionSender,
     ) -> Result<()> {
         let timeout = Duration::new(1, 0);
         let entries: Vec<Entry> = entry_receiver.recv_timeout(timeout)?;
@@ -465,7 +434,7 @@ impl StorageStage {
                         &blocktree,
                         entry.hash,
                         *entry_height,
-                        tx_sender,
+                        instruction_sender,
                     )?;
                 }
                 *entry_height += 1;
@@ -493,6 +462,7 @@ mod tests {
     use crate::entry::{make_tiny_test_entries, Entry};
     use crate::service::Service;
     use rayon::prelude::*;
+    use solana_runtime::bank::Bank;
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::{Hash, Hasher};
     use solana_sdk::pubkey::Pubkey;

@@ -512,7 +482,9 @@ mod tests {
         let exit = Arc::new(AtomicBool::new(false));

         let cluster_info = test_cluster_info(&keypair.pubkey());
+        let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
+        let bank = Arc::new(Bank::new(&genesis_block));
+        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
         let (_storage_entry_sender, storage_entry_receiver) = channel();
         let storage_state = StorageState::new();
         let storage_stage = StorageStage::new(

@@ -523,6 +495,7 @@ mod tests {
             &storage_keypair,
             &exit.clone(),
             0,
+            &bank_forks,
             STORAGE_ROTATE_TEST_COUNT,
             &cluster_info,
         );

@@ -549,6 +522,8 @@ mod tests {

         let entries = make_tiny_test_entries(64);
         let blocktree = Blocktree::open(&ledger_path).unwrap();
+        let bank = Arc::new(Bank::new(&genesis_block));
+        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
         blocktree
             .write_entries(1, 0, 0, ticks_per_slot, &entries)
             .unwrap();

@@ -565,6 +540,7 @@ mod tests {
             &storage_keypair,
             &exit.clone(),
             0,
+            &bank_forks,
             STORAGE_ROTATE_TEST_COUNT,
             &cluster_info,
         );

@@ -618,7 +594,8 @@ mod tests {
         blocktree
             .write_entries(1, 0, 0, ticks_per_slot, &entries)
             .unwrap();
+        let bank = Arc::new(Bank::new(&genesis_block));
+        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
         let cluster_info = test_cluster_info(&keypair.pubkey());

         let (storage_entry_sender, storage_entry_receiver) = channel();

@@ -631,6 +608,7 @@ mod tests {
             &storage_keypair,
             &exit.clone(),
             0,
+            &bank_forks,
             STORAGE_ROTATE_TEST_COUNT,
             &cluster_info,
         );
@@ -2,45 +2,33 @@
 //!

 use crate::packet::{
-    deserialize_packets_in_blob, Blob, Meta, Packets, SharedBlobs, SharedPackets, PACKET_DATA_SIZE,
+    deserialize_packets_in_blob, Blob, Meta, Packets, SharedBlobs, PACKET_DATA_SIZE,
 };
 use crate::result::{Error, Result};
 use bincode;
-use solana_metrics::{influxdb, submit};
 use solana_sdk::timing::duration_as_ms;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::thread::{Builder, JoinHandle};
 use std::time::{Duration, Instant};

-pub type PacketReceiver = Receiver<SharedPackets>;
-pub type PacketSender = Sender<SharedPackets>;
+pub type PacketReceiver = Receiver<Packets>;
+pub type PacketSender = Sender<Packets>;
 pub type BlobSender = Sender<SharedBlobs>;
 pub type BlobReceiver = Receiver<SharedBlobs>;

-fn recv_loop(
-    sock: &UdpSocket,
-    exit: Arc<AtomicBool>,
-    channel: &PacketSender,
-    channel_tag: &'static str,
-) -> Result<()> {
+fn recv_loop(sock: &UdpSocket, exit: Arc<AtomicBool>, channel: &PacketSender) -> Result<()> {
     loop {
-        let msgs = SharedPackets::default();
+        let mut msgs = Packets::default();
         loop {
             // Check for exit signal, even if socket is busy
             // (for instance the leader trasaction socket)
             if exit.load(Ordering::Relaxed) {
                 return Ok(());
             }
-            if msgs.write().unwrap().recv_from(sock).is_ok() {
-                let len = msgs.read().unwrap().packets.len();
-                submit(
-                    influxdb::Point::new(channel_tag)
-                        .add_field("count", influxdb::Value::Integer(len as i64))
-                        .to_owned(),
-                );
+            if let Ok(_len) = msgs.recv_from(sock) {
                 channel.send(msgs)?;
                 break;
             }
@@ -52,7 +40,6 @@ pub fn receiver(
     sock: Arc<UdpSocket>,
     exit: &Arc<AtomicBool>,
     packet_sender: PacketSender,
-    sender_tag: &'static str,
 ) -> JoinHandle<()> {
     let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
     if res.is_err() {

@@ -62,7 +49,7 @@ pub fn receiver(
     Builder::new()
         .name("solana-receiver".to_string())
         .spawn(move || {
-            let _ = recv_loop(&sock, exit, &packet_sender, sender_tag);
+            let _ = recv_loop(&sock, exit, &packet_sender);
         })
         .unwrap()
 }
@@ -74,19 +61,19 @@ fn recv_send(sock: &UdpSocket, r: &BlobReceiver) -> Result<()> {
     Ok(())
 }

-pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize, u64)> {
+pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packets>, usize, u64)> {
     let timer = Duration::new(1, 0);
     let msgs = recvr.recv_timeout(timer)?;
     let recv_start = Instant::now();
     trace!("got msgs");
-    let mut len = msgs.read().unwrap().packets.len();
+    let mut len = msgs.packets.len();
     let mut batch = vec![msgs];
     while let Ok(more) = recvr.try_recv() {
         trace!("got more msgs");
-        len += more.read().unwrap().packets.len();
+        len += more.packets.len();
         batch.push(more);

-        if len > 100_000 {
+        if len > max_batch {
             break;
         }
     }
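
`recv_batch` blocks for the first message, then greedily drains whatever is already queued, stopping once the caller-supplied `max_batch` is crossed. The same drain pattern over a plain std mpsc channel, as a runnable sketch (the element type and cap are illustrative):

use std::sync::mpsc::{channel, Receiver};
use std::time::Duration;

// Drain pattern used by recv_batch: block for the first Vec, then take
// everything already queued until `max_batch` items have been gathered.
fn drain(rx: &Receiver<Vec<u8>>, max_batch: usize) -> Option<(Vec<Vec<u8>>, usize)> {
    let first = rx.recv_timeout(Duration::from_secs(1)).ok()?;
    let mut len = first.len();
    let mut batch = vec![first];
    while let Ok(more) = rx.try_recv() {
        len += more.len();
        batch.push(more);
        if len > max_batch {
            break; // cap crossed; leave the rest for the next call
        }
    }
    Some((batch, len))
}

fn main() {
    let (tx, rx) = channel();
    for i in 0..5 {
        tx.send(vec![i; 3]).unwrap();
    }
    let (batch, len) = drain(&rx, 9).unwrap();
    assert_eq!(len, 12); // stopped after crossing the cap of 9
    assert_eq!(batch.len(), 4);
}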
@@ -167,7 +154,7 @@ fn recv_blob_packets(sock: &UdpSocket, s: &PacketSender) -> Result<()> {
     }

         let packets = packets?;
-        s.send(Arc::new(RwLock::new(Packets::new(packets))))?;
+        s.send(Packets::new(packets))?;
     }

     Ok(())

@@ -212,7 +199,7 @@ mod test {
         for _ in 0..10 {
             let m = r.recv_timeout(Duration::new(1, 0))?;

-            *num -= m.read().unwrap().packets.len();
+            *num -= m.packets.len();

             if *num == 0 {
                 break;

@@ -236,7 +223,7 @@ mod test {
         let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
         let exit = Arc::new(AtomicBool::new(false));
         let (s_reader, r_reader) = channel();
-        let t_receiver = receiver(Arc::new(read), &exit, s_reader, "streamer-test");
+        let t_receiver = receiver(Arc::new(read), &exit, s_reader);
         let t_responder = {
             let (s_responder, r_responder) = channel();
             let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
@@ -8,9 +8,11 @@ use crate::cluster_info::ClusterInfo;
 use crate::cluster_info_vote_listener::ClusterInfoVoteListener;
 use crate::entry::EntrySender;
 use crate::fetch_stage::FetchStage;
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::poh_recorder::{PohRecorder, WorkingBankEntries};
 use crate::service::Service;
 use crate::sigverify_stage::SigVerifyStage;
+use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
@@ -39,7 +41,9 @@ impl Tpu {
         sigverify_disabled: bool,
         blocktree: &Arc<Blocktree>,
         storage_entry_sender: EntrySender,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
         exit: &Arc<AtomicBool>,
+        genesis_blockhash: &Hash,
     ) -> Self {
         cluster_info.write().unwrap().set_leader(id);

@@ -49,21 +53,29 @@ impl Tpu {
             tpu_via_blobs_sockets,
             &exit,
             &packet_sender,
+            &poh_recorder,
         );
         let (verified_sender, verified_receiver) = channel();

         let sigverify_stage =
             SigVerifyStage::new(packet_receiver, sigverify_disabled, verified_sender.clone());

+        let (verified_vote_sender, verified_vote_receiver) = channel();
         let cluster_info_vote_listener = ClusterInfoVoteListener::new(
             &exit,
             cluster_info.clone(),
             sigverify_disabled,
-            verified_sender,
+            verified_vote_sender,
             &poh_recorder,
         );

-        let banking_stage = BankingStage::new(&cluster_info, poh_recorder, verified_receiver);
+        let banking_stage = BankingStage::new(
+            &cluster_info,
+            poh_recorder,
+            verified_receiver,
+            verified_vote_receiver,
+            leader_schedule_cache,
+        );

         let broadcast_stage = BroadcastStage::new(
             broadcast_socket,
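
Votes arriving over gossip now reach the banking stage on their own channel rather than sharing `verified_sender` with ordinary transactions, so each traffic class can be drained and metered independently. A tiny sketch of the two-channel wiring (string payloads are stand-ins for the real batch types):

use std::sync::mpsc::channel;

fn main() {
    // One channel per traffic class, both draining into the same consumer,
    // mirroring verified_sender/verified_vote_sender feeding BankingStage.
    let (tx_sender, tx_receiver) = channel();
    let (vote_sender, vote_receiver) = channel();

    tx_sender.send("transaction batch").unwrap();
    vote_sender.send("vote batch").unwrap();

    // The consumer can now prioritize or rate-limit each class separately.
    assert_eq!(tx_receiver.recv().unwrap(), "transaction batch");
    assert_eq!(vote_receiver.recv().unwrap(), "vote batch");
}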
@@ -72,6 +84,7 @@ impl Tpu {
             &exit,
             blocktree,
             storage_entry_sender,
+            genesis_blockhash,
         );

         Self {
@@ -19,12 +19,14 @@ use crate::blocktree::Blocktree;
 use crate::blocktree_processor::BankForksInfo;
 use crate::cluster_info::ClusterInfo;
 use crate::entry::{EntryReceiver, EntrySender};
+use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::poh_recorder::PohRecorder;
 use crate::replay_stage::ReplayStage;
 use crate::retransmit_stage::RetransmitStage;
 use crate::rpc_subscriptions::RpcSubscriptions;
 use crate::service::Service;
 use crate::storage_stage::{StorageStage, StorageState};
+use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use std::net::UdpSocket;
@@ -71,7 +73,9 @@ impl Tvu {
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         storage_entry_sender: EntrySender,
         storage_entry_receiver: EntryReceiver,
+        leader_schedule_cache: &Arc<LeaderScheduleCache>,
         exit: &Arc<AtomicBool>,
+        genesis_blockhash: &Hash,
     ) -> Self
     where
         T: 'static + KeypairUtil + Sync + Send,
@@ -100,13 +104,15 @@ impl Tvu {
         //the packets coming out of blob_receiver need to be sent to the GPU and verified
         //then sent to the window, which does the erasure coding reconstruction
         let retransmit_stage = RetransmitStage::new(
-            &bank_forks,
+            bank_forks.clone(),
+            leader_schedule_cache,
             blocktree.clone(),
             &cluster_info,
             Arc::new(retransmit_socket),
             repair_socket,
             blob_fetch_receiver,
             &exit,
+            genesis_blockhash,
         );

         let (replay_stage, slot_full_receiver) = ReplayStage::new(
@@ -121,6 +127,7 @@ impl Tvu {
             subscriptions,
             poh_recorder,
             storage_entry_sender,
+            leader_schedule_cache,
         );

         let blockstream_service = if blockstream.is_some() {

@@ -144,6 +151,7 @@ impl Tvu {
             &storage_keypair,
             &exit,
             bank_forks_info[0].entry_height, // TODO: StorageStage needs to deal with BankForks somehow still
+            &bank_forks,
             storage_rotate_count,
             &cluster_info,
         );
@@ -214,6 +222,7 @@ pub mod tests {
         create_test_recorder(&bank, &blocktree);
         let voting_keypair = Keypair::new();
         let (storage_entry_sender, storage_entry_receiver) = channel();
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
         let tvu = Tvu::new(
             &voting_keypair.pubkey(),
             Some(Arc::new(voting_keypair)),

@@ -236,7 +245,9 @@ pub mod tests {
             &poh_recorder,
             storage_entry_sender,
             storage_entry_receiver,
+            &leader_schedule_cache,
             &exit,
+            &Hash::default(),
         );
         exit.store(true, Ordering::Relaxed);
         tvu.join().unwrap();
@@ -1,15 +1,19 @@
 //! `window_service` handles the data plane incoming blobs, storing them in
 //! blocktree and retransmitting where required
 //!
+use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 use crate::cluster_info::ClusterInfo;
-use crate::packet::{SharedBlob, BLOB_HEADER_SIZE};
+use crate::leader_schedule_cache::LeaderScheduleCache;
+use crate::leader_schedule_utils::slot_leader_at;
+use crate::packet::{Blob, SharedBlob, BLOB_HEADER_SIZE};
 use crate::repair_service::{RepairService, RepairSlotRange};
 use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::streamer::{BlobReceiver, BlobSender};
 use solana_metrics::counter::Counter;
-use solana_metrics::{influxdb, submit};
+use solana_runtime::bank::Bank;
+use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::duration_as_ms;
 use std::net::UdpSocket;
@@ -24,12 +28,20 @@ fn retransmit_blobs(blobs: &[SharedBlob], retransmit: &BlobSender, id: &Pubkey)
     for blob in blobs {
         // Don't add blobs generated by this node to the retransmit queue
         if blob.read().unwrap().id() != *id {
+            let mut w_blob = blob.write().unwrap();
+            w_blob.meta.forward = w_blob.should_forward();
+            w_blob.set_forwarded(false);
             retransmit_queue.push(blob.clone());
         }
     }

     if !retransmit_queue.is_empty() {
-        inc_new_counter_info!("streamer-recv_window-retransmit", retransmit_queue.len());
+        inc_new_counter_info!(
+            "streamer-recv_window-retransmit",
+            retransmit_queue.len(),
+            0,
+            1000
+        );
         retransmit.send(retransmit_queue)?;
     }
     Ok(())
@@ -64,33 +76,70 @@ fn process_blobs(blobs: &[SharedBlob], blocktree: &Arc<Blocktree>) -> Result<()>
     Ok(())
 }

+/// drop blobs that are from myself or not from the correct leader for the
+/// blob's slot
+fn should_retransmit_and_persist(
+    blob: &Blob,
+    bank: Option<&Arc<Bank>>,
+    leader_schedule_cache: Option<&Arc<LeaderScheduleCache>>,
+    my_id: &Pubkey,
+) -> bool {
+    let slot_leader_id = match bank {
+        None => leader_schedule_cache.and_then(|cache| cache.slot_leader_at(blob.slot())),
+        Some(bank) => match leader_schedule_cache {
+            None => slot_leader_at(blob.slot(), &bank),
+            Some(cache) => cache.slot_leader_at_else_compute(blob.slot(), bank),
+        },
+    };
+
+    if blob.id() == *my_id {
+        inc_new_counter_info!("streamer-recv_window-circular_transmission", 1);
+        false
+    } else if slot_leader_id == None {
+        inc_new_counter_info!("streamer-recv_window-unknown_leader", 1);
+        true
+    } else if slot_leader_id != Some(blob.id()) {
+        inc_new_counter_info!("streamer-recv_window-wrong_leader", 1);
+        false
+    } else {
+        true
+    }
+}
+
 fn recv_window(
+    bank_forks: Option<&Arc<RwLock<BankForks>>>,
+    leader_schedule_cache: Option<&Arc<LeaderScheduleCache>>,
     blocktree: &Arc<Blocktree>,
-    id: &Pubkey,
+    my_id: &Pubkey,
     r: &BlobReceiver,
     retransmit: &BlobSender,
+    genesis_blockhash: &Hash,
 ) -> Result<()> {
     let timer = Duration::from_millis(200);
-    let mut dq = r.recv_timeout(timer)?;
+    let mut blobs = r.recv_timeout(timer)?;

-    while let Ok(mut nq) = r.try_recv() {
-        dq.append(&mut nq)
+    while let Ok(mut blob) = r.try_recv() {
+        blobs.append(&mut blob)
     }
     let now = Instant::now();
-    inc_new_counter_info!("streamer-recv_window-recv", dq.len(), 100);
+    inc_new_counter_info!("streamer-recv_window-recv", blobs.len(), 0, 1000);

-    submit(
-        influxdb::Point::new("recv-window")
-            .add_field("count", influxdb::Value::Integer(dq.len() as i64))
-            .to_owned(),
-    );
+    blobs.retain(|blob| {
+        should_retransmit_and_persist(
+            &blob.read().unwrap(),
+            bank_forks
+                .map(|bank_forks| bank_forks.read().unwrap().working_bank())
+                .as_ref(),
+            leader_schedule_cache,
+            my_id,
+        ) && blob.read().unwrap().genesis_blockhash() == *genesis_blockhash
+    });

-    retransmit_blobs(&dq, retransmit, id)?;
+    retransmit_blobs(&blobs, retransmit, my_id)?;

-    //send a contiguous set of blocks
-    trace!("{} num blobs received: {}", id, dq.len());
+    trace!("{} num blobs received: {}", my_id, blobs.len());

-    process_blobs(&dq, blocktree)?;
+    process_blobs(&blobs, blocktree)?;

     trace!(
         "Elapsed processing time in recv_window(): {}",
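
`recv_window` now filters its batch in place with `Vec::retain`, dropping anything that fails the leader check or carries a foreign genesis blockhash before retransmission or persistence ever sees it. A toy sketch of that gate — the stub `Blob` and the predicate stand in for the real types and for `should_retransmit_and_persist`:

struct Blob {
    from_leader: bool, // stand-in for the slot-leader check
    genesis: u8,       // stand-in for genesis_blockhash()
}

fn main() {
    let my_genesis = 7u8;
    let mut blobs = vec![
        Blob { from_leader: true, genesis: 7 },  // kept
        Blob { from_leader: false, genesis: 7 }, // wrong leader: dropped
        Blob { from_leader: true, genesis: 9 },  // wrong chain: dropped
    ];
    // In-place filter, same shape as the retain call in recv_window.
    blobs.retain(|b| b.from_leader && b.genesis == my_genesis);
    assert_eq!(blobs.len(), 1);
}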
@@ -124,7 +173,10 @@ pub struct WindowService {
 }

 impl WindowService {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
+        bank_forks: Option<Arc<RwLock<BankForks>>>,
+        leader_schedule_cache: Option<Arc<LeaderScheduleCache>>,
         blocktree: Arc<Blocktree>,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         r: BlobReceiver,
@@ -132,6 +184,7 @@ impl WindowService {
         repair_socket: Arc<UdpSocket>,
         exit: &Arc<AtomicBool>,
         repair_slot_range: Option<RepairSlotRange>,
+        genesis_blockhash: &Hash,
     ) -> WindowService {
         let repair_service = RepairService::new(
             blocktree.clone(),

@@ -141,6 +194,9 @@ impl WindowService {
             repair_slot_range,
         );
         let exit = exit.clone();
+        let bank_forks = bank_forks.clone();
+        let leader_schedule_cache = leader_schedule_cache.clone();
+        let hash = *genesis_blockhash;
         let t_window = Builder::new()
             .name("solana-window".to_string())
             .spawn(move || {
@@ -151,7 +207,15 @@ impl WindowService {
                 if exit.load(Ordering::Relaxed) {
                     break;
                 }
-                if let Err(e) = recv_window(&blocktree, &id, &r, &retransmit) {
+                if let Err(e) = recv_window(
+                    bank_forks.as_ref(),
+                    leader_schedule_cache.as_ref(),
+                    &blocktree,
+                    &id,
+                    &r,
+                    &retransmit,
+                    &hash,
+                ) {
                     match e {
                         Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                         Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@@ -184,12 +248,15 @@ impl Service for WindowService {
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::bank_forks::BankForks;
     use crate::blocktree::{get_tmp_ledger_path, Blocktree};
     use crate::cluster_info::{ClusterInfo, Node};
     use crate::entry::{make_consecutive_blobs, make_tiny_test_entries, EntrySlice};
-    use crate::packet::index_blobs;
+    use crate::packet::{index_blobs, Blob};
     use crate::service::Service;
     use crate::streamer::{blob_receiver, responder};
+    use solana_runtime::bank::Bank;
+    use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::Hash;
     use std::fs::remove_dir_all;
     use std::net::UdpSocket;
@@ -221,6 +288,53 @@ mod test {
         Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
     }

+    #[test]
+    fn test_should_retransmit_and_persist() {
+        let me_id = Pubkey::new_rand();
+        let leader_id = Pubkey::new_rand();
+        let bank = Arc::new(Bank::new(
+            &GenesisBlock::new_with_leader(100, &leader_id, 10).0,
+        ));
+        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
+
+        let mut blob = Blob::default();
+        blob.set_id(&leader_id);
+
+        // without a Bank and blobs not from me, blob continues
+        assert_eq!(
+            should_retransmit_and_persist(&blob, None, None, &me_id),
+            true
+        );
+
+        // with a Bank for slot 0, blob continues
+        assert_eq!(
+            should_retransmit_and_persist(&blob, Some(&bank), Some(&cache), &me_id),
+            true
+        );
+
+        // set the blob to have come from the wrong leader
+        blob.set_id(&Pubkey::new_rand());
+        assert_eq!(
+            should_retransmit_and_persist(&blob, Some(&bank), Some(&cache), &me_id),
+            false
+        );
+
+        // with a Bank and no idea who leader is, we keep the blobs (for now)
+        // TODO: persistr in blocktree that we didn't know who the leader was at the time?
+        blob.set_slot(100);
+        assert_eq!(
+            should_retransmit_and_persist(&blob, Some(&bank), Some(&cache), &me_id),
+            true
+        );
+
+        // if the blob came back from me, it doesn't continue, whether or not I have a bank
+        blob.set_id(&me_id);
+        assert_eq!(
+            should_retransmit_and_persist(&blob, None, None, &me_id),
+            false
+        );
+    }
+
     #[test]
     pub fn window_send_test() {
         solana_logger::setup();
@@ -240,7 +354,13 @@ mod test {
         let blocktree = Arc::new(
             Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
         );

+        let bank = Bank::new(&GenesisBlock::new_with_leader(100, &me_id, 10).0);
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
+        let bank_forks = Some(Arc::new(RwLock::new(BankForks::new(0, bank))));
         let t_window = WindowService::new(
+            bank_forks,
+            Some(leader_schedule_cache),
             blocktree,
             subs,
             r_reader,

@@ -248,6 +368,7 @@ mod test {
             Arc::new(leader_node.sockets.repair),
             &exit,
             None,
+            &Hash::default(),
         );
         let t_responder = {
             let (s_responder, r_responder) = channel();
@ -312,7 +433,12 @@ mod test {
|
|||||||
let blocktree = Arc::new(
|
let blocktree = Arc::new(
|
||||||
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
|
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
|
||||||
);
|
);
|
||||||
|
let bank = Bank::new(&GenesisBlock::new_with_leader(100, &me_id, 10).0);
|
||||||
|
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||||
|
let bank_forks = Some(Arc::new(RwLock::new(BankForks::new(0, bank))));
|
||||||
let t_window = WindowService::new(
|
let t_window = WindowService::new(
|
||||||
|
bank_forks,
|
||||||
|
Some(leader_schedule_cache),
|
||||||
blocktree,
|
blocktree,
|
||||||
subs.clone(),
|
subs.clone(),
|
||||||
r_reader,
|
r_reader,
|
||||||
@ -320,6 +446,7 @@ mod test {
|
|||||||
Arc::new(leader_node.sockets.repair),
|
Arc::new(leader_node.sockets.repair),
|
||||||
&exit,
|
&exit,
|
||||||
None,
|
None,
|
||||||
|
&Hash::default(),
|
||||||
);
|
);
|
||||||
let t_responder = {
|
let t_responder = {
|
||||||
let (s_responder, r_responder) = channel();
|
let (s_responder, r_responder) = channel();
|
||||||
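The assertions above pin down a simple decision rule. As a reading aid only, here is a hypothetical condensation of that rule; the real should_retransmit_and_persist takes a Blob, an optional Bank, and an optional LeaderScheduleCache rather than these flattened parameters:

    use solana_sdk::pubkey::Pubkey;

    // Hypothetical sketch of the rule the test encodes: drop blobs that
    // looped back from this node, drop blobs from the wrong leader when the
    // leader for the slot is known, and keep everything else (for now).
    fn keeps_blob(blob_from: &Pubkey, me: &Pubkey, expected_leader: Option<&Pubkey>) -> bool {
        if blob_from == me {
            return false; // my own blob came back
        }
        match expected_leader {
            Some(leader) => blob_from == leader, // known leader must match
            None => true,                        // unknown leader: keep the blob
        }
    }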
@@ -2,8 +2,7 @@ use hashbrown::{HashMap, HashSet};
 use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use rayon::prelude::*;
 use solana::cluster_info::{
-    compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT, GROW_LAYER_CAPACITY,
-    NEIGHBORHOOD_SIZE,
+    compute_retransmit_peers, ClusterInfo, GROW_LAYER_CAPACITY, NEIGHBORHOOD_SIZE,
 };
 use solana::contact_info::ContactInfo;
 use solana_sdk::pubkey::Pubkey;
@@ -29,7 +28,7 @@ fn find_insert_blob(id: &Pubkey, blob: i32, batches: &mut [Nodes]) {
     });
 }

-fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
+fn run_simulation(stakes: &[u64], fanout: usize, hood_size: usize) {
     let num_threads = num_threads();
     // set timeout to 5 minutes
     let timeout = 60 * 5;
@@ -38,8 +37,8 @@ fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
     let leader_info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
     let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());

-    // setup stakes
-    let mut stakes = HashMap::new();
+    // setup staked nodes
+    let mut staked_nodes = HashMap::new();

     // setup accounts for all nodes (leader has 0 bal)
     let (s, r) = channel();
@@ -52,14 +51,14 @@ fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
         .get_mut(0)
         .unwrap()
         .insert(leader_info.id, (HashSet::new(), r));
-    let range: Vec<_> = (1..=num_nodes).collect();
-    let chunk_size = (num_nodes as usize + num_threads - 1) / num_threads;
+    let range: Vec<_> = (1..=stakes.len()).collect();
+    let chunk_size = (stakes.len() + num_threads - 1) / num_threads;
     range.chunks(chunk_size).for_each(|chunk| {
         chunk.into_iter().for_each(|i| {
             //distribute neighbors across threads to maximize parallel compute
             let batch_ix = *i as usize % batches.len();
             let node = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
-            stakes.insert(node.id, *i);
+            staked_nodes.insert(node.id, stakes[*i - 1]);
             cluster_info.insert_info(node.clone());
             let (s, r) = channel();
             batches
@@ -75,7 +74,7 @@ fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
     let blobs: Vec<(_, _)> = (0..100).into_par_iter().map(|i| (i as i32, true)).collect();

     // pretend to broadcast from leader - cluster_info::create_broadcast_orders
-    let mut broadcast_table = cluster_info.sorted_tvu_peers(&stakes);
+    let mut broadcast_table = cluster_info.sorted_tvu_peers(&staked_nodes);
     broadcast_table.truncate(fanout);
     let orders = ClusterInfo::create_broadcast_orders(false, &blobs, &broadcast_table);

@@ -105,7 +104,7 @@ fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
             cluster.gossip.set_self(&*id);
             if !mapped_peers.contains_key(id) {
                 let (neighbors, children) = compute_retransmit_peers(
-                    &stakes,
+                    &staked_nodes,
                     &Arc::new(RwLock::new(cluster.clone())),
                     fanout,
                     hood_size,
@@ -173,23 +172,30 @@ fn run_simulation(num_nodes: u64, fanout: usize, hood_size: usize) {
 // Run with a single layer
 #[test]
 fn test_retransmit_small() {
-    run_simulation(
-        DATA_PLANE_FANOUT as u64,
-        DATA_PLANE_FANOUT,
-        NEIGHBORHOOD_SIZE,
-    );
+    let stakes: Vec<_> = (0..NEIGHBORHOOD_SIZE as u64).map(|i| i).collect();
+    run_simulation(&stakes, NEIGHBORHOOD_SIZE, NEIGHBORHOOD_SIZE);
 }

 // Make sure at least 2 layers are used
 #[test]
 fn test_retransmit_medium() {
-    let num_nodes = DATA_PLANE_FANOUT as u64 * 10;
-    run_simulation(num_nodes, DATA_PLANE_FANOUT, NEIGHBORHOOD_SIZE);
+    let num_nodes = NEIGHBORHOOD_SIZE as u64 * 10;
+    let stakes: Vec<_> = (0..num_nodes).map(|i| i).collect();
+    run_simulation(&stakes, NEIGHBORHOOD_SIZE, NEIGHBORHOOD_SIZE);
+}
+
+// Make sure at least 2 layers are used but with equal stakes
+#[test]
+fn test_retransmit_medium_equal_stakes() {
+    let num_nodes = NEIGHBORHOOD_SIZE as u64 * 10;
+    let stakes: Vec<_> = (0..num_nodes).map(|_| 10).collect();
+    run_simulation(&stakes, NEIGHBORHOOD_SIZE, NEIGHBORHOOD_SIZE);
 }

 // Scale down the network and make sure at least 3 layers are used
 #[test]
 fn test_retransmit_large() {
-    let num_nodes = DATA_PLANE_FANOUT as u64 * 20;
-    run_simulation(num_nodes, DATA_PLANE_FANOUT / 10, NEIGHBORHOOD_SIZE / 10);
+    let num_nodes = NEIGHBORHOOD_SIZE as u64 * 20;
+    let stakes: Vec<_> = (0..num_nodes).map(|i| i).collect();
+    run_simulation(&stakes, NEIGHBORHOOD_SIZE / 10, NEIGHBORHOOD_SIZE / 10);
 }
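Since run_simulation now takes an explicit stake slice, ad-hoc distributions are easy to express. A hypothetical variant (not part of this change) that skews one node's stake to exercise the weighted ordering in sorted_tvu_peers:

    #[test]
    fn test_retransmit_small_skewed_stakes() {
        // Hypothetical: one heavily staked node among otherwise equal peers.
        let mut stakes = vec![10u64; NEIGHBORHOOD_SIZE];
        stakes[0] = 1_000;
        run_simulation(&stakes, NEIGHBORHOOD_SIZE, NEIGHBORHOOD_SIZE);
    }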
@@ -176,7 +176,8 @@ pub fn cluster_info_retransmit() -> result::Result<()> {
     assert!(done);
     let b = SharedBlob::default();
     b.write().unwrap().meta.size = 10;
-    ClusterInfo::retransmit(&c1, &b, &tn1)?;
+    let peers = c1.read().unwrap().retransmit_peers();
+    ClusterInfo::retransmit_to(&c1, &peers, &b, None, &tn1, false)?;
     let res: Vec<_> = [tn1, tn2, tn3]
         .into_par_iter()
         .map(|s| {
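The retransmit path is now split in two steps: the caller first asks its ClusterInfo for the retransmit peer list and then hands that list to retransmit_to, which takes two additional arguments (passed through here as the None and false above).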
@@ -45,6 +45,22 @@ fn test_spend_and_verify_all_nodes_3() {
     );
 }

+#[test]
+#[ignore]
+fn test_spend_and_verify_all_nodes_env_num_nodes() {
+    solana_logger::setup();
+    let num_nodes: usize = std::env::var("NUM_NODES")
+        .expect("please set environment variable NUM_NODES")
+        .parse()
+        .expect("could not parse NUM_NODES as a number");
+    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
+    cluster_tests::spend_and_verify_all_nodes(
+        &local.entry_point_info,
+        &local.funding_keypair,
+        num_nodes,
+    );
+}
+
 #[test]
 #[should_panic]
 fn test_fullnode_exit_default_config_should_panic() {
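Because the new test is marked #[ignore], it only runs on request; a hypothetical invocation would be `NUM_NODES=3 cargo test test_spend_and_verify_all_nodes_env_num_nodes -- --ignored`.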
@@ -83,7 +83,7 @@ fn test_replay() {

     let tvu_addr = target1.info.tvu;

-    let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver) =
+    let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver, leader_schedule_cache) =
         fullnode::new_banks_from_blocktree(&blocktree_path, None);
     let bank = bank_forks.working_bank();
     assert_eq!(
@@ -91,6 +91,7 @@ fn test_replay() {
         starting_mint_balance
     );

+    let leader_schedule_cache = Arc::new(leader_schedule_cache);
     // start cluster_info1
     let bank_forks = Arc::new(RwLock::new(bank_forks));
     let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
@@ -126,7 +127,9 @@ fn test_replay() {
         &poh_recorder,
         storage_sender,
         storage_receiver,
+        &leader_schedule_cache,
         &exit,
+        &solana_sdk::hash::Hash::default(),
     );

     let mut mint_ref_balance = starting_mint_balance;
@@ -171,7 +174,7 @@ fn test_replay() {
     // receive retransmitted messages
     let timer = Duration::new(1, 0);
     while let Ok(_msg) = r_reader.recv_timeout(timer) {
-        trace!("got msg");
+        info!("got msg");
     }

     let working_bank = bank_forks.read().unwrap().working_bank();
@@ -1,6 +1,6 @@
 [package]
 name = "solana-drone"
-version = "0.13.0"
+version = "0.14.0"
 description = "Solana Drone"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -20,9 +20,9 @@ clap = "2.33"
 log = "0.4.2"
 serde = "1.0.90"
 serde_derive = "1.0.90"
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
-solana-metrics = { path = "../metrics", version = "0.13.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }
+solana-metrics = { path = "../metrics", version = "0.14.0" }
 tokio = "0.1"
 tokio-codec = "0.1"

@@ -1,16 +1,11 @@
 use clap::{crate_description, crate_name, crate_version, App, Arg};
-use log::*;
-use solana_drone::drone::{Drone, DRONE_PORT};
+use solana_drone::drone::{run_drone, Drone, DRONE_PORT};
 use solana_drone::socketaddr;
 use solana_sdk::signature::read_keypair;
 use std::error;
-use std::io;
 use std::net::{Ipv4Addr, SocketAddr};
 use std::sync::{Arc, Mutex};
 use std::thread;
-use tokio::net::TcpListener;
-use tokio::prelude::{Future, Sink, Stream};
-use tokio_codec::{BytesCodec, Decoder};

 fn main() -> Result<(), Box<error::Error>> {
     solana_logger::setup();
@@ -74,34 +69,6 @@ fn main() -> Result<(), Box<error::Error>> {
             drone1.lock().unwrap().clear_request_count();
         });

-    let socket = TcpListener::bind(&drone_addr).unwrap();
-    info!("Drone started. Listening on: {}", drone_addr);
-    let done = socket
-        .incoming()
-        .map_err(|e| warn!("failed to accept socket; error = {:?}", e))
-        .for_each(move |socket| {
-            let drone2 = drone.clone();
-            let framed = BytesCodec::new().framed(socket);
-            let (writer, reader) = framed.split();
-
-            let processor = reader.and_then(move |bytes| {
-                let response_bytes = drone2
-                    .lock()
-                    .unwrap()
-                    .process_drone_request(&bytes)
-                    .unwrap();
-                Ok(response_bytes)
-            });
-            let server = writer
-                .send_all(processor.or_else(|err| {
-                    Err(io::Error::new(
-                        io::ErrorKind::Other,
-                        format!("Drone response: {:?}", err),
-                    ))
-                }))
-                .then(|_| Ok(()));
-            tokio::spawn(server)
-        });
-    tokio::run(done);
+    run_drone(drone, drone_addr, None);
     Ok(())
 }
@@ -42,7 +42,7 @@ macro_rules! socketaddr {
 }

 pub const TIME_SLICE: u64 = 60;
-pub const REQUEST_CAP: u64 = 500_000_000;
+pub const REQUEST_CAP: u64 = 100_000_000_000_000;
 pub const DRONE_PORT: u16 = 9900;

 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
@@ -135,7 +135,13 @@ impl Drone {
             let message = Message::new(vec![create_instruction]);
             Ok(Transaction::new(&[&self.mint_keypair], message, blockhash))
         } else {
-            Err(Error::new(ErrorKind::Other, "token limit reached"))
+            Err(Error::new(
+                ErrorKind::Other,
+                format!(
+                    "token limit reached; req: {} current: {} cap: {}",
+                    lamports, self.request_current, self.request_cap
+                ),
+            ))
         }
     }
 }
@ -255,8 +261,22 @@ pub fn run_local_drone(
|
|||||||
None,
|
None,
|
||||||
request_cap_input,
|
request_cap_input,
|
||||||
)));
|
)));
|
||||||
|
run_drone(drone, drone_addr, Some(sender));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn run_drone(
|
||||||
|
drone: Arc<Mutex<Drone>>,
|
||||||
|
drone_addr: SocketAddr,
|
||||||
|
send_addr: Option<Sender<SocketAddr>>,
|
||||||
|
) {
|
||||||
let socket = TcpListener::bind(&drone_addr).unwrap();
|
let socket = TcpListener::bind(&drone_addr).unwrap();
|
||||||
sender.send(socket.local_addr().unwrap()).unwrap();
|
if send_addr.is_some() {
|
||||||
|
send_addr
|
||||||
|
.unwrap()
|
||||||
|
.send(socket.local_addr().unwrap())
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
info!("Drone started. Listening on: {}", drone_addr);
|
info!("Drone started. Listening on: {}", drone_addr);
|
||||||
let done = socket
|
let done = socket
|
||||||
.incoming()
|
.incoming()
|
||||||
@ -267,13 +287,16 @@ pub fn run_local_drone(
|
|||||||
let (writer, reader) = framed.split();
|
let (writer, reader) = framed.split();
|
||||||
|
|
||||||
let processor = reader.and_then(move |bytes| {
|
let processor = reader.and_then(move |bytes| {
|
||||||
let response_bytes = drone2
|
match drone2.lock().unwrap().process_drone_request(&bytes) {
|
||||||
.lock()
|
Ok(response_bytes) => {
|
||||||
.unwrap()
|
|
||||||
.process_drone_request(&bytes)
|
|
||||||
.unwrap();
|
|
||||||
trace!("Airdrop response_bytes: {:?}", response_bytes.to_vec());
|
trace!("Airdrop response_bytes: {:?}", response_bytes.to_vec());
|
||||||
Ok(response_bytes)
|
Ok(response_bytes)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
info!("Error in request: {:?}", e);
|
||||||
|
Ok(Bytes::from(&b""[..]))
|
||||||
|
}
|
||||||
|
}
|
||||||
});
|
});
|
||||||
let server = writer
|
let server = writer
|
||||||
.send_all(processor.or_else(|err| {
|
.send_all(processor.or_else(|err| {
|
||||||
@ -286,7 +309,6 @@ pub fn run_local_drone(
|
|||||||
tokio::spawn(server)
|
tokio::spawn(server)
|
||||||
});
|
});
|
||||||
tokio::run(done);
|
tokio::run(done);
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
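A minimal caller sketch of the extracted entry point, mirroring what run_local_drone does above; the Drone construction is assumed to happen elsewhere, and socketaddr!(0, 0) asks the OS for any free port:

    use solana_drone::drone::{run_drone, Drone};
    use solana_drone::socketaddr;
    use std::net::{Ipv4Addr, SocketAddr};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, Mutex};
    use std::thread;

    // Sketch: serve a pre-built drone on an ephemeral port and return the
    // address that run_drone reports back through the channel before it
    // starts accepting requests (run_drone itself blocks in tokio::run).
    fn spawn_drone(drone: Arc<Mutex<Drone>>) -> SocketAddr {
        let (sender, receiver) = channel();
        thread::spawn(move || run_drone(drone, socketaddr!(0, 0), Some(sender)));
        receiver.recv().unwrap()
    }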
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-fullnode"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,15 +12,15 @@ homepage = "https://solana.com/"
 clap = "2.33.0"
 log = "0.4.2"
 serde_json = "1.0.39"
-solana = { path = "../core", version = "0.13.0" }
-solana-drone = { path = "../drone", version = "0.13.0" }
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-netutil = { path = "../netutil", version = "0.13.0" }
-solana-metrics = { path = "../metrics", version = "0.13.0" }
-solana-runtime = { path = "../runtime", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.13.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.13.0" }
+solana = { path = "../core", version = "0.14.0" }
+solana-drone = { path = "../drone", version = "0.14.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }
+solana-metrics = { path = "../metrics", version = "0.14.0" }
+solana-runtime = { path = "../runtime", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.14.0" }
+solana-vote-signer = { path = "../vote-signer", version = "0.14.0" }

 [features]
 chacha = ["solana/chacha"]
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-genesis"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,18 +11,19 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 serde_json = "1.0.39"
-solana = { path = "../core", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
-solana-budget-api = { path = "../programs/budget_api", version = "0.13.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.13.0" }
-solana-token-api = { path = "../programs/token_api", version = "0.13.0" }
-solana-config-api = { path = "../programs/config_api", version = "0.13.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.13.0" }
+solana = { path = "../core", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.14.0" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.14.0" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.14.0" }
+solana-token-api = { path = "../programs/token_api", version = "0.14.0" }
+solana-config-api = { path = "../programs/config_api", version = "0.14.0" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.14.0" }


 [dev-dependencies]
-hashbrown = "0.2.1"
-solana-vote-api = { path = "../programs/vote_api", version = "0.13.0" }
+hashbrown = "0.3.0"
+solana-vote-api = { path = "../programs/vote_api", version = "0.14.0" }

 [features]
 cuda = ["solana/cuda"]
@@ -14,6 +14,7 @@ use std::error;
 pub const BOOTSTRAP_LEADER_LAMPORTS: u64 = 43;

 fn main() -> Result<(), Box<dyn error::Error>> {
+    let default_bootstrap_leader_lamports = &BOOTSTRAP_LEADER_LAMPORTS.to_string();
     let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(crate_version!())
@@ -62,6 +63,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .required(true)
                 .help("Path to file containing the bootstrap leader's staking keypair"),
         )
+        .arg(
+            Arg::with_name("bootstrap_leader_lamports")
+                .long("bootstrap-leader-lamports")
+                .value_name("LAMPORTS")
+                .takes_value(true)
+                .default_value(default_bootstrap_leader_lamports)
+                .required(true)
+                .help("Number of lamports to assign to the bootstrap leader"),
+        )
         .get_matches();

     let bootstrap_leader_keypair_file = matches.value_of("bootstrap_leader_keypair_file").unwrap();
@@ -69,6 +79,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let ledger_path = matches.value_of("ledger_path").unwrap();
     let mint_keypair_file = matches.value_of("mint_keypair_file").unwrap();
     let lamports = value_t_or_exit!(matches, "lamports", u64);
+    let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);

     let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?;
     let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?;
@@ -77,7 +88,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let (mut genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(
         lamports,
         &bootstrap_leader_keypair.pubkey(),
-        BOOTSTRAP_LEADER_LAMPORTS,
+        bootstrap_leader_lamports,
     );
     genesis_block.mint_id = mint_keypair.pubkey();
     genesis_block.bootstrap_leader_vote_account_id = bootstrap_vote_keypair.pubkey();
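The flag defaults to the old constant, so existing invocations keep their behavior; to override it, a hypothetical command line would simply append `--bootstrap-leader-lamports 42` to the usual solana-genesis arguments.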
@@ -112,43 +123,39 @@ mod tests {
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0,
         ]);
-        let native = Pubkey::new(&[
-            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]);
-        let bpf = Pubkey::new(&[
-            128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let budget = Pubkey::new(&[
-            129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let storage = Pubkey::new(&[
-            130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let token = Pubkey::new(&[
-            131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let vote = Pubkey::new(&[
-            132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let config = Pubkey::new(&[
-            133, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
-        let exchange = Pubkey::new(&[
-            134, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0, 0,
-        ]);
+        let native_loader = "NativeLoader1111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let bpf_loader = "BPFLoader1111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let budget = "Budget1111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let stake = "Stake11111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let storage = "Storage111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let token = "Token11111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let vote = "Vote111111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let config = "Config1111111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();
+        let exchange = "Exchange11111111111111111111111111111111111"
+            .parse::<Pubkey>()
+            .unwrap();

         assert_eq!(solana_sdk::system_program::id(), system);
-        assert_eq!(solana_sdk::native_loader::id(), native);
-        assert_eq!(solana_sdk::bpf_loader::id(), bpf);
+        assert_eq!(solana_sdk::native_loader::id(), native_loader);
+        assert_eq!(solana_sdk::bpf_loader::id(), bpf_loader);
         assert_eq!(solana_budget_api::id(), budget);
+        assert_eq!(solana_stake_api::id(), stake);
         assert_eq!(solana_storage_api::id(), storage);
         assert_eq!(solana_token_api::id(), token);
         assert_eq!(solana_vote_api::id(), vote);
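The rewrite leans on Pubkey's FromStr impl, which parses the human-readable base58 form; a minimal sketch of the pattern, using an id string taken from the test above:

    use solana_sdk::pubkey::Pubkey;

    fn main() {
        // Parse the base58 text form back into a Pubkey and compare it with
        // the constant id exported by the budget program crate.
        let budget: Pubkey = "Budget1111111111111111111111111111111111111"
            .parse()
            .unwrap();
        assert_eq!(solana_budget_api::id(), budget);
    }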
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-gossip"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,9 +11,10 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 env_logger = "0.6.1"
-solana = { path = "../core", version = "0.13.0" }
-solana-netutil = { path = "../netutil", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
+solana = { path = "../core", version = "0.14.0" }
+solana-client = { path = "../client", version = "0.14.0" }
+solana-netutil = { path = "../netutil", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }

 [features]
 chacha = []
@@ -1,7 +1,9 @@
 //! A command-line executable for monitoring a cluster's gossip plane.

-use clap::{crate_description, crate_name, crate_version, App, Arg};
+use clap::{crate_description, crate_name, crate_version, App, AppSettings, Arg, SubCommand};
+use solana::contact_info::ContactInfo;
 use solana::gossip_service::discover;
+use solana_client::rpc_client::RpcClient;
 use solana_sdk::pubkey::Pubkey;
 use std::error;
 use std::net::SocketAddr;
@@ -22,6 +24,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(crate_version!())
+        .setting(AppSettings::SubcommandRequiredElseHelp)
         .arg(
             Arg::with_name("network")
                 .short("n")
@@ -31,6 +34,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .default_value(&network_string)
                 .help("Rendezvous with the cluster at this gossip entry point"),
         )
+        .subcommand(
+            SubCommand::with_name("spy")
+                .about("Monitor the gossip network")
+                .setting(AppSettings::DisableVersion)
                 .arg(
                     Arg::with_name("num_nodes")
                         .short("N")
@@ -62,7 +69,23 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                         .long("timeout")
                         .value_name("SECS")
                         .takes_value(true)
-                        .help("Maximum time to wait for cluster to converge [default: wait forever]"),
+                        .help(
+                            "Maximum time to wait for cluster to converge [default: wait forever]",
+                        ),
+                ),
+        )
+        .subcommand(
+            SubCommand::with_name("stop")
+                .about("Send stop request to a node")
+                .setting(AppSettings::DisableVersion)
+                .arg(
+                    Arg::with_name("node_pubkey")
+                        .index(1)
+                        .required(true)
+                        .value_name("PUBKEY")
+                        .validator(pubkey_validator)
+                        .help("Public key of a specific node to stop"),
+                ),
         )
         .get_matches();

@@ -72,7 +95,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
             exit(1)
         });
     }
-
+    match matches.subcommand() {
+        ("spy", Some(matches)) => {
             let num_nodes_exactly = matches
                 .value_of("num_nodes_exactly")
                 .map(|num| num.to_string().parse().unwrap());
@@ -115,5 +139,30 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                     num_nodes_exactly.unwrap()
                 );
             }
+        }
+        ("stop", Some(matches)) => {
+            let pubkey = matches
+                .value_of("node_pubkey")
+                .unwrap()
+                .parse::<Pubkey>()
+                .unwrap();
+            let nodes = discover(&network_addr, None, None, Some(pubkey))?;
+            let node = nodes.iter().find(|x| x.id == pubkey).unwrap();
+
+            if !ContactInfo::is_valid_address(&node.rpc) {
+                eprintln!("Error: RPC service is not enabled on node {:?}", pubkey);
+            }
+            println!("\nSending stop request to node {:?}", pubkey);
+
+            let result = RpcClient::new_socket(node.rpc).fullnode_exit()?;
+            if result {
+                println!("Stop signal accepted");
+            } else {
+                eprintln!("Error: Stop signal ignored");
+            }
+        }
+        _ => unreachable!(),
+    }
+
     Ok(())
 }
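With SubcommandRequiredElseHelp set, the binary now insists on one of the two subcommands; hypothetical invocations would look like `solana-gossip spy --timeout 30` to watch the network, or `solana-gossip stop <PUBKEY>` to send the exit request (the pubkey is a required positional argument).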
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-install"
 description = "The solana cluster software installer"
-version = "0.13.0"
+version = "0.14.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -28,10 +28,10 @@ ring = "0.13.2"
 serde = "1.0.90"
 serde_derive = "1.0.90"
 serde_yaml = "0.8.8"
-solana-client = { path = "../client", version = "0.13.0" }
-solana-config-api = { path = "../programs/config_api", version = "0.13.0" }
-solana-logger = { path = "../logger", version = "0.13.0" }
-solana-sdk = { path = "../sdk", version = "0.13.0" }
-tar = "0.4.22"
+solana-client = { path = "../client", version = "0.14.0" }
+solana-config-api = { path = "../programs/config_api", version = "0.14.0" }
+solana-logger = { path = "../logger", version = "0.14.0" }
+solana-sdk = { path = "../sdk", version = "0.14.0" }
+tar = "0.4.23"
 tempdir = "0.3.7"
 url = "1.7.2"
@@ -48,7 +48,6 @@ main() {
     need_cmd chmod
     need_cmd mkdir
     need_cmd rm
-    need_cmd rmdir
     need_cmd sed
     need_cmd grep

@@ -113,7 +112,7 @@ main() {
     retval=$?

     ignore rm "$solana_install"
-    ignore rmdir "$temp_dir"
+    ignore rm -rf "$temp_dir"

     return "$retval"
 }
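Swapping rmdir for rm -rf matters because rmdir only removes empty directories; $temp_dir is presumably no longer guaranteed to be empty at cleanup time.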
Some files were not shown because too many files have changed in this diff.