Compare commits
65 Commits
SHA1:
6fbad24477
4bd018e68b
c73e40a351
8f790e3153
1972d8b5c0
bc2fd56516
00916b2ca6
006a5c5c88
6666e54a1f
d6ea4f50c9
a0965e1eba
7ca65341e6
141a5928c4
5f0584b6e8
b7fb739cd9
5a4a238029
01987f8f89
82caa50781
60b1bcddb5
dce7739b75
1c703af6a2
f49de3b1ad
5c1b79f500
da04616fd4
8653c86284
809e4cbf25
1aef482972
248ab3a6ec
ec1f2b4f90
c853632fc4
e651209f73
641f439a45
a2486f8094
d48bd80619
4ff70a05f1
7831cef9a7
7dd22d6601
3bb0388299
a0a2c61856
4afa64c20d
be6edb950c
62bc83ef39
f26824f2b5
bc808d785b
a5e91f8b14
79b1d49e42
5c5207b7c4
6280ea1b6e
f016ccdbb5
a528e966e6
4be9d926c8
94e162b0f0
26ca3c6d6d
729b997392
37b381f47f
0115bfa2ea
3f60fe62c2
ea44e64d21
8e1c2d2df4
a79702c62c
3c94084177
7d448eb1a9
a705764ca7
3110def6c3
afc89beefa
.buildkite/env/secrets.ejson (vendored, 14 changed lines)
@@ -1,12 +1,12 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
-    "CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
-    "GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
-    "GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
-    "INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
-    "INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
-    "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
+    "CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
+    "CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
+    "GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
+    "GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
+    "INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
+    "INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
+    "INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
   }
 }
.mergify.yml (33 changed lines)
@@ -1,9 +1,40 @@
 # Validate your changes with:
 #
-# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
+# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
 #
 # https://doc.mergify.io/
 pull_request_rules:
+  - name: automatic merge (squash) on CI success
+    conditions:
+      - status-success=buildkite/solana
+      #- status-success=Travis CI - Pull Request
+      - status-success=ci-gate
+      - label=automerge
+      - author≠@dont-squash-my-commits
+    actions:
+      merge:
+        method: squash
+  # Join the dont-squash-my-commits group if you won't like your commits squashed
+  - name: automatic merge (rebase) on CI success
+    conditions:
+      - status-success=buildkite/solana
+      #- status-success=Travis CI - Pull Request
+      - status-success=ci-gate
+      - label=automerge
+      - author=@dont-squash-my-commits
+    actions:
+      merge:
+        method: rebase
+  - name: remove automerge label on CI failure
+    conditions:
+      - label=automerge
+      - "#status-failure!=0"
+    actions:
+      label:
+        remove:
+          - automerge
+      comment:
+        message: automerge label removed due to a CI failure
   - name: remove outdated reviews
     conditions:
       - base=master
@@ -18,6 +18,8 @@ branches:
   - master
   - /^v\d+\.\d+/

+if: type IN (api, cron) OR tag IS present
+
 notifications:
   slack:
     on_success: change
Cargo.lock (generated, 798 changed lines): file diff suppressed because it is too large
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-accounts-bench"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.3.0"
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-measure = { path = "../measure", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-measure = { path = "../measure", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
 rand = "0.7.0"
 clap = "2.33.0"
 crossbeam-channel = "0.4"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-archiver-lib"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Archiver Library"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.3"
 log = "0.4.8"
 rand = "0.7.0"
 rand_chacha = "0.2.2"
-solana-client = { path = "../client", version = "1.1.16" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
 thiserror = "1.0"
 serde = "1.0.105"
 serde_json = "1.0.48"
 serde_derive = "1.0.103"
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-chacha = { path = "../chacha", version = "1.1.16" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-core = { path = "../core", version = "1.1.16" }
-solana-streamer = { path = "../streamer", version = "1.1.16" }
-solana-archiver-utils = { path = "../archiver-utils", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-chacha = { path = "../chacha", version = "1.1.20" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-streamer = { path = "../streamer", version = "1.1.20" }
+solana-archiver-utils = { path = "../archiver-utils", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }

 [dev-dependencies]
 hex = "0.4.2"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-archiver-utils"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Archiver Utils"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
 rand = "0.7.0"
-solana-chacha = { path = "../chacha", version = "1.1.16" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-chacha = { path = "../chacha", version = "1.1.20" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }

 [dev-dependencies]
 hex = "0.4.2"
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-archiver"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 console = "0.10.0"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-core = { path = "../core", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
-solana-archiver-lib = { path = "../archiver-lib", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }
+solana-archiver-lib = { path = "../archiver-lib", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }


 [package.metadata.docs.rs]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,15 +13,15 @@ crossbeam-channel = "0.4"
 log = "0.4.6"
 rand = "0.7.0"
 rayon = "1.3.0"
-solana-core = { path = "../core", version = "1.1.16" }
-solana-streamer = { path = "../streamer", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-measure = { path = "../measure", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-version = { path = "../version", version = "1.1.16" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-streamer = { path = "../streamer", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-measure = { path = "../measure", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-version = { path = "../version", version = "1.1.20" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -18,20 +18,20 @@ rand = "0.7.0"
 rayon = "1.3.0"
 serde_json = "1.0.48"
 serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-core = { path = "../core", version = "1.1.16" }
-solana-genesis = { path = "../genesis", version = "1.1.16" }
-solana-client = { path = "../client", version = "1.1.16" }
-solana-faucet = { path = "../faucet", version = "1.1.16" }
-solana-exchange-program = { path = "../programs/exchange", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-genesis = { path = "../genesis", version = "1.1.20" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-faucet = { path = "../faucet", version = "1.1.20" }
+solana-exchange-program = { path = "../programs/exchange", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "1.1.16" }
+solana-local-cluster = { path = "../local-cluster", version = "1.1.20" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -2,17 +2,17 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 clap = "2.33.0"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-streamer = { path = "../streamer", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-streamer = { path = "../streamer", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
 rayon = "1.3.0"
 serde_json = "1.0.48"
 serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-core = { path = "../core", version = "1.1.16" }
-solana-genesis = { path = "../genesis", version = "1.1.16" }
-solana-client = { path = "../client", version = "1.1.16" }
-solana-faucet = { path = "../faucet", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-genesis = { path = "../genesis", version = "1.1.20" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-faucet = { path = "../faucet", version = "1.1.20" }
 #solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
-solana-measure = { path = "../measure", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }
+solana-measure = { path = "../measure", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
 #solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }

 [dev-dependencies]
 serial_test = "0.4.0"
 serial_test_derive = "0.4.0"
-solana-local-cluster = { path = "../local-cluster", version = "1.1.16" }
+solana-local-cluster = { path = "../local-cluster", version = "1.1.20" }

 #[features]
 #move = ["solana-librapay", "solana-move-loader-program"]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-cuda"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Chacha Cuda APIs"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"

 [dependencies]
 log = "0.4.8"
-solana-archiver-utils = { path = "../archiver-utils", version = "1.1.16" }
-solana-chacha = { path = "../chacha", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-archiver-utils = { path = "../archiver-utils", version = "1.1.20" }
+solana-chacha = { path = "../chacha", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }

 [dev-dependencies]
 hex-literal = "0.2.1"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-sys"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana chacha-sys"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Chacha APIs"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
 log = "0.4.8"
 rand = "0.7.0"
 rand_chacha = "0.2.2"
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }

 [dev-dependencies]
 hex-literal = "0.2.1"
@@ -5,6 +5,9 @@
 # Release tags use buildkite-release.yml instead

 steps:
+  - command: "ci/test-sanity.sh"
+    name: "sanity"
+    timeout_in_minutes: 5
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
     name: "checks"
     timeout_in_minutes: 20
@@ -19,13 +19,16 @@ while [[ ! -f config/run/init-completed ]]; do
   fi
 done

-while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -eq 0 ]]; do
+snapshot_slot=1
+
+# wait a bit longer than snapshot_slot
+while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
   sleep 1
 done
 curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899

 wait $pid

-$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
+$solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" config/snapshot-ledger
 cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
 $solana_ledger_tool verify --ledger config/snapshot-ledger
@@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
 export RUST_BACKTRACE=1
 export RUSTFLAGS="-D warnings"

-# Look for failed mergify.io backports
-_ git show HEAD --check --oneline
-
 _ cargo +"$rust_stable" fmt --all -- --check

 # Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
@@ -23,10 +20,8 @@ _ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnin

 _ cargo +"$rust_stable" audit --version
 _ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
-_ ci/nits.sh
 _ ci/order-crates-for-publishing.py
 _ docs/build.sh
-_ ci/check-ssh-keys.sh

 {
   cd programs/bpf
ci/test-sanity.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/.."
+
+source ci/_
+
+(
+  echo --- git diff --check
+  set -x
+  # Look for failed mergify.io backports by searching leftover conflict markers
+  # Also check for any trailing whitespaces!
+  if [[ -n $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    base_branch=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
+  else
+    base_branch=$BUILDKITE_BRANCH
+  fi
+  git fetch origin "$base_branch"
+  git diff "$(git merge-base HEAD "origin/$base_branch")..HEAD" --check --oneline
+)
+
+echo
+
+_ ci/nits.sh
+_ ci/check-ssh-keys.sh
+
+echo --- ok
@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
   exit 1
 fi

-if [[ -z $CI_REPO_SLUG ]]; then
-  echo Error: CI_REPO_SLUG not defined
-  exit 1
-fi
+# Force CI_REPO_SLUG since sometimes
+# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
+# artifact upload to fail
+CI_REPO_SLUG=solana-labs/solana
+#if [[ -z $CI_REPO_SLUG ]]; then
+#  echo Error: CI_REPO_SLUG not defined
+#  exit 1
+#fi

 releaseId=$( \
   curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
@@ -1,6 +1,6 @@
 [package]
 name = "solana-clap-utils"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana utilities for the clap"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
 [dependencies]
 clap = "2.33.0"
 rpassword = "4.0"
-solana-remote-wallet = { path = "../remote-wallet", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
 thiserror = "1.0.11"
 tiny-bip39 = "0.7.0"
 url = "2.1.0"
@@ -6,50 +6,86 @@ use solana_sdk::{
     pubkey::Pubkey,
     signature::{read_keypair_file, Signature},
 };
+use std::fmt::Display;
 use std::str::FromStr;

+fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+    U: FromStr,
+    U::Err: Display,
+{
+    string
+        .as_ref()
+        .parse::<U>()
+        .map(|_| ())
+        .map_err(|err| format!("error parsing '{}': {}", string, err))
+}
+
+// Return an error if string cannot be parsed as type T.
+// Takes a String to avoid second type parameter when used as a clap validator
+pub fn is_parsable<T>(string: String) -> Result<(), String>
+where
+    T: FromStr,
+    T::Err: Display,
+{
+    is_parsable_generic::<T, String>(string)
+}
+
 // Return an error if a pubkey cannot be parsed.
-pub fn is_pubkey(string: String) -> Result<(), String> {
-    match string.parse::<Pubkey>() {
-        Ok(_) => Ok(()),
-        Err(err) => Err(format!("{}", err)),
-    }
+pub fn is_pubkey<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    is_parsable_generic::<Pubkey, _>(string)
 }

 // Return an error if a hash cannot be parsed.
-pub fn is_hash(string: String) -> Result<(), String> {
-    match string.parse::<Hash>() {
-        Ok(_) => Ok(()),
-        Err(err) => Err(format!("{}", err)),
-    }
+pub fn is_hash<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    is_parsable_generic::<Hash, _>(string)
 }

 // Return an error if a keypair file cannot be parsed.
-pub fn is_keypair(string: String) -> Result<(), String> {
-    read_keypair_file(&string)
+pub fn is_keypair<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    read_keypair_file(string.as_ref())
         .map(|_| ())
         .map_err(|err| format!("{}", err))
 }

 // Return an error if a keypair file cannot be parsed
-pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
-    if string.as_str() == ASK_KEYWORD {
+pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    if string.as_ref() == ASK_KEYWORD {
         return Ok(());
     }
-    read_keypair_file(&string)
+    read_keypair_file(string.as_ref())
        .map(|_| ())
        .map_err(|err| format!("{}", err))
 }

 // Return an error if string cannot be parsed as pubkey string or keypair file location
-pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
-    is_pubkey(string.clone()).or_else(|_| is_keypair(string))
+pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
 }

 // Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
 // produce a pubkey()
-pub fn is_valid_pubkey(string: String) -> Result<(), String> {
-    match parse_keypair_path(&string) {
+pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    match parse_keypair_path(string.as_ref()) {
         KeypairUrl::Filepath(path) => is_keypair(path),
         _ => Ok(()),
     }
@@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
 // when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
 // Clap validators can't check multiple fields at once, so the verification that a `--signer` is
 // also provided and correct happens in parsing, not in validation.
-pub fn is_valid_signer(string: String) -> Result<(), String> {
+pub fn is_valid_signer<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
     is_valid_pubkey(string)
 }

 // Return an error if string cannot be parsed as pubkey=signature string
-pub fn is_pubkey_sig(string: String) -> Result<(), String> {
-    let mut signer = string.split('=');
+pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    let mut signer = string.as_ref().split('=');
     match Pubkey::from_str(
         signer
             .next()
@@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
 }

 // Return an error if a url cannot be parsed.
-pub fn is_url(string: String) -> Result<(), String> {
-    match url::Url::parse(&string) {
+pub fn is_url<T>(string: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    match url::Url::parse(string.as_ref()) {
         Ok(url) => {
             if url.has_host() {
                 Ok(())
@@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
     }
 }

-pub fn is_slot(slot: String) -> Result<(), String> {
-    slot.parse::<Slot>()
-        .map(|_| ())
-        .map_err(|e| format!("{}", e))
+pub fn is_slot<T>(slot: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    is_parsable_generic::<Slot, _>(slot)
 }

-pub fn is_port(port: String) -> Result<(), String> {
-    port.parse::<u16>()
-        .map(|_| ())
-        .map_err(|e| format!("{}", e))
+pub fn is_port<T>(port: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    is_parsable_generic::<u16, _>(port)
 }

-pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
+pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
     percentage
+        .as_ref()
         .parse::<u8>()
         .map_err(|e| {
             format!(
@@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
     })
 }

-pub fn is_amount(amount: String) -> Result<(), String> {
-    if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
+pub fn is_amount<T>(amount: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
         Ok(())
     } else {
         Err(format!(
@@ -147,14 +201,20 @@ pub fn is_amount(amount: String) -> Result<(), String> {
     }
 }

-pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
-    DateTime::parse_from_rfc3339(&value)
+pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    DateTime::parse_from_rfc3339(value.as_ref())
         .map(|_| ())
         .map_err(|e| format!("{}", e))
 }

-pub fn is_derivation(value: String) -> Result<(), String> {
-    let value = value.replace("'", "");
+pub fn is_derivation<T>(value: T) -> Result<(), String>
+where
+    T: AsRef<str> + Display,
+{
+    let value = value.as_ref().replace("'", "");
     let mut parts = value.split('/');
     let account = parts.next().unwrap();
     account
@@ -186,14 +246,14 @@ mod tests {

     #[test]
     fn test_is_derivation() {
-        assert_eq!(is_derivation("2".to_string()), Ok(()));
-        assert_eq!(is_derivation("0".to_string()), Ok(()));
-        assert_eq!(is_derivation("65537".to_string()), Ok(()));
-        assert_eq!(is_derivation("0/2".to_string()), Ok(()));
-        assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
-        assert!(is_derivation("a".to_string()).is_err());
-        assert!(is_derivation("4294967296".to_string()).is_err());
-        assert!(is_derivation("a/b".to_string()).is_err());
-        assert!(is_derivation("0/4294967296".to_string()).is_err());
+        assert_eq!(is_derivation("2"), Ok(()));
+        assert_eq!(is_derivation("0"), Ok(()));
+        assert_eq!(is_derivation("65537"), Ok(()));
+        assert_eq!(is_derivation("0/2"), Ok(()));
+        assert_eq!(is_derivation("0'/2'"), Ok(()));
+        assert!(is_derivation("a").is_err());
+        assert!(is_derivation("4294967296").is_err());
+        assert!(is_derivation("a/b").is_err());
+        assert!(is_derivation("0/4294967296").is_err());
     }
 }
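The thrust of the clap-utils refactor above: every input validator previously took an owned String, which forced callers and tests to allocate with .to_string(); the validators are now generic over T: AsRef<str> + Display, with the shared parsing logic hoisted into is_parsable_generic. A minimal sketch of both call styles, assuming the validators are re-exported from the solana-clap-utils crate (the exact module path is not shown in this extract):

    // Hedged sketch: exercising the now-generic validators. The import path
    // below is an assumption; only the function names come from the diff.
    use clap::{App, Arg};
    use solana_clap_utils::input_validators::{is_slot, is_valid_percentage};

    fn main() {
        // Direct calls with &str compile only after this refactor, since
        // T: AsRef<str> + Display now covers string literals.
        assert_eq!(is_slot("42"), Ok(()));
        assert!(is_valid_percentage("not-a-number").is_err());

        // As clap 2.x validators: clap hands the callback an owned String,
        // so the compiler instantiates T = String through the Fn(String) bound.
        let _app = App::new("demo")
            .arg(Arg::with_name("slot").takes_value(true).validator(is_slot))
            .arg(
                Arg::with_name("commission")
                    .takes_value(true)
                    .validator(is_valid_percentage),
            );
    }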
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli-config"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -1,6 +1,6 @@
 // Wallet settings that can be configured for long-term use
 use serde_derive::{Deserialize, Serialize};
-use std::io;
+use std::{collections::HashMap, io};
 use url::Url;

 lazy_static! {
@@ -17,6 +17,8 @@ pub struct Config {
     pub json_rpc_url: String,
     pub websocket_url: String,
     pub keypair_path: String,
+    #[serde(default)]
+    pub address_labels: HashMap<String, String>,
 }

 impl Default for Config {
@@ -36,6 +38,7 @@ impl Default for Config {
             json_rpc_url,
             websocket_url,
             keypair_path,
+            address_labels: HashMap::new(),
         }
     }
 }
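One detail worth noting in the Config change above: address_labels is annotated #[serde(default)], so configuration files written before this field existed still deserialize, with the map simply defaulting to empty. A self-contained sketch of that behavior; the struct mirrors the diff, while the inline payload and the serde_yaml dependency are assumptions (the on-disk format is not shown in this extract):

    // Hedged sketch: an old-style config with no address_labels key still
    // parses because of #[serde(default)]. Assumes serde (derive feature)
    // and serde_yaml as dependencies.
    use serde::Deserialize;
    use std::collections::HashMap;

    #[derive(Deserialize)]
    struct Config {
        json_rpc_url: String,
        websocket_url: String,
        keypair_path: String,
        #[serde(default)]
        address_labels: HashMap<String, String>,
    }

    fn main() {
        let old_style =
            "json_rpc_url: http://127.0.0.1:8899\nwebsocket_url: ''\nkeypair_path: /path/to/id.json\n";
        let cfg: Config = serde_yaml::from_str(old_style).unwrap();
        // The missing key falls back to an empty map instead of an error.
        assert!(cfg.address_labels.is_empty());
        assert_eq!(cfg.json_rpc_url, "http://127.0.0.1:8899");
    }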
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
 serde = "1.0.105"
 serde_derive = "1.0.103"
 serde_json = "1.0.48"
-solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-cli-config = { path = "../cli-config", version = "1.1.16" }
-solana-client = { path = "../client", version = "1.1.16" }
-solana-config-program = { path = "../programs/config", version = "1.1.16" }
-solana-faucet = { path = "../faucet", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-remote-wallet = { path = "../remote-wallet", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
-solana-transaction-status = { path = "../transaction-status", version = "1.1.16" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
-solana-vote-signer = { path = "../vote-signer", version = "1.1.16" }
+solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-cli-config = { path = "../cli-config", version = "1.1.20" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-config-program = { path = "../programs/config", version = "1.1.20" }
+solana-faucet = { path = "../faucet", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
+solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
+solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
+solana-vote-signer = { path = "../vote-signer", version = "1.1.20" }
 thiserror = "1.0.13"
 url = "2.1.1"

 [dev-dependencies]
-solana-core = { path = "../core", version = "1.1.16" }
-solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
 tempfile = "3.1.0"

 [[bin]]
@@ -382,6 +382,7 @@
     },
     // Vote Commands
     CreateVoteAccount {
+        vote_account: SignerIndex,
         seed: Option<String>,
         identity_account: SignerIndex,
         authorized_voter: Option<Pubkey>,
@@ -407,6 +408,12 @@
     VoteUpdateValidator {
         vote_account_pubkey: Pubkey,
         new_identity_account: SignerIndex,
+        withdraw_authority: SignerIndex,
+    },
+    VoteUpdateCommission {
+        vote_account_pubkey: Pubkey,
+        commission: u8,
+        withdraw_authority: SignerIndex,
     },
     // Wallet Commands
     Address,
@@ -728,6 +735,9 @@ pub fn parse_command(
         ("vote-update-validator", Some(matches)) => {
             parse_vote_update_validator(matches, default_signer_path, wallet_manager)
         }
+        ("vote-update-commission", Some(matches)) => {
+            parse_vote_update_commission(matches, default_signer_path, wallet_manager)
+        }
         ("vote-authorize-voter", Some(matches)) => parse_vote_authorize(
             matches,
             default_signer_path,
@@ -2159,6 +2169,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {

         // Create vote account
         CliCommand::CreateVoteAccount {
+            vote_account,
            seed,
            identity_account,
            authorized_voter,
@@ -2167,6 +2178,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        } => process_create_vote_account(
            &rpc_client,
            config,
+            *vote_account,
            seed,
            *identity_account,
            authorized_voter,
@@ -2211,11 +2223,24 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        CliCommand::VoteUpdateValidator {
            vote_account_pubkey,
            new_identity_account,
+            withdraw_authority,
        } => process_vote_update_validator(
            &rpc_client,
            config,
            &vote_account_pubkey,
            *new_identity_account,
+            *withdraw_authority,
+        ),
+        CliCommand::VoteUpdateCommission {
+            vote_account_pubkey,
+            commission,
+            withdraw_authority,
+        } => process_vote_update_commission(
+            &rpc_client,
+            config,
+            &vote_account_pubkey,
+            *commission,
+            *withdraw_authority,
        ),

        // Wallet Commands
@@ -3461,6 +3486,7 @@ mod tests {
        let bob_pubkey = bob_keypair.pubkey();
        let identity_keypair = Keypair::new();
        config.command = CliCommand::CreateVoteAccount {
+            vote_account: 1,
            seed: None,
            identity_account: 2,
            authorized_voter: Some(bob_pubkey),
@@ -3486,6 +3512,7 @@ mod tests {
        config.command = CliCommand::VoteUpdateValidator {
            vote_account_pubkey: bob_pubkey,
            new_identity_account: 2,
+            withdraw_authority: 1,
        };
        let result = process_command(&config);
        assert!(result.is_ok());
@@ -3686,6 +3713,7 @@ mod tests {
        let bob_keypair = Keypair::new();
        let identity_keypair = Keypair::new();
        config.command = CliCommand::CreateVoteAccount {
+            vote_account: 1,
            seed: None,
            identity_account: 2,
            authorized_voter: Some(bob_pubkey),
@@ -3705,6 +3733,7 @@ mod tests {
        config.command = CliCommand::VoteUpdateValidator {
            vote_account_pubkey: bob_pubkey,
            new_identity_account: 1,
+            withdraw_authority: 1,
        };
        assert!(process_command(&config).is_err());
cli/src/vote.rs (128 changed lines)
@@ -174,6 +174,37 @@ impl VoteSubCommands for App<'_, '_> {
                         .help("Authorized withdrawer keypair"),
                 )
         )
+        .subcommand(
+            SubCommand::with_name("vote-update-commission")
+                .about("Update the vote account's commission")
+                .arg(
+                    Arg::with_name("vote_account_pubkey")
+                        .index(1)
+                        .value_name("VOTE_ACCOUNT_ADDRESS")
+                        .takes_value(true)
+                        .required(true)
+                        .validator(is_valid_pubkey)
+                        .help("Vote account to update"),
+                )
+                .arg(
+                    Arg::with_name("commission")
+                        .index(2)
+                        .value_name("PERCENTAGE")
+                        .takes_value(true)
+                        .required(true)
+                        .validator(is_valid_percentage)
+                        .help("The new commission")
+                )
+                .arg(
+                    Arg::with_name("authorized_withdrawer")
+                        .index(3)
+                        .value_name("AUTHORIZED_KEYPAIR")
+                        .takes_value(true)
+                        .required(true)
+                        .validator(is_valid_signer)
+                        .help("Authorized withdrawer keypair"),
+                )
+        )
         .subcommand(
             SubCommand::with_name("vote-account")
                 .about("Show the contents of a vote account")
@@ -242,7 +273,7 @@ pub fn parse_create_vote_account(
     default_signer_path: &str,
     wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
 ) -> Result<CliCommandInfo, CliError> {
-    let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
+    let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
     let seed = matches.value_of("seed").map(|s| s.to_string());
     let (identity_account, identity_pubkey) =
         signer_of(matches, "identity_account", wallet_manager)?;
@@ -260,6 +291,7 @@ pub fn parse_create_vote_account(

     Ok(CliCommandInfo {
         command: CliCommand::CreateVoteAccount {
+            vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
             seed,
             identity_account: signer_info.index_of(identity_pubkey).unwrap(),
             authorized_voter,
@@ -309,7 +341,8 @@ pub fn parse_vote_update_validator(
         pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
     let (new_identity_account, new_identity_pubkey) =
         signer_of(matches, "new_identity_account", wallet_manager)?;
-    let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
+    let (authorized_withdrawer, authorized_withdrawer_pubkey) =
+        signer_of(matches, "authorized_withdrawer", wallet_manager)?;

     let payer_provided = None;
     let signer_info = generate_unique_signers(
@@ -323,6 +356,36 @@
         command: CliCommand::VoteUpdateValidator {
             vote_account_pubkey,
             new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
+            withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
         },
         signers: signer_info.signers,
     })
 }

+pub fn parse_vote_update_commission(
+    matches: &ArgMatches<'_>,
+    default_signer_path: &str,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let vote_account_pubkey =
+        pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
+    let (authorized_withdrawer, authorized_withdrawer_pubkey) =
+        signer_of(matches, "authorized_withdrawer", wallet_manager)?;
+    let commission = value_t_or_exit!(matches, "commission", u8);
+
+    let payer_provided = None;
+    let signer_info = generate_unique_signers(
+        vec![payer_provided, authorized_withdrawer],
+        matches,
+        default_signer_path,
+        wallet_manager,
+    )?;
+
+    Ok(CliCommandInfo {
+        command: CliCommand::VoteUpdateCommission {
+            vote_account_pubkey,
+            commission,
+            withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
+        },
+        signers: signer_info.signers,
+    })
+}
@@ -381,13 +444,14 @@ pub fn parse_withdraw_from_vote_account(
 pub fn process_create_vote_account(
     rpc_client: &RpcClient,
     config: &CliConfig,
+    vote_account: SignerIndex,
     seed: &Option<String>,
     identity_account: SignerIndex,
     authorized_voter: &Option<Pubkey>,
     authorized_withdrawer: &Option<Pubkey>,
     commission: u8,
 ) -> ProcessResult {
-    let vote_account = config.signers[1];
+    let vote_account = config.signers[vote_account];
     let vote_account_pubkey = vote_account.pubkey();
     let vote_account_address = if let Some(seed) = seed {
         Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
@@ -506,8 +570,9 @@ pub fn process_vote_update_validator(
     config: &CliConfig,
     vote_account_pubkey: &Pubkey,
     new_identity_account: SignerIndex,
+    withdraw_authority: SignerIndex,
 ) -> ProcessResult {
-    let authorized_withdrawer = config.signers[1];
+    let authorized_withdrawer = config.signers[withdraw_authority];
     let new_identity_account = config.signers[new_identity_account];
     let new_identity_pubkey = new_identity_account.pubkey();
     check_unique_pubkeys(
@@ -534,6 +599,34 @@ pub fn process_vote_update_validator(
     log_instruction_custom_error::<VoteError>(result, &config)
 }

+pub fn process_vote_update_commission(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    vote_account_pubkey: &Pubkey,
+    commission: u8,
+    withdraw_authority: SignerIndex,
+) -> ProcessResult {
+    let authorized_withdrawer = config.signers[withdraw_authority];
+    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
+    let ixs = vec![vote_instruction::update_commission(
+        vote_account_pubkey,
+        &authorized_withdrawer.pubkey(),
+        commission,
+    )];
+
+    let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
+    let mut tx = Transaction::new_unsigned(message);
+    tx.try_sign(&config.signers, recent_blockhash)?;
+    check_account_for_fee(
+        rpc_client,
+        &config.signers[0].pubkey(),
+        &fee_calculator,
+        &tx.message,
+    )?;
+    let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
+    log_instruction_custom_error::<VoteError>(result, &config)
+}
+
 fn get_vote_account(
     rpc_client: &RpcClient,
     vote_account_pubkey: &Pubkey,
@@ -729,6 +822,7 @@ mod tests {
        parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
        CliCommandInfo {
            command: CliCommand::CreateVoteAccount {
+                vote_account: 1,
                seed: None,
                identity_account: 2,
                authorized_voter: None,
@@ -757,6 +851,7 @@ mod tests {
        parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
        CliCommandInfo {
            command: CliCommand::CreateVoteAccount {
+                vote_account: 1,
                seed: None,
                identity_account: 2,
                authorized_voter: None,
@@ -789,6 +884,7 @@ mod tests {
        parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
        CliCommandInfo {
            command: CliCommand::CreateVoteAccount {
+                vote_account: 1,
                seed: None,
                identity_account: 2,
                authorized_voter: Some(authed),
@@ -819,6 +915,7 @@ mod tests {
        parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
        CliCommandInfo {
            command: CliCommand::CreateVoteAccount {
+                vote_account: 1,
                seed: None,
                identity_account: 2,
                authorized_voter: None,
@@ -846,6 +943,7 @@ mod tests {
            command: CliCommand::VoteUpdateValidator {
                vote_account_pubkey: pubkey,
                new_identity_account: 2,
+                withdraw_authority: 1,
            },
            signers: vec![
                read_keypair_file(&default_keypair_file).unwrap().into(),
@@ -855,6 +953,28 @@ mod tests {
            }
        );

+        let test_update_commission = test_commands.clone().get_matches_from(vec![
+            "test",
+            "vote-update-commission",
+            &pubkey_string,
+            "42",
+            &keypair_file,
+        ]);
+        assert_eq!(
+            parse_command(&test_update_commission, &default_keypair_file, &mut None).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::VoteUpdateCommission {
+                    vote_account_pubkey: pubkey,
+                    commission: 42,
+                    withdraw_authority: 1,
+                },
+                signers: vec![
+                    read_keypair_file(&default_keypair_file).unwrap().into(),
+                    Box::new(read_keypair_file(&keypair_file).unwrap()),
+                ],
+            }
+        );
+
        // Test WithdrawFromVoteAccount subcommand
        let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
            "test",
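Seen end to end, the new vote-update-commission path is thin: parse the three positional arguments, then submit a single vote_instruction::update_commission instruction signed by the authorized withdrawer, with the default signer paying the fee. A condensed sketch of that transaction construction, mirroring process_vote_update_commission above (the function name and keypair plumbing here are illustrative, not part of the diff):

    // Hedged sketch of what the new vote-update-commission flow boils down
    // to: one update_commission instruction, fee-paid by signer 0 and signed
    // by the authorized withdrawer. The instruction builder is the one the
    // diff calls; everything else is illustrative scaffolding.
    use solana_sdk::{
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        transaction::Transaction,
    };
    use solana_vote_program::vote_instruction;

    fn build_update_commission_tx(
        vote_account: &Pubkey,
        payer: &Keypair,
        withdrawer: &Keypair,
        commission: u8,
        recent_blockhash: Hash,
    ) -> Transaction {
        let ixs = vec![vote_instruction::update_commission(
            vote_account,
            &withdrawer.pubkey(),
            commission,
        )];
        let message = Message::new_with_payer(&ixs, Some(&payer.pubkey()));
        let mut tx = Transaction::new_unsigned(message);
        // Both the fee payer and the withdraw authority must sign.
        tx.try_sign(&[payer, withdrawer], recent_blockhash)
            .expect("signing with payer and withdrawer should succeed");
        tx
    }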
@@ -68,6 +68,7 @@ fn test_stake_delegation_force() {
     let vote_keypair = Keypair::new();
     config.signers = vec![&default_signer, &vote_keypair];
     config.command = CliCommand::CreateVoteAccount {
+        vote_account: 1,
         seed: None,
         identity_account: 0,
         authorized_voter: None,
@@ -57,6 +57,7 @@ fn test_vote_authorize_and_withdraw() {
     let vote_account_pubkey = vote_account_keypair.pubkey();
     config.signers = vec![&default_signer, &vote_account_keypair];
     config.command = CliCommand::CreateVoteAccount {
+        vote_account: 1,
         seed: None,
         identity_account: 0,
         authorized_voter: None,
@@ -111,6 +112,7 @@ fn test_vote_authorize_and_withdraw() {
     config.command = CliCommand::VoteUpdateValidator {
         vote_account_pubkey,
         new_identity_account: 2,
+        withdraw_authority: 1,
     };
     process_command(&config).unwrap();
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
 serde = "1.0.105"
 serde_derive = "1.0.103"
 serde_json = "1.0.48"
-solana-transaction-status = { path = "../transaction-status", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
+solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
 thiserror = "1.0"
 tungstenite = "0.10.1"
 url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
 assert_matches = "1.3.0"
 jsonrpc-core = "14.0.5"
 jsonrpc-http-server = "14.0.6"
-solana-logger = { path = "../logger", version = "1.1.16" }
+solana-logger = { path = "../logger", version = "1.1.20" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -102,6 +102,7 @@ impl fmt::Display for RpcRequest {
 pub const NUM_LARGEST_ACCOUNTS: usize = 20;
 pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
 pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
+pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;

 impl RpcRequest {
     pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
@@ -169,6 +169,9 @@ pub struct RpcEpochInfo {

     /// The absolute current slot
     pub absolute_slot: Slot,
+
+    /// The current block height
+    pub block_height: u64,
 }

 #[derive(Serialize, Deserialize, Clone, Debug)]
@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -42,37 +42,37 @@ regex = "1.3.6"
 serde = "1.0.105"
 serde_derive = "1.0.103"
 serde_json = "1.0.48"
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.16" }
-solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-client = { path = "../client", version = "1.1.16" }
-solana-transaction-status = { path = "../transaction-status", version = "1.1.16" }
-solana-faucet = { path = "../faucet", version = "1.1.16" }
-solana-genesis-programs = { path = "../genesis-programs", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-merkle-tree = { path = "../merkle-tree", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
-solana-measure = { path = "../measure", version = "1.1.16" }
-solana-net-utils = { path = "../net-utils", version = "1.1.16" }
-solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.16" }
-solana-perf = { path = "../perf", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
-solana-streamer = { path = "../streamer", version = "1.1.16" }
-solana-version = { path = "../version", version = "1.1.16" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
-solana-vote-signer = { path = "../vote-signer", version = "1.1.16" }
-solana-sys-tuner = { path = "../sys-tuner", version = "1.1.16" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.20" }
+solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
+solana-faucet = { path = "../faucet", version = "1.1.20" }
+solana-genesis-programs = { path = "../genesis-programs", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-merkle-tree = { path = "../merkle-tree", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }
+solana-measure = { path = "../measure", version = "1.1.20" }
+solana-net-utils = { path = "../net-utils", version = "1.1.20" }
+solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.20" }
+solana-perf = { path = "../perf", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
+solana-streamer = { path = "../streamer", version = "1.1.20" }
+solana-version = { path = "../version", version = "1.1.20" }
+solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
+solana-vote-signer = { path = "../vote-signer", version = "1.1.20" }
+solana-sys-tuner = { path = "../sys-tuner", version = "1.1.20" }
 tempfile = "3.1.0"
 thiserror = "1.0"
 tokio = "0.1"
 tokio-codec = "0.1"
 tokio-fs = "0.1"
 tokio-io = "0.1"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.16" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.20" }
 trees = "0.2.1"

 [dev-dependencies]
@@ -3,6 +3,7 @@
 extern crate test;

 use rand::{thread_rng, Rng};
+use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
 use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
 use solana_core::cluster_info::{ClusterInfo, Node};
 use solana_core::contact_info::ContactInfo;

@@ -48,7 +49,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
             &peers_and_stakes,
             &peers,
             &last_datapoint,
             &mut 0,
+            &mut TransmitShredsStats::default(),
         )
         .unwrap();
     });
@@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
 pub type UnprocessedPackets = Vec<PacketsAndOffsets>;

 /// Transaction forwarding
-pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
+pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;

 // Fixed thread size seems to be fastest on GCP setup
 pub const NUM_THREADS: u32 = 4;

@@ -549,6 +549,7 @@ impl BankingStage {
                 send_transaction_status_batch(
                     bank.clone(),
                     batch.transactions(),
+                    batch.iteration_order_vec(),
                     transaction_statuses,
                     TransactionBalancesSet::new(pre_balances, post_balances),
                     sender,
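The forwarding-offset change above is a behavior tweak: the constant is how many slots past the current one BankingStage looks when choosing the leader to forward its unprocessed transactions to, so 4 -> 1 targets the immediately following leader. A minimal sketch of that arithmetic; `forwarding_slot` is an illustrative name, not the crate's API, and the leader-schedule lookup itself is outside this diff:

    const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;

    // Sketch: the slot whose leader receives forwarded transactions.
    // With an offset of 1 this is the very next slot; the old value of 4
    // aimed four slots ahead.
    fn forwarding_slot(current_slot: u64) -> u64 {
        current_slot + FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET
    }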
@@ -35,7 +35,7 @@ use std::{
 };

 mod broadcast_fake_shreds_run;
-pub(crate) mod broadcast_metrics;
+pub mod broadcast_metrics;
 pub(crate) mod broadcast_utils;
 mod fail_entry_verification_broadcast_run;
 mod standard_broadcast_run;

@@ -374,13 +374,14 @@ pub fn broadcast_shreds(
     peers_and_stakes: &[(u64, usize)],
     peers: &[ContactInfo],
     last_datapoint_submit: &Arc<AtomicU64>,
     send_mmsg_total: &mut u64,
+    transmit_stats: &mut TransmitShredsStats,
 ) -> Result<()> {
     let broadcast_len = peers_and_stakes.len();
     if broadcast_len == 0 {
         update_peer_stats(1, 1, last_datapoint_submit);
         return Ok(());
     }
+    let mut shred_select = Measure::start("shred_select");
     let packets: Vec<_> = shreds
         .iter()
         .map(|shred| {

@@ -389,6 +390,8 @@ pub fn broadcast_shreds(
             (&shred.payload, &peers[broadcast_index].tvu)
         })
         .collect();
+    shred_select.stop();
+    transmit_stats.shred_select += shred_select.as_us();

     let mut sent = 0;
     let mut send_mmsg_time = Measure::start("send_mmsg");

@@ -401,7 +404,7 @@ pub fn broadcast_shreds(
         }
     }
     send_mmsg_time.stop();
-    *send_mmsg_total += send_mmsg_time.as_us();
+    transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();

     let num_live_peers = num_live_peers(&peers);
     update_peer_stats(
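The hunks above thread a TransmitShredsStats value through broadcast_shreds and time the new shred-select step. A self-contained sketch of that timing pattern, with a local stand-in for solana_measure's Measure so the snippet runs on its own:

    use std::time::Instant;

    #[derive(Default)]
    struct TransmitShredsStats {
        shred_select: u64,
    }

    struct Measure(Instant);
    impl Measure {
        fn start() -> Self {
            Measure(Instant::now())
        }
        fn as_us(&self) -> u64 {
            self.0.elapsed().as_micros() as u64
        }
    }

    fn select_peers_for_shreds(stats: &mut TransmitShredsStats) {
        let shred_select = Measure::start();
        // ... pick a destination peer for every shred here ...
        // Accumulate rather than assign: several batches fold into one
        // per-slot stats value, matching the `+=` in the diff.
        stats.shred_select += shred_select.as_us();
    }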
@@ -29,11 +29,12 @@ impl ProcessShredsStats {
 }

 #[derive(Default, Clone)]
-pub(crate) struct TransmitShredsStats {
-    pub(crate) transmit_elapsed: u64,
-    pub(crate) send_mmsg_elapsed: u64,
-    pub(crate) get_peers_elapsed: u64,
-    pub(crate) num_shreds: usize,
+pub struct TransmitShredsStats {
+    pub transmit_elapsed: u64,
+    pub send_mmsg_elapsed: u64,
+    pub get_peers_elapsed: u64,
+    pub shred_select: u64,
+    pub num_shreds: usize,
 }

 impl BroadcastStats for TransmitShredsStats {

@@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
         self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
         self.get_peers_elapsed += new_stats.get_peers_elapsed;
         self.num_shreds += new_stats.num_shreds;
+        self.shred_select += new_stats.shred_select;
     }
     fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
         datapoint_info!(

@@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
             ("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
             ("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
             ("num_shreds", self.num_shreds as i64, i64),
+            ("shred_select", self.shred_select as i64, i64),
         );
     }
 }

@@ -176,15 +179,16 @@ mod test {
     }

     #[test]
-    fn test_update() {
+    fn test_update_broadcast() {
         let start = Instant::now();
         let mut slot_broadcast_stats = SlotBroadcastStats::default();
         slot_broadcast_stats.update(
             &TransmitShredsStats {
                 transmit_elapsed: 1,
-                get_peers_elapsed: 1,
-                send_mmsg_elapsed: 1,
-                num_shreds: 1,
+                get_peers_elapsed: 2,
+                send_mmsg_elapsed: 3,
+                shred_select: 4,
+                num_shreds: 5,
             },
             &Some(BroadcastShredBatchInfo {
                 slot: 0,

@@ -198,16 +202,18 @@ mod test {
         assert_eq!(slot_0_stats.num_batches, 1);
         assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
         assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

         slot_broadcast_stats.update(
             &TransmitShredsStats {
-                transmit_elapsed: 1,
-                get_peers_elapsed: 1,
-                send_mmsg_elapsed: 1,
-                num_shreds: 1,
+                transmit_elapsed: 7,
+                get_peers_elapsed: 8,
+                send_mmsg_elapsed: 9,
+                shred_select: 10,
+                num_shreds: 11,
             },
             &None,
         );

@@ -217,9 +223,10 @@ mod test {
         assert_eq!(slot_0_stats.num_batches, 1);
         assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
         assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

         // If another batch is given, then total number of batches == num_expected_batches == 2,
         // so the batch should be purged from the HashMap

@@ -228,6 +235,7 @@ mod test {
                 transmit_elapsed: 1,
                 get_peers_elapsed: 1,
                 send_mmsg_elapsed: 1,
+                shred_select: 1,
                 num_shreds: 1,
             },
             &Some(BroadcastShredBatchInfo {
@@ -81,14 +81,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         // Broadcast data
         let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);

         let mut send_mmsg_total = 0;
         broadcast_shreds(
             sock,
             &shreds,
             &peers_and_stakes,
             &peers,
             &Arc::new(AtomicU64::new(0)),
             &mut send_mmsg_total,
+            &mut TransmitShredsStats::default(),
         )?;

         Ok(())
@@ -9,6 +9,7 @@ use solana_ledger::{
 };
 use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
 use std::collections::HashMap;
+use std::sync::RwLock;
 use std::time::Duration;

 #[derive(Clone)]

@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
     shred_version: u16,
     last_datapoint_submit: Arc<AtomicU64>,
     num_batches: usize,
+    broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
+    last_peer_update: Arc<AtomicU64>,
 }
+
+#[derive(Default)]
+struct BroadcastPeerCache {
+    peers: Vec<ContactInfo>,
+    peers_and_stakes: Vec<(u64, usize)>,
+}

 impl StandardBroadcastRun {

@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
             shred_version,
             last_datapoint_submit: Arc::new(AtomicU64::new(0)),
             num_batches: 0,
+            broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
+            last_peer_update: Arc::new(AtomicU64::new(0)),
         }
     }

@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
         shreds: Arc<Vec<Shred>>,
         broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
     ) -> Result<()> {
+        const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
         trace!("Broadcasting {:?} shreds", shreds.len());
         // Get the list of peers to broadcast to
-        let get_peers_start = Instant::now();
-        let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
-        let get_peers_elapsed = get_peers_start.elapsed();
+        let mut get_peers_time = Measure::start("broadcast::get_peers");
+        let now = timestamp();
+        let last = self.last_peer_update.load(Ordering::Relaxed);
+        if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
+            && self
+                .last_peer_update
+                .compare_and_swap(now, last, Ordering::Relaxed)
+                == last
+        {
+            let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
+            let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
+            w_broadcast_peer_cache.peers = peers;
+            w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
+        }
+        get_peers_time.stop();
+        let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();

+        let mut transmit_stats = TransmitShredsStats::default();
         // Broadcast the shreds
-        let transmit_start = Instant::now();
         let mut send_mmsg_total = 0;
+        let mut transmit_time = Measure::start("broadcast_shreds");
         broadcast_shreds(
             sock,
             &shreds,
-            &peers_and_stakes,
-            &peers,
+            &r_broadcast_peer_cache.peers_and_stakes,
+            &r_broadcast_peer_cache.peers,
             &self.last_datapoint_submit,
             &mut send_mmsg_total,
+            &mut transmit_stats,
         )?;
-        let transmit_elapsed = transmit_start.elapsed();
-        let new_transmit_shreds_stats = TransmitShredsStats {
-            transmit_elapsed: duration_as_us(&transmit_elapsed),
-            get_peers_elapsed: duration_as_us(&get_peers_elapsed),
-            send_mmsg_elapsed: send_mmsg_total,
-            num_shreds: shreds.len(),
-        };
+        drop(r_broadcast_peer_cache);
+        transmit_time.stop();

+        transmit_stats.transmit_elapsed = transmit_time.as_us();
+        transmit_stats.get_peers_elapsed = get_peers_time.as_us();
+        transmit_stats.num_shreds = shreds.len();

         // Process metrics
-        self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
+        self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
         Ok(())
     }
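The cache refresh above is gated so that at most one caller per second rebuilds the peer list while everyone else reads the cached copy. A stripped-down sketch of that gate, written with compare_exchange for clarity (the diff uses the older compare_and_swap) and with local names:

    use std::sync::atomic::{AtomicU64, Ordering};

    const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;

    // Returns true only on the thread that atomically advances the timestamp,
    // i.e. the one thread allowed to refresh the peer cache this interval.
    fn wins_refresh(last_peer_update: &AtomicU64, now_ms: u64) -> bool {
        let last = last_peer_update.load(Ordering::Relaxed);
        now_ms.saturating_sub(last) > BROADCAST_PEER_UPDATE_INTERVAL_MS
            && last_peer_update
                .compare_exchange(last, now_ms, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
    }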
@@ -16,7 +16,7 @@ use crate::{
     contact_info::ContactInfo,
     crds_gossip::CrdsGossip,
     crds_gossip_error::CrdsGossipError,
-    crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
+    crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
     crds_value::{
         self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, SnapshotHash,
         Version, Vote, MAX_WALLCLOCK,

@@ -214,11 +214,17 @@ struct GossipStats {
     repair_peers: Counter,
     new_push_requests: Counter,
     new_push_requests2: Counter,
+    new_push_requests_num: Counter,
+    filter_pull_response: Counter,
     process_pull_response: Counter,
     process_pull_response_count: Counter,
     process_pull_response_len: Counter,
     process_pull_response_timeout: Counter,
+    process_pull_response_fail_insert: Counter,
+    process_pull_response_fail_timeout: Counter,
+    process_pull_response_success: Counter,
     process_pull_requests: Counter,
+    generate_pull_responses: Counter,
     process_prune: Counter,
     process_push_message: Counter,
     prune_received_cache: Counter,

@@ -227,7 +233,14 @@ struct GossipStats {
     epoch_slots_push: Counter,
     push_message: Counter,
     new_pull_requests: Counter,
+    new_pull_requests_count: Counter,
     mark_pull_request: Counter,
+    skip_pull_response_shred_version: Counter,
+    skip_pull_shred_version: Counter,
+    skip_push_message_shred_version: Counter,
+    push_message_count: Counter,
+    push_message_value_count: Counter,
+    push_response_count: Counter,
 }

 pub struct ClusterInfo {

@@ -241,6 +254,13 @@ pub struct ClusterInfo {
     my_contact_info: RwLock<ContactInfo>,
     id: Pubkey,
     stats: GossipStats,
+    socket: UdpSocket,
 }
+
+impl Default for ClusterInfo {
+    fn default() -> Self {
+        Self::new_with_invalid_keypair(ContactInfo::default())
+    }
+}

 #[derive(Default, Clone)]

@@ -390,6 +410,7 @@ impl ClusterInfo {
             my_contact_info: RwLock::new(contact_info),
             id,
             stats: GossipStats::default(),
+            socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
         };
         {
             let mut gossip = me.gossip.write().unwrap();

@@ -415,6 +436,7 @@ impl ClusterInfo {
             my_contact_info: RwLock::new(my_contact_info),
             id: *new_id,
             stats: GossipStats::default(),
+            socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
         }
     }
@@ -728,6 +750,13 @@ impl ClusterInfo {
             .process_push_message(&self.id(), vec![entry], now);
     }

+    pub fn send_vote(&self, vote: &Transaction) -> Result<()> {
+        let tpu = self.my_contact_info().tpu;
+        let buf = serialize(vote)?;
+        self.socket.send_to(&buf, &tpu)?;
+        Ok(())
+    }
+
     /// Get votes in the crds
     /// * since - The timestamp of when the vote inserted must be greater than
     /// since. This allows the bank to query for new votes only.
@@ -1391,6 +1420,9 @@ impl ClusterInfo {
                 .collect()
         };
         self.append_entrypoint_to_pulls(&mut pulls);
+        self.stats
+            .new_pull_requests_count
+            .add_relaxed(pulls.len() as u64);
         pulls
             .into_iter()
             .map(|(peer, filter, gossip, self_info)| {

@@ -1405,7 +1437,7 @@ impl ClusterInfo {
         let (_, push_messages) = self
             .time_gossip_write_lock("new_push_requests", &self.stats.new_push_requests)
             .new_push_messages(timestamp());
-        push_messages
+        let messages: Vec<_> = push_messages
             .into_iter()
             .filter_map(|(peer, messages)| {
                 let peer_label = CrdsValueLabel::ContactInfo(peer);

@@ -1420,11 +1452,24 @@ impl ClusterInfo {
                     .into_iter()
                     .map(move |payload| (peer, Protocol::PushMessage(self_id, payload)))
             })
-            .collect()
+            .collect();
+        self.stats
+            .new_push_requests_num
+            .add_relaxed(messages.len() as u64);
+        messages
     }

-    fn gossip_request(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<(SocketAddr, Protocol)> {
-        let pulls: Vec<_> = self.new_pull_requests(stakes);
+    // Generate new push and pull requests
+    fn generate_new_gossip_requests(
+        &self,
+        stakes: &HashMap<Pubkey, u64>,
+        generate_pull_requests: bool,
+    ) -> Vec<(SocketAddr, Protocol)> {
+        let pulls: Vec<_> = if generate_pull_requests {
+            self.new_pull_requests(stakes)
+        } else {
+            vec![]
+        };
         let pushes: Vec<_> = self.new_push_requests();
         vec![pulls, pushes].into_iter().flatten().collect()
     }
@@ -1435,8 +1480,9 @@ impl ClusterInfo {
         recycler: &PacketsRecycler,
         stakes: &HashMap<Pubkey, u64>,
         sender: &PacketSender,
+        generate_pull_requests: bool,
     ) -> Result<()> {
-        let reqs = obj.gossip_request(&stakes);
+        let reqs = obj.generate_new_gossip_requests(&stakes, generate_pull_requests);
         if !reqs.is_empty() {
             let packets = to_packets_with_destination(recycler.clone(), &reqs);
             sender.send(packets)?;

@@ -1462,6 +1508,7 @@ impl ClusterInfo {

             let message = CrdsData::Version(Version::new(obj.id()));
             obj.push_message(CrdsValue::new_signed(message, &obj.keypair));
+            let mut generate_pull_requests = true;
             loop {
                 let start = timestamp();
                 thread_mem_usage::datapoint("solana-gossip");

@@ -1477,7 +1524,9 @@ impl ClusterInfo {
                     }
                     None => HashMap::new(),
                 };
-                let _ = Self::run_gossip(&obj, &recycler, &stakes, &sender);
+                let _ =
+                    Self::run_gossip(&obj, &recycler, &stakes, &sender, generate_pull_requests);
                 if exit.load(Ordering::Relaxed) {
                     return;
                 }

@@ -1542,6 +1591,7 @@ impl ClusterInfo {
                     let time_left = GOSSIP_SLEEP_MILLIS - elapsed;
                     sleep(Duration::from_millis(time_left));
                 }
+                generate_pull_requests = !generate_pull_requests;
             }
         })
         .unwrap()
@@ -1560,6 +1610,7 @@ impl ClusterInfo {
         let allocated = thread_mem_usage::Allocatedp::default();
         let mut gossip_pull_data: Vec<PullData> = vec![];
         let timeouts = me.gossip.read().unwrap().make_timeouts(&stakes, epoch_ms);
+        let mut pull_responses = HashMap::new();
         packets.packets.iter().for_each(|packet| {
             let from_addr = packet.meta.addr();
             limited_deserialize(&packet.data[..packet.meta.size])

@@ -1577,12 +1628,17 @@ impl ClusterInfo {
                         if contact_info.id == me.id() {
                             warn!("PullRequest ignored, I'm talking to myself");
                             inc_new_counter_debug!("cluster_info-window-request-loopback", 1);
-                        } else {
+                        } else if contact_info.shred_version == 0
+                            || contact_info.shred_version == me.my_shred_version()
+                            || me.my_shred_version() == 0
+                        {
                             gossip_pull_data.push(PullData {
                                 from_addr,
                                 caller,
                                 filter,
                             });
+                        } else {
+                            me.stats.skip_pull_shred_version.add_relaxed(1);
                         }
                     }
                     datapoint_debug!(

@@ -1602,7 +1658,8 @@ impl ClusterInfo {
                     }
                     ret
                 });
-                Self::handle_pull_response(me, &from, data, &timeouts);
+                let pull_entry = pull_responses.entry(from).or_insert_with(Vec::new);
+                pull_entry.extend(data);
                 datapoint_debug!(
                     "solana-gossip-listen-memory",
                     ("pull_response", (allocated.get() - start) as i64, i64),
@@ -1664,6 +1721,11 @@ impl ClusterInfo {
                 }
             })
         });
+
+        for (from, data) in pull_responses {
+            Self::handle_pull_response(me, &from, data, &timeouts);
+        }
+
         // process the collected pulls together
         let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data, stakes);
         if let Some(rsp) = rsp {

@@ -1671,6 +1733,26 @@ impl ClusterInfo {
         }
     }

+    fn update_data_budget(&self, stakes: &HashMap<Pubkey, u64>) {
+        let mut w_outbound_budget = self.outbound_budget.write().unwrap();
+
+        let now = timestamp();
+        const INTERVAL_MS: u64 = 100;
+        // allow 30kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
+        const BYTES_PER_INTERVAL: usize = 3000;
+        const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
+
+        if now - w_outbound_budget.last_timestamp_ms > INTERVAL_MS {
+            let len = std::cmp::max(stakes.len(), 2);
+            w_outbound_budget.bytes += len * BYTES_PER_INTERVAL;
+            w_outbound_budget.bytes = std::cmp::min(
+                w_outbound_budget.bytes,
+                MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
+            );
+            w_outbound_budget.last_timestamp_ms = now;
+        }
+    }
+
     // Pull requests take an incoming bloom filter of contained entries from a node
     // and tries to send back to them the values it detects are missing.
     fn handle_pull_requests(
|
||||
let mut caller_and_filters = vec![];
|
||||
let mut addrs = vec![];
|
||||
let mut time = Measure::start("handle_pull_requests");
|
||||
{
|
||||
let mut w_outbound_budget = me.outbound_budget.write().unwrap();
|
||||
|
||||
let now = timestamp();
|
||||
const INTERVAL_MS: u64 = 100;
|
||||
// allow 50kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
|
||||
const BYTES_PER_INTERVAL: usize = 5000;
|
||||
const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
|
||||
|
||||
if now - w_outbound_budget.last_timestamp_ms > INTERVAL_MS {
|
||||
let len = std::cmp::max(stakes.len(), 2);
|
||||
w_outbound_budget.bytes += len * BYTES_PER_INTERVAL;
|
||||
w_outbound_budget.bytes = std::cmp::min(
|
||||
w_outbound_budget.bytes,
|
||||
MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
|
||||
);
|
||||
w_outbound_budget.last_timestamp_ms = now;
|
||||
}
|
||||
}
|
||||
me.update_data_budget(stakes);
|
||||
for pull_data in requests {
|
||||
caller_and_filters.push((pull_data.caller, pull_data.filter));
|
||||
addrs.push(pull_data.from_addr);
|
||||
}
|
||||
let now = timestamp();
|
||||
let self_id = me.id();
|
||||
|
||||
let pull_responses = me
|
||||
.time_gossip_write_lock("process_pull_reqs", &me.stats.process_pull_requests)
|
||||
.time_gossip_read_lock("generate_pull_responses", &me.stats.generate_pull_responses)
|
||||
.generate_pull_responses(&caller_and_filters);
|
||||
|
||||
me.time_gossip_write_lock("process_pull_reqs", &me.stats.process_pull_requests)
|
||||
.process_pull_requests(caller_and_filters, now);
|
||||
|
||||
// Filter bad to addresses
|
||||
@@ -1806,37 +1874,115 @@ impl ClusterInfo {
         Some(packets)
     }

+    // Returns (failed, timeout, success)
     fn handle_pull_response(
         me: &Self,
         from: &Pubkey,
-        data: Vec<CrdsValue>,
+        mut crds_values: Vec<CrdsValue>,
         timeouts: &HashMap<Pubkey, u64>,
-    ) {
-        let len = data.len();
+    ) -> (usize, usize, usize) {
+        let len = crds_values.len();
         trace!("PullResponse me: {} from: {} len={}", me.id, from, len);
-        let (_fail, timeout_count) = me
-            .time_gossip_write_lock("process_pull", &me.stats.process_pull_response)
-            .process_pull_response(from, timeouts, data, timestamp());
+
+        if let Some(shred_version) = me.lookup_contact_info(from, |ci| ci.shred_version) {
+            Self::filter_by_shred_version(
+                from,
+                &mut crds_values,
+                shred_version,
+                me.my_shred_version(),
+            );
+        }
+        let filtered_len = crds_values.len();
+
+        let mut pull_stats = ProcessPullStats::default();
+        let (filtered_pulls, filtered_pulls_expired_timeout) = me
+            .time_gossip_read_lock("filter_pull_resp", &me.stats.filter_pull_response)
+            .filter_pull_responses(timeouts, crds_values, timestamp(), &mut pull_stats);
+
+        if !filtered_pulls.is_empty() || !filtered_pulls_expired_timeout.is_empty() {
+            me.time_gossip_write_lock("process_pull_resp", &me.stats.process_pull_response)
+                .process_pull_responses(
+                    from,
+                    filtered_pulls,
+                    filtered_pulls_expired_timeout,
+                    timestamp(),
+                    &mut pull_stats,
+                );
+        }
+
+        me.stats
+            .skip_pull_response_shred_version
+            .add_relaxed((len - filtered_len) as u64);
         me.stats.process_pull_response_count.add_relaxed(1);
-        me.stats.process_pull_response_len.add_relaxed(len as u64);
+        me.stats
+            .process_pull_response_len
+            .add_relaxed(filtered_len as u64);
         me.stats
             .process_pull_response_timeout
-            .add_relaxed(timeout_count as u64);
+            .add_relaxed(pull_stats.timeout_count as u64);
+        me.stats
+            .process_pull_response_fail_insert
+            .add_relaxed(pull_stats.failed_insert as u64);
+        me.stats
+            .process_pull_response_fail_timeout
+            .add_relaxed(pull_stats.failed_timeout as u64);
+        me.stats
+            .process_pull_response_success
+            .add_relaxed(pull_stats.success as u64);
+
+        (
+            pull_stats.failed_insert + pull_stats.failed_timeout,
+            pull_stats.timeout_count,
+            pull_stats.success,
+        )
     }

+    fn filter_by_shred_version(
+        from: &Pubkey,
+        crds_values: &mut Vec<CrdsValue>,
+        shred_version: u16,
+        my_shred_version: u16,
+    ) {
+        if my_shred_version != 0 && shred_version != 0 && shred_version != my_shred_version {
+            // Allow someone to update their own ContactInfo so they
+            // can change shred versions if needed.
+            crds_values.retain(|crds_value| match &crds_value.data {
+                CrdsData::ContactInfo(contact_info) => contact_info.id == *from,
+                _ => false,
+            });
+        }
+    }
+
     fn handle_push_message(
         me: &Self,
         recycler: &PacketsRecycler,
         from: &Pubkey,
-        data: Vec<CrdsValue>,
+        mut crds_values: Vec<CrdsValue>,
         stakes: &HashMap<Pubkey, u64>,
     ) -> Option<Packets> {
         let self_id = me.id();
         inc_new_counter_debug!("cluster_info-push_message", 1);
+        me.stats.push_message_count.add_relaxed(1);
+        let len = crds_values.len();
+
+        if let Some(shred_version) = me.lookup_contact_info(from, |ci| ci.shred_version) {
+            Self::filter_by_shred_version(
+                from,
+                &mut crds_values,
+                shred_version,
+                me.my_shred_version(),
+            );
+        }
+        let filtered_len = crds_values.len();
+        me.stats
+            .push_message_value_count
+            .add_relaxed(filtered_len as u64);
+        me.stats
+            .skip_push_message_shred_version
+            .add_relaxed((len - filtered_len) as u64);

         let updated: Vec<_> = me
             .time_gossip_write_lock("process_push", &me.stats.process_push_message)
-            .process_push_message(from, data, timestamp());
+            .process_push_message(from, crds_values, timestamp());

         let updated_labels: Vec<_> = updated.into_iter().map(|u| u.value.label()).collect();
         let prunes_map: HashMap<Pubkey, HashSet<Pubkey>> = me
@@ -1866,6 +2012,9 @@ impl ClusterInfo {
             return None;
         }
         let mut packets = to_packets_with_destination(recycler.clone(), &rsp);
+        me.stats
+            .push_response_count
+            .add_relaxed(packets.packets.len() as u64);
         if !packets.is_empty() {
             let pushes: Vec<_> = me.new_push_requests();
             inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());

@@ -1957,6 +2106,11 @@ impl ClusterInfo {
             ),
             ("all_tvu_peers", self.stats.all_tvu_peers.clear(), i64),
             ("tvu_peers", self.stats.tvu_peers.clear(), i64),
+            (
+                "new_push_requests_num",
+                self.stats.new_push_requests_num.clear(),
+                i64
+            ),
         );
         datapoint_info!(
             "cluster_info_stats2",

@@ -1978,11 +2132,41 @@ impl ClusterInfo {
                 self.stats.process_pull_response.clear(),
                 i64
             ),
+            (
+                "filter_pull_resp",
+                self.stats.filter_pull_response.clear(),
+                i64
+            ),
             (
                 "process_pull_resp_count",
                 self.stats.process_pull_response_count.clear(),
                 i64
             ),
+            (
+                "pull_response_fail_insert",
+                self.stats.process_pull_response_fail_insert.clear(),
+                i64
+            ),
+            (
+                "pull_response_fail_timeout",
+                self.stats.process_pull_response_fail_timeout.clear(),
+                i64
+            ),
+            (
+                "pull_response_success",
+                self.stats.process_pull_response_success.clear(),
+                i64
+            ),
+            (
+                "process_pull_resp_timeout",
+                self.stats.process_pull_response_timeout.clear(),
+                i64
+            ),
+            (
+                "push_response_count",
+                self.stats.push_response_count.clear(),
+                i64
+            ),
         );
         datapoint_info!(
             "cluster_info_stats3",

@@ -1996,6 +2180,11 @@ impl ClusterInfo {
                 self.stats.process_pull_requests.clear(),
                 i64
             ),
+            (
+                "generate_pull_responses",
+                self.stats.generate_pull_responses.clear(),
+                i64
+            ),
             ("process_prune", self.stats.process_prune.clear(), i64),
             (
                 "process_push_message",

@@ -2025,6 +2214,39 @@ impl ClusterInfo {
                 i64
             ),
         );
+        datapoint_info!(
+            "cluster_info_stats4",
+            (
+                "skip_push_message_shred_version",
+                self.stats.skip_push_message_shred_version.clear(),
+                i64
+            ),
+            (
+                "skip_pull_response_shred_version",
+                self.stats.skip_pull_response_shred_version.clear(),
+                i64
+            ),
+            (
+                "skip_pull_shred_version",
+                self.stats.skip_pull_shred_version.clear(),
+                i64
+            ),
+            (
+                "push_message_count",
+                self.stats.push_message_count.clear(),
+                i64
+            ),
+            (
+                "push_message_value_count",
+                self.stats.push_message_value_count.clear(),
+                i64
+            ),
+            (
+                "new_pull_requests_count",
+                self.stats.new_pull_requests_count.clear(),
+                i64
+            ),
+        );

         *last_print = Instant::now();
     }
@@ -2043,7 +2265,8 @@ impl ClusterInfo {
             .name("solana-listen".to_string())
             .spawn(move || {
                 let thread_pool = rayon::ThreadPoolBuilder::new()
-                    .num_threads(get_thread_count())
+                    .num_threads(get_thread_count() / 2)
+                    .thread_name(|ix| format!("gos_work_{}", ix))
                     .build()
                     .unwrap();
                 let mut last_print = Instant::now();
@@ -2406,6 +2629,92 @@ mod tests {
         assert!(ClusterInfo::is_spy_node(&node));
     }

+    #[test]
+    fn test_handle_pull() {
+        solana_logger::setup();
+        let node = Node::new_localhost();
+        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
+
+        let entrypoint_pubkey = Pubkey::new_rand();
+        let data = test_crds_values(entrypoint_pubkey);
+        let timeouts = HashMap::new();
+        assert_eq!(
+            (0, 0, 1),
+            ClusterInfo::handle_pull_response(
+                &cluster_info,
+                &entrypoint_pubkey,
+                data.clone(),
+                &timeouts
+            )
+        );
+
+        let entrypoint_pubkey2 = Pubkey::new_rand();
+        assert_eq!(
+            (1, 0, 0),
+            ClusterInfo::handle_pull_response(&cluster_info, &entrypoint_pubkey2, data, &timeouts)
+        );
+    }
+
+    fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> {
+        let entrypoint = ContactInfo::new_localhost(&pubkey, timestamp());
+        let entrypoint_crdsvalue =
+            CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
+        vec![entrypoint_crdsvalue]
+    }
+
+    #[test]
+    fn test_filter_shred_version() {
+        let from = Pubkey::new_rand();
+        let my_shred_version = 1;
+        let other_shred_version = 1;
+
+        // Allow same shred_version
+        let mut values = test_crds_values(from);
+        ClusterInfo::filter_by_shred_version(
+            &from,
+            &mut values,
+            other_shred_version,
+            my_shred_version,
+        );
+        assert_eq!(values.len(), 1);
+
+        // Allow shred_version=0.
+        let other_shred_version = 0;
+        ClusterInfo::filter_by_shred_version(
+            &from,
+            &mut values,
+            other_shred_version,
+            my_shred_version,
+        );
+        assert_eq!(values.len(), 1);
+
+        // Change to sender's ContactInfo version, allow that.
+        let other_shred_version = 2;
+        ClusterInfo::filter_by_shred_version(
+            &from,
+            &mut values,
+            other_shred_version,
+            my_shred_version,
+        );
+        assert_eq!(values.len(), 1);
+
+        let snapshot_hash_data = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash {
+            from: Pubkey::new_rand(),
+            hashes: vec![],
+            wallclock: 0,
+        }));
+        values.push(snapshot_hash_data);
+        // Change to sender's ContactInfo version, allow that.
+        let other_shred_version = 2;
+        ClusterInfo::filter_by_shred_version(
+            &from,
+            &mut values,
+            other_shred_version,
+            my_shred_version,
+        );
+        assert_eq!(values.len(), 1);
+    }
+
     #[test]
     fn test_cluster_spy_gossip() {
         //check that gossip doesn't try to push to invalid addresses

@@ -2418,7 +2727,7 @@ mod tests {
             .write()
             .unwrap()
             .refresh_push_active_set(&HashMap::new());
-        let reqs = cluster_info.gossip_request(&HashMap::new());
+        let reqs = cluster_info.generate_new_gossip_requests(&HashMap::new(), true);
         //assert none of the addrs are invalid.
         reqs.iter().all(|(addr, _)| {
             let res = ContactInfo::is_valid_address(addr);
@@ -36,6 +36,7 @@ use std::collections::HashMap;
 pub struct Crds {
     /// Stores the map of labels and values
     pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
+    pub num_inserts: usize,
 }

 #[derive(PartialEq, Debug)]

@@ -84,6 +85,7 @@ impl Default for Crds {
     fn default() -> Self {
         Crds {
             table: IndexMap::new(),
+            num_inserts: 0,
         }
     }
 }

@@ -93,6 +95,24 @@ impl Crds {
     pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
         VersionedCrdsValue::new(local_timestamp, value)
     }
+    pub fn would_insert(
+        &self,
+        value: CrdsValue,
+        local_timestamp: u64,
+    ) -> Option<VersionedCrdsValue> {
+        let new_value = self.new_versioned(local_timestamp, value);
+        let label = new_value.value.label();
+        let would_insert = self
+            .table
+            .get(&label)
+            .map(|current| new_value > *current)
+            .unwrap_or(true);
+        if would_insert {
+            Some(new_value)
+        } else {
+            None
+        }
+    }
     /// insert the new value, returns the old value if insert succeeds
     pub fn insert_versioned(
         &mut self,

@@ -107,6 +127,7 @@ impl Crds {
             .unwrap_or(true);
         if do_insert {
             let old = self.table.insert(label, new_value);
+            self.num_inserts += 1;
             Ok(old)
         } else {
             trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);
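The new would_insert above lets callers test whether a value would win against the stored copy without mutating the table, which is what makes the read-lock filtering elsewhere in this changeset possible. A hedged usage sketch, using the types defined in this file:

    // Sketch: classify incoming values against the table without mutating it,
    // e.g. under a read lock; survivors can be insert_versioned()'d later
    // under a write lock.
    fn classify(crds: &Crds, incoming: Vec<CrdsValue>, now: u64) -> (Vec<VersionedCrdsValue>, usize) {
        let mut keep = Vec::new();
        let mut stale = 0;
        for value in incoming {
            match crds.would_insert(value, now) {
                Some(versioned) => keep.push(versioned), // newer than the stored copy
                None => stale += 1,                      // loses to the current entry
            }
        }
        (keep, stale)
    }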
@@ -6,7 +6,7 @@
 use crate::{
     crds::{Crds, VersionedCrdsValue},
     crds_gossip_error::CrdsGossipError,
-    crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
+    crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
     crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
     crds_value::{CrdsValue, CrdsValueLabel},
 };

@@ -76,17 +76,10 @@ impl CrdsGossip {
         stakes: &HashMap<Pubkey, u64>,
     ) -> HashMap<Pubkey, HashSet<Pubkey>> {
         let id = &self.id;
-        let crds = &self.crds;
         let push = &mut self.push;
-        let versioned = labels
-            .into_iter()
-            .filter_map(|label| crds.lookup_versioned(&label));
-
         let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
-        for val in versioned {
-            let origin = val.value.pubkey();
-            let hash = val.value_hash;
-            let peers = push.prune_received_cache(id, &origin, hash, stakes);
+        for origin in labels.iter().map(|k| k.pubkey()) {
+            let peers = push.prune_received_cache(id, &origin, stakes);
             for from in peers {
                 prune_map.entry(from).or_default().insert(origin);
             }

@@ -113,7 +106,7 @@ impl CrdsGossip {
             return Err(CrdsGossipError::PruneMessageTimeout);
         }
         if self.id == *destination {
-            self.push.process_prune_msg(peer, origin);
+            self.push.process_prune_msg(&self.id, peer, origin);
             Ok(())
         } else {
             Err(CrdsGossipError::BadPruneDestination)

@@ -158,24 +151,47 @@ impl CrdsGossip {
         self.pull.mark_pull_request_creation_time(from, now)
     }
     /// process a pull request and create a response
-    pub fn process_pull_requests(
-        &mut self,
-        filters: Vec<(CrdsValue, CrdsFilter)>,
-        now: u64,
-    ) -> Vec<Vec<CrdsValue>> {
+    pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
         self.pull
-            .process_pull_requests(&mut self.crds, filters, now)
+            .process_pull_requests(&mut self.crds, filters, now);
     }
-    /// process a pull response
-    pub fn process_pull_response(
-        &mut self,
-        from: &Pubkey,
+
+    pub fn generate_pull_responses(
+        &self,
+        filters: &[(CrdsValue, CrdsFilter)],
+    ) -> Vec<Vec<CrdsValue>> {
+        self.pull.generate_pull_responses(&self.crds, filters)
+    }
+
+    pub fn filter_pull_responses(
+        &self,
         timeouts: &HashMap<Pubkey, u64>,
         response: Vec<CrdsValue>,
         now: u64,
-    ) -> (usize, usize) {
+        process_pull_stats: &mut ProcessPullStats,
+    ) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
         self.pull
-            .process_pull_response(&mut self.crds, from, timeouts, response, now)
+            .filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
     }
+
+    /// process a pull response
+    pub fn process_pull_responses(
+        &mut self,
+        from: &Pubkey,
+        responses: Vec<VersionedCrdsValue>,
+        responses_expired_timeout: Vec<VersionedCrdsValue>,
+        now: u64,
+        process_pull_stats: &mut ProcessPullStats,
+    ) {
+        let success = self.pull.process_pull_responses(
+            &mut self.crds,
+            from,
+            responses,
+            responses_expired_timeout,
+            now,
+            process_pull_stats,
+        );
+        self.push.push_pull_responses(success, now);
+    }

     pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {
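The net effect of the split above is that response generation now takes only `&self`, so the caller can build pull responses under a read lock and hold the write lock just for the cheap bookkeeping, exactly as the cluster_info hunks earlier do via time_gossip_read_lock/time_gossip_write_lock. A sketch of that calling pattern (lock-timing helpers elided):

    use std::sync::RwLock;

    fn serve_pull_requests(
        gossip: &RwLock<CrdsGossip>,
        caller_and_filters: Vec<(CrdsValue, CrdsFilter)>,
        now: u64,
    ) -> Vec<Vec<CrdsValue>> {
        // Expensive part first, under a read lock only.
        let responses = gossip
            .read()
            .unwrap()
            .generate_pull_responses(&caller_and_filters);
        // Then the cheap table bookkeeping under the write lock.
        gossip
            .write()
            .unwrap()
            .process_pull_requests(caller_and_filters, now);
        responses
    }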
@@ -2,7 +2,6 @@
 pub enum CrdsGossipError {
     NoPeers,
     PushMessageTimeout,
-    PushMessageAlreadyReceived,
     PushMessageOldVersion,
     BadPruneDestination,
     PruneMessageTimeout,
@@ -10,7 +10,7 @@
 //! of false positives.

 use crate::contact_info::ContactInfo;
-use crate::crds::Crds;
+use crate::crds::{Crds, VersionedCrdsValue};
 use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
 use crate::crds_gossip_error::CrdsGossipError;
 use crate::crds_value::{CrdsValue, CrdsValueLabel};

@@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use std::cmp;
-use std::collections::HashMap;
 use std::collections::VecDeque;
+use std::collections::{HashMap, HashSet};

 pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
 // The maximum age of a value received over pull responses

@@ -118,6 +118,14 @@ impl CrdsFilter {
     }
 }

+#[derive(Default)]
+pub struct ProcessPullStats {
+    pub success: usize,
+    pub failed_insert: usize,
+    pub failed_timeout: usize,
+    pub timeout_count: usize,
+}
+
 #[derive(Clone)]
 pub struct CrdsGossipPull {
     /// timestamp of last request

@@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
     purged_values: VecDeque<(Hash, u64)>,
     pub crds_timeout: u64,
     pub msg_timeout: u64,
+    pub num_pulls: usize,
 }

 impl Default for CrdsGossipPull {

@@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
             pull_request_time: HashMap::new(),
             crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
             msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
+            num_pulls: 0,
         }
     }
 }
@@ -204,14 +214,13 @@ impl CrdsGossipPull {
         self.purged_values.push_back((hash, timestamp))
     }

-    /// process a pull request and create a response
+    /// process a pull request
     pub fn process_pull_requests(
         &mut self,
         crds: &mut Crds,
         requests: Vec<(CrdsValue, CrdsFilter)>,
         now: u64,
-    ) -> Vec<Vec<CrdsValue>> {
-        let rv = self.filter_crds_values(crds, &requests);
+    ) {
         requests.into_iter().for_each(|(caller, _)| {
             let key = caller.label().pubkey();
             let old = crds.insert(caller, now);

@@ -221,20 +230,33 @@ impl CrdsGossipPull {
             }
             crds.update_record_timestamp(&key, now);
         });
-        rv
     }
-    /// process a pull response
-    pub fn process_pull_response(
-        &mut self,
-        crds: &mut Crds,
-        from: &Pubkey,
+
+    /// Create gossip responses to pull requests
+    pub fn generate_pull_responses(
+        &self,
+        crds: &Crds,
+        requests: &[(CrdsValue, CrdsFilter)],
+    ) -> Vec<Vec<CrdsValue>> {
+        self.filter_crds_values(crds, requests)
+    }
+
+    // Checks if responses should be inserted and
+    // returns those responses converted to VersionedCrdsValue
+    // Separated in two vecs as:
+    //   .0 => responses that update the owner timestamp
+    //   .1 => responses that do not update the owner timestamp
+    pub fn filter_pull_responses(
+        &self,
+        crds: &Crds,
         timeouts: &HashMap<Pubkey, u64>,
-        response: Vec<CrdsValue>,
+        responses: Vec<CrdsValue>,
         now: u64,
-    ) -> (usize, usize) {
-        let mut failed = 0;
-        let mut timeout_count = 0;
-        for r in response {
+        stats: &mut ProcessPullStats,
+    ) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
+        let mut versioned = vec![];
+        let mut versioned_expired_timestamp = vec![];
+        for r in responses {
             let owner = r.label().pubkey();
             // Check if the crds value is older than the msg_timeout
             if now

@@ -253,8 +275,8 @@ impl CrdsGossipPull {
                 if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
                     || now + timeout < r.wallclock()
                 {
-                    timeout_count += 1;
-                    failed += 1;
+                    stats.timeout_count += 1;
+                    stats.failed_timeout += 1;
                     continue;
                 }
             }

@@ -262,29 +284,69 @@ impl CrdsGossipPull {
                 // Before discarding this value, check if a ContactInfo for the owner
                 // exists in the table. If it doesn't, that implies that this value can be discarded
                 if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
-                    timeout_count += 1;
-                    failed += 1;
+                    stats.timeout_count += 1;
+                    stats.failed_timeout += 1;
                     continue;
                 } else {
                     // Silently insert this old value without bumping record timestamps
-                    failed += crds.insert(r, now).is_err() as usize;
+                    match crds.would_insert(r, now) {
+                        Some(resp) => versioned_expired_timestamp.push(resp),
+                        None => stats.failed_insert += 1,
+                    }
                     continue;
                 }
             }
-            let old = crds.insert(r, now);
-            failed += old.is_err() as usize;
+            match crds.would_insert(r, now) {
+                Some(resp) => versioned.push(resp),
+                None => stats.failed_insert += 1,
+            }
+        }
+        (versioned, versioned_expired_timestamp)
+    }
+
+    /// process a vec of pull responses
+    pub fn process_pull_responses(
+        &mut self,
+        crds: &mut Crds,
+        from: &Pubkey,
+        responses: Vec<VersionedCrdsValue>,
+        responses_expired_timeout: Vec<VersionedCrdsValue>,
+        now: u64,
+        stats: &mut ProcessPullStats,
+    ) -> Vec<(CrdsValueLabel, Hash, u64)> {
+        let mut success = vec![];
+        let mut owners = HashSet::new();
+        for r in responses_expired_timeout {
+            stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
+        }
+        for r in responses {
+            let owner = r.value.label().pubkey();
+            let label = r.value.label();
+            let wc = r.value.wallclock();
+            let hash = r.value_hash;
+            let old = crds.insert_versioned(r);
+            if old.is_err() {
+                stats.failed_insert += 1;
+            } else {
+                stats.success += 1;
+                self.num_pulls += 1;
+                success.push((label, hash, wc));
+            }
             old.ok().map(|opt| {
-                crds.update_record_timestamp(&owner, now);
+                owners.insert(owner);
                 opt.map(|val| {
                     self.purged_values
                         .push_back((val.value_hash, val.local_timestamp))
                 })
             });
         }
-        crds.update_record_timestamp(from, now);
-        (failed, timeout_count)
+        owners.insert(*from);
+        for owner in owners {
+            crds.update_record_timestamp(&owner, now);
+        }
+        success
     }
     // build a set of filters of the current crds table
     // num_filters - used to increase the likelyhood of a value in crds being added to some filter
@@ -374,6 +436,34 @@ impl CrdsGossipPull {
             .count();
         self.purged_values.drain(..cnt);
     }
+
+    /// For legacy tests
+    #[cfg(test)]
+    pub fn process_pull_response(
+        &mut self,
+        crds: &mut Crds,
+        from: &Pubkey,
+        timeouts: &HashMap<Pubkey, u64>,
+        response: Vec<CrdsValue>,
+        now: u64,
+    ) -> (usize, usize, usize) {
+        let mut stats = ProcessPullStats::default();
+        let (versioned, versioned_expired_timeout) =
+            self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
+        self.process_pull_responses(
+            crds,
+            from,
+            versioned,
+            versioned_expired_timeout,
+            now,
+            &mut stats,
+        );
+        (
+            stats.failed_timeout + stats.failed_insert,
+            stats.timeout_count,
+            stats.success,
+        )
+    }
 }
 #[cfg(test)]
 mod test {

@@ -573,8 +663,9 @@ mod test {
         let mut dest_crds = Crds::default();
         let mut dest = CrdsGossipPull::default();
         let (_, filters, caller) = req.unwrap();
-        let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
-        let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
+        let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
+        let rsp = dest.generate_pull_responses(&dest_crds, &filters);
+        dest.process_pull_requests(&mut dest_crds, filters, 1);
         assert!(rsp.iter().all(|rsp| rsp.is_empty()));
         assert!(dest_crds.lookup(&caller.label()).is_some());
         assert_eq!(

@@ -643,8 +734,9 @@ mod test {
             PACKET_DATA_SIZE,
         );
         let (_, filters, caller) = req.unwrap();
-        let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
-        let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
+        let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
+        let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
+        dest.process_pull_requests(&mut dest_crds, filters, 0);
         // if there is a false positive this is empty
         // prob should be around 0.1 per iteration
         if rsp.is_empty() {
@@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
 pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
 pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
 pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
+pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;

 #[derive(Clone)]
 pub struct CrdsGossipPush {

@@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
     active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
     /// push message queue
     push_messages: HashMap<CrdsValueLabel, Hash>,
-    /// cache that tracks which validators a message was received from
-    received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
+    /// Cache that tracks which validators a message was received from
+    /// bool indicates it has been pruned.
+    /// This cache represents a lagging view of which validators
+    /// currently have this node in their `active_set`
+    received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
     pub num_active: usize,
     pub push_fanout: usize,
     pub msg_timeout: u64,
     pub prune_timeout: u64,
+    pub num_total: usize,
+    pub num_old: usize,
+    pub num_pushes: usize,
 }

 impl Default for CrdsGossipPush {

@@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
             push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
             msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
             prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
+            num_total: 0,
+            num_old: 0,
+            num_pushes: 0,
         }
     }
 }
@@ -81,18 +91,21 @@ impl CrdsGossipPush {
         &mut self,
         self_pubkey: &Pubkey,
         origin: &Pubkey,
-        hash: Hash,
         stakes: &HashMap<Pubkey, u64>,
     ) -> Vec<Pubkey> {
         let origin_stake = stakes.get(origin).unwrap_or(&0);
         let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
-        let cache = self.received_cache.get(&hash);
+        let cache = self.received_cache.get(origin);
         if cache.is_none() {
             return Vec::new();
         }
+        let peers = cache.unwrap();

-        let peers = &cache.unwrap().1;
-        let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
+        let peer_stake_total: u64 = peers
+            .iter()
+            .filter(|v| !(v.1).0)
+            .map(|v| stakes.get(v.0).unwrap_or(&0))
+            .sum();
         let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
         if peer_stake_total < prune_stake_threshold {
             return Vec::new();

@@ -100,7 +113,8 @@ impl CrdsGossipPush {

         let staked_peers: Vec<(Pubkey, u64)> = peers
             .iter()
-            .filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
+            .filter(|v| !(v.1).0)
+            .filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
             .filter(|(_, s)| *s > 0)
             .collect();

@@ -117,16 +131,27 @@ impl CrdsGossipPush {
             let (next_peer, next_stake) = staked_peers[next];
             keep.insert(next_peer);
             peer_stake_sum += next_stake;
-            if peer_stake_sum >= prune_stake_threshold {
+            if peer_stake_sum >= prune_stake_threshold
+                && keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
+            {
                 break;
             }
         }

-        peers
-            .iter()
+        let pruned_peers: Vec<Pubkey> = peers
+            .keys()
             .filter(|p| !keep.contains(p))
             .cloned()
-            .collect()
+            .collect();
+        pruned_peers.iter().for_each(|p| {
+            self.received_cache
+                .get_mut(origin)
+                .unwrap()
+                .get_mut(p)
+                .unwrap()
+                .0 = true;
+        });
+        pruned_peers
     }
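A worked sketch of the keep/prune decision above: walk the stake-weighted shuffle of ingress peers, keeping peers until their combined stake clears the prune threshold, with the new floor of at least two kept peers; everything past that point is pruned. `threshold` stands in for prune_stake_threshold(self_stake, origin_stake), whose body is outside this diff:

    const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;

    fn keep_count(threshold: u64, shuffled_stakes: &[u64]) -> usize {
        let mut sum = 0;
        for (i, stake) in shuffled_stakes.iter().enumerate() {
            sum += *stake;
            // The new floor: even if one peer alone clears the stake bar,
            // keep at least two ingress peers before pruning the rest.
            if sum >= threshold && (i + 1) >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES {
                return i + 1;
            }
        }
        shuffled_stakes.len()
    }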
@@ -137,6 +162,7 @@ impl CrdsGossipPush {
         value: CrdsValue,
         now: u64,
     ) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
+        self.num_total += 1;
         if now
             > value
                 .wallclock()

@@ -149,21 +175,32 @@ impl CrdsGossipPush {
             return Err(CrdsGossipError::PushMessageTimeout);
         }
         let label = value.label();
+        let origin = label.pubkey();
         let new_value = crds.new_versioned(now, value);
         let value_hash = new_value.value_hash;
-        if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
-            received_set.insert(from.clone());
-            return Err(CrdsGossipError::PushMessageAlreadyReceived);
-        }
+        let received_set = self
+            .received_cache
+            .entry(origin)
+            .or_insert_with(HashMap::new);
+        received_set.entry(*from).or_insert((false, 0)).1 = now;

         let old = crds.insert_versioned(new_value);
         if old.is_err() {
+            self.num_old += 1;
             return Err(CrdsGossipError::PushMessageOldVersion);
         }
-        let mut received_set = HashSet::new();
-        received_set.insert(from.clone());
         self.push_messages.insert(label, value_hash);
-        self.received_cache.insert(value_hash, (now, received_set));
-        Ok(old.ok().and_then(|opt| opt))
+        Ok(old.unwrap())
     }

+    /// push pull responses
+    pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
+        for (label, value_hash, wc) in values {
+            if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
+                continue;
+            }
+            self.push_messages.insert(label, value_hash);
+        }
+    }
+
     /// New push message to broadcast to peers.
@@ -172,18 +209,10 @@ impl CrdsGossipPush {
|
||||
/// The list of push messages is created such that all the randomly selected peers have not
|
||||
/// pruned the source addresses.
|
||||
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
|
||||
let max = self.active_set.len();
|
||||
let mut nodes: Vec<_> = (0..max).collect();
|
||||
nodes.shuffle(&mut rand::thread_rng());
|
||||
let peers: Vec<Pubkey> = nodes
|
||||
.into_iter()
|
||||
.filter_map(|n| self.active_set.get_index(n))
|
||||
.take(self.push_fanout)
|
||||
.map(|n| *n.0)
|
||||
.collect();
|
||||
let mut total_bytes: usize = 0;
|
||||
let mut values = vec![];
|
||||
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
|
||||
trace!("new_push_messages {}", self.push_messages.len());
|
||||
for (label, hash) in &self.push_messages {
|
||||
let res = crds.lookup_versioned(label);
|
||||
if res.is_none() {
|
||||
@@ -203,21 +232,37 @@ impl CrdsGossipPush {
|
||||
}
|
||||
values.push(value.clone());
|
||||
}
|
||||
trace!(
|
||||
"new_push_messages {} {}",
|
||||
values.len(),
|
||||
self.active_set.len()
|
||||
);
|
||||
for v in values {
|
||||
for p in peers.iter() {
|
||||
let filter = self.active_set.get_mut(p);
|
||||
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
|
||||
push_messages.entry(*p).or_default().push(v.clone());
|
||||
//use a consistent index for the same origin so
|
||||
//the active set learns the MST for that origin
|
||||
let start = v.label().pubkey().as_ref()[0] as usize;
|
||||
let max = self.push_fanout.min(self.active_set.len());
|
||||
for i in start..(start + max) {
|
||||
let ix = i % self.active_set.len();
|
||||
if let Some((p, filter)) = self.active_set.get_index(ix) {
|
||||
if !filter.contains(&v.label().pubkey()) {
|
||||
trace!("new_push_messages insert {} {:?}", *p, v);
|
||||
push_messages.entry(*p).or_default().push(v.clone());
|
||||
self.num_pushes += 1;
|
||||
}
|
||||
}
|
||||
self.push_messages.remove(&v.label());
|
||||
}
|
||||
self.push_messages.remove(&v.label());
|
||||
}
|
||||
push_messages
|
||||
}
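
The comment in the hunk states the intent: a fixed starting index per origin means the same slice of the active set is walked every round, so receivers can prune a stable spanning tree for that origin. A sketch of just the index arithmetic:

```rust
/// Pick up to `fanout` peer indices for a value, starting at an index derived
/// from the origin's first key byte, so the choice is stable per origin.
fn fanout_peers(origin_first_byte: u8, fanout: usize, active_set_len: usize) -> Vec<usize> {
    if active_set_len == 0 {
        return vec![]; // nothing to push to
    }
    let start = origin_first_byte as usize;
    let max = fanout.min(active_set_len);
    (start..start + max).map(|i| i % active_set_len).collect()
}

fn main() {
    // With 5 active peers and fanout 3, an origin whose key starts with 0x04
    // always maps to peers 4, 0, 1.
    assert_eq!(fanout_peers(0x04, 3, 5), vec![4, 0, 1]);
}
```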

    /// add the `from` to the peer's filter of nodes
    pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
    pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
        for origin in origins {
            if origin == self_pubkey {
                continue;
            }
            if let Some(p) = self.active_set.get_mut(peer) {
                p.add(origin)
            }
@@ -339,15 +384,11 @@ impl CrdsGossipPush {

    /// purge received push message cache
    pub fn purge_old_received_cache(&mut self, min_time: u64) {
        let old_msgs: Vec<Hash> = self
            .received_cache
            .iter()
            .filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
            .cloned()
            .collect();
        for k in old_msgs {
            self.received_cache.remove(&k);
        }
        self.received_cache
            .iter_mut()
            .for_each(|v| v.1.retain(|_, v| v.1 > min_time));

        self.received_cache.retain(|_, v| !v.is_empty());
    }
}
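
The purge is now two retains: drop stale relayer entries, then drop origins whose relayer maps emptied. Sketched against the same stand-in types as above:

```rust
use std::collections::HashMap;

type Pubkey = [u8; 32];
type ReceivedCache = HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>;

/// Drop relayer entries older than `min_time`, then drop empty origins.
fn purge(cache: &mut ReceivedCache, min_time: u64) {
    cache
        .iter_mut()
        .for_each(|(_, relayers)| relayers.retain(|_, v| v.1 > min_time));
    cache.retain(|_, relayers| !relayers.is_empty());
}

fn main() {
    let mut cache = ReceivedCache::new();
    cache.entry([1; 32]).or_default().insert([2; 32], (false, 50));
    cache.entry([3; 32]).or_default().insert([4; 32], (false, 500));
    purge(&mut cache, 100);
    assert!(!cache.contains_key(&[1; 32])); // fully stale origin removed
    assert!(cache.contains_key(&[3; 32])); // fresh entry kept
}
```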

@@ -371,7 +412,6 @@ mod test {
        let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
            &origin, 0,
        )));
        let label = value.label();
        let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
        let mut low_staked_set = HashSet::new();
        low_staked_peers.for_each(|p| {
@@ -380,11 +420,7 @@ mod test {
            stakes.insert(p, 1);
        });

        let versioned = crds
            .lookup_versioned(&label)
            .expect("versioned value should exist");
        let hash = versioned.value_hash;
        let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
        let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
        assert!(
            pruned.is_empty(),
            "should not prune if min threshold has not been reached"
@@ -395,7 +431,7 @@ mod test {
        stakes.insert(high_staked_peer, high_stake);
        let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);

        let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
        let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
        assert!(
            pruned.len() < low_staked_set.len() + 1,
            "should not prune all peers"
@@ -409,7 +445,7 @@ mod test {
    }

    #[test]
    fn test_process_push() {
    fn test_process_push_one() {
        let mut crds = Crds::default();
        let mut push = CrdsGossipPush::default();
        let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -426,8 +462,8 @@ mod test {

        // push it again
        assert_eq!(
            push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
            Err(CrdsGossipError::PushMessageAlreadyReceived)
            push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
            Err(CrdsGossipError::PushMessageOldVersion)
        );
    }
    #[test]
@@ -690,6 +726,7 @@ mod test {
    #[test]
    fn test_process_prune() {
        let mut crds = Crds::default();
        let self_id = Pubkey::new_rand();
        let mut push = CrdsGossipPush::default();
        let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
            &Pubkey::new_rand(),
@@ -707,7 +744,11 @@ mod test {
            push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
            Ok(None)
        );
        push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
        push.process_prune_msg(
            &self_id,
            &peer.label().pubkey(),
            &[new_msg.label().pubkey()],
        );
        assert_eq!(push.new_push_messages(&crds, 0), expected);
    }
    #[test]
@@ -749,9 +790,9 @@ mod test {
        assert_eq!(crds.lookup(&label), Some(&value));

        // push it again
        assert_eq!(
        assert_matches!(
            push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
            Err(CrdsGossipError::PushMessageAlreadyReceived)
            Err(CrdsGossipError::PushMessageOldVersion)
        );

        // purge the old pushed

@@ -47,6 +47,7 @@ pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;

@@ -56,8 +56,8 @@ solana_sdk::pubkeys!(
    [
        "9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
        "GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
        "CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
        "HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
        "25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
        "14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
        "HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
        "C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
@@ -75,6 +75,7 @@ solana_sdk::pubkeys!(
        "FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
        "5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
        "3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
        "GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
    ]
);

@@ -84,6 +85,7 @@ solana_sdk::pubkeys!(
    [
        "8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
        "3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
        "FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
    ]
);

@@ -215,6 +215,7 @@ impl ReplayStage {
                &mut progress,
                transaction_status_sender.clone(),
                &verify_recyclers,
                &subscriptions,
            );
            Self::report_memory(&allocated, "replay_active_banks", start);

@@ -758,7 +759,6 @@ impl ReplayStage {
            progress.get_fork_stats(bank.slot()).unwrap().total_staked,
            lockouts_sender,
        );

        Self::push_vote(
            cluster_info,
            bank,
@@ -838,6 +838,7 @@ impl ReplayStage {
        let blockhash = bank.last_blockhash();
        vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
        vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash);
        let _ = cluster_info.send_vote(&vote_tx);
        cluster_info.push_vote(tower_index, vote_tx);
    }

@@ -896,6 +897,7 @@ impl ReplayStage {
        progress: &mut ProgressMap,
        transaction_status_sender: Option<TransactionStatusSender>,
        verify_recyclers: &VerifyRecyclers,
        subscriptions: &Arc<RpcSubscriptions>,
    ) -> bool {
        let mut did_complete_bank = false;
        let mut tx_count = 0;
@@ -963,6 +965,7 @@ impl ReplayStage {
                did_complete_bank = true;
                info!("bank frozen: {}", bank.slot());
                bank.freeze();
                subscriptions.notify_frozen(bank.slot());
            } else {
                trace!(
                    "bank {} not completed tick_height: {}, max_tick_height: {}",

@@ -3,6 +3,7 @@
use crate::{
    cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
    cluster_slots::ClusterSlots,
    contact_info::ContactInfo,
    repair_service::RepairStrategy,
    result::{Error, Result},
    window_service::{should_retransmit_and_persist, WindowService},
@@ -17,8 +18,9 @@ use solana_ledger::{
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Epoch, Slot};
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
@@ -43,6 +45,8 @@ struct RetransmitStats {
    total_packets: AtomicU64,
    total_batches: AtomicU64,
    total_time: AtomicU64,
    epoch_fetch: AtomicU64,
    epoch_cache_update: AtomicU64,
    repair_total: AtomicU64,
    discard_total: AtomicU64,
    retransmit_total: AtomicU64,
@@ -64,6 +68,8 @@ fn update_retransmit_stats(
    peers_len: usize,
    packets_by_slot: HashMap<Slot, usize>,
    packets_by_source: HashMap<String, usize>,
    epoch_fetch: u64,
    epoch_cach_update: u64,
) {
    stats.total_time.fetch_add(total_time, Ordering::Relaxed);
    stats
@@ -82,6 +88,10 @@ fn update_retransmit_stats(
        .compute_turbine_peers_total
        .fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
    stats.total_batches.fetch_add(1, Ordering::Relaxed);
    stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
    stats
        .epoch_cache_update
        .fetch_add(epoch_cach_update, Ordering::Relaxed);
    {
        let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
        for (slot, count) in packets_by_slot {
@@ -106,6 +116,16 @@ fn update_retransmit_stats(
            stats.total_time.swap(0, Ordering::Relaxed) as i64,
            i64
        ),
        (
            "epoch_fetch",
            stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
            i64
        ),
        (
            "epoch_cache_update",
            stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
            i64
        ),
        (
            "total_batches",
            stats.total_batches.swap(0, Ordering::Relaxed) as i64,
@@ -147,6 +167,14 @@ fn update_retransmit_stats(
    }
}

#[derive(Default)]
struct EpochStakesCache {
    epoch: Epoch,
    stakes: Option<Arc<HashMap<Pubkey, u64>>>,
    peers: Vec<ContactInfo>,
    stakes_and_index: Vec<(u64, usize)>,
}
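
The new `EpochStakesCache` is refreshed with a read-then-write double check, so only the first thread past the read lock recomputes stakes when the epoch rolls over. A minimal sketch of that locking pattern, with placeholder types in place of the bank and stake lookups:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

#[derive(Default)]
struct EpochCache {
    epoch: u64,
    stakes: Option<Arc<HashMap<u64, u64>>>,
}

/// Refresh the cache only when the epoch changed, re-checking under the write
/// lock so concurrent threads don't redo the work (double-checked locking).
fn ensure_epoch(cache: &Arc<RwLock<EpochCache>>, bank_epoch: u64) {
    let needs_update = cache.read().unwrap().epoch != bank_epoch;
    if needs_update {
        let mut w = cache.write().unwrap();
        if w.epoch != bank_epoch {
            // Placeholder for the real staked_nodes_at_epoch() lookup.
            w.stakes = Some(Arc::new(HashMap::new()));
            w.epoch = bank_epoch;
        }
    }
}

fn main() {
    let cache = Arc::new(RwLock::new(EpochCache::default()));
    ensure_epoch(&cache, 7);
    assert_eq!(cache.read().unwrap().epoch, 7);
}
```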

fn retransmit(
    bank_forks: &Arc<RwLock<BankForks>>,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
@@ -155,6 +183,8 @@ fn retransmit(
    sock: &UdpSocket,
    id: u32,
    stats: &Arc<RetransmitStats>,
    epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
    last_peer_update: &Arc<AtomicU64>,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let r_lock = r.lock().unwrap();
@@ -171,12 +201,42 @@ fn retransmit(
    }
    drop(r_lock);

    let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
    let r_bank = bank_forks.read().unwrap().working_bank();
    let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
    epoch_fetch.stop();

    let mut epoch_cache_update = Measure::start("retransmit_epoch_cach_update");
    let mut r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
    if r_epoch_stakes_cache.epoch != bank_epoch {
        drop(r_epoch_stakes_cache);
        let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
        if w_epoch_stakes_cache.epoch != bank_epoch {
            let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
            let stakes = stakes.map(Arc::new);
            w_epoch_stakes_cache.stakes = stakes;
            w_epoch_stakes_cache.epoch = bank_epoch;
        }
        drop(w_epoch_stakes_cache);
        r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
    }

    let now = timestamp();
    let last = last_peer_update.load(Ordering::Relaxed);
    if now - last > 1000 && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
    {
        drop(r_epoch_stakes_cache);
        let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
        let (peers, stakes_and_index) =
            cluster_info.sorted_retransmit_peers_and_stakes(w_epoch_stakes_cache.stakes.clone());
        w_epoch_stakes_cache.peers = peers;
        w_epoch_stakes_cache.stakes_and_index = stakes_and_index;
        drop(w_epoch_stakes_cache);
        r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
    }
    let mut peers_len = 0;
    let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
    let stakes = stakes.map(Arc::new);
    let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
    epoch_cache_update.stop();

    let my_id = cluster_info.id();
    let mut discard_total = 0;
    let mut repair_total = 0;
@@ -201,8 +261,8 @@ fn retransmit(
        let mut compute_turbine_peers = Measure::start("turbine_start");
        let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
            &my_id,
            &peers,
            &stakes_and_index,
            &r_epoch_stakes_cache.peers,
            &r_epoch_stakes_cache.stakes_and_index,
            packet.meta.seed,
        );
        peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@@ -215,8 +275,14 @@ fn retransmit(

        let (neighbors, children) =
            compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes);
        let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
        let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
        let neighbors: Vec<_> = neighbors
            .into_iter()
            .map(|index| &r_epoch_stakes_cache.peers[index])
            .collect();
        let children: Vec<_> = children
            .into_iter()
            .map(|index| &r_epoch_stakes_cache.peers[index])
            .collect();
        compute_turbine_peers.stop();
        compute_turbine_peers_total += compute_turbine_peers.as_us();

@@ -257,6 +323,8 @@ fn retransmit(
        peers_len,
        packets_by_slot,
        packets_by_source,
        epoch_fetch.as_us(),
        epoch_cache_update.as_us(),
    );

    Ok(())
@@ -286,6 +354,8 @@ pub fn retransmitter(
            let r = r.clone();
            let cluster_info = cluster_info.clone();
            let stats = stats.clone();
            let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
            let last_peer_update = Arc::new(AtomicU64::new(0));

            Builder::new()
                .name("solana-retransmitter".to_string())
@@ -300,6 +370,8 @@ pub fn retransmitter(
                        &sockets[s],
                        s as u32,
                        &stats,
                        &epoch_stakes_cache,
                        &last_peer_update,
                    ) {
                        match e {
                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
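
The peer-list refresh above is additionally rate-limited to roughly once a second, and the atomic swap ensures only one thread per window does the work. A sketch of that gate (the diff uses the older `compare_and_swap`; `compare_exchange` is its modern replacement and behaves the same here):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

/// Returns true for at most one caller per 1000 ms window: only the thread
/// that wins the compare-exchange performs the expensive refresh.
fn should_refresh(last_update: &Arc<AtomicU64>, now_ms: u64) -> bool {
    let last = last_update.load(Ordering::Relaxed);
    now_ms.saturating_sub(last) > 1000
        && last_update
            .compare_exchange(last, now_ms, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
}

fn main() {
    let last = Arc::new(AtomicU64::new(0));
    assert!(should_refresh(&last, 2_000)); // first caller wins the window
    assert!(!should_refresh(&last, 2_500)); // within the window, skipped
    assert!(should_refresh(&last, 3_500)); // next window
}
```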

core/src/rpc.rs
@@ -7,6 +7,7 @@ use crate::{
    non_circulating_supply::calculate_non_circulating_supply,
    rpc_error::RpcCustomError,
    rpc_health::*,
    send_transaction_service::SendTransactionService,
    storage_stage::StorageState,
    validator::ValidatorExit,
};
@@ -16,7 +17,7 @@ use jsonrpc_derive::rpc;
use solana_client::{
    rpc_config::*,
    rpc_request::{
        MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
        MAX_GET_CONFIRMED_BLOCKS_RANGE, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
        MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, NUM_LARGEST_ACCOUNTS,
    },
    rpc_response::*,
@@ -44,11 +45,9 @@ use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
    cmp::{max, min},
    collections::{HashMap, HashSet},
    net::{SocketAddr, UdpSocket},
    net::SocketAddr,
    str::FromStr,
    sync::{Arc, RwLock},
    thread::sleep,
    time::{Duration, Instant},
};

type RpcResponse<T> = Result<Response<T>>;
@@ -77,7 +76,11 @@ pub struct JsonRpcRequestProcessor {
    storage_state: StorageState,
    validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
    health: Arc<RpcHealth>,
    cluster_info: Arc<ClusterInfo>,
    genesis_hash: Hash,
    send_transaction_service: Arc<SendTransactionService>,
}
impl Metadata for JsonRpcRequestProcessor {}
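
The structural change in this file: `JsonRpcRequestProcessor` itself is the jsonrpc metadata now, so each request clones a cheap struct of `Arc`s instead of going through a shared `Arc<RwLock<...>>` wrapper. A rough sketch of the pattern, with a stand-in marker trait in place of `jsonrpc_core::Metadata`:

```rust
use std::sync::Arc;

// Stand-in for jsonrpc_core::Metadata: a cheaply clonable marker.
trait Metadata: Clone + Send + 'static {}

#[derive(Clone)]
struct RequestProcessor {
    // Shared state lives behind Arcs, so cloning the processor per request
    // is cheap and no outer RwLock is needed around the whole struct.
    config: Arc<String>,
}

impl Metadata for RequestProcessor {}

fn handle_request<M: Metadata>(meta: M) -> M {
    // Each request works against its own clone of the metadata.
    meta.clone()
}

fn main() {
    let processor = RequestProcessor { config: Arc::new("rpc-config".into()) };
    let per_request = handle_request(processor.clone());
    assert_eq!(*per_request.config, "rpc-config");
}
```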

impl JsonRpcRequestProcessor {
    fn bank(&self, commitment: Option<CommitmentConfig>) -> Result<Arc<Bank>> {
@@ -125,6 +128,7 @@ impl JsonRpcRequestProcessor {
        }
    }

    #[allow(clippy::too_many_arguments)]
    pub fn new(
        config: JsonRpcConfig,
        bank_forks: Arc<RwLock<BankForks>>,
@@ -133,8 +137,11 @@ impl JsonRpcRequestProcessor {
        storage_state: StorageState,
        validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
        health: Arc<RpcHealth>,
        cluster_info: Arc<ClusterInfo>,
        genesis_hash: Hash,
        send_transaction_service: Arc<SendTransactionService>,
    ) -> Self {
        JsonRpcRequestProcessor {
        Self {
            config,
            bank_forks,
            block_commitment_cache,
@@ -142,6 +149,9 @@ impl JsonRpcRequestProcessor {
            storage_state,
            validator_exit,
            health,
            cluster_info,
            genesis_hash,
            send_transaction_service,
        }
    }

@@ -536,6 +546,12 @@ impl JsonRpcRequestProcessor {
        if end_slot < start_slot {
            return Ok(vec![]);
        }
        if end_slot - start_slot > MAX_GET_CONFIRMED_BLOCKS_RANGE {
            return Err(Error::invalid_params(format!(
                "Slot range too large; max {}",
                MAX_GET_CONFIRMED_BLOCKS_RANGE
            )));
        }
        Ok(self
            .blockstore
            .rooted_slot_iterator(max(start_slot, self.blockstore.lowest_slot()))
@@ -734,11 +750,6 @@ impl JsonRpcRequestProcessor {
    }
}

fn get_tpu_addr(cluster_info: &ClusterInfo) -> Result<SocketAddr> {
    let contact_info = cluster_info.my_contact_info();
    Ok(contact_info.tpu)
}

fn verify_pubkey(input: String) -> Result<Pubkey> {
    input
        .parse()
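
getConfirmedBlocks now rejects oversized ranges up front. The guard in isolation (`MAX_RANGE` stands in for `MAX_GET_CONFIRMED_BLOCKS_RANGE`, which the test output later in this diff pins at 500000):

```rust
const MAX_RANGE: u64 = 500_000;

/// Validate a slot range before hitting the ledger, mirroring the new guard.
fn check_range(start_slot: u64, end_slot: u64) -> Result<(), String> {
    if end_slot < start_slot {
        return Ok(()); // an empty range is answered with an empty list
    }
    if end_slot - start_slot > MAX_RANGE {
        return Err(format!("Slot range too large; max {}", MAX_RANGE));
    }
    Ok(())
}

fn main() {
    assert!(check_range(9, 11).is_ok());
    assert!(check_range(0, MAX_RANGE).is_ok());
    assert!(check_range(0, MAX_RANGE + 1).is_err());
}
```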

@@ -765,14 +776,6 @@ fn run_transaction_simulation(
    (executed[0].0.clone().map(|_| ()), vec![])
}

#[derive(Clone)]
pub struct Meta {
    pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>,
    pub cluster_info: Arc<ClusterInfo>,
    pub genesis_hash: Hash,
}
impl Metadata for Meta {}

#[rpc(server)]
pub trait RpcSol {
    type Metadata;
@@ -1058,7 +1061,7 @@ pub trait RpcSol {

pub struct RpcSolImpl;
impl RpcSol for RpcSolImpl {
    type Metadata = Meta;
    type Metadata = JsonRpcRequestProcessor;

    fn confirm_transaction(
        &self,
@@ -1068,10 +1071,7 @@ impl RpcSol for RpcSolImpl {
    ) -> RpcResponse<bool> {
        debug!("confirm_transaction rpc request received: {:?}", id);
        let signature = verify_signature(&id);
        meta.request_processor
            .read()
            .unwrap()
            .confirm_transaction(signature, commitment)
        meta.confirm_transaction(signature, commitment)
    }

    fn get_account_info(
@@ -1082,10 +1082,7 @@ impl RpcSol for RpcSolImpl {
    ) -> RpcResponse<Option<RpcAccount>> {
        debug!("get_account_info rpc request received: {:?}", pubkey_str);
        let pubkey = verify_pubkey(pubkey_str);
        meta.request_processor
            .read()
            .unwrap()
            .get_account_info(pubkey, commitment)
        meta.get_account_info(pubkey, commitment)
    }

    fn get_minimum_balance_for_rent_exemption(
@@ -1098,10 +1095,7 @@ impl RpcSol for RpcSolImpl {
            "get_minimum_balance_for_rent_exemption rpc request received: {:?}",
            data_len
        );
        meta.request_processor
            .read()
            .unwrap()
            .get_minimum_balance_for_rent_exemption(data_len, commitment)
        meta.get_minimum_balance_for_rent_exemption(data_len, commitment)
    }

    fn get_program_accounts(
@@ -1115,10 +1109,7 @@ impl RpcSol for RpcSolImpl {
            program_id_str
        );
        let program_id = verify_pubkey(program_id_str)?;
        meta.request_processor
            .read()
            .unwrap()
            .get_program_accounts(&program_id, commitment)
        meta.get_program_accounts(&program_id, commitment)
    }

    fn get_inflation_governor(
@@ -1127,10 +1118,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<RpcInflationGovernor> {
        debug!("get_inflation_governor rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_inflation_governor(commitment)
        meta.get_inflation_governor(commitment)
    }

    fn get_inflation_rate(
@@ -1139,15 +1127,12 @@ impl RpcSol for RpcSolImpl {
        epoch: Option<Epoch>,
    ) -> Result<RpcInflationRate> {
        debug!("get_inflation_rate rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_inflation_rate(epoch)
        meta.get_inflation_rate(epoch)
    }

    fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result<EpochSchedule> {
        debug!("get_epoch_schedule rpc request received");
        meta.request_processor.read().unwrap().get_epoch_schedule()
        meta.get_epoch_schedule()
    }

    fn get_balance(
@@ -1158,10 +1143,7 @@ impl RpcSol for RpcSolImpl {
    ) -> RpcResponse<u64> {
        debug!("get_balance rpc request received: {:?}", pubkey_str);
        let pubkey = verify_pubkey(pubkey_str);
        meta.request_processor
            .read()
            .unwrap()
            .get_balance(pubkey, commitment)
        meta.get_balance(pubkey, commitment)
    }

    fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
@@ -1202,16 +1184,18 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> Result<RpcEpochInfo> {
        let bank = meta.request_processor.read().unwrap().bank(commitment)?;
        let bank = meta.bank(commitment)?;
        let epoch_schedule = bank.epoch_schedule();

        let slot = bank.slot();
        let block_height = bank.block_height();
        let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(slot);
        Ok(RpcEpochInfo {
            epoch,
            slot_index,
            slots_in_epoch: epoch_schedule.get_slots_in_epoch(epoch),
            absolute_slot: slot,
            block_height,
        })
    }

@@ -1220,11 +1204,7 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        block: Slot,
    ) -> Result<RpcBlockCommitment<BlockCommitmentArray>> {
        Ok(meta
            .request_processor
            .read()
            .unwrap()
            .get_block_commitment(block))
        Ok(meta.get_block_commitment(block))
    }

    fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String> {
@@ -1238,7 +1218,7 @@ impl RpcSol for RpcSolImpl {
        slot: Option<Slot>,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<RpcLeaderSchedule>> {
        let bank = meta.request_processor.read().unwrap().bank(commitment)?;
        let bank = meta.bank(commitment)?;
        let slot = slot.unwrap_or_else(|| bank.slot());
        let epoch = bank.epoch_schedule().get_epoch(slot);

@@ -1265,10 +1245,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<RpcBlockhashFeeCalculator> {
        debug!("get_recent_blockhash rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_recent_blockhash(commitment)
        meta.get_recent_blockhash(commitment)
    }

    fn get_fees(
@@ -1277,7 +1254,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<RpcFees> {
        debug!("get_fees rpc request received");
        meta.request_processor.read().unwrap().get_fees(commitment)
        meta.get_fees(commitment)
    }

    fn get_fee_calculator_for_blockhash(
@@ -1289,18 +1266,12 @@ impl RpcSol for RpcSolImpl {
        debug!("get_fee_calculator_for_blockhash rpc request received");
        let blockhash =
            Hash::from_str(&blockhash).map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
        meta.request_processor
            .read()
            .unwrap()
            .get_fee_calculator_for_blockhash(&blockhash, commitment)
        meta.get_fee_calculator_for_blockhash(&blockhash, commitment)
    }

    fn get_fee_rate_governor(&self, meta: Self::Metadata) -> RpcResponse<RpcFeeRateGovernor> {
        debug!("get_fee_rate_governor rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_fee_rate_governor()
        meta.get_fee_rate_governor()
    }

    fn get_signature_confirmation(
@@ -1314,11 +1285,7 @@ impl RpcSol for RpcSolImpl {
            signature_str
        );
        let signature = verify_signature(&signature_str)?;
        Ok(meta
            .request_processor
            .read()
            .unwrap()
            .get_signature_confirmation_status(signature, commitment))
        Ok(meta.get_signature_confirmation_status(signature, commitment))
    }

    fn get_signature_status(
@@ -1328,11 +1295,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<transaction::Result<()>>> {
        let signature = verify_signature(&signature_str)?;
        Ok(meta
            .request_processor
            .read()
            .unwrap()
            .get_signature_status(signature, commitment))
        Ok(meta.get_signature_status(signature, commitment))
    }

    fn get_signature_statuses(
@@ -1351,14 +1314,11 @@ impl RpcSol for RpcSolImpl {
        for signature_str in signature_strs {
            signatures.push(verify_signature(&signature_str)?);
        }
        meta.request_processor
            .read()
            .unwrap()
            .get_signature_statuses(signatures, config)
        meta.get_signature_statuses(signatures, config)
    }

    fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
        meta.request_processor.read().unwrap().get_slot(commitment)
        meta.get_slot(commitment)
    }

    fn get_transaction_count(
@@ -1367,10 +1327,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<u64> {
        debug!("get_transaction_count rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_transaction_count(commitment)
        meta.get_transaction_count(commitment)
    }

    fn get_total_supply(
@@ -1379,10 +1336,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<u64> {
        debug!("get_total_supply rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_total_supply(commitment)
        meta.get_total_supply(commitment)
    }

    fn get_largest_accounts(
@@ -1391,10 +1345,7 @@ impl RpcSol for RpcSolImpl {
        config: Option<RpcLargestAccountsConfig>,
    ) -> RpcResponse<Vec<RpcAccountBalance>> {
        debug!("get_largest_accounts rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_largest_accounts(config)
        meta.get_largest_accounts(config)
    }

    fn get_supply(
@@ -1403,10 +1354,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<RpcSupply> {
        debug!("get_supply rpc request received");
        meta.request_processor
            .read()
            .unwrap()
            .get_supply(commitment)
        meta.get_supply(commitment)
    }

    fn request_airdrop(
@@ -1423,69 +1371,35 @@ impl RpcSol for RpcSolImpl {
            &commitment
        );

        let faucet_addr = meta
            .request_processor
            .read()
            .unwrap()
            .config
            .faucet_addr
            .ok_or_else(Error::invalid_request)?;
        let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
        let pubkey = verify_pubkey(pubkey_str)?;

        let blockhash = meta
            .request_processor
            .read()
            .unwrap()
            .bank(commitment.clone())?
            .confirmed_last_blockhash()
            .0;
        let (blockhash, last_valid_slot) = {
            let bank = meta.bank(commitment)?;

            let blockhash = bank.confirmed_last_blockhash().0;
            (
                blockhash,
                bank.get_blockhash_last_valid_slot(&blockhash).unwrap_or(0),
            )
        };

        let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
            .map_err(|err| {
                info!("request_airdrop_transaction failed: {:?}", err);
                Error::internal_error()
            })?;
        let signature = transaction.signatures[0];

        let data = serialize(&transaction).map_err(|err| {
        let wire_transaction = serialize(&transaction).map_err(|err| {
            info!("request_airdrop: serialize error: {:?}", err);
            Error::internal_error()
        })?;

        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
        transactions_socket
            .send_to(&data, tpu_addr)
            .map_err(|err| {
                info!("request_airdrop: send_to error: {:?}", err);
                Error::internal_error()
            })?;
        meta.send_transaction_service
            .send(signature, wire_transaction, last_valid_slot);

        let signature = transaction.signatures[0];
        let now = Instant::now();
        let mut signature_status;
        let signature_timeout = match &commitment {
            Some(config) if config.commitment == CommitmentLevel::Recent => 5,
            _ => 30,
        };
        loop {
            signature_status = meta
                .request_processor
                .read()
                .unwrap()
                .get_signature_statuses(vec![signature], None)?
                .value[0]
                .clone()
                .filter(|result| result.satisfies_commitment(commitment.unwrap_or_default()))
                .map(|x| x.status);

            if signature_status == Some(Ok(())) {
                info!("airdrop signature ok");
                return Ok(signature.to_string());
            } else if now.elapsed().as_secs() > signature_timeout {
                info!("airdrop signature timeout");
                return Err(Error::internal_error());
            }
            sleep(Duration::from_millis(100));
        }
        Ok(signature.to_string())
    }

    fn send_transaction(
@@ -1496,7 +1410,11 @@ impl RpcSol for RpcSolImpl {
    ) -> Result<String> {
        let config = config.unwrap_or_default();
        let (wire_transaction, transaction) = deserialize_bs58_transaction(data)?;
        let signature = transaction.signatures[0].to_string();
        let signature = transaction.signatures[0];
        let bank = &*meta.bank(None)?;
        let last_valid_slot = bank
            .get_blockhash_last_valid_slot(&transaction.message.recent_blockhash)
            .unwrap_or(0);

        if !config.skip_preflight {
            if transaction.verify().is_err() {
@@ -1506,14 +1424,13 @@ impl RpcSol for RpcSolImpl {
                .into());
            }

            if meta.request_processor.read().unwrap().health.check() != RpcHealthStatus::Ok {
            if meta.health.check() != RpcHealthStatus::Ok {
                return Err(RpcCustomError::SendTransactionPreflightFailure {
                    message: "RPC node is unhealthy, unable to simulate transaction".into(),
                }
                .into());
            }

            let bank = &*meta.request_processor.read().unwrap().bank(None)?;
            if let (Err(err), _log_output) = run_transaction_simulation(&bank, transaction) {
                // Note: it's possible that the transaction simulation failed but the actual
                // transaction would succeed, such as when a transaction depends on an earlier
@@ -1527,21 +1444,9 @@ impl RpcSol for RpcSolImpl {
            }
        }

        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
        trace!("send_transaction: leader is {:?}", &tpu_addr);
        transactions_socket
            .send_to(&wire_transaction, tpu_addr)
            .map_err(|err| {
                info!("send_transaction: send_to error: {:?}", err);
                Error::internal_error()
            })?;
        trace!(
            "send_transaction: sent {} bytes, signature={}",
            wire_transaction.len(),
            signature
        );
        Ok(signature)
        meta.send_transaction_service
            .send(signature, wire_transaction, last_valid_slot);
        Ok(signature.to_string())
    }
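
Both request_airdrop and send_transaction now hand the wire bytes to `SendTransactionService` instead of opening a UDP socket inline, tagging each transaction with the slot after which its blockhash expires. A toy version of that hand-off — the names are illustrative, not the service's real API:

```rust
use std::sync::mpsc::{channel, Sender};

// The RPC handler just enqueues the wire bytes plus the slot after which the
// blockhash expires; a background thread owns the socket and the retry loop.
struct TransactionInfo {
    signature: String,
    wire_transaction: Vec<u8>,
    last_valid_slot: u64,
}

fn send(sender: &Sender<TransactionInfo>, signature: String, wire: Vec<u8>, last_valid_slot: u64) {
    let _ = sender.send(TransactionInfo { signature, wire_transaction: wire, last_valid_slot });
}

fn main() {
    let (tx, rx) = channel();
    send(&tx, "sig".into(), vec![0u8; 16], 1234);
    let info = rx.recv().unwrap();
    // The worker can drop the transaction once the bank passes last_valid_slot.
    assert_eq!(info.last_valid_slot, 1234);
    assert_eq!(info.signature, "sig");
    assert_eq!(info.wire_transaction.len(), 16);
}
```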

    fn simulate_transaction(
@@ -1559,7 +1464,7 @@ impl RpcSol for RpcSolImpl {
            Ok(())
        };

        let bank = &*meta.request_processor.read().unwrap().bank(None)?;
        let bank = &*meta.bank(None)?;
        let logs = if result.is_ok() {
            let sim_result = run_transaction_simulation(&bank, transaction);
            result = sim_result.0;
@@ -1582,14 +1487,11 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> Result<String> {
        meta.request_processor
            .read()
            .unwrap()
            .get_slot_leader(commitment)
        meta.get_slot_leader(commitment)
    }

    fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> {
        meta.request_processor.read().unwrap().minimum_ledger_slot()
        meta.minimum_ledger_slot()
    }

    fn get_vote_accounts(
@@ -1597,21 +1499,15 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> Result<RpcVoteAccountStatus> {
        meta.request_processor
            .read()
            .unwrap()
            .get_vote_accounts(commitment)
        meta.get_vote_accounts(commitment)
    }

    fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result<u64> {
        meta.request_processor
            .read()
            .unwrap()
            .get_storage_turn_rate()
        meta.get_storage_turn_rate()
    }

    fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn> {
        meta.request_processor.read().unwrap().get_storage_turn()
        meta.get_storage_turn()
    }

    fn get_slots_per_segment(
@@ -1619,10 +1515,7 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> Result<u64> {
        meta.request_processor
            .read()
            .unwrap()
            .get_slots_per_segment(commitment)
        meta.get_slots_per_segment(commitment)
    }

    fn get_storage_pubkeys_for_slot(
@@ -1630,25 +1523,16 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        slot: Slot,
    ) -> Result<Vec<String>> {
        meta.request_processor
            .read()
            .unwrap()
            .get_storage_pubkeys_for_slot(slot)
        meta.get_storage_pubkeys_for_slot(slot)
    }

    fn validator_exit(&self, meta: Self::Metadata) -> Result<bool> {
        meta.request_processor.read().unwrap().validator_exit()
        meta.validator_exit()
    }

    fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity> {
        Ok(RpcIdentity {
            identity: meta
                .request_processor
                .read()
                .unwrap()
                .config
                .identity_pubkey
                .to_string(),
            identity: meta.config.identity_pubkey.to_string(),
        })
    }

@@ -1659,10 +1543,7 @@ impl RpcSol for RpcSolImpl {
    }

    fn set_log_filter(&self, meta: Self::Metadata, filter: String) -> Result<()> {
        meta.request_processor
            .read()
            .unwrap()
            .set_log_filter(filter)
        meta.set_log_filter(filter)
    }

    fn get_confirmed_block(
@@ -1671,10 +1552,7 @@ impl RpcSol for RpcSolImpl {
        slot: Slot,
        encoding: Option<TransactionEncoding>,
    ) -> Result<Option<ConfirmedBlock>> {
        meta.request_processor
            .read()
            .unwrap()
            .get_confirmed_block(slot, encoding)
        meta.get_confirmed_block(slot, encoding)
    }

    fn get_confirmed_blocks(
@@ -1683,14 +1561,11 @@ impl RpcSol for RpcSolImpl {
        start_slot: Slot,
        end_slot: Option<Slot>,
    ) -> Result<Vec<Slot>> {
        meta.request_processor
            .read()
            .unwrap()
            .get_confirmed_blocks(start_slot, end_slot)
        meta.get_confirmed_blocks(start_slot, end_slot)
    }

    fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>> {
        meta.request_processor.read().unwrap().get_block_time(slot)
        meta.get_block_time(slot)
    }

    fn get_confirmed_transaction(
@@ -1700,10 +1575,7 @@ impl RpcSol for RpcSolImpl {
        encoding: Option<TransactionEncoding>,
    ) -> Result<Option<ConfirmedTransaction>> {
        let signature = verify_signature(&signature_str)?;
        meta.request_processor
            .read()
            .unwrap()
            .get_confirmed_transaction(signature, encoding)
        meta.get_confirmed_transaction(signature, encoding)
    }

    fn get_confirmed_signatures_for_address(
@@ -1726,10 +1598,7 @@ impl RpcSol for RpcSolImpl {
                MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE
            )));
        }
        meta.request_processor
            .read()
            .unwrap()
            .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
        meta.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
            .map(|signatures| {
                signatures
                    .iter()
@@ -1739,10 +1608,7 @@ impl RpcSol for RpcSolImpl {
    }

    fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot> {
        meta.request_processor
            .read()
            .unwrap()
            .get_first_available_block()
        meta.get_first_available_block()
    }
}

@@ -1811,8 +1677,8 @@ pub mod tests {
    const TEST_SLOTS_PER_EPOCH: u64 = 50;

    struct RpcHandler {
        io: MetaIoHandler<Meta>,
        meta: Meta,
        io: MetaIoHandler<JsonRpcRequestProcessor>,
        meta: JsonRpcRequestProcessor,
        bank: Arc<Bank>,
        bank_forks: Arc<RwLock<BankForks>>,
        blockhash: Hash,
@@ -1936,7 +1802,14 @@ pub mod tests {
        let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash);
        let _ = bank.process_transaction(&tx);

        let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
        let cluster_info = Arc::new(ClusterInfo::default());

        cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
            &leader_pubkey,
            &socketaddr!("127.0.0.1:1234"),
        ));

        let meta = JsonRpcRequestProcessor::new(
            JsonRpcConfig {
                enable_rpc_transaction_history: true,
                identity_pubkey: *pubkey,
@@ -1948,8 +1821,14 @@ pub mod tests {
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
        )));
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );

        cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
            &leader_pubkey,
@@ -1959,11 +1838,6 @@ pub mod tests {
        let mut io = MetaIoHandler::default();
        let rpc = RpcSolImpl;
        io.extend_with(rpc.to_delegate());
        let meta = Meta {
            request_processor,
            cluster_info,
            genesis_hash: Hash::default(),
        };
        RpcHandler {
            io,
            meta,
@@ -1990,14 +1864,22 @@ pub mod tests {
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
        ));
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
        let request_processor = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            bank_forks,
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
            Arc::new(ClusterInfo::default()),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );
        thread::spawn(move || {
            let blockhash = bank.confirmed_last_blockhash().0;
@@ -2937,22 +2819,24 @@ pub mod tests {
        let mut io = MetaIoHandler::default();
        let rpc = RpcSolImpl;
        io.extend_with(rpc.to_delegate());
        let meta = Meta {
            request_processor: {
                let request_processor = JsonRpcRequestProcessor::new(
                    JsonRpcConfig::default(),
                    new_bank_forks().0,
                    block_commitment_cache,
                    blockstore,
                    StorageState::default(),
                    validator_exit,
                    RpcHealth::stub(),
                );
                Arc::new(RwLock::new(request_processor))
            },
            cluster_info: Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default())),
            genesis_hash: Hash::default(),
        };
        let cluster_info = Arc::new(ClusterInfo::default());
        let bank_forks = new_bank_forks().0;
        let meta = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            new_bank_forks().0,
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );

        let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#;
        let res = io.handle_request_sync(req, meta);
@@ -2979,24 +2863,25 @@ pub mod tests {
        let mut io = MetaIoHandler::default();
        let rpc = RpcSolImpl;
        io.extend_with(rpc.to_delegate());
        let meta = Meta {
            request_processor: {
                let request_processor = JsonRpcRequestProcessor::new(
                    JsonRpcConfig::default(),
                    bank_forks,
                    block_commitment_cache,
                    blockstore,
                    StorageState::default(),
                    validator_exit,
                    health.clone(),
                );
                Arc::new(RwLock::new(request_processor))
            },
            cluster_info: Arc::new(ClusterInfo::new_with_invalid_keypair(
                ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
            ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
        ));
        let meta = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            health.clone(),
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
            genesis_hash: Hash::default(),
        };
        );

        let mut bad_transaction =
            system_transaction::transfer(&Keypair::new(), &Pubkey::default(), 42, Hash::default());
@@ -3059,17 +2944,6 @@ pub mod tests {
        );
    }

    #[test]
    fn test_rpc_get_tpu_addr() {
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
            ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
        ));
        assert_eq!(
            get_tpu_addr(&cluster_info),
            Ok(socketaddr!("127.0.0.1:1234"))
        );
    }

    #[test]
    fn test_rpc_verify_pubkey() {
        let pubkey = Pubkey::new_rand();
@@ -3095,7 +2969,7 @@ pub mod tests {
        );
    }

    fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Keypair) {
    pub(crate) fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Keypair) {
        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
@@ -3133,14 +3007,23 @@ pub mod tests {
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
        ));
        let cluster_info = Arc::new(ClusterInfo::default());
        let bank_forks = new_bank_forks().0;
        let request_processor = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            new_bank_forks().0,
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );
        assert_eq!(request_processor.validator_exit(), Ok(false));
        assert_eq!(exit.load(Ordering::Relaxed), false);
@@ -3157,14 +3040,23 @@ pub mod tests {
        ));
        let mut config = JsonRpcConfig::default();
        config.enable_validator_exit = true;
        let bank_forks = new_bank_forks().0;
        let cluster_info = Arc::new(ClusterInfo::default());
        let request_processor = JsonRpcRequestProcessor::new(
            config,
            new_bank_forks().0,
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );
        assert_eq!(request_processor.validator_exit(), Ok(true));
        assert_eq!(exit.load(Ordering::Relaxed), true);
@@ -3241,14 +3133,22 @@ pub mod tests {

        let mut config = JsonRpcConfig::default();
        config.enable_validator_exit = true;
        let cluster_info = Arc::new(ClusterInfo::default());
        let request_processor = JsonRpcRequestProcessor::new(
            config,
            bank_forks,
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            StorageState::default(),
            validator_exit,
            RpcHealth::stub(),
            cluster_info.clone(),
            Hash::default(),
            Arc::new(SendTransactionService::new(
                &cluster_info,
                &bank_forks,
                &exit,
            )),
        );
        assert_eq!(
            request_processor.get_block_commitment(0),
@@ -3475,11 +3375,37 @@ pub mod tests {

        let req =
            format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[9, 11]}}"#);
        let res = io.handle_request_sync(&req, meta);
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
        assert_eq!(confirmed_blocks, Vec::<Slot>::new());

        block_commitment_cache
            .write()
            .unwrap()
            .set_largest_confirmed_root(std::u64::MAX);
        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0,{}]}}"#,
            MAX_GET_CONFIRMED_BLOCKS_RANGE
        );
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
        assert_eq!(confirmed_blocks, vec![1, 3, 4, 8]);

        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0,{}]}}"#,
            MAX_GET_CONFIRMED_BLOCKS_RANGE + 1
        );
        let res = io.handle_request_sync(&req, meta);
        assert_eq!(
            res,
            Some(
                r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Slot range too large; max 500000"},"id":1}"#.to_string(),
            )
        );
    }

    #[test]

@@ -104,9 +104,7 @@ impl RpcHealth {
    #[cfg(test)]
    pub(crate) fn stub() -> Arc<Self> {
        Arc::new(Self::new(
            Arc::new(ClusterInfo::new_with_invalid_keypair(
                crate::contact_info::ContactInfo::default(),
            )),
            Arc::new(ClusterInfo::default()),
            None,
            42,
            Arc::new(AtomicBool::new(false)),

@@ -2,7 +2,8 @@

use crate::{
    cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, rpc_health::*,
    storage_stage::StorageState, validator::ValidatorExit,
    send_transaction_service::SendTransactionService, storage_stage::StorageState,
    validator::ValidatorExit,
};
use jsonrpc_core::MetaIoHandler;
use jsonrpc_http_server::{
@@ -20,7 +21,7 @@ use std::{
    collections::HashSet,
    net::SocketAddr,
    path::{Path, PathBuf},
    sync::atomic::AtomicBool,
    sync::atomic::{AtomicBool, Ordering},
    sync::{mpsc::channel, Arc, RwLock},
    thread::{self, Builder, JoinHandle},
};
@@ -30,7 +31,7 @@ pub struct JsonRpcService {
    thread_hdl: JoinHandle<()>,

    #[cfg(test)]
    pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
    pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...

    close_handle: Option<CloseHandle>,
}
@@ -248,7 +249,14 @@ impl JsonRpcService {
            override_health_check,
        ));

        let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
        let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
        let send_transaction_service = Arc::new(SendTransactionService::new(
            &cluster_info,
            &bank_forks,
            &exit_send_transaction_service,
        ));

        let request_processor = JsonRpcRequestProcessor::new(
            config,
            bank_forks.clone(),
            block_commitment_cache,
@@ -256,7 +264,10 @@ impl JsonRpcService {
            storage_state,
            validator_exit.clone(),
            health.clone(),
        )));
            cluster_info,
            genesis_hash,
            send_transaction_service,
        );

        #[cfg(test)]
        let test_request_processor = request_processor.clone();
@@ -279,11 +290,7 @@ impl JsonRpcService {
        );
        let server = ServerBuilder::with_meta_extractor(
            io,
            move |_req: &hyper::Request<hyper::Body>| Meta {
                request_processor: request_processor.clone(),
                cluster_info: cluster_info.clone(),
                genesis_hash,
            },
            move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
        )
        .threads(num_cpus::get())
        .cors(DomainsValidation::AllowOnly(vec![
@@ -306,6 +313,7 @@ impl JsonRpcService {
            let server = server.unwrap();
            close_handle_sender.send(server.close_handle()).unwrap();
            server.wait();
            exit_send_transaction_service.store(true, Ordering::Relaxed);
        })
        .unwrap();
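
Shutdown is tied together with a shared flag: the RPC thread flips it after `server.wait()` returns, and the send-transaction worker exits on its next poll. In miniature:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let worker_exit = exit.clone();
    let worker = thread::spawn(move || {
        while !worker_exit.load(Ordering::Relaxed) {
            // ... retry pending transactions here ...
            thread::sleep(Duration::from_millis(10));
        }
    });
    // ... server.wait() would block here until the RPC service shuts down ...
    exit.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}
```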

@@ -339,7 +347,6 @@ impl JsonRpcService {
mod tests {
    use super::*;
    use crate::{
        contact_info::ContactInfo,
        crds_value::{CrdsData, CrdsValue, SnapshotHash},
        rpc::tests::create_validator_exit,
    };
@@ -349,10 +356,7 @@ mod tests {
    };
    use solana_runtime::bank::Bank;
    use solana_sdk::signature::Signer;
    use std::{
        net::{IpAddr, Ipv4Addr, SocketAddr},
        sync::atomic::Ordering,
    };
    use std::net::{IpAddr, Ipv4Addr};

    #[test]
    fn test_rpc_new() {
@@ -364,7 +368,7 @@ mod tests {
        let exit = Arc::new(AtomicBool::new(false));
        let validator_exit = create_validator_exit(&exit);
        let bank = Bank::new(&genesis_config);
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
        let cluster_info = Arc::new(ClusterInfo::default());
        let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
        let rpc_addr = SocketAddr::new(
            ip_addr,
@@ -398,8 +402,6 @@ mod tests {
            10_000,
            rpc_service
                .request_processor
                .read()
                .unwrap()
                .get_balance(Ok(mint_keypair.pubkey()), None)
                .unwrap()
                .value
@@ -483,7 +485,7 @@ mod tests {

    #[test]
    fn test_health_check_with_trusted_validators() {
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
        let cluster_info = Arc::new(ClusterInfo::default());
        let health_check_slot_distance = 123;
        let override_health_check = Arc::new(AtomicBool::new(false));
        let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];

@@ -56,6 +56,7 @@ enum NotificationEntry {
    Slot(SlotInfo),
    Vote(Vote),
    Root(Slot),
    Frozen(Slot),
    Bank(CacheSlotInfo),
    Gossip(Slot),
}
@@ -64,6 +65,7 @@ impl std::fmt::Debug for NotificationEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            NotificationEntry::Root(root) => write!(f, "Root({})", root),
            NotificationEntry::Frozen(slot) => write!(f, "Frozen({})", slot),
            NotificationEntry::Vote(vote) => write!(f, "Vote({:?})", vote),
            NotificationEntry::Slot(slot_info) => write!(f, "Slot({:?})", slot_info),
            NotificationEntry::Bank(cache_slot_info) => write!(
@@ -219,6 +221,8 @@ fn filter_account_result(
    last_notified_slot: Slot,
) -> (Box<dyn Iterator<Item = RpcAccount>>, Slot) {
    if let Some((account, fork)) = result {
        // If fork < last_notified_slot this means that we last notified for a fork
        // and should notify that the account state has been reverted.
        if fork != last_notified_slot {
            return (Box::new(iter::once(RpcAccount::encode(account))), fork);
        }
@@ -639,6 +643,10 @@ impl RpcSubscriptions {
        self.enqueue_notification(NotificationEntry::Vote(vote.clone()));
    }

    pub fn notify_frozen(&self, frozen_slot: Slot) {
        self.enqueue_notification(NotificationEntry::Frozen(frozen_slot));
    }

    pub fn add_root_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<Slot>) {
        let sink = subscriber.assign_id(sub_id.clone()).unwrap();
        let mut subscriptions = self.subscriptions.root_subscriptions.write().unwrap();
@@ -682,6 +690,7 @@ impl RpcSubscriptions {
        bank_forks: Arc<RwLock<BankForks>>,
        last_checked_slots: Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
    ) {
        let mut pending_gossip_notifications = HashSet::new();
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
@@ -712,6 +721,12 @@ impl RpcSubscriptions {
                        for (_, sink) in subscriptions.iter() {
                            notifier.notify(root, sink);
                        }

                        // Prune old pending notifications
                        pending_gossip_notifications = pending_gossip_notifications
                            .into_iter()
                            .filter(|&s| s > root)
                            .collect();
                    }
                    NotificationEntry::Bank(cache_slot_info) => {
                        RpcSubscriptions::notify_accounts_programs_signatures(
@@ -723,23 +738,36 @@ impl RpcSubscriptions {
                            &notifier,
                        )
                    }
                    NotificationEntry::Frozen(slot) => {
                        if pending_gossip_notifications.remove(&slot) {
                            Self::process_gossip_notification(
                                slot,
                                &notifier,
                                &subscriptions,
                                &bank_forks,
                                &last_checked_slots,
                            );
                        }
                    }
                    NotificationEntry::Gossip(slot) => {
                        let _ = last_checked_slots
                            .write()
                        let bank_frozen = bank_forks
                            .read()
                            .unwrap()
                            .insert(CommitmentLevel::SingleGossip, slot);
                        let cache_slot_info = CacheSlotInfo {
                            highest_confirmed_slot: slot,
                            ..CacheSlotInfo::default()
                        };
                        RpcSubscriptions::notify_accounts_programs_signatures(
                            &subscriptions.gossip_account_subscriptions,
                            &subscriptions.gossip_program_subscriptions,
                            &subscriptions.gossip_signature_subscriptions,
                            &bank_forks,
                            &cache_slot_info,
                            &notifier,
                        )
                            .get(slot)
                            .filter(|b| b.is_frozen())
                            .is_some();

                        if !bank_frozen {
                            pending_gossip_notifications.insert(slot);
                        } else {
                            Self::process_gossip_notification(
                                slot,
                                &notifier,
                                &subscriptions,
                                &bank_forks,
                                &last_checked_slots,
                            );
                        }
                    }
                },
                Err(RecvTimeoutError::Timeout) => {
@@ -753,6 +781,42 @@ impl RpcSubscriptions {
        }
    }

    fn process_gossip_notification(
        slot: Slot,
        notifier: &RpcNotifier,
        subscriptions: &Subscriptions,
        bank_forks: &Arc<RwLock<BankForks>>,
        last_checked_slots: &Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
    ) {
        let mut last_checked_slots_lock = last_checked_slots.write().unwrap();
        let last_checked_slot = last_checked_slots_lock
            .get(&CommitmentLevel::SingleGossip)
            .cloned()
            .unwrap_or_default();

        if slot > last_checked_slot {
            last_checked_slots_lock.insert(CommitmentLevel::SingleGossip, slot);
        } else {
            // Avoid sending stale or duplicate notifications
            return;
        }

        drop(last_checked_slots_lock);

        let cache_slot_info = CacheSlotInfo {
            highest_confirmed_slot: slot,
            ..CacheSlotInfo::default()
        };
        RpcSubscriptions::notify_accounts_programs_signatures(
            &subscriptions.gossip_account_subscriptions,
            &subscriptions.gossip_program_subscriptions,
            &subscriptions.gossip_signature_subscriptions,
            &bank_forks,
            &cache_slot_info,
            &notifier,
        );
    }
|
||||
|
||||
fn notify_accounts_programs_signatures(
|
||||
account_subscriptions: &Arc<RpcAccountSubscriptions>,
|
||||
program_subscriptions: &Arc<RpcProgramSubscriptions>,
|
||||
@@ -1373,6 +1437,8 @@ pub(crate) mod tests {
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let alice = Keypair::new();
|
||||
|
||||
let (subscriber0, _id_receiver, transport_receiver0) =
|
||||
@@ -1398,17 +1464,10 @@ pub(crate) mod tests {
|
||||
sub_id0.clone(),
|
||||
subscriber0,
|
||||
);
|
||||
let sub_id1 = SubscriptionId::Number(1 as u64);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::recent()),
|
||||
sub_id1.clone(),
|
||||
subscriber1,
|
||||
);
|
||||
|
||||
assert!(subscriptions
|
||||
.subscriptions
|
||||
.account_subscriptions
|
||||
.gossip_account_subscriptions
|
||||
.read()
|
||||
.unwrap()
|
||||
.contains_key(&alice.pubkey()));
|
||||
@@ -1421,37 +1480,27 @@ pub(crate) mod tests {
|
||||
16,
|
||||
&solana_budget_program::id(),
|
||||
);
|
||||
|
||||
// Add the transaction to the 1st bank and then freeze the bank
|
||||
let bank1 = bank_forks.write().unwrap().get(1).cloned().unwrap();
|
||||
bank1.process_transaction(&tx).unwrap();
|
||||
bank1.freeze();
|
||||
|
||||
// Add the same transaction to the unfrozen 2nd bank
|
||||
bank_forks
|
||||
.write()
|
||||
.unwrap()
|
||||
.get(1)
|
||||
.get(2)
|
||||
.unwrap()
|
||||
.process_transaction(&tx)
|
||||
.unwrap();
|
||||
let mut cache_slot_info = CacheSlotInfo::default();
|
||||
cache_slot_info.current_slot = 1;
|
||||
subscriptions.notify_subscribers(cache_slot_info);
|
||||
let (response, _) = robust_poll_or_panic(transport_receiver1);
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 1 },
|
||||
"value": {
|
||||
"data": "1111111111111111",
|
||||
"executable": false,
|
||||
"lamports": 1,
|
||||
"owner": "Budget1111111111111111111111111111111111111",
|
||||
"rentEpoch": 1,
|
||||
},
|
||||
},
|
||||
"subscription": 1,
|
||||
}
|
||||
});
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||

        // First, notify the unfrozen bank to queue a pending notification
        subscriptions.notify_gossip_subscribers(2);

        // Now, notify the frozen bank and ensure its notification is processed
        subscriptions.notify_gossip_subscribers(1);

        let (response, _) = robust_poll_or_panic(transport_receiver0);
        let expected = json!({
            "jsonrpc": "2.0",
@@ -1471,18 +1520,41 @@ pub(crate) mod tests {
            }
        });
        assert_eq!(serde_json::to_string(&expected).unwrap(), response);

        subscriptions.remove_account_subscription(&sub_id0);
        assert!(subscriptions
            .subscriptions
            .account_subscriptions
            .read()
            .unwrap()
            .contains_key(&alice.pubkey()));

        let sub_id1 = SubscriptionId::Number(1 as u64);
        subscriptions.add_account_subscription(
            alice.pubkey(),
            Some(CommitmentConfig::single_gossip()),
            sub_id1.clone(),
            subscriber1,
        );

        subscriptions.notify_frozen(2);
        let (response, _) = robust_poll_or_panic(transport_receiver1);
        let expected = json!({
            "jsonrpc": "2.0",
            "method": "accountNotification",
            "params": {
                "result": {
                    "context": { "slot": 2 },
                    "value": {
                        "data": "1111111111111111",
                        "executable": false,
                        "lamports": 1,
                        "owner": "Budget1111111111111111111111111111111111111",
                        "rentEpoch": 1,
                    },
                },
                "subscription": 1,
            }
        });
        assert_eq!(serde_json::to_string(&expected).unwrap(), response);
        subscriptions.remove_account_subscription(&sub_id1);

        assert!(!subscriptions
            .subscriptions
            .account_subscriptions
            .gossip_account_subscriptions
            .read()
            .unwrap()
            .contains_key(&alice.pubkey()));
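The control flow being tested above is easier to see in isolation: a single-gossip notification for a slot is parked until that slot's bank freezes, replayed on the matching `Frozen` event, and pruned once a newer root passes it. A minimal stand-alone model of that bookkeeping (hypothetical type, not the actual `RpcSubscriptions` API):

```rust
use std::collections::HashSet;

/// Hypothetical model of the pending-gossip-notification logic above.
struct PendingGossip {
    pending: HashSet<u64>,
}

impl PendingGossip {
    /// Gossip saw `slot`: deliver now if its bank is frozen, else park it.
    fn on_gossip(&mut self, slot: u64, bank_frozen: bool) -> bool {
        if bank_frozen {
            true
        } else {
            self.pending.insert(slot);
            false
        }
    }

    /// The bank for `slot` froze: deliver if a notification was parked.
    fn on_frozen(&mut self, slot: u64) -> bool {
        self.pending.remove(&slot)
    }

    /// A new root was set: drop parked slots at or below it.
    fn on_root(&mut self, root: u64) {
        self.pending.retain(|&s| s > root);
    }
}
```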
377
core/src/send_transaction_service.rs
Normal file
@@ -0,0 +1,377 @@
use crate::cluster_info::ClusterInfo;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::{datapoint_info, datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
    collections::HashMap,
    net::{SocketAddr, UdpSocket},
    sync::{
        atomic::{AtomicBool, Ordering},
        mpsc::{channel, Receiver, Sender},
        Arc, Mutex, RwLock,
    },
    thread::{self, Builder, JoinHandle},
    time::{Duration, Instant},
};

/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day

pub struct SendTransactionService {
    thread: JoinHandle<()>,
    sender: Mutex<Sender<TransactionInfo>>,
    send_socket: UdpSocket,
    tpu_address: SocketAddr,
}

struct TransactionInfo {
    signature: Signature,
    wire_transaction: Vec<u8>,
    last_valid_slot: Slot,
}

#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
    rooted: u64,
    expired: u64,
    retried: u64,
    failed: u64,
    retained: u64,
}

impl SendTransactionService {
    pub fn new(
        cluster_info: &Arc<ClusterInfo>,
        bank_forks: &Arc<RwLock<BankForks>>,
        exit: &Arc<AtomicBool>,
    ) -> Self {
        let (sender, receiver) = channel::<TransactionInfo>();
        let tpu_address = cluster_info.my_contact_info().tpu;

        let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address, exit.clone());
        Self {
            thread,
            sender: Mutex::new(sender),
            send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
            tpu_address,
        }
    }

    fn retry_thread(
        receiver: Receiver<TransactionInfo>,
        bank_forks: Arc<RwLock<BankForks>>,
        tpu_address: SocketAddr,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        let mut last_status_check = Instant::now();
        let mut transactions = HashMap::new();
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        Builder::new()
            .name("send-tx-svc".to_string())
            .spawn(move || loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }

                if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
                    if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
                        transactions.insert(transaction_info.signature, transaction_info);
                    } else {
                        datapoint_warn!("send_transaction_service-queue-overflow");
                    }
                }

                // Every five seconds, re-check the status of all queued transactions
                if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
                    if !transactions.is_empty() {
                        datapoint_info!(
                            "send_transaction_service-queue-size",
                            ("len", transactions.len(), i64)
                        );
                        let bank_forks = bank_forks.read().unwrap();
                        let root_bank = bank_forks.root_bank();
                        let working_bank = bank_forks.working_bank();

                        let _result = Self::process_transactions(
                            &working_bank,
                            &root_bank,
                            &send_socket,
                            &tpu_address,
                            &mut transactions,
                        );
                    }
                    last_status_check = Instant::now();
                }
            })
            .unwrap()
    }

    fn process_transactions(
        working_bank: &Arc<Bank>,
        root_bank: &Arc<Bank>,
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        transactions: &mut HashMap<Signature, TransactionInfo>,
    ) -> ProcessTransactionsResult {
        let mut result = ProcessTransactionsResult::default();

        transactions.retain(|signature, transaction_info| {
            if root_bank.has_signature(signature) {
                info!("Transaction is rooted: {}", signature);
                result.rooted += 1;
                inc_new_counter_info!("send_transaction_service-rooted", 1);
                false
            } else if transaction_info.last_valid_slot < root_bank.slot() {
                info!("Dropping expired transaction: {}", signature);
                result.expired += 1;
                inc_new_counter_info!("send_transaction_service-expired", 1);
                false
            } else {
                match working_bank.get_signature_status_slot(signature) {
                    None => {
                        // Transaction is unknown to the working bank, it might have been
                        // dropped or landed in another fork. Re-send it
                        info!("Retrying transaction: {}", signature);
                        result.retried += 1;
                        inc_new_counter_info!("send_transaction_service-retry", 1);
                        Self::send_transaction(
                            &send_socket,
                            &tpu_address,
                            &transaction_info.wire_transaction,
                        );
                        true
                    }
                    Some((_slot, status)) => {
                        if status.is_err() {
                            info!("Dropping failed transaction: {}", signature);
                            result.failed += 1;
                            inc_new_counter_info!("send_transaction_service-failed", 1);
                            false
                        } else {
                            result.retained += 1;
                            true
                        }
                    }
                }
            }
        });

        result
    }

    fn send_transaction(
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        wire_transaction: &[u8],
    ) {
        if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
            warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
        }
    }

    pub fn send(&self, signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) {
        inc_new_counter_info!("send_transaction_service-enqueue", 1, 1);
        Self::send_transaction(&self.send_socket, &self.tpu_address, &wire_transaction);

        self.sender
            .lock()
            .unwrap()
            .send(TransactionInfo {
                signature,
                wire_transaction,
                last_valid_slot,
            })
            .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
    }

    pub fn join(self) -> thread::Result<()> {
        self.thread.join()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::rpc::tests::new_bank_forks;
    use solana_sdk::{pubkey::Pubkey, signature::Signer};

    #[test]
    fn service_exit() {
        let cluster_info = Arc::new(ClusterInfo::default());
        let bank_forks = new_bank_forks().0;
        let exit = Arc::new(AtomicBool::new(false));

        let send_transaction_service =
            SendTransactionService::new(&cluster_info, &bank_forks, &exit);

        exit.store(true, Ordering::Relaxed);
        send_transaction_service.join().unwrap();
    }

    #[test]
    fn process_transactions() {
        solana_logger::setup();

        let (bank_forks, mint_keypair, _voting_keypair) = new_bank_forks();
        let cluster_info = ClusterInfo::default();
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let tpu_address = cluster_info.my_contact_info().tpu;

        let root_bank = Arc::new(Bank::new_from_parent(
            &bank_forks.read().unwrap().working_bank(),
            &Pubkey::default(),
            1,
        ));
        let rooted_signature = root_bank
            .transfer(1, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));

        let non_rooted_signature = working_bank
            .transfer(2, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let failed_signature = {
            let blockhash = working_bank.last_blockhash();
            let transaction = solana_sdk::system_transaction::transfer(
                &mint_keypair,
                &Pubkey::default(),
                1,
                blockhash,
            );
            let signature = transaction.signatures[0];
            working_bank.process_transaction(&transaction).unwrap_err();
            signature
        };

        let mut transactions = HashMap::new();

        info!("Expired transactions are dropped...");
        transactions.insert(
            Signature::default(),
            TransactionInfo {
                signature: Signature::default(),
                wire_transaction: vec![],
                last_valid_slot: root_bank.slot() - 1,
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                expired: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Rooted transactions are dropped...");
        transactions.insert(
            rooted_signature,
            TransactionInfo {
                signature: rooted_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                rooted: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Failed transactions are dropped...");
        transactions.insert(
            failed_signature,
            TransactionInfo {
                signature: failed_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                failed: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Non-rooted transactions are kept...");
        transactions.insert(
            non_rooted_signature,
            TransactionInfo {
                signature: non_rooted_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retained: 1,
                ..ProcessTransactionsResult::default()
            }
        );
        transactions.clear();

        info!("Unknown transactions are retried...");
        transactions.insert(
            Signature::default(),
            TransactionInfo {
                signature: Signature::default(),
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retried: 1,
                ..ProcessTransactionsResult::default()
            }
        );
    }
}
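Taken together, the service is constructed once at startup and fed from the RPC request path. A sketch of the intended wiring, assuming `cluster_info` and `bank_forks` are already built elsewhere (e.g. during validator startup) and that `signature`, `wire_transaction`, and `last_valid_slot` come from an incoming sendTransaction request:

```rust
let exit = Arc::new(AtomicBool::new(false));
let send_transaction_service = SendTransactionService::new(&cluster_info, &bank_forks, &exit);

// The service sends the wire bytes to the TPU immediately, then re-sends
// them on each status check until the signature is rooted, expired, or
// observed to have failed.
send_transaction_service.send(signature, wire_transaction, last_valid_slot);

// On shutdown:
exit.store(true, Ordering::Relaxed);
send_transaction_service.join().unwrap();
```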
@@ -3,6 +3,7 @@ use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionSta
use solana_runtime::{
    bank::{Bank, HashAgeKind},
    nonce_utils,
    transaction_utils::OrderedIterator,
};
use solana_transaction_status::TransactionStatusMeta;
use std::{
@@ -50,25 +51,39 @@ impl TransactionStatusService {
        let TransactionStatusBatch {
            bank,
            transactions,
            iteration_order,
            statuses,
            balances,
        } = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;

        let slot = bank.slot();
        for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in transactions
            .iter()
            .zip(statuses)
            .zip(balances.pre_balances)
            .zip(balances.post_balances)
        for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
            OrderedIterator::new(&transactions, iteration_order.as_deref())
                .zip(statuses)
                .zip(balances.pre_balances)
                .zip(balances.post_balances)
        {
            if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
                let fee_calculator = match hash_age_kind {
                let (fee_calculator, hash_kind) = match hash_age_kind.clone() {
                    Some(HashAgeKind::DurableNonce(_, account)) => {
                        nonce_utils::fee_calculator_of(&account)
                        info!("nonce_account: {:?}", account);
                        (nonce_utils::fee_calculator_of(&account), "durable_nonce")
                    }
                    _ => bank.get_fee_calculator(&transaction.message().recent_blockhash),
                    _ => (
                        bank.get_fee_calculator(&transaction.message().recent_blockhash),
                        "recent_blockhash",
                    ),
                };
                if fee_calculator.is_none() {
                    error!(
                        "{:?} {:?} fee_calculator: {:?}",
                        transaction.signatures[0],
                        hash_kind,
                        fee_calculator.is_some()
                    );
                    info!("{:?}", status);
                }
                    .expect("FeeCalculator must exist");
                let fee_calculator = fee_calculator.expect("FeeCalculator must exist");
                let fee = fee_calculator.calculate_fee(transaction.message());
                let (writable_keys, readonly_keys) =
                    transaction.message.get_account_keys_by_lock_type();
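`OrderedIterator` lets the status writer replay the batch's transactions in the order they were actually executed (recorded in `iteration_order`) rather than in batch order. A minimal stand-in with the assumed semantics, for illustration only:

```rust
/// Assumed semantics of OrderedIterator: yield `items` in the recorded
/// `order` when one is present, else in natural slice order.
fn ordered_iter<'a, T>(
    items: &'a [T],
    order: Option<&'a [usize]>,
) -> Box<dyn Iterator<Item = &'a T> + 'a> {
    match order {
        Some(order) => Box::new(order.iter().map(move |&i| &items[i])),
        None => Box::new(items.iter()),
    }
}
```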
@@ -5,7 +5,7 @@ use solana_core::cluster_info;
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@@ -426,37 +426,35 @@ fn network_run_pull(
            .map(|f| f.filter.bits.len() as usize / 8)
            .sum::<usize>();
        bytes += serialized_size(&caller_info).unwrap() as usize;
        let filters = filters
        let filters: Vec<_> = filters
            .into_iter()
            .map(|f| (caller_info.clone(), f))
            .collect();
        let rsp = network
        let rsp: Vec<_> = network
            .get(&to)
            .map(|node| {
                let mut rsp = vec![];
                rsp.append(
                    &mut node
                        .lock()
                        .unwrap()
                        .process_pull_requests(filters, now)
                        .into_iter()
                        .flatten()
                        .collect(),
                );
                let rsp = node
                    .lock()
                    .unwrap()
                    .generate_pull_responses(&filters)
                    .into_iter()
                    .flatten()
                    .collect();
                node.lock().unwrap().process_pull_requests(filters, now);
                rsp
            })
            .unwrap();
        bytes += serialized_size(&rsp).unwrap() as usize;
        msgs += rsp.len();
        network.get(&from).map(|node| {
            node.lock()
                .unwrap()
                .mark_pull_request_creation_time(&from, now);
            overhead += node
                .lock()
                .unwrap()
                .process_pull_response(&from, &timeouts, rsp, now)
                .0;
            let mut node = node.lock().unwrap();
            node.mark_pull_request_creation_time(&from, now);
            let mut stats = ProcessPullStats::default();
            let (vers, vers_expired_timeout) =
                node.filter_pull_responses(&timeouts, rsp, now, &mut stats);
            node.process_pull_responses(&from, vers, vers_expired_timeout, now, &mut stats);
            overhead += stats.failed_insert;
            overhead += stats.failed_timeout;
        });
        (bytes, msgs, overhead)
    })
@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.1.16"
version = "1.1.20"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -5,12 +5,16 @@ cd "$(dirname "$0")"

if [[ -n $CI_TAG ]]; then
  LATEST_SOLANA_RELEASE_VERSION=$CI_TAG
else
elif [[ -z $CI_PULL_REQUEST ]]; then
  LATEST_SOLANA_RELEASE_VERSION=$(\
    curl -sSfL https://api.github.com/repos/solana-labs/solana/releases/latest \
    | grep -m 1 tag_name \
    | sed -ne 's/^ *"tag_name": "\([^"]*\)",$/\1/p' \
  )
else
  # Don't bother the `api.github.com` on pull requests to avoid getting rate
  # limited
  LATEST_SOLANA_RELEASE_VERSION=unknown-version
fi

set -x
@@ -307,6 +307,7 @@ The result field will be an object with the following fields:

* `rewards: <array>` - an array of JSON objects containing:
  * `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
  * `lamports: <i64>` - number of reward lamports credited or debited by the account, as an i64
* `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available

#### Example:

@@ -355,8 +356,9 @@ Returns a list of confirmed blocks

#### Results:

The result field will be an array of u64 integers listing confirmed blocks
between start_slot and either end_slot, if provided, or latest confirmed block,
inclusive.
between `start_slot` and either `end_slot`, if provided, or latest confirmed block,
inclusive. Max range allowed is 500,000 slots.

#### Example:

@@ -370,7 +372,8 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m

### getConfirmedSignaturesForAddress

Returns a list of all the confirmed signatures for transactions involving an address, within a specified Slot range. Max range allowed is 10_000 Slots.
Returns a list of all the confirmed signatures for transactions involving an
address, within a specified Slot range. Max range allowed is 10,000 Slots.

#### Parameters:

@@ -448,6 +451,7 @@ Returns information about the current epoch

The result field will be an object with the following fields:

* `absoluteSlot: <u64>`, the current slot
* `blockHeight: <u64>`, the current block height
* `epoch: <u64>`, the current epoch
* `slotIndex: <u64>`, the current slot relative to the start of the current epoch
* `slotsInEpoch: <u64>`, the number of slots in this epoch
@@ -459,7 +463,7 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"absoluteSlot":166598,"epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
{"jsonrpc":"2.0","result":{"absoluteSlot":166598,"blockHeight":166500,"epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
```

@@ -1074,7 +1078,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.1.16"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.1.20"},"id":1}
```

### getVoteAccounts
@@ -6,7 +6,7 @@ Solana takes a very different approach, which it calls _Proof of History_ or _Po

Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms.

What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.205.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.

## Relationship to VDFs

@@ -81,7 +81,7 @@ $ solana-validator \
  --dynamic-port-range 8000-8010 \
  --entrypoint 35.203.170.30:8001 \
  --expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
  --expected-shred-version 56096 \
  --expected-shred-version 62235 \
  --limit-ledger-size
```

@@ -6,9 +6,9 @@ Solana is an open source project implementing a new, high-performance, permissio

## Why Solana?

It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.205.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!

> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.161.1078)
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.201.1078)

Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.

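That "far more granular verifiable delay function" is just a chain of SHA-256 hashes: each hash takes the previous one as input, so producing N hashes requires N sequential steps, and recomputing the chain is the only way to verify them. A minimal sketch using `solana_sdk` (illustrative only; the real PoH recorder also mixes in transaction hashes):

```rust
use solana_sdk::hash::{hash, Hash};

// Advance a PoH-style hash chain by `ticks` sequential SHA-256 hashes.
fn advance(prev: &Hash, ticks: u64) -> Hash {
    (0..ticks).fold(*prev, |h, _| hash(h.as_ref()))
}
```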
@@ -71,7 +71,7 @@ A Block-Merkle is the Merkle Root of all the Entry-Merkles sequenced in the bloc

A Bank-Hash is the hash of the concatenation of the Block-Merkle and Accounts-Hash

<img alt="Bank Hash Diagram" src="img/spv-bank-hash.svg" class="center"/>
![Bank Hash Diagram](img/spv-bank-hash.svg)

An Accounts-Hash is the hash of the concatenation of the state hashes of each
account modified during the current slot.
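In sketch form, assuming `hashv` from `solana_sdk` (the runtime's actual bank hash folds in additional fields such as the parent hash and signature count):

```rust
use solana_sdk::hash::{hashv, Hash};

// Bank-Hash = hash(Block-Merkle || Accounts-Hash), per the description above.
fn bank_hash(block_merkle: &Hash, accounts_hash: &Hash) -> Hash {
    hashv(&[block_merkle.as_ref(), accounts_hash.as_ref()])
}
```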
@@ -7,6 +7,25 @@ experience for most people who are new or experienced with using crypto wallets.
currently the easiest and fastest way to get set up with a new wallet on Solana.
The app is free and getting your wallet set up only takes a few minutes.

### Trust Wallet Security

Tokens held in Trust Wallet are only as secure as the device on which the app is
installed. Anyone who is able to unlock your phone or tablet may be able to
use the Trust Wallet app and transfer your tokens. To improve security,
you can add a passcode to the Trust Wallet application.
To add a Trust Wallet passcode, open the app and go to
Settings -> Security -> Passcode.

If someone gains access to your Trust Wallet application, they can access your
recovery seed phrase.
Anyone who has access to your seed phrase will be able to recreate
your Trust Wallet keys on a different device. From there, they could
sign transactions from that device rather than on your own phone or tablet.
The seed phrase is displayed when a new wallet is created and it can also be
viewed at any later time in the app by following these steps:

- Go to Settings -> Wallets
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"

{% page-ref page="trust-wallet.md" %}

## Ledger Live with Ledger Nano S
@@ -59,7 +59,7 @@ some interface for signing transactions.

A hardware wallet, such as the
[Ledger hardware wallet](https://www.ledger.com/), offers a great blend of
security and convenience for cryptocurrencies. It effectively automates the
process of offline signing while retaining nearly all the convenience of an FS
wallet.
process of offline signing while retaining nearly all the convenience of a file
system wallet.

{% page-ref page="../hardware-wallet/README.md" %}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-dos"
version = "1.1.16"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,10 +13,10 @@ clap = "2.33.0"
log = "0.4.8"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-net-utils = { path = "../net-utils", version = "1.1.16" }
solana-runtime = { path = "../runtime", version = "1.1.16" }
solana-core = { path = "../core", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.1.16"
version = "1.1.20"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.10.0"
indicatif = "0.14.0"
log = "0.4.8"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-ledger = { path = "../ledger", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
tar = "0.4.26"

[lib]
@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.1.16"
version = "1.1.20"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.105"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-metrics = { path = "../metrics", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
tokio = "0.1"
tokio-codec = "0.1"
@@ -6,7 +6,7 @@ VERSION=$PERF_LIBS_VERSION-1
set -e
cd "$(dirname "$0")"

if [[ ! -f target/perf-libs/.$VERSION ]]; then
if [[ $VERSION != "$(cat target/perf-libs/.version 2> /dev/null)" ]]; then
  if [[ $(uname) != Linux ]]; then
    echo Note: Performance libraries are only available for Linux
    exit 0
@@ -17,6 +17,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
    exit 0
  fi

  rm -rf target/perf-libs
  mkdir -p target/perf-libs
  (
    set -x
@@ -35,7 +36,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
      mkdir -p ~/.cache
      mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
    fi
    touch .$VERSION
    echo "$VERSION" > .version
  )

# Setup symlinks so the perf-libs/ can be found from all binaries run out of
@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.1.16"
version = "1.1.20"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,13 +10,13 @@ edition = "2018"

[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.16" }
solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.16" }
solana-runtime = { path = "../runtime", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
solana-vest-program = { path = "../programs/vest", version = "1.1.16" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.20" }
solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
solana-vest-program = { path = "../programs/vest", version = "1.1.20" }

[lib]
crate-type = ["lib"]
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.16"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,14 +15,14 @@ chrono = "0.4"
serde = "1.0.105"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.16" }
solana-ledger = { path = "../ledger", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
tempfile = "3.1.0"

[[bin]]
@@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.16"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-core = { path = "../core", version = "1.1.16" }
solana-client = { path = "../client", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-net-utils = { path = "../net-utils", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-core = { path = "../core", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "1.1.16"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -24,11 +24,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-client = { path = "../client", version = "1.1.16" }
solana-config-program = { path = "../programs/config", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-config-program = { path = "../programs/config", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
semver = "0.9.0"
tar = "0.4.26"
tempdir = "0.3.7"
@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "1.1.16"
version = "1.1.20"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ bs58 = "0.3.0"
clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.12.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-cli-config = { path = "../cli-config", version = "1.1.16" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-cli-config = { path = "../cli-config", version = "1.1.20" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
tiny-bip39 = "0.7.0"

[[bin]]
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.16"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,19 +15,22 @@ histogram = "*"
log = { version = "0.4.8" }
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
solana-cli = { path = "../cli", version = "1.1.16" }
solana-ledger = { path = "../ledger", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-runtime = { path = "../runtime", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.16" }
solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-cli = { path = "../cli", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
tempfile = "3.1.0"

[dev-dependencies]
assert_cmd = "1.0"

[target."cfg(unix)".dependencies]
signal-hook = "0.1.15"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -14,6 +14,7 @@ use solana_ledger::{
    rooted_slot_iterator::RootedSlotIterator,
    snapshot_utils,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
    clock::Slot, genesis_config::GenesisConfig, native_token::lamports_to_sol, pubkey::Pubkey,
    shred_version::compute_shred_version,
@@ -28,6 +29,7 @@ use std::{
    path::{Path, PathBuf},
    process::{exit, Command, Stdio},
    str::FromStr,
    sync::Arc,
};

use log::*;
@@ -583,6 +585,16 @@ fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> Genes

#[allow(clippy::cognitive_complexity)]
fn main() {
    // Ignore SIGUSR1 to prevent long-running calls being killed by logrotate
    // in warehouse deployments
    #[cfg(unix)]
    {
        // `register()` is unsafe because the action is called in a signal handler
        // with the usual caveats. So long as this action body stays empty, we'll
        // be fine
        unsafe { signal_hook::register(signal_hook::SIGUSR1, || {}) }.unwrap();
    }

    const DEFAULT_ROOT_COUNT: &str = "1";
    solana_logger::setup_with_default("solana=info");

@@ -754,6 +766,17 @@ fn main() {
                .takes_value(true)
                .help("Output directory for the snapshot"),
        )
        .arg(
            Arg::with_name("warp_slot")
                .required(false)
                .long("warp-slot")
                .takes_value(true)
                .value_name("WARP_SLOT")
                .validator(is_slot)
                .help("After loading the snapshot slot warp the ledger to WARP_SLOT, \
                       which could be a slot in a galaxy far far away"),
        )

        ).subcommand(
            SubCommand::with_name("accounts")
                .about("Print account contents after processing in the ledger")
@@ -990,6 +1013,7 @@ fn main() {
        ("create-snapshot", Some(arg_matches)) => {
            let snapshot_slot = value_t_or_exit!(arg_matches, "snapshot_slot", Slot);
            let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
            let warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok();

            let process_options = ProcessOptions {
                dev_halt_at_slot: Some(snapshot_slot),
@@ -1006,12 +1030,26 @@ fn main() {
                AccessType::TryPrimaryThenSecondary,
            ) {
                Ok((bank_forks, _bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
                    let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
                        eprintln!("Error: Slot {} is not available", snapshot_slot);
                        exit(1);
                    });
                    let bank = bank_forks
                        .get(snapshot_slot)
                        .unwrap_or_else(|| {
                            eprintln!("Error: Slot {} is not available", snapshot_slot);
                            exit(1);
                        })
                        .clone();

                    let bank = if let Some(warp_slot) = warp_slot {
                        Arc::new(Bank::warp_from_parent(
                            &bank,
                            bank.collector_id(),
                            warp_slot,
                        ))
                    } else {
                        bank
                    };

                    println!("Creating a snapshot of slot {}", bank.slot());
                    assert!(bank.is_complete());
                    bank.squash();

                    let temp_dir = tempfile::TempDir::new().unwrap_or_else(|err| {
@@ -1035,7 +1073,8 @@ fn main() {
                    snapshot_utils::archive_snapshot_package(&package).map(|ok| {
                        println!(
                            "Successfully created snapshot for slot {}: {:?}",
                            snapshot_slot, package.tar_output_file
                            bank.slot(),
                            package.tar_output_file
                        );
                        println!(
                            "Shred version: {}",
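An illustrative invocation of the new flag (ledger path, output directory, and slot numbers are placeholders, not values from this change):

```bash
# Snapshot slot 100, then warp the resulting bank forward to slot 200
# before the snapshot is packaged.
solana-ledger-tool --ledger <LEDGER_DIR> create-snapshot 100 <OUTPUT_DIR> --warp-slot 200
```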
@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.1.16"
version = "1.1.20"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -28,19 +28,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
regex = "1.3.6"
serde = "1.0.105"
serde_bytes = "0.11.3"
solana-transaction-status = { path = "../transaction-status", version = "1.1.16" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.16" }
solana-logger = { path = "../logger", version = "1.1.16" }
solana-measure = { path = "../measure", version = "1.1.16" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.16" }
solana-metrics = { path = "../metrics", version = "1.1.16" }
solana-perf = { path = "../perf", version = "1.1.16" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-measure = { path = "../measure", version = "1.1.20" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
ed25519-dalek = "1.0.0-pre.3"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.16" }
solana-runtime = { path = "../runtime", version = "1.1.16" }
solana-sdk = { path = "../sdk", version = "1.1.16" }
solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
symlink = "0.1.0"
tar = "0.4.26"
thiserror = "1.0"
@@ -57,7 +57,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
solana-budget-program = { path = "../programs/budget", version = "1.1.20" }

[lib]
crate-type = ["lib"]
@@ -1649,6 +1649,7 @@ impl Blockstore {
                slot_transaction_iterator,
            ),
            rewards,
            block_time: None, // See https://github.com/solana-labs/solana/issues/10089
        };
        return Ok(block);
    }
@@ -5334,6 +5335,7 @@ pub mod tests {
            blockhash: blockhash.to_string(),
            previous_blockhash: Hash::default().to_string(),
            rewards: vec![],
            block_time: None,
        };
        // The previous_blockhash of `expected_block` is default because its parent slot is a
        // root, but empty of entries. This is special handling for snapshot root slots.
@@ -5355,6 +5357,7 @@ pub mod tests {
            blockhash: blockhash.to_string(),
            previous_blockhash: blockhash.to_string(),
            rewards: vec![],
            block_time: None,
        };
        assert_eq!(confirmed_block, expected_block);

@@ -18,13 +18,14 @@ use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_runtime::{
|
||||
bank::{Bank, TransactionBalancesSet, TransactionProcessResult, TransactionResults},
|
||||
transaction_batch::TransactionBatch,
|
||||
transaction_utils::OrderedIterator,
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::{Slot, MAX_PROCESSING_AGE},
|
||||
genesis_config::GenesisConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::Keypair,
|
||||
signature::{Keypair, Signature},
|
||||
timing::duration_as_ms,
|
||||
transaction::{Result, Transaction, TransactionError},
|
||||
};
|
||||
@@ -57,6 +58,37 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
     Ok(())
 }

+// Includes transaction signature for unit-testing
+fn get_first_error(
+    batch: &TransactionBatch,
+    fee_collection_results: Vec<Result<()>>,
+) -> Option<(Result<()>, Signature)> {
+    let mut first_err = None;
+    for (result, transaction) in fee_collection_results.iter().zip(OrderedIterator::new(
+        batch.transactions(),
+        batch.iteration_order(),
+    )) {
+        if let Err(ref err) = result {
+            if first_err.is_none() {
+                first_err = Some((result.clone(), transaction.signatures[0]));
+            }
+            warn!(
+                "Unexpected validator error: {:?}, transaction: {:?}",
+                err, transaction
+            );
+            datapoint_error!(
+                "validator_process_entry_error",
+                (
+                    "error",
+                    format!("error: {:?}, transaction: {:?}", err, transaction),
+                    String
+                )
+            );
+        }
+    }
+    first_err
+}
+
 fn execute_batch(
     batch: &TransactionBatch,
     bank: &Arc<Bank>,
@@ -78,33 +110,15 @@ fn execute_batch(
         send_transaction_status_batch(
             bank.clone(),
             batch.transactions(),
+            batch.iteration_order_vec(),
             processing_results,
             balances,
             sender,
         );
     }

-    let mut first_err = None;
-    for (result, transaction) in fee_collection_results.iter().zip(batch.transactions()) {
-        if let Err(ref err) = result {
-            if first_err.is_none() {
-                first_err = Some(result.clone());
-            }
-            warn!(
-                "Unexpected validator error: {:?}, transaction: {:?}",
-                err, transaction
-            );
-            datapoint_error!(
-                "validator_process_entry_error",
-                (
-                    "error",
-                    format!("error: {:?}, transaction: {:?}", err, transaction),
-                    String
-                )
-            );
-        }
-    }
-    first_err.unwrap_or(Ok(()))
+    let first_err = get_first_error(batch, fee_collection_results);
+    first_err.map(|(result, _)| result).unwrap_or(Ok(()))
 }

 fn execute_batches(
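To make the refactor above concrete, here is a minimal standalone sketch of what get_first_error does: pair each result with the transaction it belongs to under an optional iteration order, and return the first error together with an identifier for its transaction. Plain types (String errors, u64 ids) stand in for the solana-runtime TransactionBatch, OrderedIterator, and Signature types, so this illustrates only the pairing logic, not the crate's API:

fn first_error_with_id(
    results: &[Result<(), String>],
    tx_ids: &[u64],
    iteration_order: Option<&[usize]>,
) -> Option<(String, u64)> {
    // Fall back to the natural order when no permutation is supplied,
    // mirroring the None case of the iteration order above.
    let order: Vec<usize> = match iteration_order {
        Some(order) => order.to_vec(),
        None => (0..tx_ids.len()).collect(),
    };
    results
        .iter()
        .zip(order.iter().map(|&i| tx_ids[i]))
        .find_map(|(result, id)| result.as_ref().err().map(|err| (err.clone(), id)))
}

fn main() {
    // Inverted order: results[0] belongs to tx_ids[1], exactly the shape
    // exercised by test_get_first_error further down.
    let results = [Err("AccountNotFound".to_string()), Ok(())];
    assert_eq!(
        first_error_with_id(&results, &[11, 22], Some(&[1, 0])),
        Some(("AccountNotFound".to_string(), 22))
    );
}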
@@ -824,6 +838,7 @@ fn process_single_slot(
 pub struct TransactionStatusBatch {
     pub bank: Arc<Bank>,
     pub transactions: Vec<Transaction>,
+    pub iteration_order: Option<Vec<usize>>,
     pub statuses: Vec<TransactionProcessResult>,
     pub balances: TransactionBalancesSet,
 }
@@ -832,6 +847,7 @@ pub type TransactionStatusSender = Sender<TransactionStatusBatch>;
 pub fn send_transaction_status_batch(
     bank: Arc<Bank>,
     transactions: &[Transaction],
+    iteration_order: Option<Vec<usize>>,
     statuses: Vec<TransactionProcessResult>,
     balances: TransactionBalancesSet,
     transaction_status_sender: TransactionStatusSender,
@@ -840,6 +856,7 @@ pub fn send_transaction_status_batch(
     if let Err(e) = transaction_status_sender.send(TransactionStatusBatch {
         bank,
         transactions: transactions.to_vec(),
+        iteration_order,
         statuses,
         balances,
     }) {
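The sender side here is a plain std mpsc channel. A small sketch of the same ship-a-batch-over-a-channel pattern, with simplified stand-in field types (the real TransactionStatusBatch also carries the bank, the transactions, and the balance sets):

use std::sync::mpsc::{channel, Sender};

// Simplified stand-in for TransactionStatusBatch.
struct StatusBatch {
    iteration_order: Option<Vec<usize>>,
    statuses: Vec<Result<(), String>>,
}

fn send_status_batch(sender: &Sender<StatusBatch>, batch: StatusBatch) {
    // A hung-up receiver is logged and ignored, matching the
    // fire-and-forget style of send_transaction_status_batch above.
    if let Err(err) = sender.send(batch) {
        eprintln!("status batch send failed: {:?}", err);
    }
}

fn main() {
    let (sender, receiver) = channel();
    send_status_batch(
        &sender,
        StatusBatch {
            iteration_order: Some(vec![1, 0]),
            statuses: vec![Ok(())],
        },
    );
    assert_eq!(receiver.recv().unwrap().iteration_order, Some(vec![1, 0]));
}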
@@ -2665,4 +2682,51 @@ pub mod tests {
             }
         }
     }
+
+    #[test]
+    fn test_get_first_error() {
+        let GenesisConfigInfo {
+            genesis_config,
+            mint_keypair,
+            ..
+        } = create_genesis_config(1_000_000_000);
+        let bank = Arc::new(Bank::new(&genesis_config));
+
+        let present_account_key = Keypair::new();
+        let present_account = Account::new(1, 10, &Pubkey::default());
+        bank.store_account(&present_account_key.pubkey(), &present_account);
+
+        let keypair = Keypair::new();
+
+        // Create array of two transactions which throw different errors
+        let account_not_found_tx =
+            system_transaction::transfer(&keypair, &Pubkey::new_rand(), 42, bank.last_blockhash());
+        let account_not_found_sig = account_not_found_tx.signatures[0];
+        let mut account_loaded_twice = system_transaction::transfer(
+            &mint_keypair,
+            &Pubkey::new_rand(),
+            42,
+            bank.last_blockhash(),
+        );
+        account_loaded_twice.message.account_keys[1] = mint_keypair.pubkey();
+        let transactions = [account_loaded_twice, account_not_found_tx];
+
+        // Use inverted iteration_order
+        let iteration_order: Vec<usize> = vec![1, 0];
+
+        let batch = bank.prepare_batch(&transactions, Some(iteration_order));
+        let (
+            TransactionResults {
+                fee_collection_results,
+                processing_results: _,
+            },
+            _balances,
+        ) = batch
+            .bank()
+            .load_execute_and_commit_transactions(&batch, MAX_PROCESSING_AGE, false);
+        let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();
+        // First error found should be for the 2nd transaction, due to iteration_order
+        assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound);
+        assert_eq!(signature, account_not_found_sig);
+    }
 }
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-local-cluster"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,24 +12,24 @@ homepage = "https://solana.com/"
 itertools = "0.9.0"
 log = "0.4.8"
 rand = "0.7.0"
-solana-archiver-lib = { path = "../archiver-lib", version = "1.1.16" }
-solana-config-program = { path = "../programs/config", version = "1.1.16" }
-solana-core = { path = "../core", version = "1.1.16" }
-solana-client = { path = "../client", version = "1.1.16" }
-solana-download-utils = { path = "../download-utils", version = "1.1.16" }
-solana-faucet = { path = "../faucet", version = "1.1.16" }
-solana-exchange-program = { path = "../programs/exchange", version = "1.1.16" }
-solana-genesis-programs = { path = "../genesis-programs", version = "1.1.16" }
-solana-ledger = { path = "../ledger", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-runtime = { path = "../runtime", version = "1.1.16" }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-stake-program = { path = "../programs/stake", version = "1.1.16" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.16" }
-solana-vest-program = { path = "../programs/vest", version = "1.1.16" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.16" }
+solana-archiver-lib = { path = "../archiver-lib", version = "1.1.20" }
+solana-config-program = { path = "../programs/config", version = "1.1.20" }
+solana-core = { path = "../core", version = "1.1.20" }
+solana-client = { path = "../client", version = "1.1.20" }
+solana-download-utils = { path = "../download-utils", version = "1.1.20" }
+solana-faucet = { path = "../faucet", version = "1.1.20" }
+solana-exchange-program = { path = "../programs/exchange", version = "1.1.20" }
+solana-genesis-programs = { path = "../genesis-programs", version = "1.1.20" }
+solana-ledger = { path = "../ledger", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-runtime = { path = "../runtime", version = "1.1.20" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
+solana-vest-program = { path = "../programs/vest", version = "1.1.20" }
+solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
 tempfile = "3.1.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.16" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.20" }

 [dev-dependencies]
 assert_matches = "1.3.0"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-log-analyzer"
 description = "The solana cluster network analysis tool"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,8 +14,8 @@ byte-unit = "3.0.3"
 clap = "2.33.0"
 serde = "1.0.105"
 serde_json = "1.0.48"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }

 [[bin]]
 name = "solana-log-analyzer"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-logger"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Logger"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -1,7 +1,7 @@
 [package]
 name = "solana-measure"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"

 [dependencies]
 log = "0.4.8"
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }

 [target."cfg(unix)".dependencies]
 jemallocator = "0.3.2"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-merkle-tree"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Merkle Tree"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
 fast-math = "0.1"

 [dev-dependencies]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-metrics"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Metrics"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ gethostname = "0.2.1"
 lazy_static = "1.4.0"
 log = "0.4.8"
 reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-solana-sdk = { path = "../sdk", version = "1.1.16" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }

 [dev-dependencies]
 rand = "0.7.0"
@@ -33,6 +33,9 @@ while [[ -n $1 ]]; do
   elif [[ $1 = --gossip-port ]]; then
     args+=("$1" "$2")
     shift 2
+  elif [[ $1 = --dev-halt-at-slot ]]; then
+    args+=("$1" "$2")
+    shift 2
   elif [[ $1 = --dynamic-port-range ]]; then
     args+=("$1" "$2")
     shift 2
@@ -54,6 +57,9 @@ while [[ -n $1 ]]; do
   elif [[ $1 = --no-restart ]]; then
     no_restart=1
     shift
+  elif [[ $1 == --wait-for-supermajority ]]; then
+    args+=("$1" "$2")
+    shift 2
   else
     echo "Unknown argument: $1"
     $program --help
@@ -88,16 +88,15 @@ if [[ ! -f $vote_account ]]; then
   exit 1
 fi

-if [[ -f $stake_account ]]; then
-  echo "Error: $stake_account already exists"
-  exit 1
-fi
-
 if ((airdrops_enabled)); then
   $solana_cli "${common_args[@]}" airdrop "$stake_sol"
 fi

-$solana_keygen new --no-passphrase -so "$stake_account"
+if ! [[ -f "$stake_account" ]]; then
+  $solana_keygen new --no-passphrase -so "$stake_account"
+else
+  echo "$stake_account already exists! Using it"
+fi

 set -x
 $solana_cli "${common_args[@]}" \
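The rewritten keygen step turns a hard failure into an idempotent create-or-reuse. The same guard, sketched in Rust with a placeholder payload and a hypothetical file name standing in for the real keypair file:

use std::fs;
use std::io;
use std::path::Path;

// Only create the key file when it is missing, and reuse it otherwise;
// the file contents here are a stand-in for a real keypair.
fn ensure_keyfile(path: &Path) -> io::Result<()> {
    if path.exists() {
        println!("{} already exists! Using it", path.display());
    } else {
        fs::write(path, b"keypair-bytes")?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let path = Path::new("stake-account.json"); // hypothetical file name
    ensure_keyfile(path)?; // creates the file
    ensure_keyfile(path)?; // second run reuses it instead of failing
    Ok(())
}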
@@ -152,6 +152,9 @@ while [[ -n $1 ]]; do
   elif [[ $1 = --max-genesis-archive-unpacked-size ]]; then
     args+=("$1" "$2")
     shift 2
+  elif [[ $1 == --wait-for-supermajority ]]; then
+    args+=("$1" "$2")
+    shift 2
   elif [[ $1 = -h ]]; then
     usage "$@"
   else
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-net-shaper"
 description = "The solana cluster network shaping tool"
-version = "1.1.16"
+version = "1.1.20"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,8 +13,8 @@ publish = false
 clap = "2.33.0"
 serde = "1.0.105"
 serde_json = "1.0.48"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
 rand = "0.7.0"

 [[bin]]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-net-utils"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Network Utilities"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -18,8 +18,8 @@ rand = "0.7.0"
 serde = "1.0.105"
 serde_derive = "1.0.103"
 socket2 = "0.3.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
 tokio = "0.1"
 tokio-codec = "0.1"
@@ -7,10 +7,12 @@ use tokio_codec::{BytesCodec, Decoder};

 pub type IpEchoServer = Runtime;

+pub const MAX_PORT_COUNT_PER_MESSAGE: usize = 4;
+
 #[derive(Serialize, Deserialize, Default)]
 pub(crate) struct IpEchoServerMessage {
-    tcp_ports: [u16; 4], // Fixed size list of ports to avoid vec serde
-    udp_ports: [u16; 4], // Fixed size list of ports to avoid vec serde
+    tcp_ports: [u16; MAX_PORT_COUNT_PER_MESSAGE], // Fixed size list of ports to avoid vec serde
+    udp_ports: [u16; MAX_PORT_COUNT_PER_MESSAGE], // Fixed size list of ports to avoid vec serde
 }

 impl IpEchoServerMessage {
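The "avoid vec serde" comments are about keeping the message a fixed wire size. Assuming a serde-derive plus bincode 1.x setup with its default fixed-int encoding (the crate's actual codec is configured elsewhere, so this is an illustrative comparison), a fixed-size array encodes to exactly its payload bytes while a Vec carries a length prefix:

use serde::Serialize;

#[derive(Serialize)]
struct Fixed {
    ports: [u16; 4], // 4 * 2 = 8 payload bytes, no length prefix
}

fn main() {
    let fixed = bincode::serialize(&Fixed { ports: [0; 4] }).unwrap();
    let variable = bincode::serialize(&vec![0u16; 4]).unwrap();
    assert_eq!(fixed.len(), 8); // size is constant by construction
    assert_eq!(variable.len(), 8 + 8); // u64 length prefix + payload
}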
@@ -2,6 +2,7 @@
 use log::*;
 use rand::{thread_rng, Rng};
 use socket2::{Domain, SockAddr, Socket, Type};
+use std::collections::{BTreeMap, BTreeSet};
 use std::io::{self, Read, Write};
 use std::net::{IpAddr, SocketAddr, TcpListener, TcpStream, ToSocketAddrs, UdpSocket};
 use std::sync::mpsc::channel;
@@ -9,7 +10,7 @@ use std::time::Duration;

 mod ip_echo_server;
 use ip_echo_server::IpEchoServerMessage;
-pub use ip_echo_server::{ip_echo_server, IpEchoServer};
+pub use ip_echo_server::{ip_echo_server, IpEchoServer, MAX_PORT_COUNT_PER_MESSAGE};

 /// A data type representing a public Udp socket
 pub struct UdpSocketPair {
@@ -92,34 +93,36 @@ pub fn get_public_ip_addr(ip_echo_server_addr: &SocketAddr) -> Result<IpAddr, St

 // Checks if any of the provided TCP/UDP ports are not reachable by the machine at
 // `ip_echo_server_addr`
-pub fn verify_reachable_ports(
+const DEFAULT_TIMEOUT_SECS: u64 = 5;
+const DEFAULT_RETRY_COUNT: usize = 5;
+
+fn do_verify_reachable_ports(
     ip_echo_server_addr: &SocketAddr,
     tcp_listeners: Vec<(u16, TcpListener)>,
     udp_sockets: &[&UdpSocket],
+    timeout: u64,
+    udp_retry_count: usize,
 ) -> bool {
-    let udp_ports: Vec<_> = udp_sockets
-        .iter()
-        .map(|udp_socket| udp_socket.local_addr().unwrap().port())
-        .collect();
-
     info!(
-        "Checking that tcp ports {:?} and udp ports {:?} are reachable from {:?}",
-        tcp_listeners, udp_ports, ip_echo_server_addr
+        "Checking that tcp ports {:?} are reachable from {:?}",
+        tcp_listeners, ip_echo_server_addr
     );

     let tcp_ports: Vec<_> = tcp_listeners.iter().map(|(port, _)| *port).collect();
     let _ = ip_echo_server_request(
         ip_echo_server_addr,
-        IpEchoServerMessage::new(&tcp_ports, &udp_ports),
+        IpEchoServerMessage::new(&tcp_ports, &[]),
     )
     .map_err(|err| warn!("ip_echo_server request failed: {}", err));

     let mut ok = true;
+    let timeout = Duration::from_secs(timeout);

     // Wait for a connection to open on each TCP port
     for (port, tcp_listener) in tcp_listeners {
         let (sender, receiver) = channel();
-        std::thread::spawn(move || {
+        let listening_addr = tcp_listener.local_addr().unwrap();
+        let thread_handle = std::thread::spawn(move || {
             debug!("Waiting for incoming connection on tcp/{}", port);
             match tcp_listener.incoming().next() {
                 Some(_) => sender
@@ -128,7 +131,7 @@ pub fn verify_reachable_ports(
                 None => warn!("tcp incoming failed"),
             }
         });
-        match receiver.recv_timeout(Duration::from_secs(5)) {
+        match receiver.recv_timeout(timeout) {
             Ok(_) => {
                 info!("tcp/{} is reachable", port);
             }
@@ -137,9 +140,16 @@ pub fn verify_reachable_ports(
                     "Received no response at tcp/{}, check your port configuration: {}",
                     port, err
                 );
+                // Ugh, std rustc doesn't provide accepting with timeout or restoring original
+                // nonblocking-status of sockets because of lack of getter, only the setter...
+                // So, to close the thread cleanly, just connect from here.
+                // ref: https://github.com/rust-lang/rust/issues/31615
+                TcpStream::connect_timeout(&listening_addr, timeout).unwrap();
                 ok = false;
             }
         }
+        // ensure to reap the thread
+        thread_handle.join().unwrap();
     }

     if !ok {
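The self-connect workaround deserves a standalone illustration: std offers no accept-with-timeout, so once the channel wait times out, the only clean way to reap the probe thread is to make its blocking accept return by connecting to the listener ourselves. A minimal sketch of just that mechanism:

use std::net::{TcpListener, TcpStream};
use std::sync::mpsc::channel;
use std::time::Duration;

fn main() {
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let listening_addr = listener.local_addr().unwrap();
    let (sender, receiver) = channel();

    let thread_handle = std::thread::spawn(move || {
        // Blocks until *someone* connects; there is no accept timeout in std.
        if listener.incoming().next().is_some() {
            let _ = sender.send(());
        }
    });

    if receiver.recv_timeout(Duration::from_millis(200)).is_err() {
        // Nobody reached the port in time; unblock the accept ourselves
        // so the probe thread can exit and be joined.
        TcpStream::connect_timeout(&listening_addr, Duration::from_millis(200)).unwrap();
    }
    thread_handle.join().unwrap(); // reap the probe thread either way
}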
@@ -147,51 +157,110 @@ pub fn verify_reachable_ports(
         return ok;
     }

-    for _udp_retries in 0..5 {
-        // Wait for a datagram to arrive at each UDP port
-        for udp_socket in udp_sockets {
-            let port = udp_socket.local_addr().unwrap().port();
-            let udp_socket = udp_socket.try_clone().expect("Unable to clone udp socket");
-            let (sender, receiver) = channel();
-            std::thread::spawn(move || {
-                let mut buf = [0; 1];
-                debug!("Waiting for incoming datagram on udp/{}", port);
-                match udp_socket.recv(&mut buf) {
-                    Ok(_) => sender
-                        .send(())
-                        .unwrap_or_else(|err| warn!("send failure: {}", err)),
-                    Err(err) => warn!("udp recv failure: {}", err),
-                }
-            });
-            match receiver.recv_timeout(Duration::from_secs(5)) {
-                Ok(_) => {
-                    info!("udp/{} is reachable", port);
-                }
-                Err(err) => {
-                    error!(
-                        "Received no response at udp/{}, check your port configuration: {}",
-                        port, err
-                    );
-                    ok = false;
-                }
+    let mut udp_ports: BTreeMap<_, _> = BTreeMap::new();
+    udp_sockets.iter().for_each(|udp_socket| {
+        let port = udp_socket.local_addr().unwrap().port();
+        udp_ports
+            .entry(port)
+            .or_insert_with(Vec::new)
+            .push(udp_socket);
+    });
+    let udp_ports: Vec<_> = udp_ports.into_iter().collect();
+
+    info!(
+        "Checking that udp ports {:?} are reachable from {:?}",
+        udp_ports.iter().map(|(port, _)| port).collect::<Vec<_>>(),
+        ip_echo_server_addr
+    );
+
+    'outer: for checked_ports_and_sockets in udp_ports.chunks(MAX_PORT_COUNT_PER_MESSAGE) {
+        ok = false;
+
+        for udp_remaining_retry in (0_usize..udp_retry_count).rev() {
+            let (checked_ports, checked_socket_iter) = (
+                checked_ports_and_sockets
+                    .iter()
+                    .map(|(port, _)| *port)
+                    .collect::<Vec<_>>(),
+                checked_ports_and_sockets
+                    .iter()
+                    .map(|(_, sockets)| sockets)
+                    .flatten(),
+            );
+
+            let _ = ip_echo_server_request(
+                ip_echo_server_addr,
+                IpEchoServerMessage::new(&[], &checked_ports),
+            )
+            .map_err(|err| warn!("ip_echo_server request failed: {}", err));
+
+            // Spawn threads at once!
+            let thread_handles: Vec<_> = checked_socket_iter
+                .map(|udp_socket| {
+                    let port = udp_socket.local_addr().unwrap().port();
+                    let udp_socket = udp_socket.try_clone().expect("Unable to clone udp socket");
+                    std::thread::spawn(move || {
+                        let mut buf = [0; 1];
+                        let original_read_timeout = udp_socket.read_timeout().unwrap();
+                        udp_socket.set_read_timeout(Some(timeout)).unwrap();
+                        let recv_result = udp_socket.recv(&mut buf);
+                        debug!(
+                            "Waited for incoming datagram on udp/{}: {:?}",
+                            port, recv_result
+                        );
+                        udp_socket.set_read_timeout(original_read_timeout).unwrap();
+                        recv_result.map(|_| port).ok()
+                    })
+                })
+                .collect();
+
+            // Now join threads!
+            // Separate from the above by collect()-ing as an intermediate step to make the iterator
+            // eager not lazy so that joining happens here at once after creating a bunch of threads
+            // at once.
+            let reachable_ports: BTreeSet<_> = thread_handles
+                .into_iter()
+                .filter_map(|t| t.join().unwrap())
+                .collect();
+
+            if reachable_ports.len() == checked_ports.len() {
+                info!(
+                    "checked udp ports: {:?}, reachable udp ports: {:?}",
+                    checked_ports, reachable_ports
+                );
+                ok = true;
+                break;
+            } else if udp_remaining_retry > 0 {
+                // Might have lost a UDP packet, retry a couple times
+                error!(
+                    "checked udp ports: {:?}, reachable udp ports: {:?}",
+                    checked_ports, reachable_ports
+                );
+                error!("There are some udp ports with no response!! Retrying...");
+            } else {
+                error!("Maximum retry count is reached....");
+                break 'outer;
             }
         }
-        if ok {
-            break;
-        }
-        ok = true;
-
-        // Might have lost a UDP packet, retry a couple times
-        let _ = ip_echo_server_request(
-            ip_echo_server_addr,
-            IpEchoServerMessage::new(&[], &udp_ports),
-        )
-        .map_err(|err| warn!("ip_echo_server request failed: {}", err));
     }

     ok
 }

+pub fn verify_reachable_ports(
+    ip_echo_server_addr: &SocketAddr,
+    tcp_listeners: Vec<(u16, TcpListener)>,
+    udp_sockets: &[&UdpSocket],
+) -> bool {
+    do_verify_reachable_ports(
+        ip_echo_server_addr,
+        tcp_listeners,
+        udp_sockets,
+        DEFAULT_TIMEOUT_SECS,
+        DEFAULT_RETRY_COUNT,
+    )
+}
+
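Condensed to its skeleton, the UDP half above is: spawn one probe thread per socket under a read timeout, join them all at once, and compare the set of ports that answered against the set that was checked. A self-contained sketch, with a local sender standing in for the ip-echo server:

use std::collections::BTreeSet;
use std::net::UdpSocket;
use std::time::Duration;

fn main() {
    let sockets: Vec<UdpSocket> = (0..2)
        .map(|_| UdpSocket::bind("127.0.0.1:0").unwrap())
        .collect();
    let checked_ports: BTreeSet<u16> = sockets
        .iter()
        .map(|s| s.local_addr().unwrap().port())
        .collect();

    // Stand-in for the echoed datagrams: hit every checked port once.
    let echoer = UdpSocket::bind("127.0.0.1:0").unwrap();
    for port in &checked_ports {
        echoer.send_to(&[1], ("127.0.0.1", *port)).unwrap();
    }

    // Spawn all probes at once; collect() keeps the iterator eager.
    let thread_handles: Vec<_> = sockets
        .into_iter()
        .map(|socket| {
            std::thread::spawn(move || {
                socket.set_read_timeout(Some(Duration::from_secs(2))).unwrap();
                let mut buf = [0u8; 1];
                let port = socket.local_addr().unwrap().port();
                socket.recv(&mut buf).ok().map(|_| port)
            })
        })
        .collect();

    // ...then join them all here, keeping only the ports that answered.
    let reachable_ports: BTreeSet<u16> = thread_handles
        .into_iter()
        .filter_map(|handle| handle.join().unwrap())
        .collect();
    assert_eq!(reachable_ports, checked_ports);
}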
 pub fn parse_port_or_addr(optstr: Option<&str>, default_addr: SocketAddr) -> SocketAddr {
     if let Some(addrstr) = optstr {
         if let Ok(port) = addrstr.parse() {
@@ -512,7 +581,25 @@ mod tests {
     }

     #[test]
-    fn test_get_public_ip_addr() {
+    fn test_get_public_ip_addr_none() {
         solana_logger::setup();
         let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
         let (_server_port, (server_udp_socket, server_tcp_listener)) =
             bind_common_in_range(ip_addr, (3200, 3250)).unwrap();

         let _runtime = ip_echo_server(server_tcp_listener);

         let server_ip_echo_addr = server_udp_socket.local_addr().unwrap();
+        assert_eq!(
+            get_public_ip_addr(&server_ip_echo_addr),
+            parse_host("127.0.0.1"),
+        );
+
+        assert!(verify_reachable_ports(&server_ip_echo_addr, vec![], &[],));
+    }
+
+    #[test]
+    fn test_get_public_ip_addr_reachable() {
         solana_logger::setup();
         let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
         let (_server_port, (server_udp_socket, server_tcp_listener)) =
@@ -534,4 +621,50 @@ mod tests {
             &[&client_udp_socket],
         ));
     }
+
+    #[test]
+    fn test_get_public_ip_addr_tcp_unreachable() {
+        solana_logger::setup();
+        let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
+        let (_server_port, (server_udp_socket, _server_tcp_listener)) =
+            bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
+
+        // make the socket unreachable by not running the ip echo server!
+
+        let server_ip_echo_addr = server_udp_socket.local_addr().unwrap();
+
+        let (correct_client_port, (_client_udp_socket, client_tcp_listener)) =
+            bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
+
+        assert!(!do_verify_reachable_ports(
+            &server_ip_echo_addr,
+            vec![(correct_client_port, client_tcp_listener)],
+            &[],
+            2,
+            3,
+        ));
+    }
+
+    #[test]
+    fn test_get_public_ip_addr_udp_unreachable() {
+        solana_logger::setup();
+        let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
+        let (_server_port, (server_udp_socket, _server_tcp_listener)) =
+            bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
+
+        // make the socket unreachable by not running the ip echo server!
+
+        let server_ip_echo_addr = server_udp_socket.local_addr().unwrap();
+
+        let (_correct_client_port, (client_udp_socket, _client_tcp_listener)) =
+            bind_common_in_range(ip_addr, (3200, 3250)).unwrap();
+
+        assert!(!do_verify_reachable_ports(
+            &server_ip_echo_addr,
+            vec![],
+            &[&client_udp_socket],
+            2,
+            3,
+        ));
+    }
 }
net/net.sh (34 lines changed)
@@ -98,6 +98,7 @@ Operate a configured testnet
   --operating-mode development|softlaunch
     - Specify whether or not to launch the cluster in "development" mode with all features enabled at epoch 0,
      or "softlaunch" mode with some features disabled at epoch 0 (default: development)
+  --warp-slot WARP_SLOT - Boot from a snapshot that has warped ahead to WARP_SLOT rather than a slot 0 genesis.
 sanity/start-specific options:
   -F - Discard validator nodes that didn't bootup successfully
   -o noInstallCheck - Skip solana-install sanity
@@ -272,9 +273,11 @@ startBootstrapLeader() {
         ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
         ${#clientIpList[@]} \"$benchExchangeExtraArgs\" \
         \"$genesisOptions\" \
-        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \
+        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority\" \
         \"$gpuMode\" \
         \"$GEOLOCATION_API_KEY\" \
+        \"$maybeWarpSlot\" \
+        \"$waitForNodeInit\" \
       "

   ) >> "$logFile" 2>&1 || {
@@ -341,9 +344,11 @@ startNode() {
         ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
         ${#clientIpList[@]} \"$benchExchangeExtraArgs\" \
         \"$genesisOptions\" \
-        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \
+        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority\" \
         \"$gpuMode\" \
         \"$GEOLOCATION_API_KEY\" \
+        \"$maybeWarpSlot\" \
+        \"$waitForNodeInit\" \
       "
   ) >> "$logFile" 2>&1 &
   declare pid=$!
@@ -582,6 +587,19 @@ deploy() {
     fi
   done

+  if ! $waitForNodeInit; then
+    # Handle async init
+    declare startTime=$SECONDS
+    for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}"; do
+      declare timeWaited=$((SECONDS - startTime))
+      if [[ $timeWaited -gt 600 ]]; then
+        break
+      fi
+      ssh "${sshOptions[@]}" -n "$ipAddress" \
+        "./solana/net/remote/remote-node-wait-init.sh $((600 - timeWaited))"
+    done
+  fi
+
   $metricsWriteDatapoint "testnet-deploy net-validators-started=1"
   additionalNodeDeployTime=$SECONDS
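One detail worth noting in the async-init branch above: a single 600-second budget is shared across every node, and each ssh wait is handed only the time that remains. The same shared-deadline pattern in a standalone Rust sketch (node names and the wait call are illustrative stand-ins):

use std::time::{Duration, Instant};

fn main() {
    let budget = Duration::from_secs(600);
    let start = Instant::now();
    for node in ["validator-0", "validator-1", "blockstreamer-0"] {
        let waited = start.elapsed();
        if waited >= budget {
            break; // whole-fleet deadline exhausted
        }
        let remaining = budget - waited;
        // Stand-in for: ssh $node "remote-node-wait-init.sh <remaining secs>"
        println!("waiting up to {}s for {}", remaining.as_secs(), node);
    }
}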
@@ -727,6 +745,7 @@ maybeNoSnapshot=""
 maybeLimitLedgerSize=""
 maybeSkipLedgerVerify=""
 maybeDisableAirdrops=""
+maybeWaitForSupermajority=""
 debugBuild=false
 doBuild=true
 gpuMode=auto
@@ -737,6 +756,8 @@ netemConfigFile=""
 netemCommand="add"
 clientDelayStart=0
 netLogDir=
+maybeWarpSlot=
+waitForNodeInit=true

 command=$1
 [[ -n $command ]] || usage
@@ -837,6 +858,15 @@ while [[ -n $1 ]]; do
     elif [[ $1 == --client-delay-start ]]; then
       clientDelayStart=$2
       shift 2
+    elif [[ $1 == --wait-for-supermajority ]]; then
+      maybeWaitForSupermajority="$1 $2"
+      shift 2
+    elif [[ $1 == --warp-slot ]]; then
+      maybeWarpSlot="$1 $2"
+      shift 2
+    elif [[ $1 == --async-node-init ]]; then
+      waitForNodeInit=false
+      shift 1
     else
       usage "Unknown long option: $1"
     fi
net/remote/remote-node-wait-init.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+initCompleteFile=init-complete-node.log
+waitTime=${1:-600}
+
+waitForNodeToInit() {
+  declare hostname
+  hostname=$(hostname)
+  echo "--- waiting for $hostname to boot up"
+  declare startTime=$SECONDS
+  while [[ ! -r $initCompleteFile ]]; do
+    declare timeWaited=$((SECONDS - startTime))
+    if [[ $timeWaited -ge $waitTime ]]; then
+      echo "^^^ +++"
+      echo "Error: $initCompleteFile not found in $timeWaited seconds"
+      exit 1
+    fi
+    echo "Waiting for $initCompleteFile ($timeWaited) on $hostname..."
+    sleep 5
+  done
+  echo "$hostname booted up"
+}
+
+cd ~/solana
+waitForNodeToInit
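The new script is a plain poll-until-marker-file loop with a caller-supplied budget. Its logic, restated as a small Rust sketch (the marker file name is the script's; everything else is illustrative):

use std::path::Path;
use std::thread::sleep;
use std::time::{Duration, Instant};

fn wait_for_file(path: &Path, budget: Duration) -> bool {
    let start = Instant::now();
    while !path.exists() {
        if start.elapsed() >= budget {
            eprintln!("Error: {} not found in {:?}", path.display(), start.elapsed());
            return false;
        }
        // Poll every 5 seconds, just like the shell loop.
        sleep(Duration::from_secs(5));
    }
    true
}

fn main() {
    // First argument is the wait budget in seconds, defaulting to 600.
    let wait_secs: u64 = std::env::args()
        .nth(1)
        .and_then(|arg| arg.parse().ok())
        .unwrap_or(600);
    let ok = wait_for_file(
        Path::new("init-complete-node.log"),
        Duration::from_secs(wait_secs),
    );
    std::process::exit(if ok { 0 } else { 1 });
}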
@@ -26,6 +26,8 @@ genesisOptions="${17}"
 extraNodeArgs="${18}"
 gpuMode="${19:-auto}"
 GEOLOCATION_API_KEY="${20}"
+maybeWarpSlot="${21}"
+waitForNodeInit="${22}"
 set +x

 missing() {
@@ -91,22 +93,6 @@ case "$gpuMode" in
     ;;
 esac

-waitForNodeToInit() {
-  hostname=$(hostname)
-  echo "--- waiting for $hostname to boot up"
-  SECONDS=
-  while [[ ! -r $initCompleteFile ]]; do
-    if [[ $SECONDS -ge 600 ]]; then
-      echo "^^^ +++"
-      echo "Error: $initCompleteFile not found in $SECONDS seconds"
-      exit 1
-    fi
-    echo "Waiting for $initCompleteFile ($SECONDS) on $hostname..."
-    sleep 5
-  done
-  echo "$hostname booted up"
-}
-
 case $deployMethod in
 local|tar|skip)
   PATH="$HOME"/.cargo/bin:"$PATH"
@@ -229,6 +215,11 @@ EOF
     fi
     multinode-demo/setup.sh "${args[@]}"

+    if [[ -n "$maybeWarpSlot" ]]; then
+      # shellcheck disable=SC2086 # Do not want to quote $maybeWarpSlot
+      solana-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot
+    fi
+
     solana-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version
   fi
   args=(
@@ -252,7 +243,11 @@ cat >> ~/solana/on-reboot <<EOF
 disown
 EOF
 ~/solana/on-reboot
-waitForNodeToInit
+
+if $waitForNodeInit; then
+  net/remote/remote-node-wait-init.sh 600
+fi
+
 ;;
 validator|blockstreamer)
   if [[ $deployMethod != skip ]]; then
@@ -369,7 +364,10 @@ cat >> ~/solana/on-reboot <<EOF
 disown
 EOF
 ~/solana/on-reboot
-waitForNodeToInit
+
+if $waitForNodeInit; then
+  net/remote/remote-node-wait-init.sh 600
+fi

 if [[ $skipSetup != true && $nodeType != blockstreamer ]]; then
   # Wait for the validator to catch up to the bootstrap validator before
@@ -1,6 +1,6 @@
 [package]
 name = "solana-perf"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana Performance APIs"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -17,11 +17,11 @@ serde = "1.0.105"
 dlopen_derive = "0.1.4"
 lazy_static = "1.4.0"
 log = "0.4.8"
-solana-sdk = { path = "../sdk", version = "1.1.16" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.16" }
-solana-budget-program = { path = "../programs/budget", version = "1.1.16" }
-solana-logger = { path = "../logger", version = "1.1.16" }
-solana-metrics = { path = "../metrics", version = "1.1.16" }
+solana-sdk = { path = "../sdk", version = "1.1.20" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.20" }
+solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
+solana-logger = { path = "../logger", version = "1.1.20" }
+solana-metrics = { path = "../metrics", version = "1.1.20" }

 [lib]
 name = "solana_perf"
programs/bpf/Cargo.lock (generated, 631 lines changed; diff suppressed because it is too large)
@@ -1,7 +1,7 @@
 [package]
 name = "solana-bpf-programs"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.1.16"
+version = "1.1.20"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "README.md"
@@ -22,10 +22,10 @@ walkdir = "2"
 bincode = "1.1.4"
 byteorder = "1.3.2"
 elf = "0.0.10"
-solana-bpf-loader-program = { path = "../bpf_loader", version = "1.1.16" }
-solana-logger = { path = "../../logger", version = "1.1.16" }
-solana-runtime = { path = "../../runtime", version = "1.1.16" }
-solana-sdk = { path = "../../sdk", version = "1.1.16" }
+solana-bpf-loader-program = { path = "../bpf_loader", version = "1.1.20" }
+solana-logger = { path = "../../logger", version = "1.1.20" }
+solana-runtime = { path = "../../runtime", version = "1.1.20" }
+solana-sdk = { path = "../../sdk", version = "1.1.20" }
 solana_rbpf = "=0.1.28"

 [[bench]]
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-128bit"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "1.1.16", default-features = false }
-solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "1.1.16" }
+solana-sdk = { path = "../../../../sdk/", version = "1.1.20", default-features = false }
+solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "1.1.20" }

 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.1.16" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.1.20" }

 [features]
 program = ["solana-sdk/program"]
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-128bit-dep"
-version = "1.1.16"
+version = "1.1.20"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "1.1.16", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "1.1.20", default-features = false }

 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.1.16" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.1.20" }

 [features]
 program = ["solana-sdk/program"]
Some files were not shown because too many files have changed in this diff.