Compare commits
137 Commits
Author | SHA1 | Date | |
---|---|---|---|
86419dfd3c | |||
263fc25992 | |||
f6b669eabc | |||
1b2fdcd3a2 | |||
e8e74ee009 | |||
227a13f2d2 | |||
6842e37780 | |||
035bd7db4e | |||
14c2e2af13 | |||
a5b915263b | |||
b87ac392e8 | |||
f6f4e6062a | |||
42c027a7e6 | |||
6fbad24477 | |||
4bd018e68b | |||
c73e40a351 | |||
8f790e3153 | |||
1972d8b5c0 | |||
bc2fd56516 | |||
00916b2ca6 | |||
006a5c5c88 | |||
6666e54a1f | |||
d6ea4f50c9 | |||
a0965e1eba | |||
7ca65341e6 | |||
141a5928c4 | |||
5f0584b6e8 | |||
b7fb739cd9 | |||
5a4a238029 | |||
01987f8f89 | |||
82caa50781 | |||
60b1bcddb5 | |||
dce7739b75 | |||
1c703af6a2 | |||
f49de3b1ad | |||
5c1b79f500 | |||
da04616fd4 | |||
8653c86284 | |||
809e4cbf25 | |||
1aef482972 | |||
248ab3a6ec | |||
ec1f2b4f90 | |||
c853632fc4 | |||
e651209f73 | |||
641f439a45 | |||
a2486f8094 | |||
d48bd80619 | |||
4ff70a05f1 | |||
7831cef9a7 | |||
7dd22d6601 | |||
3bb0388299 | |||
a0a2c61856 | |||
4afa64c20d | |||
be6edb950c | |||
62bc83ef39 | |||
f26824f2b5 | |||
bc808d785b | |||
a5e91f8b14 | |||
79b1d49e42 | |||
5c5207b7c4 | |||
6280ea1b6e | |||
f016ccdbb5 | |||
a528e966e6 | |||
4be9d926c8 | |||
94e162b0f0 | |||
26ca3c6d6d | |||
729b997392 | |||
37b381f47f | |||
0115bfa2ea | |||
3f60fe62c2 | |||
ea44e64d21 | |||
8e1c2d2df4 | |||
a79702c62c | |||
3c94084177 | |||
7d448eb1a9 | |||
a705764ca7 | |||
3110def6c3 | |||
afc89beefa | |||
d5d5e8797b | |||
09f0624887 | |||
52c20a5c38 | |||
3c38df9be0 | |||
da038e626a | |||
9cfbf8a94d | |||
fbcbd37650 | |||
dca932fe45 | |||
8d89eac32f | |||
862fd63bb4 | |||
578d77495a | |||
537d135005 | |||
5ade9b9f02 | |||
e023719c58 | |||
a278f745f8 | |||
640bb9cb95 | |||
c344a878b6 | |||
9b63f7a50f | |||
b128087445 | |||
72755fcd19 | |||
24937e63d4 | |||
995759faf5 | |||
db60bd30dc | |||
bc86ee8d13 | |||
93506b22e7 | |||
1e53760a65 | |||
24c796b434 | |||
2cdd3f835f | |||
c4e04f70d0 | |||
5d971472b2 | |||
f1201502d4 | |||
fd5222ad21 | |||
768a5f2b40 | |||
87b57b53f9 | |||
55a64c8945 | |||
e8c6233c6e | |||
f51b214449 | |||
8fe8a5717e | |||
9adf8b4fc8 | |||
82772f95a1 | |||
0b5d3df251 | |||
e63fdba252 | |||
5e65b7cbd9 | |||
68d0fe2dbc | |||
3aad5f563e | |||
ccfe09e460 | |||
6fd57fafc8 | |||
c7d857583f | |||
e29b7876ad | |||
de479ebda9 | |||
d3447f2f41 | |||
d9e14b4a82 | |||
94b97e4b56 | |||
abd977b819 | |||
36eafa56a3 | |||
06a63549c1 | |||
a4047bb9c8 | |||
a235423000 | |||
726eadc64b |
14
.buildkite/env/secrets.ejson
vendored
14
.buildkite/env/secrets.ejson
vendored
@ -1,12 +1,12 @@
|
||||
{
|
||||
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
|
||||
"environment": {
|
||||
"CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
|
||||
"CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
|
||||
"GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
|
||||
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
|
||||
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
|
||||
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
|
||||
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
|
||||
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
|
||||
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
|
||||
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
|
||||
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
|
||||
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
|
||||
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
|
||||
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
|
||||
}
|
||||
}
|
||||
|
33
.mergify.yml
33
.mergify.yml
@ -1,9 +1,40 @@
|
||||
# Validate your changes with:
|
||||
#
|
||||
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
|
||||
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
|
||||
#
|
||||
# https://doc.mergify.io/
|
||||
pull_request_rules:
|
||||
- name: automatic merge (squash) on CI success
|
||||
conditions:
|
||||
- status-success=buildkite/solana
|
||||
#- status-success=Travis CI - Pull Request
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author≠@dont-squash-my-commits
|
||||
actions:
|
||||
merge:
|
||||
method: squash
|
||||
# Join the dont-squash-my-commits group if you won't like your commits squashed
|
||||
- name: automatic merge (rebase) on CI success
|
||||
conditions:
|
||||
- status-success=buildkite/solana
|
||||
#- status-success=Travis CI - Pull Request
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author=@dont-squash-my-commits
|
||||
actions:
|
||||
merge:
|
||||
method: rebase
|
||||
- name: remove automerge label on CI failure
|
||||
conditions:
|
||||
- label=automerge
|
||||
- "#status-failure!=0"
|
||||
actions:
|
||||
label:
|
||||
remove:
|
||||
- automerge
|
||||
comment:
|
||||
message: automerge label removed due to a CI failure
|
||||
- name: remove outdated reviews
|
||||
conditions:
|
||||
- base=master
|
||||
|
@ -18,6 +18,8 @@ branches:
|
||||
- master
|
||||
- /^v\d+\.\d+/
|
||||
|
||||
if: type IN (api, cron) OR tag IS present
|
||||
|
||||
notifications:
|
||||
slack:
|
||||
on_success: change
|
||||
|
807
Cargo.lock
generated
807
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,10 +10,10 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
log = "0.4.6"
|
||||
rayon = "1.3.0"
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-measure = { path = "../measure", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-measure = { path = "../measure", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.0"
|
||||
crossbeam-channel = "0.4"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-archiver-lib"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Archiver Library"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.3"
|
||||
log = "0.4.8"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.24" }
|
||||
thiserror = "1.0"
|
||||
serde = "1.0.105"
|
||||
serde_json = "1.0.48"
|
||||
serde_derive = "1.0.103"
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.13" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.13" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.24" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.24" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.2"
|
||||
|
@ -699,7 +699,6 @@ impl Archiver {
|
||||
.send::<u64>(
|
||||
RpcRequest::GetSlotsPerSegment,
|
||||
serde_json::json!([client_commitment]),
|
||||
0,
|
||||
)
|
||||
.unwrap())
|
||||
} else {
|
||||
@ -746,11 +745,7 @@ impl Archiver {
|
||||
let RpcStorageTurn {
|
||||
blockhash: storage_blockhash,
|
||||
slot: turn_slot,
|
||||
} = rpc_client.send(
|
||||
RpcRequest::GetStorageTurn,
|
||||
serde_json::value::Value::Null,
|
||||
0,
|
||||
)?;
|
||||
} = rpc_client.send(RpcRequest::GetStorageTurn, serde_json::value::Value::Null)?;
|
||||
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-archiver-utils"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Archiver Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,12 +11,12 @@ edition = "2018"
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
rand = "0.7.0"
|
||||
solana-chacha = { path = "../chacha", version = "1.1.13" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.24" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.2"
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-archiver"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,13 +10,13 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
console = "0.10.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@ -2,24 +2,26 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.6"
|
||||
rayon = "1.3.0"
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-measure = { path = "../measure", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
log = "0.4.6"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.3.0"
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-measure = { path = "../measure", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-version = { path = "../version", version = "1.1.24" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,3 +1,4 @@
|
||||
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||
use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
@ -64,15 +65,22 @@ fn check_txs(
|
||||
no_bank
|
||||
}
|
||||
|
||||
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
|
||||
fn make_accounts_txs(
|
||||
total_num_transactions: usize,
|
||||
hash: Hash,
|
||||
same_payer: bool,
|
||||
) -> Vec<Transaction> {
|
||||
let to_pubkey = Pubkey::new_rand();
|
||||
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
|
||||
(0..txes)
|
||||
let payer_key = Keypair::new();
|
||||
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
|
||||
(0..total_num_transactions)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let mut new = dummy.clone();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
new.message.account_keys[0] = Pubkey::new_rand();
|
||||
if !same_payer {
|
||||
new.message.account_keys[0] = Pubkey::new_rand();
|
||||
}
|
||||
new.message.account_keys[1] = Pubkey::new_rand();
|
||||
new.signatures = vec![Signature::new(&sig[0..64])];
|
||||
new
|
||||
@ -96,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
|
||||
bytes[0] as usize | (bytes[1] as usize) << 8
|
||||
}
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
let num_threads = BankingStage::num_threads() as usize;
|
||||
|
||||
let matches = App::new(crate_name!())
|
||||
.about(crate_description!())
|
||||
.version(solana_version::version!())
|
||||
.arg(
|
||||
Arg::with_name("num_chunks")
|
||||
.long("num-chunks")
|
||||
.takes_value(true)
|
||||
.value_name("SIZE")
|
||||
.help("Number of transaction chunks."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("packets_per_chunk")
|
||||
.long("packets-per-chunk")
|
||||
.takes_value(true)
|
||||
.value_name("SIZE")
|
||||
.help("Packets per chunk"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("skip_sanity")
|
||||
.long("skip-sanity")
|
||||
.takes_value(false)
|
||||
.help("Skip transaction sanity execution"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("same_payer")
|
||||
.long("same-payer")
|
||||
.takes_value(false)
|
||||
.help("Use the same payer for transfers"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("iterations")
|
||||
.long("iterations")
|
||||
.takes_value(true)
|
||||
.help("Number of iterations"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num_threads")
|
||||
.long("num-threads")
|
||||
.takes_value(true)
|
||||
.help("Number of iterations"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let num_threads =
|
||||
value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
|
||||
// a multiple of packet chunk duplicates to avoid races
|
||||
const CHUNKS: usize = 8 * 2;
|
||||
const PACKETS_PER_BATCH: usize = 192;
|
||||
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
|
||||
let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
|
||||
let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
|
||||
let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);
|
||||
|
||||
let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
|
||||
let mint_total = 1_000_000_000_000;
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -116,34 +172,44 @@ fn main() {
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
let mut bank = bank_forks.working_bank();
|
||||
|
||||
info!("threads: {} txs: {}", num_threads, txes);
|
||||
info!("threads: {} txs: {}", num_threads, total_num_transactions);
|
||||
|
||||
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
|
||||
let same_payer = matches.is_present("same_payer");
|
||||
let mut transactions =
|
||||
make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);
|
||||
|
||||
// fund all the accounts
|
||||
transactions.iter().for_each(|tx| {
|
||||
let fund = system_transaction::transfer(
|
||||
let mut fund = system_transaction::transfer(
|
||||
&mint_keypair,
|
||||
&tx.message.account_keys[0],
|
||||
mint_total / txes as u64,
|
||||
mint_total / total_num_transactions as u64,
|
||||
genesis_config.hash(),
|
||||
);
|
||||
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
fund.signatures = vec![Signature::new(&sig[0..64])];
|
||||
let x = bank.process_transaction(&fund);
|
||||
x.unwrap();
|
||||
});
|
||||
//sanity check, make sure all the transactions can execute sequentially
|
||||
transactions.iter().for_each(|tx| {
|
||||
let res = bank.process_transaction(&tx);
|
||||
assert!(res.is_ok(), "sanity test transactions");
|
||||
});
|
||||
bank.clear_signatures();
|
||||
//sanity check, make sure all the transactions can execute in parallel
|
||||
let res = bank.process_transactions(&transactions);
|
||||
for r in res {
|
||||
assert!(r.is_ok(), "sanity parallel execution");
|
||||
|
||||
let skip_sanity = matches.is_present("skip_sanity");
|
||||
if !skip_sanity {
|
||||
//sanity check, make sure all the transactions can execute sequentially
|
||||
transactions.iter().for_each(|tx| {
|
||||
let res = bank.process_transaction(&tx);
|
||||
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
|
||||
});
|
||||
bank.clear_signatures();
|
||||
//sanity check, make sure all the transactions can execute in parallel
|
||||
let res = bank.process_transactions(&transactions);
|
||||
for r in res {
|
||||
assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
|
||||
}
|
||||
bank.clear_signatures();
|
||||
}
|
||||
bank.clear_signatures();
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
|
||||
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(
|
||||
@ -162,7 +228,7 @@ fn main() {
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
|
||||
let chunk_len = verified.len() / CHUNKS;
|
||||
let chunk_len = verified.len() / num_chunks;
|
||||
let mut start = 0;
|
||||
|
||||
// This is so that the signal_receiver does not go out of scope after the closure.
|
||||
@ -171,17 +237,17 @@ fn main() {
|
||||
let signal_receiver = Arc::new(signal_receiver);
|
||||
let mut total_us = 0;
|
||||
let mut tx_total_us = 0;
|
||||
let base_tx_count = bank.transaction_count();
|
||||
let mut txs_processed = 0;
|
||||
let mut root = 1;
|
||||
let collector = Pubkey::new_rand();
|
||||
const ITERS: usize = 1_000;
|
||||
let config = Config {
|
||||
packets_per_batch: PACKETS_PER_BATCH,
|
||||
packets_per_batch: packets_per_chunk,
|
||||
chunk_len,
|
||||
num_threads,
|
||||
};
|
||||
let mut total_sent = 0;
|
||||
for _ in 0..ITERS {
|
||||
for _ in 0..iterations {
|
||||
let now = Instant::now();
|
||||
let mut sent = 0;
|
||||
|
||||
@ -222,7 +288,11 @@ fn main() {
|
||||
sleep(Duration::from_millis(5));
|
||||
}
|
||||
}
|
||||
if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
|
||||
if check_txs(
|
||||
&signal_receiver,
|
||||
total_num_transactions / num_chunks,
|
||||
&poh_recorder,
|
||||
) {
|
||||
debug!(
|
||||
"resetting bank {} tx count: {} txs_proc: {}",
|
||||
bank.slot(),
|
||||
@ -274,7 +344,7 @@ fn main() {
|
||||
debug!(
|
||||
"time: {} us checked: {} sent: {}",
|
||||
duration_as_us(&now.elapsed()),
|
||||
txes / CHUNKS,
|
||||
total_num_transactions / num_chunks,
|
||||
sent,
|
||||
);
|
||||
total_sent += sent;
|
||||
@ -285,20 +355,26 @@ fn main() {
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
tx.signatures[0] = Signature::new(&sig[0..64]);
|
||||
}
|
||||
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
|
||||
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
}
|
||||
|
||||
start += chunk_len;
|
||||
start %= verified.len();
|
||||
}
|
||||
let txs_processed = bank_forks.working_bank().transaction_count();
|
||||
debug!("processed: {} base: {}", txs_processed, base_tx_count);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_total', 'median': '{}'}}",
|
||||
"{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
|
||||
);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
|
||||
"{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
|
||||
);
|
||||
eprintln!(
|
||||
"{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
|
||||
(1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
|
||||
);
|
||||
|
||||
drop(verified_sender);
|
||||
drop(vote_sender);
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -18,20 +18,20 @@ rand = "0.7.0"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.13" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.24" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.13" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.24" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,17 +2,17 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -14,24 +14,24 @@ log = "0.4.8"
|
||||
rayon = "1.3.0"
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-genesis = { path = "../genesis", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.24" }
|
||||
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-measure = { path = "../measure", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
solana-measure = { path = "../measure", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
serial_test_derive = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.13" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.1.24" }
|
||||
|
||||
#[features]
|
||||
#move = ["solana-librapay", "solana-move-loader-program"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-cuda"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Chacha Cuda APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,12 +10,12 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.13" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.24" }
|
||||
solana-chacha = { path = "../chacha", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-sys"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana chacha-sys"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Chacha APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,11 +12,11 @@ edition = "2018"
|
||||
log = "0.4.8"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
@ -5,6 +5,9 @@
|
||||
# Release tags use buildkite-release.yml instead
|
||||
|
||||
steps:
|
||||
- command: "ci/test-sanity.sh"
|
||||
name: "sanity"
|
||||
timeout_in_minutes: 5
|
||||
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
|
||||
name: "checks"
|
||||
timeout_in_minutes: 20
|
||||
|
@ -2,8 +2,10 @@
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source multinode-demo/common.sh
|
||||
|
||||
rm -f config/run/init-completed
|
||||
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
|
||||
|
||||
timeout 15 ./run.sh &
|
||||
pid=$!
|
||||
@ -17,6 +19,16 @@ while [[ ! -f config/run/init-completed ]]; do
|
||||
fi
|
||||
done
|
||||
|
||||
snapshot_slot=1
|
||||
|
||||
# wait a bit longer than snapshot_slot
|
||||
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
|
||||
sleep 1
|
||||
done
|
||||
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
|
||||
|
||||
wait $pid
|
||||
|
||||
$solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" config/snapshot-ledger
|
||||
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
|
||||
$solana_ledger_tool verify --ledger config/snapshot-ledger
|
||||
|
@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
# Look for failed mergify.io backports
|
||||
_ git show HEAD --check --oneline
|
||||
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
|
||||
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
|
||||
@ -23,10 +20,8 @@ _ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnin
|
||||
|
||||
_ cargo +"$rust_stable" audit --version
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
|
||||
_ ci/nits.sh
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ docs/build.sh
|
||||
_ ci/check-ssh-keys.sh
|
||||
|
||||
{
|
||||
cd programs/bpf
|
||||
|
27
ci/test-sanity.sh
Executable file
27
ci/test-sanity.sh
Executable file
@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
source ci/_
|
||||
|
||||
(
|
||||
echo --- git diff --check
|
||||
set -x
|
||||
# Look for failed mergify.io backports by searching leftover conflict markers
|
||||
# Also check for any trailing whitespaces!
|
||||
if [[ -n $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
|
||||
base_branch=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
|
||||
else
|
||||
base_branch=$BUILDKITE_BRANCH
|
||||
fi
|
||||
git fetch origin "$base_branch"
|
||||
git diff "$(git merge-base HEAD "origin/$base_branch")..HEAD" --check --oneline
|
||||
)
|
||||
|
||||
echo
|
||||
|
||||
_ ci/nits.sh
|
||||
_ ci/check-ssh-keys.sh
|
||||
|
||||
echo --- ok
|
@ -39,9 +39,9 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
rm -rf target/xargo # Issue #3105
|
||||
|
||||
# Limit compiler jobs to reduce memory usage
|
||||
# on machines with 1gb/thread of memory
|
||||
# on machines with 2gb/thread of memory
|
||||
NPROC=$(nproc)
|
||||
NPROC=$((NPROC>16 ? 16 : NPROC))
|
||||
NPROC=$((NPROC>14 ? 14 : NPROC))
|
||||
|
||||
echo "Executing $testName"
|
||||
case $testName in
|
||||
|
@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z $CI_REPO_SLUG ]]; then
|
||||
echo Error: CI_REPO_SLUG not defined
|
||||
exit 1
|
||||
fi
|
||||
# Force CI_REPO_SLUG since sometimes
|
||||
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
|
||||
# artifact upload to fail
|
||||
CI_REPO_SLUG=solana-labs/solana
|
||||
#if [[ -z $CI_REPO_SLUG ]]; then
|
||||
# echo Error: CI_REPO_SLUG not defined
|
||||
# exit 1
|
||||
#fi
|
||||
|
||||
releaseId=$( \
|
||||
curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,8 +11,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
thiserror = "1.0.11"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.0"
|
||||
|
@ -6,50 +6,86 @@ use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Signature},
|
||||
};
|
||||
use std::fmt::Display;
|
||||
use std::str::FromStr;
|
||||
|
||||
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
U: FromStr,
|
||||
U::Err: Display,
|
||||
{
|
||||
string
|
||||
.as_ref()
|
||||
.parse::<U>()
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("error parsing '{}': {}", string, err))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as type T.
|
||||
// Takes a String to avoid second type parameter when used as a clap validator
|
||||
pub fn is_parsable<T>(string: String) -> Result<(), String>
|
||||
where
|
||||
T: FromStr,
|
||||
T::Err: Display,
|
||||
{
|
||||
is_parsable_generic::<T, String>(string)
|
||||
}
|
||||
|
||||
// Return an error if a pubkey cannot be parsed.
|
||||
pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
match string.parse::<Pubkey>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
pub fn is_pubkey<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Pubkey, _>(string)
|
||||
}
|
||||
|
||||
// Return an error if a hash cannot be parsed.
|
||||
pub fn is_hash(string: String) -> Result<(), String> {
|
||||
match string.parse::<Hash>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{}", err)),
|
||||
}
|
||||
pub fn is_hash<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Hash, _>(string)
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed.
|
||||
pub fn is_keypair(string: String) -> Result<(), String> {
|
||||
read_keypair_file(&string)
|
||||
pub fn is_keypair<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
read_keypair_file(string.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed
|
||||
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
|
||||
if string.as_str() == ASK_KEYWORD {
|
||||
pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
if string.as_ref() == ASK_KEYWORD {
|
||||
return Ok(());
|
||||
}
|
||||
read_keypair_file(&string)
|
||||
read_keypair_file(string.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|err| format!("{}", err))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey string or keypair file location
|
||||
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
|
||||
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
|
||||
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
|
||||
// produce a pubkey()
|
||||
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
|
||||
match parse_keypair_path(&string) {
|
||||
pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
match parse_keypair_path(string.as_ref()) {
|
||||
KeypairUrl::Filepath(path) => is_keypair(path),
|
||||
_ => Ok(()),
|
||||
}
|
||||
@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
|
||||
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
|
||||
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
|
||||
// also provided and correct happens in parsing, not in validation.
|
||||
pub fn is_valid_signer(string: String) -> Result<(), String> {
|
||||
pub fn is_valid_signer<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_valid_pubkey(string)
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey=signature string
|
||||
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
let mut signer = string.split('=');
|
||||
pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
let mut signer = string.as_ref().split('=');
|
||||
match Pubkey::from_str(
|
||||
signer
|
||||
.next()
|
||||
@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
}
|
||||
|
||||
// Return an error if a url cannot be parsed.
|
||||
pub fn is_url(string: String) -> Result<(), String> {
|
||||
match url::Url::parse(&string) {
|
||||
pub fn is_url<T>(string: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
match url::Url::parse(string.as_ref()) {
|
||||
Ok(url) => {
|
||||
if url.has_host() {
|
||||
Ok(())
|
||||
@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_slot(slot: String) -> Result<(), String> {
|
||||
slot.parse::<Slot>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
pub fn is_slot<T>(slot: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Slot, _>(slot)
|
||||
}
|
||||
|
||||
pub fn is_port(port: String) -> Result<(), String> {
|
||||
port.parse::<u16>()
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
pub fn is_port<T>(port: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<u16, _>(port)
|
||||
}
|
||||
|
||||
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
percentage
|
||||
.as_ref()
|
||||
.parse::<u8>()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_amount(amount: String) -> Result<(), String> {
|
||||
if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
|
||||
pub fn is_amount<T>(amount: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!(
|
||||
@ -147,14 +201,20 @@ pub fn is_amount(amount: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
|
||||
DateTime::parse_from_rfc3339(&value)
|
||||
pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
DateTime::parse_from_rfc3339(value.as_ref())
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{}", e))
|
||||
}
|
||||
|
||||
pub fn is_derivation(value: String) -> Result<(), String> {
|
||||
let value = value.replace("'", "");
|
||||
pub fn is_derivation<T>(value: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
let value = value.as_ref().replace("'", "");
|
||||
let mut parts = value.split('/');
|
||||
let account = parts.next().unwrap();
|
||||
account
|
||||
@ -186,14 +246,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_is_derivation() {
|
||||
assert_eq!(is_derivation("2".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("65537".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0/2".to_string()), Ok(()));
|
||||
assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
|
||||
assert!(is_derivation("a".to_string()).is_err());
|
||||
assert!(is_derivation("4294967296".to_string()).is_err());
|
||||
assert!(is_derivation("a/b".to_string()).is_err());
|
||||
assert!(is_derivation("0/4294967296".to_string()).is_err());
|
||||
assert_eq!(is_derivation("2"), Ok(()));
|
||||
assert_eq!(is_derivation("0"), Ok(()));
|
||||
assert_eq!(is_derivation("65537"), Ok(()));
|
||||
assert_eq!(is_derivation("0/2"), Ok(()));
|
||||
assert_eq!(is_derivation("0'/2'"), Ok(()));
|
||||
assert!(is_derivation("a").is_err());
|
||||
assert!(is_derivation("4294967296").is_err());
|
||||
assert!(is_derivation("a/b").is_err());
|
||||
assert!(is_derivation("0/4294967296").is_err());
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@ -1,6 +1,6 @@
|
||||
// Wallet settings that can be configured for long-term use
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::io;
|
||||
use std::{collections::HashMap, io};
|
||||
use url::Url;
|
||||
|
||||
lazy_static! {
|
||||
@ -17,6 +17,8 @@ pub struct Config {
|
||||
pub json_rpc_url: String,
|
||||
pub websocket_url: String,
|
||||
pub keypair_path: String,
|
||||
#[serde(default)]
|
||||
pub address_labels: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@ -36,6 +38,7 @@ impl Default for Config {
|
||||
json_rpc_url,
|
||||
websocket_url,
|
||||
keypair_path,
|
||||
address_labels: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.1.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.13" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.24" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.1.24" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.24" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.24" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.24" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.24" }
|
||||
thiserror = "1.0.13"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.24" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
157
cli/src/cli.rs
157
cli/src/cli.rs
@ -1,7 +1,7 @@
|
||||
use crate::{
|
||||
cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
|
||||
cluster_query::*,
|
||||
display::println_name_value,
|
||||
display::{new_spinner_progress_bar, println_name_value, println_transaction},
|
||||
nonce::{self, *},
|
||||
offline::{blockhash_query::BlockhashQuery, *},
|
||||
stake::*,
|
||||
@ -26,7 +26,7 @@ use solana_clap_utils::{
|
||||
use solana_client::{
|
||||
client_error::{ClientErrorKind, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::RpcLargestAccountsFilter,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
|
||||
rpc_response::{RpcAccount, RpcKeyedAccount},
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
@ -36,7 +36,7 @@ use solana_faucet::faucet_mock::request_airdrop_transaction;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
clock::{Epoch, Slot},
|
||||
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
|
||||
commitment_config::CommitmentConfig,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
@ -47,6 +47,7 @@ use solana_sdk::{
|
||||
program_utils::DecodeError,
|
||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||
signature::{Keypair, Signature, Signer, SignerError},
|
||||
signers::Signers,
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@ -244,8 +245,8 @@ pub enum CliCommand {
|
||||
},
|
||||
TransactionHistory {
|
||||
address: Pubkey,
|
||||
end_slot: Option<Slot>, // None == latest slot
|
||||
slot_limit: u64,
|
||||
end_slot: Option<Slot>, // None == latest slot
|
||||
slot_limit: Option<u64>, // None == search full history
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
@ -381,6 +382,7 @@ pub enum CliCommand {
|
||||
},
|
||||
// Vote Commands
|
||||
CreateVoteAccount {
|
||||
vote_account: SignerIndex,
|
||||
seed: Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: Option<Pubkey>,
|
||||
@ -406,6 +408,12 @@ pub enum CliCommand {
|
||||
VoteUpdateValidator {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
VoteUpdateCommission {
|
||||
vote_account_pubkey: Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
// Wallet Commands
|
||||
Address,
|
||||
@ -727,6 +735,9 @@ pub fn parse_command(
|
||||
("vote-update-validator", Some(matches)) => {
|
||||
parse_vote_update_validator(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("vote-update-commission", Some(matches)) => {
|
||||
parse_vote_update_commission(matches, default_signer_path, wallet_manager)
|
||||
}
|
||||
("vote-authorize-voter", Some(matches)) => parse_vote_authorize(
|
||||
matches,
|
||||
default_signer_path,
|
||||
@ -1231,7 +1242,7 @@ fn process_confirm(
|
||||
"\nTransaction executed in slot {}:",
|
||||
confirmed_transaction.slot
|
||||
);
|
||||
crate::display::println_transaction(
|
||||
println_transaction(
|
||||
&confirmed_transaction
|
||||
.transaction
|
||||
.transaction
|
||||
@ -1261,7 +1272,7 @@ fn process_confirm(
|
||||
}
|
||||
|
||||
fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
|
||||
crate::display::println_transaction(transaction, &None, "");
|
||||
println_transaction(transaction, &None, "");
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@ -1299,6 +1310,103 @@ fn process_show_account(
|
||||
Ok(account_string)
|
||||
}
|
||||
|
||||
fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
rpc_client: &RpcClient,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
// Send all transactions
|
||||
let mut transactions_signatures = vec![];
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if cfg!(not(test)) {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let signature = rpc_client
|
||||
.send_transaction_with_config(
|
||||
&transaction,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
transactions_signatures.push((transaction, signature));
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions sent",
|
||||
transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - transactions_signatures.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
|
||||
transactions_signatures = transactions_signatures
|
||||
.into_iter()
|
||||
.filter(|(_transaction, signature)| {
|
||||
if let Some(signature) = signature {
|
||||
if let Ok(status) = rpc_client.get_signature_status(&signature) {
|
||||
if rpc_client
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(0)
|
||||
> 1
|
||||
{
|
||||
return false;
|
||||
} else {
|
||||
return match status {
|
||||
None => true,
|
||||
Some(result) => result.is_err(),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
})
|
||||
.collect();
|
||||
|
||||
if transactions_signatures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err("Transactions failed".into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator) = rpc_client
|
||||
.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
|
||||
transactions = vec![];
|
||||
for (mut transaction, _) in transactions_signatures.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@ -1366,11 +1474,18 @@ fn process_deploy(
|
||||
})?;
|
||||
|
||||
trace!("Writing program data");
|
||||
rpc_client.send_and_confirm_transactions(write_transactions, &signers)?;
|
||||
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|
||||
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
|
||||
)?;
|
||||
|
||||
trace!("Finalizing program account");
|
||||
rpc_client
|
||||
.send_and_confirm_transaction_with_spinner(&finalize_tx)
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&finalize_tx,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
CliError::DynamicProgramError(format!("Program finalize transaction failed: {}", e))
|
||||
})?;
|
||||
@ -1584,11 +1699,6 @@ fn process_transfer(
|
||||
) -> ProcessResult {
|
||||
let from = config.signers[from];
|
||||
|
||||
check_unique_pubkeys(
|
||||
(&from.pubkey(), "cli keypair".to_string()),
|
||||
(to, "to".to_string()),
|
||||
)?;
|
||||
|
||||
let (recent_blockhash, fee_calculator) =
|
||||
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
|
||||
let ixs = vec![system_instruction::transfer(&from.pubkey(), to, lamports)];
|
||||
@ -2059,6 +2169,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
|
||||
// Create vote account
|
||||
CliCommand::CreateVoteAccount {
|
||||
vote_account,
|
||||
seed,
|
||||
identity_account,
|
||||
authorized_voter,
|
||||
@ -2067,6 +2178,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_create_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
*vote_account,
|
||||
seed,
|
||||
*identity_account,
|
||||
authorized_voter,
|
||||
@ -2111,11 +2223,24 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account,
|
||||
withdraw_authority,
|
||||
} => process_vote_update_validator(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*new_identity_account,
|
||||
*withdraw_authority,
|
||||
),
|
||||
CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority,
|
||||
} => process_vote_update_commission(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*commission,
|
||||
*withdraw_authority,
|
||||
),
|
||||
|
||||
// Wallet Commands
|
||||
@ -3361,6 +3486,7 @@ mod tests {
|
||||
let bob_pubkey = bob_keypair.pubkey();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@ -3386,6 +3512,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
@ -3586,6 +3713,7 @@ mod tests {
|
||||
let bob_keypair = Keypair::new();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@ -3605,6 +3733,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 1,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@ -1,11 +1,10 @@
|
||||
use crate::{
|
||||
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
|
||||
cli_output::*,
|
||||
display::println_name_value,
|
||||
display::{new_spinner_progress_bar, println_name_value},
|
||||
};
|
||||
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
@ -457,8 +456,7 @@ pub fn parse_transaction_history(
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
|
||||
let end_slot = value_t!(matches, "end_slot", Slot).ok();
|
||||
let slot_limit = value_t!(matches, "limit", u64)
|
||||
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
|
||||
let slot_limit = value_t!(matches, "limit", u64).ok();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TransactionHistory {
|
||||
@ -470,15 +468,6 @@ pub fn parse_transaction_history(
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new process bar for processing that will take an unknown amount of time
|
||||
fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
||||
|
||||
pub fn process_catchup(
|
||||
rpc_client: &RpcClient,
|
||||
node_pubkey: &Pubkey,
|
||||
@ -1263,7 +1252,7 @@ pub fn process_transaction_history(
|
||||
rpc_client: &RpcClient,
|
||||
address: &Pubkey,
|
||||
end_slot: Option<Slot>, // None == use latest slot
|
||||
slot_limit: u64,
|
||||
slot_limit: Option<u64>,
|
||||
) -> ProcessResult {
|
||||
let end_slot = {
|
||||
if let Some(end_slot) = end_slot {
|
||||
@ -1272,18 +1261,30 @@ pub fn process_transaction_history(
|
||||
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
|
||||
}
|
||||
};
|
||||
let start_slot = end_slot.saturating_sub(slot_limit);
|
||||
let mut start_slot = match slot_limit {
|
||||
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
|
||||
None => rpc_client.minimum_ledger_slot()?,
|
||||
};
|
||||
|
||||
println!(
|
||||
"Transactions affecting {} within slots [{},{}]",
|
||||
address, start_slot, end_slot
|
||||
);
|
||||
let signatures =
|
||||
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
|
||||
let mut transaction_count = 0;
|
||||
while start_slot < end_slot {
|
||||
let signatures = rpc_client.get_confirmed_signatures_for_address(
|
||||
address,
|
||||
start_slot,
|
||||
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
|
||||
)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
}
|
||||
transaction_count += signatures.len();
|
||||
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
|
||||
}
|
||||
Ok(format!("{} transactions found", signatures.len(),))
|
||||
Ok(format!("{} transactions found", transaction_count))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@ -1,5 +1,6 @@
|
||||
use crate::cli::SettingType;
|
||||
use console::style;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_sdk::{
|
||||
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
|
||||
transaction::Transaction,
|
||||
@ -200,3 +201,12 @@ pub fn println_transaction(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new process bar for processing that will take an unknown amount of time
|
||||
pub fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
||||
|
128
cli/src/vote.rs
128
cli/src/vote.rs
@ -174,6 +174,37 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-commission")
|
||||
.about("Update the vote account's commission")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Vote account to update"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("commission")
|
||||
.index(2)
|
||||
.value_name("PERCENTAGE")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_percentage)
|
||||
.help("The new commission")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-account")
|
||||
.about("Show the contents of a vote account")
|
||||
@ -242,7 +273,7 @@ pub fn parse_create_vote_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let (identity_account, identity_pubkey) =
|
||||
signer_of(matches, "identity_account", wallet_manager)?;
|
||||
@ -260,6 +291,7 @@ pub fn parse_create_vote_account(
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
|
||||
seed,
|
||||
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
|
||||
authorized_voter,
|
||||
@ -309,7 +341,8 @@ pub fn parse_vote_update_validator(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (new_identity_account, new_identity_pubkey) =
|
||||
signer_of(matches, "new_identity_account", wallet_manager)?;
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
@ -323,6 +356,36 @@ pub fn parse_vote_update_validator(
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_vote_update_commission(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let commission = value_t_or_exit!(matches, "commission", u8);
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, authorized_withdrawer],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
)?;
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@ -381,13 +444,14 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account: SignerIndex,
|
||||
seed: &Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
) -> ProcessResult {
|
||||
let vote_account = config.signers[1];
|
||||
let vote_account = config.signers[vote_account];
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
let vote_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
@ -506,8 +570,9 @@ pub fn process_vote_update_validator(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let new_identity_account = config.signers[new_identity_account];
|
||||
let new_identity_pubkey = new_identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
@ -534,6 +599,34 @@ pub fn process_vote_update_validator(
|
||||
log_instruction_custom_error::<VoteError>(result, &config)
|
||||
}
|
||||
|
||||
pub fn process_vote_update_commission(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_commission(
|
||||
vote_account_pubkey,
|
||||
&authorized_withdrawer.pubkey(),
|
||||
commission,
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.signers[0].pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<VoteError>(result, &config)
|
||||
}
|
||||
|
||||
fn get_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
@ -729,6 +822,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -757,6 +851,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -789,6 +884,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(authed),
|
||||
@ -819,6 +915,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@ -846,6 +943,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@ -855,6 +953,28 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_update_commission = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-update-commission",
|
||||
&pubkey_string,
|
||||
"42",
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_update_commission, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey: pubkey,
|
||||
commission: 42,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
Box::new(read_keypair_file(&keypair_file).unwrap()),
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawFromVoteAccount subcommand
|
||||
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
|
@ -68,6 +68,7 @@ fn test_stake_delegation_force() {
|
||||
let vote_keypair = Keypair::new();
|
||||
config.signers = vec![&default_signer, &vote_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
|
@ -57,6 +57,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
let vote_account_pubkey = vote_account_keypair.pubkey();
|
||||
config.signers = vec![&default_signer, &vote_account_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
@ -111,6 +112,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@ -31,7 +31,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,10 +1,5 @@
|
||||
use crate::{client_error::Result, rpc_request::RpcRequest};
|
||||
|
||||
pub(crate) trait GenericRpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: RpcRequest,
|
||||
params: serde_json::Value,
|
||||
retries: usize,
|
||||
) -> Result<serde_json::Value>;
|
||||
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value>;
|
||||
}
|
||||
|
@ -38,12 +38,7 @@ impl MockRpcClientRequest {
|
||||
}
|
||||
|
||||
impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: RpcRequest,
|
||||
params: serde_json::Value,
|
||||
_retries: usize,
|
||||
) -> Result<serde_json::Value> {
|
||||
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
|
||||
if let Some(value) = self.mocks.write().unwrap().remove(&request) {
|
||||
return Ok(value);
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ use crate::{
|
||||
generic_rpc_client_request::GenericRpcClientRequest,
|
||||
mock_rpc_client_request::{MockRpcClientRequest, Mocks},
|
||||
rpc_client_request::RpcClientRequest,
|
||||
rpc_config::RpcLargestAccountsConfig,
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_response::*,
|
||||
};
|
||||
@ -21,7 +21,6 @@ use solana_sdk::{
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
hash::Hash,
|
||||
inflation::Inflation,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
signers::Signers,
|
||||
@ -32,7 +31,6 @@ use solana_transaction_status::{
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
error,
|
||||
net::SocketAddr,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
@ -96,10 +94,20 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
|
||||
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
|
||||
}
|
||||
|
||||
pub fn send_transaction_with_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
|
||||
let signature_base58_str: String =
|
||||
self.send(RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
|
||||
let signature_base58_str: String = self.send(
|
||||
RpcRequest::SendTransaction,
|
||||
json!([serialized_encoded, config]),
|
||||
)?;
|
||||
|
||||
let signature = signature_base58_str
|
||||
.parse::<Signature>()
|
||||
@ -119,6 +127,18 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn simulate_transaction(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
sig_verify: bool,
|
||||
) -> RpcResult<RpcSimulateTransactionResult> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
self.send(
|
||||
RpcRequest::SimulateTransaction,
|
||||
json!([serialized_encoded, { "sigVerify": sig_verify }]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_signature_status(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
@ -131,7 +151,7 @@ impl RpcClient {
|
||||
signatures: &[Signature],
|
||||
) -> RpcResult<Vec<Option<TransactionStatus>>> {
|
||||
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
|
||||
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]), 5)
|
||||
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]))
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment(
|
||||
@ -142,7 +162,6 @@ impl RpcClient {
|
||||
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
|
||||
RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
@ -161,7 +180,6 @@ impl RpcClient {
|
||||
json!([[signature.to_string()], {
|
||||
"searchTransactionHistory": search_transaction_history
|
||||
}]),
|
||||
5,
|
||||
)?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
@ -177,14 +195,14 @@ impl RpcClient {
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Slot> {
|
||||
self.send(RpcRequest::GetSlot, json!([commitment_config]), 0)
|
||||
self.send(RpcRequest::GetSlot, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn supply_with_commitment(
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<RpcSupply> {
|
||||
self.send(RpcRequest::GetSupply, json!([commitment_config]), 0)
|
||||
self.send(RpcRequest::GetSupply, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn total_supply(&self) -> ClientResult<u64> {
|
||||
@ -195,14 +213,14 @@ impl RpcClient {
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<u64> {
|
||||
self.send(RpcRequest::GetTotalSupply, json!([commitment_config]), 0)
|
||||
self.send(RpcRequest::GetTotalSupply, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn get_largest_accounts_with_config(
|
||||
&self,
|
||||
config: RpcLargestAccountsConfig,
|
||||
) -> RpcResult<Vec<RpcAccountBalance>> {
|
||||
self.send(RpcRequest::GetLargestAccounts, json!([config]), 0)
|
||||
self.send(RpcRequest::GetLargestAccounts, json!([config]))
|
||||
}
|
||||
|
||||
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
|
||||
@ -213,11 +231,11 @@ impl RpcClient {
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<RpcVoteAccountStatus> {
|
||||
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]), 0)
|
||||
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
|
||||
self.send(RpcRequest::GetClusterNodes, Value::Null, 0)
|
||||
self.send(RpcRequest::GetClusterNodes, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
@ -229,7 +247,7 @@ impl RpcClient {
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
|
||||
}
|
||||
|
||||
pub fn get_confirmed_blocks(
|
||||
@ -240,7 +258,6 @@ impl RpcClient {
|
||||
self.send(
|
||||
RpcRequest::GetConfirmedBlocks,
|
||||
json!([start_slot, end_slot]),
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
@ -253,7 +270,6 @@ impl RpcClient {
|
||||
let signatures_base58_str: Vec<String> = self.send(
|
||||
RpcRequest::GetConfirmedSignaturesForAddress,
|
||||
json!([address.to_string(), start_slot, end_slot]),
|
||||
0,
|
||||
)?;
|
||||
|
||||
let mut signatures = vec![];
|
||||
@ -275,13 +291,12 @@ impl RpcClient {
|
||||
self.send(
|
||||
RpcRequest::GetConfirmedTransaction,
|
||||
json!([signature.to_string(), encoding]),
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
|
||||
let request = RpcRequest::GetBlockTime;
|
||||
let response = self.client.send(request, json!([slot]), 0);
|
||||
let response = self.client.send(request, json!([slot]));
|
||||
|
||||
response
|
||||
.map(|result_json| {
|
||||
@ -304,7 +319,7 @@ impl RpcClient {
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<RpcEpochInfo> {
|
||||
self.send(RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
|
||||
self.send(RpcRequest::GetEpochInfo, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn get_leader_schedule(
|
||||
@ -322,16 +337,15 @@ impl RpcClient {
|
||||
self.send(
|
||||
RpcRequest::GetLeaderSchedule,
|
||||
json!([slot, commitment_config]),
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_epoch_schedule(&self) -> ClientResult<EpochSchedule> {
|
||||
self.send(RpcRequest::GetEpochSchedule, Value::Null, 0)
|
||||
self.send(RpcRequest::GetEpochSchedule, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_identity(&self) -> ClientResult<Pubkey> {
|
||||
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null, 0)?;
|
||||
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null)?;
|
||||
|
||||
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
@ -341,16 +355,20 @@ impl RpcClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_inflation(&self) -> ClientResult<Inflation> {
|
||||
self.send(RpcRequest::GetInflation, Value::Null, 0)
|
||||
pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
|
||||
self.send(RpcRequest::GetInflationGovernor, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
|
||||
self.send(RpcRequest::GetInflationRate, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
|
||||
self.send(RpcRequest::GetVersion, Value::Null, 0)
|
||||
self.send(RpcRequest::GetVersion, Value::Null)
|
||||
}
|
||||
|
||||
pub fn minimum_ledger_slot(&self) -> ClientResult<Slot> {
|
||||
self.send(RpcRequest::MinimumLedgerSlot, Value::Null, 0)
|
||||
self.send(RpcRequest::MinimumLedgerSlot, Value::Null)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction(
|
||||
@ -398,74 +416,6 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transactions<T: Signers>(
|
||||
&self,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let mut send_retries = 5;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
// Send all transactions
|
||||
let mut transactions_signatures = vec![];
|
||||
for transaction in transactions {
|
||||
if cfg!(not(test)) {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let signature = self.send_transaction(&transaction).ok();
|
||||
transactions_signatures.push((transaction, signature))
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
|
||||
transactions_signatures = transactions_signatures
|
||||
.into_iter()
|
||||
.filter(|(_transaction, signature)| {
|
||||
if let Some(signature) = signature {
|
||||
if let Ok(status) = self.get_signature_status(&signature) {
|
||||
if status.is_none() {
|
||||
return false;
|
||||
}
|
||||
return status.unwrap().is_err();
|
||||
}
|
||||
}
|
||||
true
|
||||
})
|
||||
.collect();
|
||||
|
||||
if transactions_signatures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err(RpcError::ForUser("Transactions failed".to_string()).into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator) =
|
||||
self.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
|
||||
transactions = vec![];
|
||||
for (mut transaction, _) in transactions_signatures.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resign_transaction<T: Signers>(
|
||||
&self,
|
||||
tx: &mut Transaction,
|
||||
@ -477,15 +427,11 @@ impl RpcClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn retry_get_balance(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
retries: usize,
|
||||
) -> Result<Option<u64>, Box<dyn error::Error>> {
|
||||
pub fn retry_get_balance(&self, pubkey: &Pubkey, _retries: usize) -> ClientResult<Option<u64>> {
|
||||
let request = RpcRequest::GetBalance;
|
||||
let balance_json = self
|
||||
.client
|
||||
.send(request, json!([pubkey.to_string()]), retries)
|
||||
.send(request, json!([pubkey.to_string()]))
|
||||
.map_err(|err| err.into_with_request(request))?;
|
||||
|
||||
Ok(Some(
|
||||
@ -509,7 +455,6 @@ impl RpcClient {
|
||||
let response = self.client.send(
|
||||
RpcRequest::GetAccountInfo,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
0,
|
||||
);
|
||||
|
||||
response
|
||||
@ -546,7 +491,7 @@ impl RpcClient {
|
||||
let request = RpcRequest::GetMinimumBalanceForRentExemption;
|
||||
let minimum_balance_json = self
|
||||
.client
|
||||
.send(request, json!([data_len]), 0)
|
||||
.send(request, json!([data_len]))
|
||||
.map_err(|err| err.into_with_request(request))?;
|
||||
|
||||
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
|
||||
@ -574,16 +519,12 @@ impl RpcClient {
|
||||
self.send(
|
||||
RpcRequest::GetBalance,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let accounts: Vec<RpcKeyedAccount> = self.send(
|
||||
RpcRequest::GetProgramAccounts,
|
||||
json!([pubkey.to_string()]),
|
||||
0,
|
||||
)?;
|
||||
let accounts: Vec<RpcKeyedAccount> =
|
||||
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
@ -606,35 +547,50 @@ impl RpcClient {
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<u64> {
|
||||
self.send(
|
||||
RpcRequest::GetTransactionCount,
|
||||
json!([commitment_config]),
|
||||
0,
|
||||
)
|
||||
self.send(RpcRequest::GetTransactionCount, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
|
||||
Ok(self
|
||||
let (blockhash, fee_calculator, _last_valid_slot) = self
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::default())?
|
||||
.value)
|
||||
.value;
|
||||
Ok((blockhash, fee_calculator))
|
||||
}
|
||||
|
||||
pub fn get_recent_blockhash_with_commitment(
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<(Hash, FeeCalculator)> {
|
||||
let Response {
|
||||
) -> RpcResult<(Hash, FeeCalculator, Slot)> {
|
||||
let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
|
||||
context,
|
||||
value:
|
||||
RpcFees {
|
||||
blockhash,
|
||||
fee_calculator,
|
||||
last_valid_slot,
|
||||
},
|
||||
}) =
|
||||
self.send::<Response<RpcFees>>(RpcRequest::GetFees, json!([commitment_config]))
|
||||
{
|
||||
(context, blockhash, fee_calculator, last_valid_slot)
|
||||
} else if let Ok(Response {
|
||||
context,
|
||||
value:
|
||||
RpcBlockhashFeeCalculator {
|
||||
blockhash,
|
||||
fee_calculator,
|
||||
},
|
||||
} = self.send::<Response<RpcBlockhashFeeCalculator>>(
|
||||
}) = self.send::<Response<RpcBlockhashFeeCalculator>>(
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
json!([commitment_config]),
|
||||
0,
|
||||
)?;
|
||||
) {
|
||||
(context, blockhash, fee_calculator, 0)
|
||||
} else {
|
||||
return Err(ClientError::new_with_request(
|
||||
RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
|
||||
RpcRequest::GetRecentBlockhash,
|
||||
));
|
||||
};
|
||||
|
||||
let blockhash = blockhash.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
@ -644,7 +600,7 @@ impl RpcClient {
|
||||
})?;
|
||||
Ok(Response {
|
||||
context,
|
||||
value: (blockhash, fee_calculator),
|
||||
value: (blockhash, fee_calculator, last_valid_slot),
|
||||
})
|
||||
}
|
||||
|
||||
@ -652,24 +608,36 @@ impl RpcClient {
|
||||
&self,
|
||||
blockhash: &Hash,
|
||||
) -> ClientResult<Option<FeeCalculator>> {
|
||||
let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
|
||||
Ok(self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
blockhash,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_fee_calculator_for_blockhash_with_commitment(
|
||||
&self,
|
||||
blockhash: &Hash,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<FeeCalculator>> {
|
||||
let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
|
||||
RpcRequest::GetFeeCalculatorForBlockhash,
|
||||
json!([blockhash.to_string()]),
|
||||
0,
|
||||
json!([blockhash.to_string(), commitment_config]),
|
||||
)?;
|
||||
|
||||
Ok(value.map(|rf| rf.fee_calculator))
|
||||
Ok(Response {
|
||||
context,
|
||||
value: value.map(|rf| rf.fee_calculator),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
|
||||
let Response {
|
||||
context,
|
||||
value: RpcFeeRateGovernor { fee_rate_governor },
|
||||
} = self.send::<Response<RpcFeeRateGovernor>>(
|
||||
RpcRequest::GetFeeRateGovernor,
|
||||
Value::Null,
|
||||
0,
|
||||
)?;
|
||||
} =
|
||||
self.send::<Response<RpcFeeRateGovernor>>(RpcRequest::GetFeeRateGovernor, Value::Null)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
@ -704,7 +672,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
|
||||
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null, 0)?;
|
||||
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null)?;
|
||||
let hash = hash_str.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Hash".to_string()).into(),
|
||||
@ -900,7 +868,6 @@ impl RpcClient {
|
||||
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
|
||||
RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)?;
|
||||
|
||||
let confirmations = result.value[0]
|
||||
@ -919,6 +886,17 @@ impl RpcClient {
|
||||
pub fn send_and_confirm_transaction_with_spinner(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
transaction,
|
||||
RpcSendTransactionConfig::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let mut confirmations = 0;
|
||||
|
||||
@ -934,7 +912,7 @@ impl RpcClient {
|
||||
));
|
||||
let mut status_retries = 15;
|
||||
let (signature, status) = loop {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
let signature = self.send_transaction_with_config(transaction, config.clone())?;
|
||||
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self
|
||||
@ -1012,17 +990,17 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn validator_exit(&self) -> ClientResult<bool> {
|
||||
self.send(RpcRequest::ValidatorExit, Value::Null, 0)
|
||||
self.send(RpcRequest::ValidatorExit, Value::Null)
|
||||
}
|
||||
|
||||
pub fn send<T>(&self, request: RpcRequest, params: Value, retries: usize) -> ClientResult<T>
|
||||
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
assert!(params.is_array() || params.is_null());
|
||||
let response = self
|
||||
.client
|
||||
.send(request, params, retries)
|
||||
.send(request, params)
|
||||
.map_err(|err| err.into_with_request(request))?;
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_request(err.into(), request))
|
||||
@ -1053,7 +1031,6 @@ mod tests {
|
||||
use jsonrpc_core::{Error, IoHandler, Params};
|
||||
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
|
||||
use serde_json::Number;
|
||||
use solana_logger;
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError, signature::Keypair, system_transaction,
|
||||
transaction::TransactionError,
|
||||
@ -1099,62 +1076,21 @@ mod tests {
|
||||
.send(
|
||||
RpcRequest::GetBalance,
|
||||
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
|
||||
0,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(balance, 50);
|
||||
|
||||
let blockhash: String = rpc_client
|
||||
.send(RpcRequest::GetRecentBlockhash, Value::Null, 0)
|
||||
.send(RpcRequest::GetRecentBlockhash, Value::Null)
|
||||
.unwrap();
|
||||
assert_eq!(blockhash, "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
|
||||
|
||||
// Send erroneous parameter
|
||||
let blockhash: ClientResult<String> =
|
||||
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
|
||||
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]));
|
||||
assert_eq!(blockhash.is_err(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_retry_send() {
|
||||
solana_logger::setup();
|
||||
let (sender, receiver) = channel();
|
||||
thread::spawn(move || {
|
||||
// 1. Pick a random port
|
||||
// 2. Tell the client to start using it
|
||||
// 3. Delay for 1.5 seconds before starting the server to ensure the client will fail
|
||||
// and need to retry
|
||||
let rpc_addr: SocketAddr = "0.0.0.0:4242".parse().unwrap();
|
||||
sender.send(rpc_addr.clone()).unwrap();
|
||||
sleep(Duration::from_millis(1500));
|
||||
|
||||
let mut io = IoHandler::default();
|
||||
io.add_method("getBalance", move |_params: Params| {
|
||||
Ok(Value::Number(Number::from(5)))
|
||||
});
|
||||
let server = ServerBuilder::new(io)
|
||||
.threads(1)
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.start_http(&rpc_addr)
|
||||
.expect("Unable to start RPC server");
|
||||
server.wait();
|
||||
});
|
||||
|
||||
let rpc_addr = receiver.recv().unwrap();
|
||||
let rpc_client = RpcClient::new_socket(rpc_addr);
|
||||
|
||||
let balance: u64 = rpc_client
|
||||
.send(
|
||||
RpcRequest::GetBalance,
|
||||
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(balance, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_send_transaction() {
|
||||
let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
|
@ -4,8 +4,7 @@ use crate::{
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
};
|
||||
use log::*;
|
||||
use reqwest::{self, header::CONTENT_TYPE};
|
||||
use solana_sdk::clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
|
||||
use reqwest::{self, header::CONTENT_TYPE, StatusCode};
|
||||
use std::{thread::sleep, time::Duration};
|
||||
|
||||
pub struct RpcClientRequest {
|
||||
@ -29,17 +28,13 @@ impl RpcClientRequest {
|
||||
}
|
||||
|
||||
impl GenericRpcClientRequest for RpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: RpcRequest,
|
||||
params: serde_json::Value,
|
||||
mut retries: usize,
|
||||
) -> Result<serde_json::Value> {
|
||||
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
|
||||
// Concurrent requests are not supported so reuse the same request id for all requests
|
||||
let request_id = 1;
|
||||
|
||||
let request_json = request.build_request_json(request_id, params);
|
||||
|
||||
let mut too_many_requests_retries = 5;
|
||||
loop {
|
||||
match self
|
||||
.client
|
||||
@ -50,6 +45,19 @@ impl GenericRpcClientRequest for RpcClientRequest {
|
||||
{
|
||||
Ok(response) => {
|
||||
if !response.status().is_success() {
|
||||
if response.status() == StatusCode::TOO_MANY_REQUESTS
|
||||
&& too_many_requests_retries > 0
|
||||
{
|
||||
too_many_requests_retries -= 1;
|
||||
debug!(
|
||||
"Server responded with {:?}, {} retries left",
|
||||
response, too_many_requests_retries
|
||||
);
|
||||
|
||||
// Sleep for 500ms to give the server a break
|
||||
sleep(Duration::from_millis(500));
|
||||
continue;
|
||||
}
|
||||
return Err(response.error_for_status().unwrap_err().into());
|
||||
}
|
||||
|
||||
@ -63,17 +71,8 @@ impl GenericRpcClientRequest for RpcClientRequest {
|
||||
}
|
||||
return Ok(json["result"].clone());
|
||||
}
|
||||
Err(e) => {
|
||||
info!("{:?} failed, {} retries left: {:?}", request, retries, e);
|
||||
if retries == 0 {
|
||||
return Err(e.into());
|
||||
}
|
||||
retries -= 1;
|
||||
|
||||
// Sleep for approximately half a slot
|
||||
sleep(Duration::from_millis(
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
|
||||
));
|
||||
Err(err) => {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
use solana_sdk::commitment_config::CommitmentConfig;
|
||||
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
@ -9,6 +9,18 @@ pub struct RpcSignatureStatusConfig {
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSendTransactionConfig {
|
||||
pub skip_preflight: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSimulateTransactionConfig {
|
||||
pub sig_verify: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcLargestAccountsFilter {
|
||||
@ -23,3 +35,11 @@ pub struct RpcLargestAccountsConfig {
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
pub filter: Option<RpcLargestAccountsFilter>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStakeConfig {
|
||||
pub epoch: Option<Epoch>,
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
@ -16,15 +16,18 @@ pub enum RpcRequest {
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetGenesisHash,
|
||||
GetIdentity,
|
||||
GetInflation,
|
||||
GetLargestAccounts,
|
||||
GetLeaderSchedule,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetFees,
|
||||
GetGenesisHash,
|
||||
GetIdentity,
|
||||
GetInflationGovernor,
|
||||
GetInflationRate,
|
||||
GetLargestAccounts,
|
||||
GetLeaderSchedule,
|
||||
GetMinimumBalanceForRentExemption,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
@ -37,12 +40,12 @@ pub enum RpcRequest {
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
GetVoteAccounts,
|
||||
MinimumLedgerSlot,
|
||||
RegisterNode,
|
||||
RequestAirdrop,
|
||||
SendTransaction,
|
||||
SimulateTransaction,
|
||||
SignVote,
|
||||
GetMinimumBalanceForRentExemption,
|
||||
MinimumLedgerSlot,
|
||||
}
|
||||
|
||||
impl fmt::Display for RpcRequest {
|
||||
@ -60,15 +63,18 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
RpcRequest::GetIdentity => "getIdentity",
|
||||
RpcRequest::GetInflation => "getInflation",
|
||||
RpcRequest::GetLargestAccounts => "getLargestAccounts",
|
||||
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
|
||||
RpcRequest::GetProgramAccounts => "getProgramAccounts",
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetFees => "getFees",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
RpcRequest::GetIdentity => "getIdentity",
|
||||
RpcRequest::GetInflationGovernor => "getInflationGovernor",
|
||||
RpcRequest::GetInflationRate => "getInflationRate",
|
||||
RpcRequest::GetLargestAccounts => "getLargestAccounts",
|
||||
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
|
||||
RpcRequest::GetProgramAccounts => "getProgramAccounts",
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
@ -81,12 +87,12 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
RpcRequest::GetVoteAccounts => "getVoteAccounts",
|
||||
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
|
||||
RpcRequest::RegisterNode => "registerNode",
|
||||
RpcRequest::RequestAirdrop => "requestAirdrop",
|
||||
RpcRequest::SendTransaction => "sendTransaction",
|
||||
RpcRequest::SimulateTransaction => "simulateTransaction",
|
||||
RpcRequest::SignVote => "signVote",
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
|
||||
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
|
||||
};
|
||||
|
||||
write!(f, "{}", method)
|
||||
@ -96,6 +102,7 @@ impl fmt::Display for RpcRequest {
|
||||
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
|
||||
|
||||
impl RpcRequest {
|
||||
pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
|
||||
@ -142,10 +149,6 @@ mod tests {
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getEpochInfo");
|
||||
|
||||
let test_request = RpcRequest::GetInflation;
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getInflation");
|
||||
|
||||
let test_request = RpcRequest::GetRecentBlockhash;
|
||||
let request = test_request.build_request_json(1, Value::Null);
|
||||
assert_eq!(request["method"], "getRecentBlockhash");
|
||||
|
@ -3,6 +3,7 @@ use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
inflation::Inflation,
|
||||
pubkey::Pubkey,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
@ -35,6 +36,14 @@ pub struct RpcBlockhashFeeCalculator {
|
||||
pub fee_calculator: FeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFees {
|
||||
pub blockhash: String,
|
||||
pub fee_calculator: FeeCalculator,
|
||||
pub last_valid_slot: Slot,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFeeCalculator {
|
||||
@ -47,6 +56,37 @@ pub struct RpcFeeRateGovernor {
|
||||
pub fee_rate_governor: FeeRateGovernor,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcInflationGovernor {
|
||||
pub initial: f64,
|
||||
pub terminal: f64,
|
||||
pub taper: f64,
|
||||
pub foundation: f64,
|
||||
pub foundation_term: f64,
|
||||
}
|
||||
|
||||
impl From<Inflation> for RpcInflationGovernor {
|
||||
fn from(inflation: Inflation) -> Self {
|
||||
Self {
|
||||
initial: inflation.initial,
|
||||
terminal: inflation.terminal,
|
||||
taper: inflation.taper,
|
||||
foundation: inflation.foundation,
|
||||
foundation_term: inflation.foundation_term,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcInflationRate {
|
||||
pub total: f64,
|
||||
pub validator: f64,
|
||||
pub foundation: f64,
|
||||
pub epoch: Epoch,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcKeyedAccount {
|
||||
@ -129,6 +169,9 @@ pub struct RpcEpochInfo {
|
||||
|
||||
/// The absolute current slot
|
||||
pub absolute_slot: Slot,
|
||||
|
||||
/// The current block height
|
||||
pub block_height: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
@ -188,6 +231,13 @@ pub struct RpcSignatureConfirmation {
|
||||
pub status: Result<()>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSimulateTransactionResult {
|
||||
pub err: Option<TransactionError>,
|
||||
pub logs: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStorageTurn {
|
||||
@ -210,3 +260,20 @@ pub struct RpcSupply {
|
||||
pub non_circulating: u64,
|
||||
pub non_circulating_accounts: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum StakeActivationState {
|
||||
Activating,
|
||||
Active,
|
||||
Deactivating,
|
||||
Inactive,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStakeActivation {
|
||||
pub state: StakeActivationState,
|
||||
pub active: u64,
|
||||
pub inactive: u64,
|
||||
}
|
||||
|
@ -440,7 +440,7 @@ impl SyncClient for ThinClient {
|
||||
match recent_blockhash {
|
||||
Ok(Response { value, .. }) => {
|
||||
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
|
||||
Ok(value)
|
||||
Ok((value.0, value.1))
|
||||
}
|
||||
Err(e) => {
|
||||
self.optimizer.report(index, std::u64::MAX);
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -21,6 +21,7 @@ byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
@ -41,37 +42,37 @@ regex = "1.3.6"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.13" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.13" }
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-measure = { path = "../measure", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.13" }
|
||||
solana-perf = { path = "../perf", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.13" }
|
||||
solana-version = { path = "../version", version = "1.1.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.13" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.13" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.24" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.24" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.24" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.24" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
solana-measure = { path = "../measure", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.24" }
|
||||
solana-perf = { path = "../perf", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.24" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.24" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.24" }
|
||||
solana-version = { path = "../version", version = "1.1.24" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.24" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.24" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.13" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.24" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -3,6 +3,7 @@
extern crate test;

use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
@@ -48,7 +49,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
            &peers_and_stakes,
            &peers,
            &last_datapoint,
            &mut 0,
            &mut TransmitShredsStats::default(),
        )
        .unwrap();
    });

@@ -31,7 +31,13 @@ impl AccountsBackgroundService {
                bank.process_dead_slots();

                // Currently, given INTERVAL_MS, we process 1 slot/100 ms
                bank.process_stale_slot();
                let shrunken_account_count = bank.process_stale_slot();
                if shrunken_account_count > 0 {
                    datapoint_info!(
                        "stale_slot_shrink",
                        ("accounts", shrunken_account_count, i64)
                    );
                }

                sleep(Duration::from_millis(INTERVAL_MS));
            })

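The hunk above changes the background loop to report a metric only when process_stale_slot actually shrank accounts. Here is a minimal, self-contained sketch of the same loop shape, with plain logging standing in for the datapoint_info! macro and a hypothetical process_stale_slot closure standing in for the bank call.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{sleep, spawn};
use std::time::Duration;

const INTERVAL_MS: u64 = 100;

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let exit_clone = exit.clone();
    // Stand-in for bank.process_stale_slot(): returns how many accounts were shrunk.
    let process_stale_slot = || -> usize { 0 };

    let handle = spawn(move || {
        while !exit_clone.load(Ordering::Relaxed) {
            let shrunken_account_count = process_stale_slot();
            // Only report when there was real work, so the metrics stream stays
            // quiet on idle iterations.
            if shrunken_account_count > 0 {
                println!("stale_slot_shrink accounts={}", shrunken_account_count);
            }
            sleep(Duration::from_millis(INTERVAL_MS));
        }
    });

    exit.store(true, Ordering::Relaxed);
    handle.join().unwrap();
}
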
@@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;

/// Transaction forwarding
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;

// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 4;
@@ -292,7 +292,7 @@ impl BankingStage {
        enable_forwarding: bool,
        batch_limit: usize,
        transaction_status_sender: Option<TransactionStatusSender>,
    ) {
    ) -> BufferedPacketsDecision {
        let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
            let poh = poh_recorder.lock().unwrap();
            (
@@ -349,6 +349,7 @@ impl BankingStage {
            }
            _ => (),
        }
        decision
    }

    pub fn process_loop(
@@ -365,8 +366,8 @@ impl BankingStage {
        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut buffered_packets = vec![];
        loop {
            if !buffered_packets.is_empty() {
                Self::process_buffered_packets(
            while !buffered_packets.is_empty() {
                let decision = Self::process_buffered_packets(
                    &my_pubkey,
                    &socket,
                    poh_recorder,
@@ -376,6 +377,11 @@ impl BankingStage {
                    batch_limit,
                    transaction_status_sender.clone(),
                );
                if decision == BufferedPacketsDecision::Hold {
                    // If we are waiting on a new bank,
                    // check the receiver for more transactions/for exiting
                    break;
                }
            }

            let recv_timeout = if !buffered_packets.is_empty() {

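The change above turns the buffered-packet handling into a drain loop: keep processing the backlog until it is empty or the decision comes back as Hold, then fall through to the receiver. A compact sketch of that control flow under assumed types follows; the enum name mirrors the diff, while the queue and the per-batch function are placeholders.

#[derive(Debug, PartialEq)]
enum BufferedPacketsDecision {
    Consume,
    Hold,
}

// Placeholder for Self::process_buffered_packets(): handle one batch and
// report whether the caller should keep draining or wait for a new bank.
fn process_one_batch(batch: &str) -> BufferedPacketsDecision {
    if batch.is_empty() {
        BufferedPacketsDecision::Hold
    } else {
        BufferedPacketsDecision::Consume
    }
}

fn main() {
    let mut buffered_packets: Vec<String> =
        vec!["batch-a".into(), "".into(), "batch-b".into()];

    while let Some(batch) = buffered_packets.first().cloned() {
        let decision = process_one_batch(&batch);
        if decision == BufferedPacketsDecision::Hold {
            // Waiting on a new bank: stop draining and go back to the receiver.
            break;
        }
        buffered_packets.remove(0);
    }

    println!("{} batches still buffered", buffered_packets.len());
}
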
@ -503,7 +509,7 @@ impl BankingStage {
|
||||
// expires.
|
||||
let txs = batch.transactions();
|
||||
let pre_balances = if transaction_status_sender.is_some() {
|
||||
bank.collect_balances(txs)
|
||||
bank.collect_balances(batch)
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
@ -539,10 +545,11 @@ impl BankingStage {
|
||||
.processing_results;
|
||||
|
||||
if let Some(sender) = transaction_status_sender {
|
||||
let post_balances = bank.collect_balances(txs);
|
||||
let post_balances = bank.collect_balances(batch);
|
||||
send_transaction_status_batch(
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
transaction_statuses,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
sender,
|
||||
|
@@ -35,7 +35,7 @@ use std::{
};

mod broadcast_fake_shreds_run;
pub(crate) mod broadcast_metrics;
pub mod broadcast_metrics;
pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
@ -374,13 +374,14 @@ pub fn broadcast_shreds(
|
||||
peers_and_stakes: &[(u64, usize)],
|
||||
peers: &[ContactInfo],
|
||||
last_datapoint_submit: &Arc<AtomicU64>,
|
||||
send_mmsg_total: &mut u64,
|
||||
transmit_stats: &mut TransmitShredsStats,
|
||||
) -> Result<()> {
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
if broadcast_len == 0 {
|
||||
update_peer_stats(1, 1, last_datapoint_submit);
|
||||
return Ok(());
|
||||
}
|
||||
let mut shred_select = Measure::start("shred_select");
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
@ -389,6 +390,8 @@ pub fn broadcast_shreds(
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
})
|
||||
.collect();
|
||||
shred_select.stop();
|
||||
transmit_stats.shred_select += shred_select.as_us();
|
||||
|
||||
let mut sent = 0;
|
||||
let mut send_mmsg_time = Measure::start("send_mmsg");
|
||||
@ -401,7 +404,7 @@ pub fn broadcast_shreds(
|
||||
}
|
||||
}
|
||||
send_mmsg_time.stop();
|
||||
*send_mmsg_total += send_mmsg_time.as_us();
|
||||
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
|
||||
|
||||
let num_live_peers = num_live_peers(&peers);
|
||||
update_peer_stats(
|
||||
|
@@ -29,11 +29,12 @@ impl ProcessShredsStats {
}

#[derive(Default, Clone)]
pub(crate) struct TransmitShredsStats {
    pub(crate) transmit_elapsed: u64,
    pub(crate) send_mmsg_elapsed: u64,
    pub(crate) get_peers_elapsed: u64,
    pub(crate) num_shreds: usize,
pub struct TransmitShredsStats {
    pub transmit_elapsed: u64,
    pub send_mmsg_elapsed: u64,
    pub get_peers_elapsed: u64,
    pub shred_select: u64,
    pub num_shreds: usize,
}

impl BroadcastStats for TransmitShredsStats {
@@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
        self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
        self.get_peers_elapsed += new_stats.get_peers_elapsed;
        self.num_shreds += new_stats.num_shreds;
        self.shred_select += new_stats.shred_select;
    }
    fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
        datapoint_info!(
@@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
            ("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
            ("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
            ("num_shreds", self.num_shreds as i64, i64),
            ("shred_select", self.shred_select as i64, i64),
        );
    }
}

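The diff above widens TransmitShredsStats with a shred_select timer and folds it into the accumulate-then-report pattern used by BroadcastStats. Here is a small, self-contained sketch of that pattern in isolation; the trait and struct names mirror the diff, while the report step just prints instead of emitting a datapoint.

#[derive(Default, Clone, Debug)]
struct TransmitShredsStats {
    transmit_elapsed: u64,
    send_mmsg_elapsed: u64,
    get_peers_elapsed: u64,
    shred_select: u64,
    num_shreds: usize,
}

trait BroadcastStats {
    fn update(&mut self, new_stats: &Self);
    fn report_stats(&self, slot: u64);
}

impl BroadcastStats for TransmitShredsStats {
    // Accumulate one batch worth of timings into the running slot totals.
    fn update(&mut self, new_stats: &Self) {
        self.transmit_elapsed += new_stats.transmit_elapsed;
        self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
        self.get_peers_elapsed += new_stats.get_peers_elapsed;
        self.shred_select += new_stats.shred_select;
        self.num_shreds += new_stats.num_shreds;
    }
    // Emit the totals once per slot (stand-in for datapoint_info!).
    fn report_stats(&self, slot: u64) {
        println!("broadcast stats for slot {}: {:?}", slot, self);
    }
}

fn main() {
    let mut slot_stats = TransmitShredsStats::default();
    for batch in 0..3u64 {
        let batch_stats = TransmitShredsStats {
            transmit_elapsed: 10 + batch,
            send_mmsg_elapsed: 3,
            get_peers_elapsed: 1,
            shred_select: 2,
            num_shreds: 64,
        };
        slot_stats.update(&batch_stats);
    }
    slot_stats.report_stats(0);
}
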
@ -176,15 +179,16 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update() {
|
||||
fn test_update_broadcast() {
|
||||
let start = Instant::now();
|
||||
let mut slot_broadcast_stats = SlotBroadcastStats::default();
|
||||
slot_broadcast_stats.update(
|
||||
&TransmitShredsStats {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
num_shreds: 1,
|
||||
get_peers_elapsed: 2,
|
||||
send_mmsg_elapsed: 3,
|
||||
shred_select: 4,
|
||||
num_shreds: 5,
|
||||
},
|
||||
&Some(BroadcastShredBatchInfo {
|
||||
slot: 0,
|
||||
@ -198,16 +202,18 @@ mod test {
|
||||
assert_eq!(slot_0_stats.num_batches, 1);
|
||||
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
|
||||
|
||||
slot_broadcast_stats.update(
|
||||
&TransmitShredsStats {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
num_shreds: 1,
|
||||
transmit_elapsed: 7,
|
||||
get_peers_elapsed: 8,
|
||||
send_mmsg_elapsed: 9,
|
||||
shred_select: 10,
|
||||
num_shreds: 11,
|
||||
},
|
||||
&None,
|
||||
);
|
||||
@ -217,9 +223,10 @@ mod test {
|
||||
assert_eq!(slot_0_stats.num_batches, 1);
|
||||
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
|
||||
|
||||
// If another batch is given, then total number of batches == num_expected_batches == 2,
|
||||
// so the batch should be purged from the HashMap
|
||||
@ -228,6 +235,7 @@ mod test {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
shred_select: 1,
|
||||
num_shreds: 1,
|
||||
},
|
||||
&Some(BroadcastShredBatchInfo {
|
||||
|
@ -81,14 +81,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
// Broadcast data
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
|
||||
let mut send_mmsg_total = 0;
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&Arc::new(AtomicU64::new(0)),
|
||||
&mut send_mmsg_total,
|
||||
&mut TransmitShredsStats::default(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
|
@@ -9,6 +9,7 @@ use solana_ledger::{
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::Duration;

#[derive(Clone)]
@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
    shred_version: u16,
    last_datapoint_submit: Arc<AtomicU64>,
    num_batches: usize,
    broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
    last_peer_update: Arc<AtomicU64>,
}

#[derive(Default)]
struct BroadcastPeerCache {
    peers: Vec<ContactInfo>,
    peers_and_stakes: Vec<(u64, usize)>,
}

impl StandardBroadcastRun {
@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
            shred_version,
            last_datapoint_submit: Arc::new(AtomicU64::new(0)),
            num_batches: 0,
            broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
            last_peer_update: Arc::new(AtomicU64::new(0)),
        }
    }

@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
        shreds: Arc<Vec<Shred>>,
        broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
    ) -> Result<()> {
        const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
        trace!("Broadcasting {:?} shreds", shreds.len());
        // Get the list of peers to broadcast to
        let get_peers_start = Instant::now();
        let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
        let get_peers_elapsed = get_peers_start.elapsed();
        let mut get_peers_time = Measure::start("broadcast::get_peers");
        let now = timestamp();
        let last = self.last_peer_update.load(Ordering::Relaxed);
        if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
            && self
                .last_peer_update
                .compare_and_swap(now, last, Ordering::Relaxed)
                == last
        {
            let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
            let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
            w_broadcast_peer_cache.peers = peers;
            w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
        }
        get_peers_time.stop();
        let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();

        let mut transmit_stats = TransmitShredsStats::default();
        // Broadcast the shreds
        let transmit_start = Instant::now();
        let mut send_mmsg_total = 0;
        let mut transmit_time = Measure::start("broadcast_shreds");
        broadcast_shreds(
            sock,
            &shreds,
            &peers_and_stakes,
            &peers,
            &r_broadcast_peer_cache.peers_and_stakes,
            &r_broadcast_peer_cache.peers,
            &self.last_datapoint_submit,
            &mut send_mmsg_total,
            &mut transmit_stats,
        )?;
        let transmit_elapsed = transmit_start.elapsed();
        let new_transmit_shreds_stats = TransmitShredsStats {
            transmit_elapsed: duration_as_us(&transmit_elapsed),
            get_peers_elapsed: duration_as_us(&get_peers_elapsed),
            send_mmsg_elapsed: send_mmsg_total,
            num_shreds: shreds.len(),
        };
        drop(r_broadcast_peer_cache);
        transmit_time.stop();

        transmit_stats.transmit_elapsed = transmit_time.as_us();
        transmit_stats.get_peers_elapsed = get_peers_time.as_us();
        transmit_stats.num_shreds = shreds.len();

        // Process metrics
        self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
        self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
        Ok(())
    }

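The hunk above caches the broadcast peer list and refreshes it at most once per BROADCAST_PEER_UPDATE_INTERVAL_MS, using an AtomicU64 timestamp so that only one thread pays for the refresh while the others keep reading the cached value behind an RwLock. Below is a self-contained sketch of that gating pattern, assuming nothing beyond the standard library; it uses compare_exchange in the usual expect-old, store-new orientation, and the refresh closure is a stand-in for get_broadcast_peers.

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{SystemTime, UNIX_EPOCH};

const UPDATE_INTERVAL_MS: u64 = 1000;

fn timestamp_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

fn maybe_refresh_peers(
    last_update: &AtomicU64,
    cache: &Arc<RwLock<Vec<String>>>,
    refresh: impl Fn() -> Vec<String>,
) {
    let now = timestamp_ms();
    let last = last_update.load(Ordering::Relaxed);
    // Refresh only if the interval has elapsed AND we win the race to bump the
    // timestamp; losers skip the refresh and keep using the cached peers.
    if now.saturating_sub(last) > UPDATE_INTERVAL_MS
        && last_update
            .compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    {
        *cache.write().unwrap() = refresh();
    }
}

fn main() {
    let last_update = AtomicU64::new(0);
    let cache = Arc::new(RwLock::new(Vec::new()));
    maybe_refresh_peers(&last_update, &cache, || vec!["peer-a".to_string()]);
    println!("cached peers: {:?}", cache.read().unwrap());
}

The design choice here is that readers never block on a peer-list recomputation: they take a cheap read lock on the cache, and at most one writer per interval swaps in a fresh list.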
(One file's diff was suppressed in the compare view because it is too large.)
@ -1,8 +1,10 @@
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
consensus::VOTE_THRESHOLD_SIZE,
|
||||
crds_value::CrdsValueLabel,
|
||||
poh_recorder::PohRecorder,
|
||||
result::{Error, Result},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify,
|
||||
verified_vote_packets::VerifiedVotePackets,
|
||||
};
|
||||
@ -14,7 +16,10 @@ use log::*;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_metrics::inc_new_counter_debug;
|
||||
use solana_perf::packet::{self, Packets};
|
||||
use solana_runtime::{bank::Bank, epoch_stakes::EpochAuthorizedVoters};
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
epoch_schedule::EpochSchedule,
|
||||
@ -43,6 +48,7 @@ pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
|
||||
pub struct SlotVoteTracker {
|
||||
voted: HashSet<Arc<Pubkey>>,
|
||||
updates: Option<Vec<Arc<Pubkey>>>,
|
||||
total_stake: u64,
|
||||
}
|
||||
|
||||
impl SlotVoteTracker {
|
||||
@ -203,6 +209,7 @@ impl ClusterInfoVoteListener {
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
) -> Self {
|
||||
let exit_ = exit.clone();
|
||||
|
||||
@ -244,6 +251,7 @@ impl ClusterInfoVoteListener {
|
||||
verified_vote_transactions_receiver,
|
||||
vote_tracker,
|
||||
&bank_forks,
|
||||
subscriptions,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@ -372,6 +380,7 @@ impl ClusterInfoVoteListener {
|
||||
vote_txs_receiver: VerifiedVoteTransactionsReceiver,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@ -380,10 +389,15 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
|
||||
|
||||
if let Err(e) =
|
||||
Self::get_and_process_votes(&vote_txs_receiver, &vote_tracker, root_bank.slot())
|
||||
{
|
||||
if let Err(e) = Self::get_and_process_votes(
|
||||
&vote_txs_receiver,
|
||||
&vote_tracker,
|
||||
root_bank.slot(),
|
||||
subscriptions.clone(),
|
||||
epoch_stakes,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
@ -397,21 +411,51 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn get_and_process_votes_for_tests(
|
||||
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
) -> Result<()> {
|
||||
Self::get_and_process_votes(
|
||||
vote_txs_receiver,
|
||||
vote_tracker,
|
||||
last_root,
|
||||
subscriptions,
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
fn get_and_process_votes(
|
||||
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
|
||||
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
|
||||
vote_txs.extend(new_txs);
|
||||
}
|
||||
Self::process_votes(vote_tracker, vote_txs, last_root);
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_votes(vote_tracker: &VoteTracker, vote_txs: Vec<Transaction>, root: Slot) {
|
||||
fn process_votes(
|
||||
vote_tracker: &VoteTracker,
|
||||
vote_txs: Vec<Transaction>,
|
||||
root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
) {
|
||||
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
|
||||
{
|
||||
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
|
||||
@ -463,7 +507,7 @@ impl ClusterInfoVoteListener {
|
||||
continue;
|
||||
}
|
||||
|
||||
for slot in vote.slots {
|
||||
for &slot in vote.slots.iter() {
|
||||
if slot <= root {
|
||||
continue;
|
||||
}
|
||||
@ -488,6 +532,8 @@ impl ClusterInfoVoteListener {
|
||||
.or_default()
|
||||
.insert(unduplicated_pubkey.unwrap());
|
||||
}
|
||||
|
||||
subscriptions.notify_vote(&vote);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -504,15 +550,35 @@ impl ClusterInfoVoteListener {
|
||||
if w_slot_tracker.updates.is_none() {
|
||||
w_slot_tracker.updates = Some(vec![]);
|
||||
}
|
||||
for pk in slot_diff {
|
||||
w_slot_tracker.voted.insert(pk.clone());
|
||||
w_slot_tracker.updates.as_mut().unwrap().push(pk);
|
||||
let mut current_stake = 0;
|
||||
for pubkey in slot_diff {
|
||||
Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
|
||||
|
||||
w_slot_tracker.voted.insert(pubkey.clone());
|
||||
w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
|
||||
}
|
||||
Self::notify_for_stake_change(
|
||||
current_stake,
|
||||
w_slot_tracker.total_stake,
|
||||
&subscriptions,
|
||||
epoch_stakes,
|
||||
slot,
|
||||
);
|
||||
w_slot_tracker.total_stake += current_stake;
|
||||
} else {
|
||||
let voted: HashSet<_> = slot_diff.into_iter().collect();
|
||||
let mut total_stake = 0;
|
||||
let voted: HashSet<_> = slot_diff
|
||||
.into_iter()
|
||||
.map(|pubkey| {
|
||||
Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
|
||||
pubkey
|
||||
})
|
||||
.collect();
|
||||
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
|
||||
let new_slot_tracker = SlotVoteTracker {
|
||||
voted: voted.clone(),
|
||||
updates: Some(voted.into_iter().collect()),
|
||||
total_stake,
|
||||
};
|
||||
vote_tracker
|
||||
.slot_vote_trackers
|
||||
@ -522,11 +588,38 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    fn notify_for_stake_change(
        current_stake: u64,
        previous_stake: u64,
        subscriptions: &Arc<RpcSubscriptions>,
        epoch_stakes: Option<&EpochStakes>,
        slot: Slot,
    ) {
        if let Some(stakes) = epoch_stakes {
            let supermajority_stake = (stakes.total_stake() as f64 * VOTE_THRESHOLD_SIZE) as u64;
            if previous_stake < supermajority_stake
                && (previous_stake + current_stake) > supermajority_stake
            {
                subscriptions.notify_gossip_subscribers(slot);
            }
        }
    }

    fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
        if let Some(stakes) = epoch_stakes {
            if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
                *sum += vote_account.0;
            }
        }
    }
}
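notify_for_stake_change above fires a gossip notification only when the observed vote stake for a slot first crosses the supermajority threshold. That crossing test can be exercised in isolation; the sketch below uses an assumed 2/3 threshold constant in the spirit of VOTE_THRESHOLD_SIZE, so treat the exact value as illustrative.

// Assumed value for illustration; the real constant lives in the consensus module.
const VOTE_THRESHOLD_SIZE: f64 = 2.0 / 3.0;

/// Returns true exactly when the running stake total first crosses the
/// supermajority threshold for this epoch.
fn crossed_supermajority(previous_stake: u64, added_stake: u64, total_epoch_stake: u64) -> bool {
    let supermajority_stake = (total_epoch_stake as f64 * VOTE_THRESHOLD_SIZE) as u64;
    previous_stake < supermajority_stake && previous_stake + added_stake > supermajority_stake
}

fn main() {
    let total = 900;
    // 0 -> 500: still below the ~600 threshold, no notification.
    assert!(!crossed_supermajority(0, 500, total));
    // 500 -> 700: crosses the threshold, notify once.
    assert!(crossed_supermajority(500, 200, total));
    // 700 -> 800: already above, no further notification.
    assert!(!crossed_supermajority(700, 100, total));
    println!("threshold-crossing checks passed");
}
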
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_perf::packet;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
@ -623,7 +716,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_update_new_root() {
|
||||
let (vote_tracker, bank, _) = setup();
|
||||
let (vote_tracker, bank, _, _) = setup();
|
||||
|
||||
// Check outdated slots are purged with new root
|
||||
let new_voter = Arc::new(Pubkey::new_rand());
|
||||
@ -664,7 +757,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_update_new_leader_schedule_epoch() {
|
||||
let (vote_tracker, bank, _) = setup();
|
||||
let (vote_tracker, bank, _, _) = setup();
|
||||
|
||||
// Check outdated slots are purged with new root
|
||||
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
@ -706,7 +799,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_process_votes() {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs) = setup();
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
|
||||
let vote_slots = vec![1, 2];
|
||||
@ -725,7 +818,14 @@ mod tests {
|
||||
});
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
for vote_slot in vote_slots {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
|
||||
@ -744,7 +844,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_process_votes2() {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs) = setup();
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
// Send some votes to process
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
|
||||
@ -769,7 +869,14 @@ mod tests {
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
@ -788,7 +895,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_get_voters_by_epoch() {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, bank, validator_voting_keypairs) = setup();
|
||||
let (vote_tracker, bank, validator_voting_keypairs, _) = setup();
|
||||
let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let last_known_slot = bank
|
||||
.epoch_schedule()
|
||||
@ -859,11 +966,23 @@ mod tests {
|
||||
100,
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank_forks = BankForks::new(0, bank);
|
||||
let bank = bank_forks.get(0).unwrap().clone();
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
|
||||
// Send a vote to process, should add a reference to the pubkey for that voter
|
||||
// in the tracker
|
||||
let validator0_keypairs = &validator_voting_keypairs[0];
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let vote_tx = vec![vote_transaction::new_vote_transaction(
|
||||
// Must vote > root to be processed
|
||||
vec![bank.slot() + 1],
|
||||
@ -874,7 +993,13 @@ mod tests {
|
||||
&validator0_keypairs.vote_keypair,
|
||||
)];
|
||||
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0);
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_tx,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
None,
|
||||
);
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
@ -924,7 +1049,7 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0);
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
|
||||
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
@ -938,7 +1063,12 @@ mod tests {
|
||||
assert_eq!(ref_count, current_ref_count);
|
||||
}
|
||||
|
||||
fn setup() -> (Arc<VoteTracker>, Arc<Bank>, Vec<ValidatorVoteKeypairs>) {
|
||||
fn setup() -> (
|
||||
Arc<VoteTracker>,
|
||||
Arc<Bank>,
|
||||
Vec<ValidatorVoteKeypairs>,
|
||||
Arc<RpcSubscriptions>,
|
||||
) {
|
||||
let validator_voting_keypairs: Vec<_> = (0..10)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
@ -950,6 +1080,18 @@ mod tests {
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank_forks = BankForks::new(0, bank);
|
||||
let bank = bank_forks.get(0).unwrap().clone();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
|
||||
// Integrity Checks
|
||||
let current_epoch = bank.epoch();
|
||||
@ -976,8 +1118,9 @@ mod tests {
|
||||
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch);
|
||||
(
|
||||
Arc::new(vote_tracker),
|
||||
Arc::new(bank),
|
||||
bank,
|
||||
validator_voting_keypairs,
|
||||
subscriptions,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@ use std::collections::HashMap;
|
||||
pub struct Crds {
|
||||
/// Stores the map of labels and values
|
||||
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
pub num_inserts: usize,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
@ -84,6 +85,7 @@ impl Default for Crds {
|
||||
fn default() -> Self {
|
||||
Crds {
|
||||
table: IndexMap::new(),
|
||||
num_inserts: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -93,6 +95,24 @@ impl Crds {
|
||||
pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
|
||||
VersionedCrdsValue::new(local_timestamp, value)
|
||||
}
|
||||
    pub fn would_insert(
        &self,
        value: CrdsValue,
        local_timestamp: u64,
    ) -> Option<VersionedCrdsValue> {
        let new_value = self.new_versioned(local_timestamp, value);
        let label = new_value.value.label();
        let would_insert = self
            .table
            .get(&label)
            .map(|current| new_value > *current)
            .unwrap_or(true);
        if would_insert {
            Some(new_value)
        } else {
            None
        }
    }
|
||||
/// insert the new value, returns the old value if insert succeeds
|
||||
pub fn insert_versioned(
|
||||
&mut self,
|
||||
@ -107,6 +127,7 @@ impl Crds {
|
||||
.unwrap_or(true);
|
||||
if do_insert {
|
||||
let old = self.table.insert(label, new_value);
|
||||
self.num_inserts += 1;
|
||||
Ok(old)
|
||||
} else {
|
||||
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);
|
||||
|
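The would_insert helper added to Crds above answers "would this value win against what the table already holds?" without mutating anything, so callers can pre-filter pull responses before taking the write path. Below is a generic sketch of that check-before-insert idea over an ordinary HashMap keyed by label, with a simple wallclock ordering standing in for the VersionedCrdsValue comparison.

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Versioned {
    label: String,
    wallclock: u64,
}

/// Returns the candidate if it would replace (or create) the entry for its label.
fn would_insert(table: &HashMap<String, Versioned>, candidate: Versioned) -> Option<Versioned> {
    let newer = table
        .get(&candidate.label)
        .map(|current| candidate.wallclock > current.wallclock)
        .unwrap_or(true);
    if newer {
        Some(candidate)
    } else {
        None
    }
}

fn main() {
    let mut table = HashMap::new();
    table.insert(
        "node-a".to_string(),
        Versioned { label: "node-a".to_string(), wallclock: 10 },
    );

    let stale = Versioned { label: "node-a".to_string(), wallclock: 5 };
    let fresh = Versioned { label: "node-a".to_string(), wallclock: 20 };

    assert!(would_insert(&table, stale).is_none());
    if let Some(winner) = would_insert(&table, fresh) {
        table.insert(winner.label.clone(), winner);
    }
    println!("kept wallclock {}", table["node-a"].wallclock);
}
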
@ -6,7 +6,7 @@
|
||||
use crate::{
|
||||
crds::{Crds, VersionedCrdsValue},
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
|
||||
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
|
||||
crds_value::{CrdsValue, CrdsValueLabel},
|
||||
};
|
||||
@ -76,17 +76,10 @@ impl CrdsGossip {
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> HashMap<Pubkey, HashSet<Pubkey>> {
|
||||
let id = &self.id;
|
||||
let crds = &self.crds;
|
||||
let push = &mut self.push;
|
||||
let versioned = labels
|
||||
.into_iter()
|
||||
.filter_map(|label| crds.lookup_versioned(&label));
|
||||
|
||||
let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
|
||||
for val in versioned {
|
||||
let origin = val.value.pubkey();
|
||||
let hash = val.value_hash;
|
||||
let peers = push.prune_received_cache(id, &origin, hash, stakes);
|
||||
for origin in labels.iter().map(|k| k.pubkey()) {
|
||||
let peers = push.prune_received_cache(id, &origin, stakes);
|
||||
for from in peers {
|
||||
prune_map.entry(from).or_default().insert(origin);
|
||||
}
|
||||
@ -113,7 +106,7 @@ impl CrdsGossip {
|
||||
return Err(CrdsGossipError::PruneMessageTimeout);
|
||||
}
|
||||
if self.id == *destination {
|
||||
self.push.process_prune_msg(peer, origin);
|
||||
self.push.process_prune_msg(&self.id, peer, origin);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(CrdsGossipError::BadPruneDestination)
|
||||
@ -158,24 +151,47 @@ impl CrdsGossip {
|
||||
self.pull.mark_pull_request_creation_time(from, now)
|
||||
}
|
||||
/// process a pull request and create a response
|
||||
pub fn process_pull_requests(
|
||||
&mut self,
|
||||
filters: Vec<(CrdsValue, CrdsFilter)>,
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
|
||||
self.pull
|
||||
.process_pull_requests(&mut self.crds, filters, now)
|
||||
.process_pull_requests(&mut self.crds, filters, now);
|
||||
}
|
||||
/// process a pull response
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
from: &Pubkey,
|
||||
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.pull.generate_pull_responses(&self.crds, filters)
|
||||
}
|
||||
|
||||
pub fn filter_pull_responses(
|
||||
&self,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> usize {
|
||||
process_pull_stats: &mut ProcessPullStats,
|
||||
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
|
||||
self.pull
|
||||
.process_pull_response(&mut self.crds, from, timeouts, response, now)
|
||||
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
|
||||
}
|
||||
|
||||
/// process a pull response
|
||||
pub fn process_pull_responses(
|
||||
&mut self,
|
||||
from: &Pubkey,
|
||||
responses: Vec<VersionedCrdsValue>,
|
||||
responses_expired_timeout: Vec<VersionedCrdsValue>,
|
||||
now: u64,
|
||||
process_pull_stats: &mut ProcessPullStats,
|
||||
) {
|
||||
let success = self.pull.process_pull_responses(
|
||||
&mut self.crds,
|
||||
from,
|
||||
responses,
|
||||
responses_expired_timeout,
|
||||
now,
|
||||
process_pull_stats,
|
||||
);
|
||||
self.push.push_pull_responses(success, now);
|
||||
}
|
||||
|
||||
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {
|
||||
|
@ -2,7 +2,6 @@
|
||||
pub enum CrdsGossipError {
|
||||
NoPeers,
|
||||
PushMessageTimeout,
|
||||
PushMessageAlreadyReceived,
|
||||
PushMessageOldVersion,
|
||||
BadPruneDestination,
|
||||
PruneMessageTimeout,
|
||||
|
@ -10,7 +10,7 @@
|
||||
//! of false positives.
|
||||
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds::Crds;
|
||||
use crate::crds::{Crds, VersionedCrdsValue};
|
||||
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
|
||||
use crate::crds_gossip_error::CrdsGossipError;
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
|
||||
// The maximum age of a value received over pull responses
|
||||
@ -118,6 +118,14 @@ impl CrdsFilter {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ProcessPullStats {
|
||||
pub success: usize,
|
||||
pub failed_insert: usize,
|
||||
pub failed_timeout: usize,
|
||||
pub timeout_count: usize,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CrdsGossipPull {
|
||||
/// timestamp of last request
|
||||
@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
|
||||
purged_values: VecDeque<(Hash, u64)>,
|
||||
pub crds_timeout: u64,
|
||||
pub msg_timeout: u64,
|
||||
pub num_pulls: usize,
|
||||
}
|
||||
|
||||
impl Default for CrdsGossipPull {
|
||||
@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
|
||||
pull_request_time: HashMap::new(),
|
||||
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
num_pulls: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -204,14 +214,13 @@ impl CrdsGossipPull {
|
||||
self.purged_values.push_back((hash, timestamp))
|
||||
}
|
||||
|
||||
/// process a pull request and create a response
|
||||
/// process a pull request
|
||||
pub fn process_pull_requests(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
requests: Vec<(CrdsValue, CrdsFilter)>,
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
let rv = self.filter_crds_values(crds, &requests);
|
||||
) {
|
||||
requests.into_iter().for_each(|(caller, _)| {
|
||||
let key = caller.label().pubkey();
|
||||
let old = crds.insert(caller, now);
|
||||
@ -221,19 +230,33 @@ impl CrdsGossipPull {
|
||||
}
|
||||
crds.update_record_timestamp(&key, now);
|
||||
});
|
||||
rv
|
||||
}
|
||||
/// process a pull response
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
from: &Pubkey,
|
||||
|
||||
/// Create gossip responses to pull requests
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
requests: &[(CrdsValue, CrdsFilter)],
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.filter_crds_values(crds, requests)
|
||||
}
|
||||
|
||||
// Checks if responses should be inserted and
|
||||
// returns those responses converted to VersionedCrdsValue
|
||||
// Separated in two vecs as:
|
||||
// .0 => responses that update the owner timestamp
|
||||
// .1 => responses that do not update the owner timestamp
|
||||
pub fn filter_pull_responses(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
responses: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> usize {
|
||||
let mut failed = 0;
|
||||
for r in response {
|
||||
stats: &mut ProcessPullStats,
|
||||
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
|
||||
let mut versioned = vec![];
|
||||
let mut versioned_expired_timestamp = vec![];
|
||||
for r in responses {
|
||||
let owner = r.label().pubkey();
|
||||
// Check if the crds value is older than the msg_timeout
|
||||
if now
|
||||
@ -252,11 +275,8 @@ impl CrdsGossipPull {
|
||||
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|
||||
|| now + timeout < r.wallclock()
|
||||
{
|
||||
inc_new_counter_warn!(
|
||||
"cluster_info-gossip_pull_response_value_timeout",
|
||||
1
|
||||
);
|
||||
failed += 1;
|
||||
stats.timeout_count += 1;
|
||||
stats.failed_timeout += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -264,32 +284,69 @@ impl CrdsGossipPull {
|
||||
// Before discarding this value, check if a ContactInfo for the owner
|
||||
// exists in the table. If it doesn't, that implies that this value can be discarded
|
||||
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
|
||||
inc_new_counter_warn!(
|
||||
"cluster_info-gossip_pull_response_value_timeout",
|
||||
1
|
||||
);
|
||||
failed += 1;
|
||||
stats.timeout_count += 1;
|
||||
stats.failed_timeout += 1;
|
||||
continue;
|
||||
} else {
|
||||
// Silently insert this old value without bumping record timestamps
|
||||
failed += crds.insert(r, now).is_err() as usize;
|
||||
match crds.would_insert(r, now) {
|
||||
Some(resp) => versioned_expired_timestamp.push(resp),
|
||||
None => stats.failed_insert += 1,
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let old = crds.insert(r, now);
|
||||
failed += old.is_err() as usize;
|
||||
match crds.would_insert(r, now) {
|
||||
Some(resp) => versioned.push(resp),
|
||||
None => stats.failed_insert += 1,
|
||||
}
|
||||
}
|
||||
(versioned, versioned_expired_timestamp)
|
||||
}
|
||||
|
||||
/// process a vec of pull responses
|
||||
pub fn process_pull_responses(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
from: &Pubkey,
|
||||
responses: Vec<VersionedCrdsValue>,
|
||||
responses_expired_timeout: Vec<VersionedCrdsValue>,
|
||||
now: u64,
|
||||
stats: &mut ProcessPullStats,
|
||||
) -> Vec<(CrdsValueLabel, Hash, u64)> {
|
||||
let mut success = vec![];
|
||||
let mut owners = HashSet::new();
|
||||
for r in responses_expired_timeout {
|
||||
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
|
||||
}
|
||||
for r in responses {
|
||||
let owner = r.value.label().pubkey();
|
||||
let label = r.value.label();
|
||||
let wc = r.value.wallclock();
|
||||
let hash = r.value_hash;
|
||||
let old = crds.insert_versioned(r);
|
||||
if old.is_err() {
|
||||
stats.failed_insert += 1;
|
||||
} else {
|
||||
stats.success += 1;
|
||||
self.num_pulls += 1;
|
||||
success.push((label, hash, wc));
|
||||
}
|
||||
old.ok().map(|opt| {
|
||||
crds.update_record_timestamp(&owner, now);
|
||||
owners.insert(owner);
|
||||
opt.map(|val| {
|
||||
self.purged_values
|
||||
.push_back((val.value_hash, val.local_timestamp))
|
||||
})
|
||||
});
|
||||
}
|
||||
crds.update_record_timestamp(from, now);
|
||||
failed
|
||||
owners.insert(*from);
|
||||
for owner in owners {
|
||||
crds.update_record_timestamp(&owner, now);
|
||||
}
|
||||
success
|
||||
}
|
||||
// build a set of filters of the current crds table
|
||||
// num_filters - used to increase the likelihood of a value in crds being added to some filter
|
||||
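The refactor shown earlier in this file splits pull-response handling into a read-only filter step (filter_pull_responses, which buckets responses and counts failures in ProcessPullStats) and a mutating apply step (process_pull_responses). Here is a schematic sketch of that two-phase shape, with plain types standing in for the crds structures; names and field choices are illustrative.

use std::collections::HashMap;

#[derive(Default, Debug)]
struct ProcessPullStats {
    success: usize,
    timeout_count: usize,
}

// Phase 1: read-only. Decide which responses are still fresh enough to apply
// and which arrived past their timeout.
fn filter_responses(
    responses: Vec<(String, u64)>, // (label, wallclock)
    now: u64,
    timeout: u64,
    stats: &mut ProcessPullStats,
) -> (Vec<(String, u64)>, Vec<(String, u64)>) {
    let mut fresh = vec![];
    let mut expired = vec![];
    for (label, wallclock) in responses {
        if now > wallclock + timeout {
            stats.timeout_count += 1;
            expired.push((label, wallclock));
        } else {
            fresh.push((label, wallclock));
        }
    }
    (fresh, expired)
}

// Phase 2: mutating. Apply the filtered responses to the table and count successes.
fn apply_responses(
    table: &mut HashMap<String, u64>,
    fresh: Vec<(String, u64)>,
    stats: &mut ProcessPullStats,
) {
    for (label, wallclock) in fresh {
        table.insert(label, wallclock);
        stats.success += 1;
    }
}

fn main() {
    let mut stats = ProcessPullStats::default();
    let mut table = HashMap::new();
    let responses = vec![("node-a".to_string(), 90), ("node-b".to_string(), 10)];
    let (fresh, _expired) = filter_responses(responses, 100, 50, &mut stats);
    apply_responses(&mut table, fresh, &mut stats);
    println!("{:?}", stats);
}
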
@ -379,6 +436,34 @@ impl CrdsGossipPull {
|
||||
.count();
|
||||
self.purged_values.drain(..cnt);
|
||||
}
|
||||
|
||||
/// For legacy tests
|
||||
#[cfg(test)]
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
from: &Pubkey,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> (usize, usize, usize) {
|
||||
let mut stats = ProcessPullStats::default();
|
||||
let (versioned, versioned_expired_timeout) =
|
||||
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
|
||||
self.process_pull_responses(
|
||||
crds,
|
||||
from,
|
||||
versioned,
|
||||
versioned_expired_timeout,
|
||||
now,
|
||||
&mut stats,
|
||||
);
|
||||
(
|
||||
stats.failed_timeout + stats.failed_insert,
|
||||
stats.timeout_count,
|
||||
stats.success,
|
||||
)
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
@ -578,8 +663,9 @@ mod test {
|
||||
let mut dest_crds = Crds::default();
|
||||
let mut dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 1);
|
||||
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
|
||||
assert!(dest_crds.lookup(&caller.label()).is_some());
|
||||
assert_eq!(
|
||||
@ -648,8 +734,9 @@ mod test {
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 0);
|
||||
// if there is a false positive this is empty
|
||||
// prob should be around 0.1 per iteration
|
||||
if rsp.is_empty() {
|
||||
@ -660,13 +747,15 @@ mod test {
|
||||
continue;
|
||||
}
|
||||
assert_eq!(rsp.len(), 1);
|
||||
let failed = node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&node_pubkey,
|
||||
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
|
||||
rsp.pop().unwrap(),
|
||||
1,
|
||||
);
|
||||
let failed = node
|
||||
.process_pull_response(
|
||||
&mut node_crds,
|
||||
&node_pubkey,
|
||||
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
|
||||
rsp.pop().unwrap(),
|
||||
1,
|
||||
)
|
||||
.0;
|
||||
assert_eq!(failed, 0);
|
||||
assert_eq!(
|
||||
node_crds
|
||||
@ -827,7 +916,8 @@ mod test {
|
||||
&timeouts,
|
||||
vec![peer_entry.clone()],
|
||||
1,
|
||||
),
|
||||
)
|
||||
.0,
|
||||
0
|
||||
);
|
||||
|
||||
@ -843,7 +933,8 @@ mod test {
|
||||
&timeouts,
|
||||
vec![peer_entry.clone(), unstaked_peer_entry],
|
||||
node.msg_timeout + 100,
|
||||
),
|
||||
)
|
||||
.0,
|
||||
2
|
||||
);
|
||||
|
||||
@ -856,7 +947,8 @@ mod test {
|
||||
&timeouts,
|
||||
vec![peer_entry.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
)
|
||||
.0,
|
||||
0
|
||||
);
|
||||
|
||||
@ -872,7 +964,8 @@ mod test {
|
||||
&timeouts,
|
||||
vec![peer_vote.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
)
|
||||
.0,
|
||||
0
|
||||
);
|
||||
|
||||
@ -885,7 +978,8 @@ mod test {
|
||||
&timeouts,
|
||||
vec![peer_vote.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
)
|
||||
.0,
|
||||
1
|
||||
);
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
|
||||
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
|
||||
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
|
||||
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
|
||||
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CrdsGossipPush {
|
||||
@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
|
||||
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
|
||||
/// push message queue
|
||||
push_messages: HashMap<CrdsValueLabel, Hash>,
|
||||
/// cache that tracks which validators a message was received from
|
||||
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
|
||||
/// Cache that tracks which validators a message was received from
|
||||
/// bool indicates it has been pruned.
|
||||
/// This cache represents a lagging view of which validators
|
||||
/// currently have this node in their `active_set`
|
||||
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
|
||||
pub num_active: usize,
|
||||
pub push_fanout: usize,
|
||||
pub msg_timeout: u64,
|
||||
pub prune_timeout: u64,
|
||||
pub num_total: usize,
|
||||
pub num_old: usize,
|
||||
pub num_pushes: usize,
|
||||
}
|
||||
|
||||
impl Default for CrdsGossipPush {
|
||||
@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
|
||||
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
|
||||
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
|
||||
prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
|
||||
num_total: 0,
|
||||
num_old: 0,
|
||||
num_pushes: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -81,18 +91,21 @@ impl CrdsGossipPush {
|
||||
&mut self,
|
||||
self_pubkey: &Pubkey,
|
||||
origin: &Pubkey,
|
||||
hash: Hash,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> Vec<Pubkey> {
|
||||
let origin_stake = stakes.get(origin).unwrap_or(&0);
|
||||
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
|
||||
let cache = self.received_cache.get(&hash);
|
||||
let cache = self.received_cache.get(origin);
|
||||
if cache.is_none() {
|
||||
return Vec::new();
|
||||
}
|
||||
let peers = cache.unwrap();
|
||||
|
||||
let peers = &cache.unwrap().1;
|
||||
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
|
||||
let peer_stake_total: u64 = peers
|
||||
.iter()
|
||||
.filter(|v| !(v.1).0)
|
||||
.map(|v| stakes.get(v.0).unwrap_or(&0))
|
||||
.sum();
|
||||
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
|
||||
if peer_stake_total < prune_stake_threshold {
|
||||
return Vec::new();
|
||||
@ -100,7 +113,8 @@ impl CrdsGossipPush {
|
||||
|
||||
let staked_peers: Vec<(Pubkey, u64)> = peers
|
||||
.iter()
|
||||
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
|
||||
.filter(|v| !(v.1).0)
|
||||
.filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
|
||||
.filter(|(_, s)| *s > 0)
|
||||
.collect();
|
||||
|
||||
@ -117,16 +131,27 @@ impl CrdsGossipPush {
|
||||
let (next_peer, next_stake) = staked_peers[next];
|
||||
keep.insert(next_peer);
|
||||
peer_stake_sum += next_stake;
|
||||
if peer_stake_sum >= prune_stake_threshold {
|
||||
if peer_stake_sum >= prune_stake_threshold
|
||||
&& keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
peers
|
||||
.iter()
|
||||
let pruned_peers: Vec<Pubkey> = peers
|
||||
.keys()
|
||||
.filter(|p| !keep.contains(p))
|
||||
.cloned()
|
||||
.collect()
|
||||
.collect();
|
||||
pruned_peers.iter().for_each(|p| {
|
||||
self.received_cache
|
||||
.get_mut(origin)
|
||||
.unwrap()
|
||||
.get_mut(p)
|
||||
.unwrap()
|
||||
.0 = true;
|
||||
});
|
||||
pruned_peers
|
||||
}
|
||||
|
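prune_received_cache above keeps, for each origin, enough of the delivering peers to cover a stake threshold while always retaining at least CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES, and prunes the rest. The sketch below shows a simplified version of that selection rule: it sorts peers by stake instead of using a weighted shuffle, and it derives the threshold from the origin's stake alone, whereas the real prune_stake_threshold also factors in the local node's stake.

const PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15; // mirrors the constant in the diff
const PRUNE_MIN_INGRESS_NODES: usize = 2;

/// Split peers into (keep, prune) for one origin, by descending stake.
fn select_peers_to_prune(
    mut peers: Vec<(String, u64)>,
    origin_stake: u64,
) -> (Vec<String>, Vec<String>) {
    let prune_stake_threshold = ((origin_stake as f64) * PRUNE_STAKE_THRESHOLD_PCT) as u64;
    peers.sort_by(|a, b| b.1.cmp(&a.1));

    let mut kept_stake = 0;
    let mut keep = vec![];
    let mut prune = vec![];
    for (peer, stake) in peers {
        if kept_stake >= prune_stake_threshold && keep.len() >= PRUNE_MIN_INGRESS_NODES {
            prune.push(peer);
        } else {
            kept_stake += stake;
            keep.push(peer);
        }
    }
    (keep, prune)
}

fn main() {
    let peers = vec![
        ("a".to_string(), 500),
        ("b".to_string(), 300),
        ("c".to_string(), 10),
        ("d".to_string(), 5),
    ];
    let (keep, prune) = select_peers_to_prune(peers, 1000);
    println!("keep={:?} prune={:?}", keep, prune);
}
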
||||
/// process a push message to the network
|
||||
@ -137,6 +162,7 @@ impl CrdsGossipPush {
|
||||
value: CrdsValue,
|
||||
now: u64,
|
||||
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
|
||||
self.num_total += 1;
|
||||
if now
|
||||
> value
|
||||
.wallclock()
|
||||
@ -149,21 +175,32 @@ impl CrdsGossipPush {
|
||||
return Err(CrdsGossipError::PushMessageTimeout);
|
||||
}
|
||||
let label = value.label();
|
||||
let origin = label.pubkey();
|
||||
let new_value = crds.new_versioned(now, value);
|
||||
let value_hash = new_value.value_hash;
|
||||
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
|
||||
received_set.insert(from.clone());
|
||||
return Err(CrdsGossipError::PushMessageAlreadyReceived);
|
||||
}
|
||||
let received_set = self
|
||||
.received_cache
|
||||
.entry(origin)
|
||||
.or_insert_with(HashMap::new);
|
||||
received_set.entry(*from).or_insert((false, 0)).1 = now;
|
||||
|
||||
let old = crds.insert_versioned(new_value);
|
||||
if old.is_err() {
|
||||
self.num_old += 1;
|
||||
return Err(CrdsGossipError::PushMessageOldVersion);
|
||||
}
|
||||
let mut received_set = HashSet::new();
|
||||
received_set.insert(from.clone());
|
||||
self.push_messages.insert(label, value_hash);
|
||||
self.received_cache.insert(value_hash, (now, received_set));
|
||||
Ok(old.ok().and_then(|opt| opt))
|
||||
Ok(old.unwrap())
|
||||
}
|
||||
|
||||
/// push pull responses
|
||||
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
|
||||
for (label, value_hash, wc) in values {
|
||||
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
|
||||
continue;
|
||||
}
|
||||
self.push_messages.insert(label, value_hash);
|
||||
}
|
||||
}
|
||||
|
||||
/// New push message to broadcast to peers.
|
||||
@ -172,18 +209,10 @@ impl CrdsGossipPush {
|
||||
/// The list of push messages is created such that all the randomly selected peers have not
|
||||
/// pruned the source addresses.
|
||||
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
|
||||
let max = self.active_set.len();
|
||||
let mut nodes: Vec<_> = (0..max).collect();
|
||||
nodes.shuffle(&mut rand::thread_rng());
|
||||
let peers: Vec<Pubkey> = nodes
|
||||
.into_iter()
|
||||
.filter_map(|n| self.active_set.get_index(n))
|
||||
.take(self.push_fanout)
|
||||
.map(|n| *n.0)
|
||||
.collect();
|
||||
let mut total_bytes: usize = 0;
|
||||
let mut values = vec![];
|
||||
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
|
||||
trace!("new_push_messages {}", self.push_messages.len());
|
||||
for (label, hash) in &self.push_messages {
|
||||
let res = crds.lookup_versioned(label);
|
||||
if res.is_none() {
|
||||
@ -203,21 +232,37 @@ impl CrdsGossipPush {
|
||||
}
|
||||
values.push(value.clone());
|
||||
}
|
||||
trace!(
|
||||
"new_push_messages {} {}",
|
||||
values.len(),
|
||||
self.active_set.len()
|
||||
);
|
||||
for v in values {
|
||||
for p in peers.iter() {
|
||||
let filter = self.active_set.get_mut(p);
|
||||
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
|
||||
push_messages.entry(*p).or_default().push(v.clone());
|
||||
//use a consistent index for the same origin so
|
||||
//the active set learns the MST for that origin
|
||||
let start = v.label().pubkey().as_ref()[0] as usize;
|
||||
let max = self.push_fanout.min(self.active_set.len());
|
||||
for i in start..(start + max) {
|
||||
let ix = i % self.active_set.len();
|
||||
if let Some((p, filter)) = self.active_set.get_index(ix) {
|
||||
if !filter.contains(&v.label().pubkey()) {
|
||||
trace!("new_push_messages insert {} {:?}", *p, v);
|
||||
push_messages.entry(*p).or_default().push(v.clone());
|
||||
self.num_pushes += 1;
|
||||
}
|
||||
}
|
||||
self.push_messages.remove(&v.label());
|
||||
}
|
||||
self.push_messages.remove(&v.label());
|
||||
}
|
||||
push_messages
|
||||
}
|
||||
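In new_push_messages above, the fanout targets for a value are no longer a fresh random selection per call; the starting index into the active set is derived from the origin pubkey's first byte, so pushes for the same origin consistently land on the same peers and the overlay can converge toward a stable spanning tree for that origin. Here is a sketch of that indexing rule over a plain vector of peers; the types are stand-ins for the active-set structure.

/// Pick up to `fanout` peers for an origin, starting at an index derived
/// from the origin's first byte so the choice is stable across calls.
fn select_push_peers<'a>(
    active_set: &'a [String],
    origin: &[u8; 32],
    fanout: usize,
) -> Vec<&'a String> {
    if active_set.is_empty() {
        return vec![];
    }
    let start = origin[0] as usize;
    let max = fanout.min(active_set.len());
    (start..start + max)
        .map(|i| &active_set[i % active_set.len()])
        .collect()
}

fn main() {
    let active_set: Vec<String> = (0..8).map(|i| format!("peer-{}", i)).collect();
    let origin = [200u8; 32];
    // Same origin, same slice of the active set every time.
    println!("{:?}", select_push_peers(&active_set, &origin, 3));
    println!("{:?}", select_push_peers(&active_set, &origin, 3));
}
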
|
||||
/// add the `from` to the peer's filter of nodes
|
||||
pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
|
||||
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
|
||||
for origin in origins {
|
||||
if origin == self_pubkey {
|
||||
continue;
|
||||
}
|
||||
if let Some(p) = self.active_set.get_mut(peer) {
|
||||
p.add(origin)
|
||||
}
|
||||
@ -339,15 +384,11 @@ impl CrdsGossipPush {
|
||||
|
||||
/// purge received push message cache
|
||||
pub fn purge_old_received_cache(&mut self, min_time: u64) {
|
||||
let old_msgs: Vec<Hash> = self
|
||||
.received_cache
|
||||
.iter()
|
||||
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
|
||||
.cloned()
|
||||
.collect();
|
||||
for k in old_msgs {
|
||||
self.received_cache.remove(&k);
|
||||
}
|
||||
self.received_cache
|
||||
.iter_mut()
|
||||
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));
|
||||
|
||||
self.received_cache.retain(|_, v| !v.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@ -371,7 +412,6 @@ mod test {
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&origin, 0,
|
||||
)));
|
||||
let label = value.label();
|
||||
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
|
||||
let mut low_staked_set = HashSet::new();
|
||||
low_staked_peers.for_each(|p| {
|
||||
@ -380,11 +420,7 @@ mod test {
|
||||
stakes.insert(p, 1);
|
||||
});
|
||||
|
||||
let versioned = crds
|
||||
.lookup_versioned(&label)
|
||||
.expect("versioned value should exist");
|
||||
let hash = versioned.value_hash;
|
||||
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
|
||||
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
|
||||
assert!(
|
||||
pruned.is_empty(),
|
||||
"should not prune if min threshold has not been reached"
|
||||
@ -395,7 +431,7 @@ mod test {
|
||||
stakes.insert(high_staked_peer, high_stake);
|
||||
let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);
|
||||
|
||||
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
|
||||
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
|
||||
assert!(
|
||||
pruned.len() < low_staked_set.len() + 1,
|
||||
"should not prune all peers"
|
||||
@ -409,7 +445,7 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_push() {
|
||||
fn test_process_push_one() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
@ -426,8 +462,8 @@ mod test {
|
||||
|
||||
// push it again
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
|
||||
Err(CrdsGossipError::PushMessageAlreadyReceived)
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
|
||||
Err(CrdsGossipError::PushMessageOldVersion)
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
@ -690,6 +726,7 @@ mod test {
|
||||
#[test]
|
||||
fn test_process_prune() {
|
||||
let mut crds = Crds::default();
|
||||
let self_id = Pubkey::new_rand();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
@ -707,7 +744,11 @@ mod test {
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
|
||||
Ok(None)
|
||||
);
|
||||
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
|
||||
push.process_prune_msg(
|
||||
&self_id,
|
||||
&peer.label().pubkey(),
|
||||
&[new_msg.label().pubkey()],
|
||||
);
|
||||
assert_eq!(push.new_push_messages(&crds, 0), expected);
|
||||
}
|
||||
#[test]
|
||||
@ -749,9 +790,9 @@ mod test {
|
||||
assert_eq!(crds.lookup(&label), Some(&value));
|
||||
|
||||
// push it again
|
||||
assert_eq!(
|
||||
assert_matches!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
|
||||
Err(CrdsGossipError::PushMessageAlreadyReceived)
|
||||
Err(CrdsGossipError::PushMessageOldVersion)
|
||||
);
|
||||
|
||||
// purge the old pushed
|
||||
|
@ -3,7 +3,7 @@
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::blockstore_db::Result as BlockstoreResult;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_sdk::clock::{Slot, DEFAULT_TICKS_PER_SLOT, TICKS_PER_DAY};
|
||||
use std::string::ToString;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
@ -29,9 +29,12 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
|
||||
// and starve other blockstore users.
|
||||
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
|
||||
|
||||
// Remove a limited number of slots at a time, so the operation
|
||||
// does not take too long and block other blockstore users.
|
||||
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
|
||||
// Delay between purges to cooperate with other blockstore users
|
||||
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
|
||||
|
||||
// Compacting at a slower interval than purging helps keep IOPS down.
|
||||
// Once a day should be ample
|
||||
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
|
||||
|
||||
pub struct LedgerCleanupService {
|
||||
t_cleanup: JoinHandle<()>,
|
||||
@ -50,6 +53,8 @@ impl LedgerCleanupService {
|
||||
);
|
||||
let exit = exit.clone();
|
||||
let mut last_purge_slot = 0;
|
||||
let mut last_compaction_slot = 0;
|
||||
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-ledger-cleanup".to_string())
|
||||
.spawn(move || loop {
|
||||
@ -62,6 +67,9 @@ impl LedgerCleanupService {
|
||||
max_ledger_slots,
|
||||
&mut last_purge_slot,
|
||||
DEFAULT_PURGE_SLOT_INTERVAL,
|
||||
Some(DEFAULT_DELAY_BETWEEN_PURGES),
|
||||
&mut last_compaction_slot,
|
||||
DEFAULT_COMPACTION_SLOT_INTERVAL,
|
||||
) {
|
||||
match e {
|
||||
RecvTimeoutError::Disconnected => break,
|
||||
@ -77,8 +85,8 @@ impl LedgerCleanupService {
|
||||
blockstore: &Arc<Blockstore>,
|
||||
root: Slot,
|
||||
max_ledger_shreds: u64,
|
||||
) -> (u64, Slot, Slot, u64) {
|
||||
let mut shreds = Vec::new();
|
||||
) -> (bool, Slot, Slot, u64) {
|
||||
let mut total_slots = Vec::new();
|
||||
let mut iterate_time = Measure::start("iterate_time");
|
||||
let mut total_shreds = 0;
|
||||
let mut first_slot = 0;
|
||||
@ -89,33 +97,43 @@ impl LedgerCleanupService {
|
||||
}
|
||||
// Not exact since non-full slots will have holes
|
||||
total_shreds += meta.received;
|
||||
shreds.push((slot, meta.received));
|
||||
total_slots.push((slot, meta.received));
|
||||
if slot > root {
|
||||
break;
|
||||
}
|
||||
}
|
||||
iterate_time.stop();
|
||||
info!(
|
||||
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
|
||||
max_ledger_shreds,
|
||||
shreds.len(),
|
||||
"first_slot={} total_slots={} total_shreds={} max_ledger_shreds={}, {}",
|
||||
first_slot,
|
||||
total_slots.len(),
|
||||
total_shreds,
|
||||
max_ledger_shreds,
|
||||
iterate_time
|
||||
);
|
||||
if (total_shreds as u64) < max_ledger_shreds {
|
||||
return (0, 0, 0, total_shreds);
|
||||
return (false, 0, 0, total_shreds);
|
||||
}
|
||||
let mut cur_shreds = 0;
|
||||
let mut lowest_slot_to_clean = shreds[0].0;
|
||||
for (slot, num_shreds) in shreds.iter().rev() {
|
||||
cur_shreds += *num_shreds as u64;
|
||||
if cur_shreds > max_ledger_shreds {
|
||||
lowest_slot_to_clean = *slot;
|
||||
let mut num_shreds_to_clean = 0;
|
||||
let mut lowest_cleanup_slot = total_slots[0].0;
|
||||
for (slot, num_shreds) in total_slots.iter().rev() {
|
||||
num_shreds_to_clean += *num_shreds as u64;
|
||||
if num_shreds_to_clean > max_ledger_shreds {
|
||||
lowest_cleanup_slot = *slot;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
(cur_shreds, lowest_slot_to_clean, first_slot, total_shreds)
|
||||
(true, first_slot, lowest_cleanup_slot, total_shreds)
|
||||
}
|
||||
|
||||
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
|
||||
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
// Get the newest root
|
||||
while let Ok(new_root) = new_root_receiver.try_recv() {
|
||||
root = new_root;
|
||||
}
|
||||
Ok(root)
|
||||
}
|
||||
|
||||
fn cleanup_ledger(
|
||||
@ -124,58 +142,88 @@ impl LedgerCleanupService {
|
||||
max_ledger_shreds: u64,
|
||||
last_purge_slot: &mut u64,
|
||||
purge_interval: u64,
|
||||
delay_between_purges: Option<Duration>,
|
||||
last_compaction_slot: &mut u64,
|
||||
compaction_interval: u64,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
// Get the newest root
|
||||
while let Ok(new_root) = new_root_receiver.try_recv() {
|
||||
root = new_root;
|
||||
let root = Self::receive_new_roots(new_root_receiver)?;
|
||||
if root - *last_purge_slot <= purge_interval {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if root - *last_purge_slot > purge_interval {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
info!(
|
||||
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
|
||||
root, last_purge_slot, purge_interval, disk_utilization_pre
|
||||
);
|
||||
*last_purge_slot = root;
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
info!(
|
||||
"purge: last_root={}, last_purge_slot={}, purge_interval={}, last_compaction_slot={}, disk_utilization={:?}",
|
||||
root, last_purge_slot, purge_interval, last_compaction_slot, disk_utilization_pre
|
||||
);
|
||||
*last_purge_slot = root;
|
||||
|
||||
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot, total_shreds) =
|
||||
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
|
||||
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
|
||||
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
|
||||
|
||||
if num_shreds_to_clean > 0 {
|
||||
debug!(
|
||||
"cleaning up to: {} shreds: {} first: {}",
|
||||
lowest_slot_to_clean, num_shreds_to_clean, first_slot
|
||||
);
|
||||
loop {
|
||||
let current_lowest =
|
||||
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
|
||||
|
||||
let mut slot_update_time = Measure::start("slot_update");
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
|
||||
slot_update_time.stop();
|
||||
|
||||
let mut clean_time = Measure::start("ledger_clean");
|
||||
blockstore.purge_slots(first_slot, Some(current_lowest));
|
||||
clean_time.stop();
|
||||
|
||||
debug!(
|
||||
"ledger purge {} -> {}: {} {}",
|
||||
first_slot, current_lowest, slot_update_time, clean_time
|
||||
);
|
||||
first_slot += DEFAULT_PURGE_BATCH_SIZE;
|
||||
if current_lowest == lowest_slot_to_clean {
|
||||
break;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
}
|
||||
if slots_to_clean {
|
||||
let mut compact_first_slot = std::u64::MAX;
|
||||
if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
|
||||
compact_first_slot = *last_compaction_slot;
|
||||
*last_compaction_slot = lowest_cleanup_slot;
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
let purge_complete = Arc::new(AtomicBool::new(false));
|
||||
let blockstore = blockstore.clone();
|
||||
let purge_complete1 = purge_complete.clone();
|
||||
let _t_purge = Builder::new()
|
||||
.name("solana-ledger-purge".to_string())
|
||||
.spawn(move || {
|
||||
let mut slot_update_time = Measure::start("slot_update");
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
|
||||
slot_update_time.stop();
|
||||
|
||||
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
|
||||
info!(
|
||||
"purging data from slots {} to {}",
|
||||
purge_first_slot, lowest_cleanup_slot
|
||||
);
|
||||
|
||||
let mut purge_time = Measure::start("purge_slots_with_delay");
|
||||
blockstore.purge_slots_with_delay(
|
||||
purge_first_slot,
|
||||
lowest_cleanup_slot,
|
||||
delay_between_purges,
|
||||
);
|
||||
purge_time.stop();
|
||||
info!("{}", purge_time);
|
||||
|
||||
if compact_first_slot < lowest_cleanup_slot {
|
||||
info!(
|
||||
"compacting data from slots {} to {}",
|
||||
compact_first_slot, lowest_cleanup_slot
|
||||
);
|
||||
if let Err(err) =
|
||||
blockstore.compact_storage(compact_first_slot, lowest_cleanup_slot)
|
||||
{
|
||||
// This error is not fatal and indicates an internal error?
|
||||
error!(
|
||||
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
|
||||
err, compact_first_slot, lowest_cleanup_slot
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
purge_complete1.store(true, Ordering::Relaxed);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup
|
||||
while !purge_complete.load(Ordering::Relaxed) {
|
||||
if let Err(err) = Self::receive_new_roots(new_root_receiver) {
|
||||
debug!("receive_new_roots: {}", err);
|
||||
}
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
}
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -218,9 +266,19 @@ mod tests {
|
||||
|
||||
//send a signal to kill all but 5 shreds, which will be in the newest slots
|
||||
let mut last_purge_slot = 0;
|
||||
let mut last_compaction_slot = 0;
|
||||
sender.send(50).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
|
||||
.unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
5,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
None,
|
||||
&mut last_compaction_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
//check that 0-40 don't exist
|
||||
blockstore
|
||||
@ -250,6 +308,7 @@ mod tests {
|
||||
info!("{}", first_insert);
|
||||
|
||||
let mut last_purge_slot = 0;
|
||||
let mut last_compaction_slot = 0;
|
||||
let mut slot = initial_slots;
|
||||
let mut num_slots = 6;
|
||||
for _ in 0..5 {
|
||||
@ -273,6 +332,9 @@ mod tests {
|
||||
initial_slots,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
None,
|
||||
&mut last_compaction_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
time.stop();
|
||||
@ -308,12 +370,16 @@ mod tests {
|
||||
// send signal to cleanup slots
|
||||
let (sender, receiver) = channel();
|
||||
sender.send(n).unwrap();
|
||||
let mut next_purge_batch = 0;
|
||||
let mut last_purge_slot = 0;
|
||||
let mut last_compaction_slot = 0;
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
max_ledger_shreds,
|
||||
&mut next_purge_batch,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
None,
|
||||
&mut last_compaction_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
|
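
A condensed sketch of the interval gating that the reworked cleanup_ledger above performs: purge only when the root has advanced past purge_interval, and schedule the slower compaction only when the cleanup slot has moved more than compaction_interval since the last compaction. Plain u64 slots, not the service's real types:

    /// Returns (should_purge, should_compact) for a newly received root,
    /// mirroring the purge_interval / compaction_interval checks above.
    fn cleanup_decision(
        root: u64,
        last_purge_slot: &mut u64,
        purge_interval: u64,
        lowest_cleanup_slot: u64,
        last_compaction_slot: &mut u64,
        compaction_interval: u64,
    ) -> (bool, bool) {
        if root - *last_purge_slot <= purge_interval {
            return (false, false); // nothing to do yet
        }
        *last_purge_slot = root;

        // Compaction runs on a slower cadence than purging to keep IOPS down.
        let compact =
            lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval;
        if compact {
            *last_compaction_slot = lowest_cleanup_slot;
        }
        (true, compact)
    }

    fn main() {
        let (mut last_purge, mut last_compact) = (0, 0);
        // With a 512-slot purge interval, the first root past slot 512 triggers a purge
        // but not yet a compaction.
        assert_eq!(
            cleanup_decision(600, &mut last_purge, 512, 550, &mut last_compact, 10_000),
            (true, false)
        );
    }
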
@@ -42,10 +42,12 @@ pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;
@@ -8,7 +8,7 @@ pub struct NonCirculatingSupply {
pub accounts: Vec<Pubkey>,
}

pub fn calculate_non_circulating_supply(bank: Arc<Bank>) -> NonCirculatingSupply {
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
debug!("Updating Bank supply, epoch: {}", bank.epoch());
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();

@@ -56,8 +56,8 @@ solana_sdk::pubkeys!(
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
@@ -74,6 +74,8 @@ solana_sdk::pubkeys!(
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
]
);

@@ -83,6 +85,7 @@ solana_sdk::pubkeys!(
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
"FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
]
);

@@ -149,7 +152,7 @@ mod tests {
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
);

let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts + num_stake_accounts) * balance
@@ -164,7 +167,7 @@ mod tests {
for key in non_circulating_accounts {
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
}
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
@@ -179,7 +182,7 @@ mod tests {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.epoch(), 1);
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
num_non_circulating_accounts * new_balance
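
The signature change above (Arc<Bank> to &Arc<Bank>) lets callers write calculate_non_circulating_supply(&bank) instead of cloning the Arc at every call site. A small illustration of the difference with a stand-in type, not the real Bank:

    use std::sync::Arc;

    struct Ledger { lamports: u64 } // stand-in for Bank

    // Taking the Arc by value forces every caller to clone (a refcount bump).
    fn supply_by_value(ledger: Arc<Ledger>) -> u64 { ledger.lamports }

    // Borrowing the Arc reads through it without touching the refcount.
    fn supply_by_ref(ledger: &Arc<Ledger>) -> u64 { ledger.lamports }

    fn main() {
        let ledger = Arc::new(Ledger { lamports: 42 });
        assert_eq!(supply_by_value(ledger.clone()), 42); // clone required
        assert_eq!(supply_by_ref(&ledger), 42);          // no clone at the call site
        assert_eq!(Arc::strong_count(&ledger), 1);       // only the original reference remains
    }
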
@@ -215,6 +215,7 @@ impl ReplayStage {
&mut progress,
transaction_status_sender.clone(),
&verify_recyclers,
&subscriptions,
);
Self::report_memory(&allocated, "replay_active_banks", start);

@@ -758,7 +759,6 @@ impl ReplayStage {
progress.get_fork_stats(bank.slot()).unwrap().total_staked,
lockouts_sender,
);

Self::push_vote(
cluster_info,
bank,
@@ -838,6 +838,7 @@ impl ReplayStage {
let blockhash = bank.last_blockhash();
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash);
let _ = cluster_info.send_vote(&vote_tx);
cluster_info.push_vote(tower_index, vote_tx);
}

@@ -896,6 +897,7 @@ impl ReplayStage {
progress: &mut ProgressMap,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
subscriptions: &Arc<RpcSubscriptions>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -963,6 +965,7 @@ impl ReplayStage {
did_complete_bank = true;
info!("bank frozen: {}", bank.slot());
bank.freeze();
subscriptions.notify_frozen(bank.slot());
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@ -3,6 +3,7 @@
|
||||
use crate::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
cluster_slots::ClusterSlots,
|
||||
contact_info::ContactInfo,
|
||||
repair_service::RepairStrategy,
|
||||
result::{Error, Result},
|
||||
window_service::{should_retransmit_and_persist, WindowService},
|
||||
@ -17,8 +18,9 @@ use solana_ledger::{
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::inc_new_counter_error;
|
||||
use solana_perf::packet::Packets;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_sdk::clock::{Epoch, Slot};
|
||||
use solana_sdk::epoch_schedule::EpochSchedule;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_streamer::streamer::PacketReceiver;
|
||||
use std::{
|
||||
@ -43,6 +45,8 @@ struct RetransmitStats {
|
||||
total_packets: AtomicU64,
|
||||
total_batches: AtomicU64,
|
||||
total_time: AtomicU64,
|
||||
epoch_fetch: AtomicU64,
|
||||
epoch_cache_update: AtomicU64,
|
||||
repair_total: AtomicU64,
|
||||
discard_total: AtomicU64,
|
||||
retransmit_total: AtomicU64,
|
||||
@ -64,6 +68,8 @@ fn update_retransmit_stats(
|
||||
peers_len: usize,
|
||||
packets_by_slot: HashMap<Slot, usize>,
|
||||
packets_by_source: HashMap<String, usize>,
|
||||
epoch_fetch: u64,
|
||||
epoch_cach_update: u64,
|
||||
) {
|
||||
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
|
||||
stats
|
||||
@ -82,6 +88,10 @@ fn update_retransmit_stats(
|
||||
.compute_turbine_peers_total
|
||||
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
|
||||
stats.total_batches.fetch_add(1, Ordering::Relaxed);
|
||||
stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
|
||||
stats
|
||||
.epoch_cache_update
|
||||
.fetch_add(epoch_cach_update, Ordering::Relaxed);
|
||||
{
|
||||
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
|
||||
for (slot, count) in packets_by_slot {
|
||||
@ -106,6 +116,16 @@ fn update_retransmit_stats(
|
||||
stats.total_time.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"epoch_fetch",
|
||||
stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"epoch_cache_update",
|
||||
stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"total_batches",
|
||||
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
|
||||
@ -147,6 +167,14 @@ fn update_retransmit_stats(
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct EpochStakesCache {
|
||||
epoch: Epoch,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
peers: Vec<ContactInfo>,
|
||||
stakes_and_index: Vec<(u64, usize)>,
|
||||
}
|
||||
|
||||
fn retransmit(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
@ -155,6 +183,8 @@ fn retransmit(
|
||||
sock: &UdpSocket,
|
||||
id: u32,
|
||||
stats: &Arc<RetransmitStats>,
|
||||
epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
|
||||
last_peer_update: &Arc<AtomicU64>,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let r_lock = r.lock().unwrap();
|
||||
@ -171,12 +201,42 @@ fn retransmit(
|
||||
}
|
||||
drop(r_lock);
|
||||
|
||||
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
|
||||
let r_bank = bank_forks.read().unwrap().working_bank();
|
||||
let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
|
||||
epoch_fetch.stop();
|
||||
|
||||
let mut epoch_cache_update = Measure::start("retransmit_epoch_cach_update");
|
||||
let mut r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
|
||||
if r_epoch_stakes_cache.epoch != bank_epoch {
|
||||
drop(r_epoch_stakes_cache);
|
||||
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
|
||||
if w_epoch_stakes_cache.epoch != bank_epoch {
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
w_epoch_stakes_cache.stakes = stakes;
|
||||
w_epoch_stakes_cache.epoch = bank_epoch;
|
||||
}
|
||||
drop(w_epoch_stakes_cache);
|
||||
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
|
||||
}
|
||||
|
||||
let now = timestamp();
|
||||
let last = last_peer_update.load(Ordering::Relaxed);
|
||||
if now - last > 1000 && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
|
||||
{
|
||||
drop(r_epoch_stakes_cache);
|
||||
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
|
||||
let (peers, stakes_and_index) =
|
||||
cluster_info.sorted_retransmit_peers_and_stakes(w_epoch_stakes_cache.stakes.clone());
|
||||
w_epoch_stakes_cache.peers = peers;
|
||||
w_epoch_stakes_cache.stakes_and_index = stakes_and_index;
|
||||
drop(w_epoch_stakes_cache);
|
||||
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
|
||||
}
|
||||
let mut peers_len = 0;
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
|
||||
epoch_cache_update.stop();
|
||||
|
||||
let my_id = cluster_info.id();
|
||||
let mut discard_total = 0;
|
||||
let mut repair_total = 0;
|
||||
@ -201,8 +261,8 @@ fn retransmit(
|
||||
let mut compute_turbine_peers = Measure::start("turbine_start");
|
||||
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
|
||||
&my_id,
|
||||
&peers,
|
||||
&stakes_and_index,
|
||||
&r_epoch_stakes_cache.peers,
|
||||
&r_epoch_stakes_cache.stakes_and_index,
|
||||
packet.meta.seed,
|
||||
);
|
||||
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
|
||||
@ -215,8 +275,14 @@ fn retransmit(
|
||||
|
||||
let (neighbors, children) =
|
||||
compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes);
|
||||
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
|
||||
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
|
||||
let neighbors: Vec<_> = neighbors
|
||||
.into_iter()
|
||||
.map(|index| &r_epoch_stakes_cache.peers[index])
|
||||
.collect();
|
||||
let children: Vec<_> = children
|
||||
.into_iter()
|
||||
.map(|index| &r_epoch_stakes_cache.peers[index])
|
||||
.collect();
|
||||
compute_turbine_peers.stop();
|
||||
compute_turbine_peers_total += compute_turbine_peers.as_us();
|
||||
|
||||
@ -257,6 +323,8 @@ fn retransmit(
|
||||
peers_len,
|
||||
packets_by_slot,
|
||||
packets_by_source,
|
||||
epoch_fetch.as_us(),
|
||||
epoch_cache_update.as_us(),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
@ -286,6 +354,8 @@ pub fn retransmitter(
|
||||
let r = r.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
let stats = stats.clone();
|
||||
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
|
||||
let last_peer_update = Arc::new(AtomicU64::new(0));
|
||||
|
||||
Builder::new()
|
||||
.name("solana-retransmitter".to_string())
|
||||
@ -300,6 +370,8 @@ pub fn retransmitter(
|
||||
&sockets[s],
|
||||
s as u32,
|
||||
&stats,
|
||||
&epoch_stakes_cache,
|
||||
&last_peer_update,
|
||||
) {
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||
|
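
The new EpochStakesCache above is refreshed with a read-then-upgrade locking pattern: hold only the read lock on the hot path, and re-check the epoch after taking the write lock so concurrent refreshers do not repeat the work. A condensed sketch with a hypothetical cache type and lookup function:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};

    #[derive(Default)]
    struct StakesCache {
        epoch: u64,
        stakes: HashMap<String, u64>,
    }

    fn refresh_if_stale(cache: &Arc<RwLock<StakesCache>>, bank_epoch: u64) {
        let r = cache.read().unwrap();
        if r.epoch == bank_epoch {
            return; // fast path: most packets only ever touch the read lock
        }
        drop(r); // release the read lock before upgrading

        let mut w = cache.write().unwrap();
        // Re-check under the write lock: another thread may have refreshed already.
        if w.epoch != bank_epoch {
            w.stakes = fetch_stakes_for_epoch(bank_epoch); // assumed lookup, stands in for staking_utils
            w.epoch = bank_epoch;
        }
    }

    fn fetch_stakes_for_epoch(_epoch: u64) -> HashMap<String, u64> {
        HashMap::new() // placeholder
    }

    fn main() {
        let cache = Arc::new(RwLock::new(StakesCache::default()));
        refresh_if_stale(&cache, 3);
        assert_eq!(cache.read().unwrap().epoch, 3);
    }
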
core/src/rpc.rs (1172 lines changed): diff suppressed because it is too large
@@ -3,6 +3,8 @@ use solana_sdk::clock::Slot;

const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;

pub enum RpcCustomError {
NonexistentClusterRoot {
@@ -13,6 +15,10 @@ pub enum RpcCustomError {
slot: Slot,
first_available_block: Slot,
},
SendTransactionPreflightFailure {
message: String,
},
SendTransactionIsNotSigned,
}

impl From<RpcCustomError> for Error {
@@ -40,6 +46,16 @@ impl From<RpcCustomError> for Error {
),
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
message,
data: None,
},
RpcCustomError::SendTransactionIsNotSigned => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
message: "Transaction is not signed".to_string(),
data: None,
},
}
}
}
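
The new RpcCustomError variants map onto JSON-RPC server-error codes in the reserved -32000 range, exactly as the constants above do. A standalone sketch of that mapping style, with generic types in place of jsonrpc_core's Error:

    // Reserved JSON-RPC server-error codes, as in the constants above.
    const SERVER_ERROR_PREFLIGHT_FAILURE: i64 = -32002;
    const SERVER_ERROR_NOT_SIGNED: i64 = -32003;

    enum CustomError {
        PreflightFailure { message: String },
        NotSigned,
    }

    /// (code, message) pairs, mirroring the From<RpcCustomError> impl above.
    fn to_code_and_message(err: CustomError) -> (i64, String) {
        match err {
            CustomError::PreflightFailure { message } => (SERVER_ERROR_PREFLIGHT_FAILURE, message),
            CustomError::NotSigned => {
                (SERVER_ERROR_NOT_SIGNED, "Transaction is not signed".to_string())
            }
        }
    }

    fn main() {
        assert_eq!(to_code_and_message(CustomError::NotSigned).0, -32003);
    }
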
core/src/rpc_health.rs (new file, 118 lines)
@ -0,0 +1,118 @@
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Clone, Copy)]
|
||||
pub enum RpcHealthStatus {
|
||||
Ok,
|
||||
Behind, // Validator is behind its trusted validators
|
||||
}
|
||||
|
||||
pub struct RpcHealth {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
health_check_slot_distance: u64,
|
||||
override_health_check: Arc<AtomicBool>,
|
||||
#[cfg(test)]
|
||||
stub_health_status: std::sync::RwLock<Option<RpcHealthStatus>>,
|
||||
}
|
||||
|
||||
impl RpcHealth {
|
||||
pub fn new(
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
health_check_slot_distance: u64,
|
||||
override_health_check: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
Self {
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
health_check_slot_distance,
|
||||
override_health_check,
|
||||
#[cfg(test)]
|
||||
stub_health_status: std::sync::RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check(&self) -> RpcHealthStatus {
|
||||
#[cfg(test)]
|
||||
{
|
||||
if let Some(stub_health_status) = *self.stub_health_status.read().unwrap() {
|
||||
return stub_health_status;
|
||||
}
|
||||
}
|
||||
|
||||
if self.override_health_check.load(Ordering::Relaxed) {
|
||||
RpcHealthStatus::Ok
|
||||
} else if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
(
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `health_check_slot_distance` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(self.health_check_slot_distance)
|
||||
{
|
||||
RpcHealthStatus::Ok
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
RpcHealthStatus::Behind
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
RpcHealthStatus::Ok
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn stub() -> Arc<Self> {
|
||||
Arc::new(Self::new(
|
||||
Arc::new(ClusterInfo::default()),
|
||||
None,
|
||||
42,
|
||||
Arc::new(AtomicBool::new(false)),
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn stub_set_health_status(&self, stub_health_status: Option<RpcHealthStatus>) {
|
||||
*self.stub_health_status.write().unwrap() = stub_health_status;
|
||||
}
|
||||
}
|
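
The health check introduced in rpc_health.rs reduces to a slot-distance comparison against the best accounts-hash slot reported by any trusted validator, plus the override_health_check escape hatch. A distilled sketch of just that comparison, using plain u64 slots and none of the cluster_info plumbing:

    #[derive(Debug, PartialEq)]
    enum Health { Ok, Behind }

    /// Healthy when this node's latest accounts-hash slot is within
    /// `slot_distance` of the best slot seen from any trusted validator.
    fn health(my_slot: u64, best_trusted_slot: u64, slot_distance: u64, overridden: bool) -> Health {
        if overridden {
            return Health::Ok; // mirrors the override_health_check flag above
        }
        if my_slot > 0
            && best_trusted_slot > 0
            && my_slot > best_trusted_slot.saturating_sub(slot_distance)
        {
            Health::Ok
        } else {
            Health::Behind
        }
    }

    fn main() {
        assert_eq!(health(1000, 1100, 150, false), Health::Ok);     // within the allowed distance
        assert_eq!(health(1000, 1200, 150, false), Health::Behind); // too far behind
    }
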
@ -1,6 +1,6 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::rpc_subscriptions::{RpcSubscriptions, SlotInfo};
|
||||
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
@ -114,6 +114,18 @@ pub trait RpcSolPubSub {
|
||||
)]
|
||||
fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
|
||||
|
||||
// Get notification when vote is encountered
|
||||
#[pubsub(subscription = "voteNotification", subscribe, name = "voteSubscribe")]
|
||||
fn vote_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<RpcVote>);
|
||||
|
||||
// Unsubscribe from vote notification subscription.
|
||||
#[pubsub(
|
||||
subscription = "voteNotification",
|
||||
unsubscribe,
|
||||
name = "voteUnsubscribe"
|
||||
)]
|
||||
fn vote_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
|
||||
|
||||
// Get notification when a new root is set
|
||||
#[pubsub(subscription = "rootNotification", subscribe, name = "rootSubscribe")]
|
||||
fn root_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<Slot>);
|
||||
@ -295,6 +307,27 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
}
|
||||
}
|
||||
|
||||
fn vote_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<RpcVote>) {
|
||||
info!("vote_subscribe");
|
||||
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
|
||||
let sub_id = SubscriptionId::Number(id as u64);
|
||||
info!("vote_subscribe: id={:?}", sub_id);
|
||||
self.subscriptions.add_vote_subscription(sub_id, subscriber);
|
||||
}
|
||||
|
||||
fn vote_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
|
||||
info!("vote_unsubscribe");
|
||||
if self.subscriptions.remove_vote_subscription(&id) {
|
||||
Ok(true)
|
||||
} else {
|
||||
Err(Error {
|
||||
code: ErrorCode::InvalidParams,
|
||||
message: "Invalid Request: Subscription id does not exist".into(),
|
||||
data: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn root_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<Slot>) {
|
||||
info!("root_subscribe");
|
||||
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
|
||||
@ -321,9 +354,11 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
|
||||
commitment::{BlockCommitmentCache, CacheSlotInfo},
|
||||
rpc_subscriptions::tests::robust_poll_or_panic,
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
@ -333,13 +368,18 @@ mod tests {
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
|
||||
};
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_program, system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::{
|
||||
sync::{atomic::AtomicBool, RwLock},
|
||||
thread::sleep,
|
||||
@ -519,7 +559,7 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
contract_state.pubkey().to_string(),
|
||||
None,
|
||||
Some(CommitmentConfig::recent()),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
|
||||
@ -836,4 +876,97 @@ mod tests {
|
||||
.slot_unsubscribe(Some(session), SubscriptionId::Number(0))
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_vote_subscribe() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let validator_voting_keypairs: Vec<_> = (0..10)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
create_genesis_config_with_vote_accounts(10_000, &validator_voting_keypairs, 100);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank_forks = BankForks::new(0, bank);
|
||||
let bank = bank_forks.get(0).unwrap().clone();
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
|
||||
// Setup RPC
|
||||
let mut rpc =
|
||||
RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks.clone());
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");
|
||||
|
||||
// Setup Subscriptions
|
||||
let subscriptions =
|
||||
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
rpc.vote_subscribe(session, subscriber);
|
||||
|
||||
// Create some voters at genesis
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (vote_tracker, validator_voting_keypairs) =
|
||||
(Arc::new(vote_tracker), validator_voting_keypairs);
|
||||
|
||||
let vote_slots = vec![1, 2];
|
||||
validator_voting_keypairs.iter().for_each(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vote_slots.clone(),
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
node_keypair,
|
||||
vote_keypair,
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx]).unwrap();
|
||||
});
|
||||
|
||||
// Process votes and check they were notified.
|
||||
ClusterInfoVoteListener::get_and_process_votes_for_tests(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
rpc.subscriptions.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
assert_eq!(
|
||||
response,
|
||||
r#"{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"11111111111111111111111111111111","slots":[1,2],"timestamp":null},"subscription":0}}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_vote_unsubscribe() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, _) = Subscriber::new_test("voteNotification");
|
||||
rpc.vote_subscribe(session, subscriber);
|
||||
|
||||
let session = create_session();
|
||||
assert!(rpc
|
||||
.vote_unsubscribe(Some(session), SubscriptionId::Number(42))
|
||||
.is_err());
|
||||
|
||||
let session = create_session();
|
||||
assert!(rpc
|
||||
.vote_unsubscribe(Some(session), SubscriptionId::Number(0))
|
||||
.is_ok());
|
||||
}
|
||||
}
|
||||
|
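
The voteNotification payload asserted in test_vote_subscribe above carries the voted slots, a base58-encoded hash, and an optional timestamp. A hedged sketch of how such a payload can be serialized with serde; the field names come from the expected JSON, everything else (struct name, main) is illustrative:

    use serde::Serialize; // serde = { version = "1", features = ["derive"] }, serde_json = "1"

    #[derive(Serialize)]
    struct VoteNotification {
        slots: Vec<u64>,
        hash: String,           // base58-encoded bank hash
        timestamp: Option<i64>, // unix timestamp, if the vote carried one
    }

    fn main() {
        let vote = VoteNotification {
            slots: vec![1, 2],
            hash: "11111111111111111111111111111111".to_string(), // default hash, as in the test
            timestamp: None,
        };
        // Prints the fields in declaration order:
        // {"slots":[1,2],"hash":"11111111111111111111111111111111","timestamp":null}
        println!("{}", serde_json::to_string(&vote).unwrap());
    }
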
@ -1,8 +1,9 @@
|
||||
//! The `rpc_service` module implements the Solana JSON RPC service.
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*,
|
||||
storage_stage::StorageState, validator::ValidatorExit,
|
||||
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, rpc_health::*,
|
||||
send_transaction_service::SendTransactionService, storage_stage::StorageState,
|
||||
validator::ValidatorExit,
|
||||
};
|
||||
use jsonrpc_core::MetaIoHandler;
|
||||
use jsonrpc_http_server::{
|
||||
@ -15,25 +16,22 @@ use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey};
|
||||
use solana_sdk::{hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
path::{Path, PathBuf},
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
|
||||
// If trusted validators are specified, consider this validator healthy if its latest account hash
|
||||
// is no further behind than this distance from the latest trusted validator account hash
|
||||
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
|
||||
#[cfg(test)]
|
||||
pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
|
||||
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
|
||||
|
||||
close_handle: Option<CloseHandle>,
|
||||
}
|
||||
@ -42,24 +40,24 @@ struct RpcRequestMiddleware {
|
||||
ledger_path: PathBuf,
|
||||
snapshot_archive_path_regex: Regex,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
health: Arc<RpcHealth>,
|
||||
}
|
||||
|
||||
impl RpcRequestMiddleware {
|
||||
pub fn new(
|
||||
ledger_path: PathBuf,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
health: Arc<RpcHealth>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ledger_path,
|
||||
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
|
||||
.unwrap(),
|
||||
snapshot_config,
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
bank_forks,
|
||||
health,
|
||||
}
|
||||
}
|
||||
|
||||
@ -85,7 +83,7 @@ impl RpcRequestMiddleware {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn is_get_path(&self, path: &str) -> bool {
|
||||
fn is_file_get_path(&self, path: &str) -> bool {
|
||||
match path {
|
||||
"/genesis.tar.bz2" => true,
|
||||
_ => {
|
||||
@ -98,7 +96,7 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
}
|
||||
|
||||
fn get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
let stem = path.split_at(1).1; // Drop leading '/' from path
|
||||
let filename = {
|
||||
match path {
|
||||
@ -130,58 +128,10 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
|
||||
fn health_check(&self) -> &'static str {
|
||||
let response = if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
(
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
|
||||
{
|
||||
"ok"
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
"behind"
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
"ok"
|
||||
let response = match self.health.check() {
|
||||
RpcHealthStatus::Ok => "ok",
|
||||
RpcHealthStatus::Behind => "behind",
|
||||
};
|
||||
|
||||
info!("health check: {}", response);
|
||||
response
|
||||
}
|
||||
@ -217,8 +167,19 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
};
|
||||
}
|
||||
}
|
||||
if self.is_get_path(request.uri().path()) {
|
||||
self.get(request.uri().path())
|
||||
|
||||
if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(jsonrpc_core::futures::future::ok(
|
||||
hyper::Response::builder()
|
||||
.status(hyper::StatusCode::OK)
|
||||
.body(hyper::Body::from(result))
|
||||
.unwrap(),
|
||||
)),
|
||||
}
|
||||
} else if self.is_file_get_path(request.uri().path()) {
|
||||
self.process_file_get(request.uri().path())
|
||||
} else if request.uri().path() == "/health" {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
@ -238,6 +199,29 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
}
|
||||
}
|
||||
|
||||
fn process_rest(bank_forks: &Arc<RwLock<BankForks>>, path: &str) -> Option<String> {
|
||||
match path {
|
||||
"/v0/circulating-supply" => {
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
let bank = r_bank_forks.root_bank();
|
||||
let total_supply = bank.capitalization();
|
||||
let non_circulating_supply =
|
||||
crate::non_circulating_supply::calculate_non_circulating_supply(&bank).lamports;
|
||||
Some(format!(
|
||||
"{}",
|
||||
lamports_to_sol(total_supply - non_circulating_supply)
|
||||
))
|
||||
}
|
||||
"/v0/total-supply" => {
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
let bank = r_bank_forks.root_bank();
|
||||
let total_supply = bank.capitalization();
|
||||
Some(format!("{}", lamports_to_sol(total_supply)))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl JsonRpcService {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
@ -253,17 +237,37 @@ impl JsonRpcService {
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
override_health_check: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
info!("rpc bound to {:?}", rpc_addr);
|
||||
info!("rpc configuration: {:?}", config);
|
||||
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
|
||||
|
||||
let health = Arc::new(RpcHealth::new(
|
||||
cluster_info.clone(),
|
||||
trusted_validators,
|
||||
config.health_check_slot_distance,
|
||||
override_health_check,
|
||||
));
|
||||
|
||||
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
|
||||
let send_transaction_service = Arc::new(SendTransactionService::new(
|
||||
&cluster_info,
|
||||
&bank_forks,
|
||||
&exit_send_transaction_service,
|
||||
));
|
||||
|
||||
let request_processor = JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
bank_forks,
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
storage_state,
|
||||
validator_exit.clone(),
|
||||
)));
|
||||
health.clone(),
|
||||
cluster_info,
|
||||
genesis_hash,
|
||||
send_transaction_service,
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
let test_request_processor = request_processor.clone();
|
||||
@ -281,16 +285,12 @@ impl JsonRpcService {
|
||||
let request_middleware = RpcRequestMiddleware::new(
|
||||
ledger_path,
|
||||
snapshot_config,
|
||||
cluster_info.clone(),
|
||||
trusted_validators,
|
||||
bank_forks.clone(),
|
||||
health.clone(),
|
||||
);
|
||||
let server = ServerBuilder::with_meta_extractor(
|
||||
io,
|
||||
move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
request_processor: request_processor.clone(),
|
||||
cluster_info: cluster_info.clone(),
|
||||
genesis_hash,
|
||||
},
|
||||
move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
|
||||
)
|
||||
.threads(num_cpus::get())
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
@ -313,6 +313,7 @@ impl JsonRpcService {
|
||||
let server = server.unwrap();
|
||||
close_handle_sender.send(server.close_handle()).unwrap();
|
||||
server.wait();
|
||||
exit_send_transaction_service.store(true, Ordering::Relaxed);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
@ -346,7 +347,6 @@ impl JsonRpcService {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
contact_info::ContactInfo,
|
||||
crds_value::{CrdsData, CrdsValue, SnapshotHash},
|
||||
rpc::tests::create_validator_exit,
|
||||
};
|
||||
@ -356,8 +356,7 @@ mod tests {
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::signature::Signer;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
#[test]
|
||||
fn test_rpc_new() {
|
||||
@ -369,7 +368,7 @@ mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let validator_exit = create_validator_exit(&exit);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
let cluster_info = Arc::new(ClusterInfo::default());
|
||||
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
|
||||
let rpc_addr = SocketAddr::new(
|
||||
ip_addr,
|
||||
@ -394,6 +393,7 @@ mod tests {
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
None,
|
||||
Arc::new(AtomicBool::new(false)),
|
||||
);
|
||||
let thread = rpc_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
|
||||
@ -402,8 +402,6 @@ mod tests {
|
||||
10_000,
|
||||
rpc_service
|
||||
.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_balance(Ok(mint_keypair.pubkey()), None)
|
||||
.unwrap()
|
||||
.value
|
||||
@ -412,11 +410,36 @@ mod tests {
|
||||
rpc_service.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_get_path() {
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
|
||||
}
|
||||
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
#[test]
|
||||
fn test_process_rest_api() {
|
||||
let bank_forks = create_bank_forks();
|
||||
|
||||
assert_eq!(None, process_rest(&bank_forks, "not-a-supported-rest-api"));
|
||||
assert_eq!(
|
||||
Some("0.000010127".to_string()),
|
||||
process_rest(&bank_forks, "/v0/circulating-supply")
|
||||
);
|
||||
assert_eq!(
|
||||
Some("0.000010127".to_string()),
|
||||
process_rest(&bank_forks, "/v0/total-supply")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_file_get_path() {
|
||||
let bank_forks = create_bank_forks();
|
||||
let rrm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
bank_forks.clone(),
|
||||
RpcHealth::stub(),
|
||||
);
|
||||
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
Some(SnapshotConfig {
|
||||
@ -424,49 +447,57 @@ mod tests {
|
||||
snapshot_package_output_path: PathBuf::from("/"),
|
||||
snapshot_path: PathBuf::from("/"),
|
||||
}),
|
||||
cluster_info,
|
||||
None,
|
||||
bank_forks,
|
||||
RpcHealth::stub(),
|
||||
);
|
||||
|
||||
assert!(rrm.is_get_path("/genesis.tar.bz2"));
|
||||
assert!(!rrm.is_get_path("genesis.tar.bz2"));
|
||||
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
|
||||
assert!(!rrm.is_file_get_path("genesis.tar.bz2"));
|
||||
|
||||
assert!(!rrm.is_get_path("/snapshot.tar.bz2")); // This is a redirect
|
||||
assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect
|
||||
|
||||
assert!(
|
||||
!rrm.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2")
|
||||
);
|
||||
assert!(rrm_with_snapshot_config
|
||||
.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"));
|
||||
assert!(!rrm.is_file_get_path(
|
||||
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
assert!(rrm_with_snapshot_config.is_file_get_path(
|
||||
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
|
||||
assert!(!rrm.is_get_path(
|
||||
assert!(!rrm.is_file_get_path(
|
||||
"/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
|
||||
assert!(!rrm.is_get_path("/"));
|
||||
assert!(!rrm.is_get_path(".."));
|
||||
assert!(!rrm.is_get_path("🎣"));
|
||||
assert!(!rrm.is_file_get_path("/"));
|
||||
assert!(!rrm.is_file_get_path(".."));
|
||||
assert!(!rrm.is_file_get_path("🎣"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_no_trusted_validators() {
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
create_bank_forks(),
|
||||
RpcHealth::stub(),
|
||||
);
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_trusted_validators() {
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let cluster_info = Arc::new(ClusterInfo::default());
|
||||
let health_check_slot_distance = 123;
|
||||
let override_health_check = Arc::new(AtomicBool::new(false));
|
||||
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
|
||||
let health = Arc::new(RpcHealth::new(
|
||||
cluster_info.clone(),
|
||||
Some(trusted_validators.clone().into_iter().collect()),
|
||||
);
|
||||
health_check_slot_distance,
|
||||
override_health_check.clone(),
|
||||
));
|
||||
|
||||
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health);
|
||||
|
||||
// No account hashes for this node or any trusted validators == "behind"
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
@ -474,6 +505,9 @@ mod tests {
|
||||
// No account hashes for any trusted validators == "behind"
|
||||
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
override_health_check.store(true, Ordering::Relaxed);
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
override_health_check.store(false, Ordering::Relaxed);
|
||||
|
||||
// This node is ahead of the trusted validators == "ok"
|
||||
cluster_info
|
||||
@ -504,7 +538,7 @@ mod tests {
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[1].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
|
||||
vec![(1000 + health_check_slot_distance - 1, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
@ -520,7 +554,7 @@ mod tests {
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[2].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
|
||||
vec![(1000 + health_check_slot_distance, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
|
@ -15,12 +15,13 @@ use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
clock::{Slot, UnixTimestamp},
|
||||
commitment_config::{CommitmentConfig, CommitmentLevel},
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction,
|
||||
};
|
||||
use solana_vote_program::vote_state::Vote;
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
|
||||
@ -43,22 +44,36 @@ pub struct SlotInfo {
|
||||
pub root: Slot,
|
||||
}
|
||||
|
||||
// A more human-friendly version of Vote, with the bank state signature base58 encoded.
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct RpcVote {
|
||||
pub slots: Vec<Slot>,
|
||||
pub hash: String,
|
||||
pub timestamp: Option<UnixTimestamp>,
|
||||
}
|
||||
|
||||
enum NotificationEntry {
|
||||
Slot(SlotInfo),
|
||||
Vote(Vote),
|
||||
Root(Slot),
|
||||
Frozen(Slot),
|
||||
Bank(CacheSlotInfo),
|
||||
Gossip(Slot),
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NotificationEntry {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
NotificationEntry::Root(root) => write!(f, "Root({})", root),
|
||||
NotificationEntry::Frozen(slot) => write!(f, "Frozen({})", slot),
|
||||
NotificationEntry::Vote(vote) => write!(f, "Vote({:?})", vote),
|
||||
NotificationEntry::Slot(slot_info) => write!(f, "Slot({:?})", slot_info),
|
||||
NotificationEntry::Bank(cache_slot_info) => write!(
|
||||
f,
|
||||
"Bank({{current_slot: {:?}}})",
|
||||
cache_slot_info.current_slot
|
||||
),
|
||||
NotificationEntry::Gossip(slot) => write!(f, "Gossip({:?})", slot),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -76,6 +91,7 @@ type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>>>>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>;
type RpcRootSubscriptions = RwLock<HashMap<SubscriptionId, Sink<Slot>>>;

fn add_subscription<K, S>(
@@ -90,7 +106,7 @@ fn add_subscription<K, S>(
S: Clone,
{
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let commitment = commitment.unwrap_or_else(CommitmentConfig::recent);
let commitment = commitment.unwrap_or_else(CommitmentConfig::single);
let subscription_data = SubscriptionData {
sink,
commitment,
@@ -159,7 +175,9 @@ where
CommitmentLevel::Max => cache_slot_info.largest_confirmed_root,
CommitmentLevel::Recent => cache_slot_info.current_slot,
CommitmentLevel::Root => cache_slot_info.node_root,
CommitmentLevel::Single => cache_slot_info.highest_confirmed_slot,
CommitmentLevel::Single | CommitmentLevel::SingleGossip => {
cache_slot_info.highest_confirmed_slot
}
};
let results = {
let bank_forks = bank_forks.read().unwrap();
@@ -203,6 +221,8 @@ fn filter_account_result(
last_notified_slot: Slot,
) -> (Box<dyn Iterator<Item = RpcAccount>>, Slot) {
if let Some((account, fork)) = result {
// If fork < last_notified_slot this means that we last notified for a fork
// and should notify that the account state has been reverted.
if fork != last_notified_slot {
return (Box::new(iter::once(RpcAccount::encode(account))), fork);
}
@@ -246,7 +266,11 @@ struct Subscriptions {
account_subscriptions: Arc<RpcAccountSubscriptions>,
program_subscriptions: Arc<RpcProgramSubscriptions>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
gossip_account_subscriptions: Arc<RpcAccountSubscriptions>,
gossip_program_subscriptions: Arc<RpcProgramSubscriptions>,
gossip_signature_subscriptions: Arc<RpcSignatureSubscriptions>,
slot_subscriptions: Arc<RpcSlotSubscriptions>,
vote_subscriptions: Arc<RpcVoteSubscriptions>,
root_subscriptions: Arc<RpcRootSubscriptions>,
}

@@ -257,6 +281,7 @@ pub struct RpcSubscriptions {
notifier_runtime: Option<Runtime>,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
last_checked_slots: Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
exit: Arc<AtomicBool>,
}

@@ -282,7 +307,11 @@ impl RpcSubscriptions {
let account_subscriptions = Arc::new(RpcAccountSubscriptions::default());
let program_subscriptions = Arc::new(RpcProgramSubscriptions::default());
let signature_subscriptions = Arc::new(RpcSignatureSubscriptions::default());
let gossip_account_subscriptions = Arc::new(RpcAccountSubscriptions::default());
let gossip_program_subscriptions = Arc::new(RpcProgramSubscriptions::default());
let gossip_signature_subscriptions = Arc::new(RpcSignatureSubscriptions::default());
let slot_subscriptions = Arc::new(RpcSlotSubscriptions::default());
let vote_subscriptions = Arc::new(RpcVoteSubscriptions::default());
let root_subscriptions = Arc::new(RpcRootSubscriptions::default());
let notification_sender = Arc::new(Mutex::new(notification_sender));

@@ -293,11 +322,18 @@ impl RpcSubscriptions {
account_subscriptions,
program_subscriptions,
signature_subscriptions,
gossip_account_subscriptions,
gossip_program_subscriptions,
gossip_signature_subscriptions,
slot_subscriptions,
vote_subscriptions,
root_subscriptions,
};
let _subscriptions = subscriptions.clone();

let last_checked_slots = Arc::new(RwLock::new(HashMap::new()));
let _last_checked_slots = last_checked_slots.clone();

let notifier_runtime = RuntimeBuilder::new()
.core_threads(1)
.name_prefix("solana-rpc-notifier-")
@@ -314,6 +350,7 @@ impl RpcSubscriptions {
notification_receiver,
_subscriptions,
_bank_forks,
_last_checked_slots,
);
})
.unwrap();
@@ -325,6 +362,7 @@ impl RpcSubscriptions {
t_cleanup: Some(t_cleanup),
bank_forks,
block_commitment_cache,
last_checked_slots,
exit: exit.clone(),
}
}
@@ -412,11 +450,10 @@ impl RpcSubscriptions {
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcAccount>>,
) {
let mut subscriptions = self.subscriptions.account_subscriptions.write().unwrap();
let slot = match commitment
.unwrap_or_else(CommitmentConfig::recent)
.commitment
{
let commitment_level = commitment
.unwrap_or_else(CommitmentConfig::single)
.commitment;
let slot = match commitment_level {
CommitmentLevel::Max => self
.block_commitment_cache
.read()
@@ -429,6 +466,12 @@ impl RpcSubscriptions {
.read()
.unwrap()
.highest_confirmed_slot(),
CommitmentLevel::SingleGossip => *self
.last_checked_slots
.read()
.unwrap()
.get(&CommitmentLevel::SingleGossip)
.unwrap_or(&0),
};
let last_notified_slot = if let Some((_account, slot)) = self
.bank_forks
@@ -441,6 +484,15 @@ impl RpcSubscriptions {
} else {
0
};

let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
self.subscriptions
.gossip_account_subscriptions
.write()
.unwrap()
} else {
self.subscriptions.account_subscriptions.write().unwrap()
};
add_subscription(
&mut subscriptions,
pubkey,
@@ -453,7 +505,16 @@ impl RpcSubscriptions {

pub fn remove_account_subscription(&self, id: &SubscriptionId) -> bool {
let mut subscriptions = self.subscriptions.account_subscriptions.write().unwrap();
remove_subscription(&mut subscriptions, id)
if remove_subscription(&mut subscriptions, id) {
true
} else {
let mut subscriptions = self
.subscriptions
.gossip_account_subscriptions
.write()
.unwrap();
remove_subscription(&mut subscriptions, id)
}
}

pub fn add_program_subscription(
@@ -463,7 +524,17 @@ impl RpcSubscriptions {
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcKeyedAccount>>,
) {
let mut subscriptions = self.subscriptions.program_subscriptions.write().unwrap();
let commitment_level = commitment
.unwrap_or_else(CommitmentConfig::recent)
.commitment;
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
self.subscriptions
.gossip_program_subscriptions
.write()
.unwrap()
} else {
self.subscriptions.program_subscriptions.write().unwrap()
};
add_subscription(
&mut subscriptions,
program_id,
@@ -476,7 +547,16 @@ impl RpcSubscriptions {

pub fn remove_program_subscription(&self, id: &SubscriptionId) -> bool {
let mut subscriptions = self.subscriptions.program_subscriptions.write().unwrap();
remove_subscription(&mut subscriptions, id)
if remove_subscription(&mut subscriptions, id) {
true
} else {
let mut subscriptions = self
.subscriptions
.gossip_program_subscriptions
.write()
.unwrap();
remove_subscription(&mut subscriptions, id)
}
}

pub fn add_signature_subscription(
@@ -486,7 +566,17 @@ impl RpcSubscriptions {
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcSignatureResult>>,
) {
let mut subscriptions = self.subscriptions.signature_subscriptions.write().unwrap();
let commitment_level = commitment
.unwrap_or_else(CommitmentConfig::recent)
.commitment;
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
self.subscriptions
.gossip_signature_subscriptions
.write()
.unwrap()
} else {
self.subscriptions.signature_subscriptions.write().unwrap()
};
add_subscription(
&mut subscriptions,
signature,
@@ -499,7 +589,16 @@ impl RpcSubscriptions {

pub fn remove_signature_subscription(&self, id: &SubscriptionId) -> bool {
let mut subscriptions = self.subscriptions.signature_subscriptions.write().unwrap();
remove_subscription(&mut subscriptions, id)
if remove_subscription(&mut subscriptions, id) {
true
} else {
let mut subscriptions = self
.subscriptions
.gossip_signature_subscriptions
.write()
.unwrap();
remove_subscription(&mut subscriptions, id)
}
}

/// Notify subscribers of changes to any accounts or new signatures since
@@ -508,6 +607,12 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Bank(cache_slot_info));
}

/// Notify SingleGossip commitment-level subscribers of changes to any accounts or new
/// signatures.
pub fn notify_gossip_subscribers(&self, slot: Slot) {
self.enqueue_notification(NotificationEntry::Gossip(slot));
}

pub fn add_slot_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<SlotInfo>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.slot_subscriptions.write().unwrap();
@@ -523,6 +628,25 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Slot(SlotInfo { slot, parent, root }));
}

pub fn add_vote_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<RpcVote>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap();
subscriptions.insert(sub_id, sink);
}

pub fn remove_vote_subscription(&self, id: &SubscriptionId) -> bool {
let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap();
subscriptions.remove(id).is_some()
}

pub fn notify_vote(&self, vote: &Vote) {
self.enqueue_notification(NotificationEntry::Vote(vote.clone()));
}

pub fn notify_frozen(&self, frozen_slot: Slot) {
self.enqueue_notification(NotificationEntry::Frozen(frozen_slot));
}

pub fn add_root_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<Slot>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.root_subscriptions.write().unwrap();
@@ -564,7 +688,9 @@ impl RpcSubscriptions {
notification_receiver: Receiver<NotificationEntry>,
subscriptions: Subscriptions,
bank_forks: Arc<RwLock<BankForks>>,
last_checked_slots: Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
) {
let mut pending_gossip_notifications = HashSet::new();
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -577,52 +703,69 @@ impl RpcSubscriptions {
notifier.notify(slot_info, sink);
}
}
NotificationEntry::Vote(ref vote_info) => {
let subscriptions = subscriptions.vote_subscriptions.read().unwrap();
for (_, sink) in subscriptions.iter() {
notifier.notify(
RpcVote {
slots: vote_info.slots.clone(),
hash: bs58::encode(vote_info.hash).into_string(),
timestamp: vote_info.timestamp,
},
sink,
);
}
}
NotificationEntry::Root(root) => {
let subscriptions = subscriptions.root_subscriptions.read().unwrap();
for (_, sink) in subscriptions.iter() {
notifier.notify(root, sink);
}

// Prune old pending notifications
pending_gossip_notifications = pending_gossip_notifications
.into_iter()
.filter(|&s| s > root)
.collect();
}
NotificationEntry::Bank(cache_slot_info) => {
let pubkeys: Vec<_> = {
let subs = subscriptions.account_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for pubkey in &pubkeys {
Self::check_account(
pubkey,
&bank_forks,
subscriptions.account_subscriptions.clone(),
RpcSubscriptions::notify_accounts_programs_signatures(
&subscriptions.account_subscriptions,
&subscriptions.program_subscriptions,
&subscriptions.signature_subscriptions,
&bank_forks,
&cache_slot_info,
&notifier,
)
}
NotificationEntry::Frozen(slot) => {
if pending_gossip_notifications.remove(&slot) {
Self::process_gossip_notification(
slot,
&notifier,
&cache_slot_info,
&subscriptions,
&bank_forks,
&last_checked_slots,
);
}
}
NotificationEntry::Gossip(slot) => {
let bank_frozen = bank_forks
.read()
.unwrap()
.get(slot)
.filter(|b| b.is_frozen())
.is_some();

let programs: Vec<_> = {
let subs = subscriptions.program_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for program_id in &programs {
Self::check_program(
program_id,
&bank_forks,
subscriptions.program_subscriptions.clone(),
if !bank_frozen {
pending_gossip_notifications.insert(slot);
} else {
Self::process_gossip_notification(
slot,
&notifier,
&cache_slot_info,
);
}

let signatures: Vec<_> = {
let subs = subscriptions.signature_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for signature in &signatures {
Self::check_signature(
signature,
&subscriptions,
&bank_forks,
subscriptions.signature_subscriptions.clone(),
&notifier,
&cache_slot_info,
&last_checked_slots,
);
}
}
@@ -638,6 +781,93 @@ impl RpcSubscriptions {
}
}

fn process_gossip_notification(
slot: Slot,
notifier: &RpcNotifier,
subscriptions: &Subscriptions,
bank_forks: &Arc<RwLock<BankForks>>,
last_checked_slots: &Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
) {
let mut last_checked_slots_lock = last_checked_slots.write().unwrap();
let last_checked_slot = last_checked_slots_lock
.get(&CommitmentLevel::SingleGossip)
.cloned()
.unwrap_or_default();

if slot > last_checked_slot {
last_checked_slots_lock.insert(CommitmentLevel::SingleGossip, slot);
} else {
// Avoid sending stale or duplicate notifications
return;
}

drop(last_checked_slots_lock);

let cache_slot_info = CacheSlotInfo {
highest_confirmed_slot: slot,
..CacheSlotInfo::default()
};
RpcSubscriptions::notify_accounts_programs_signatures(
&subscriptions.gossip_account_subscriptions,
&subscriptions.gossip_program_subscriptions,
&subscriptions.gossip_signature_subscriptions,
&bank_forks,
&cache_slot_info,
&notifier,
);
}

fn notify_accounts_programs_signatures(
account_subscriptions: &Arc<RpcAccountSubscriptions>,
program_subscriptions: &Arc<RpcProgramSubscriptions>,
signature_subscriptions: &Arc<RpcSignatureSubscriptions>,
bank_forks: &Arc<RwLock<BankForks>>,
cache_slot_info: &CacheSlotInfo,
notifier: &RpcNotifier,
) {
let pubkeys: Vec<_> = {
let subs = account_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for pubkey in &pubkeys {
Self::check_account(
pubkey,
&bank_forks,
account_subscriptions.clone(),
&notifier,
&cache_slot_info,
);
}

let programs: Vec<_> = {
let subs = program_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for program_id in &programs {
Self::check_program(
program_id,
&bank_forks,
program_subscriptions.clone(),
&notifier,
&cache_slot_info,
);
}

let signatures: Vec<_> = {
let subs = signature_subscriptions.read().unwrap();
subs.keys().cloned().collect()
};
for signature in &signatures {
Self::check_signature(
signature,
&bank_forks,
signature_subscriptions.clone(),
&notifier,
&cache_slot_info,
);
}
}

fn shutdown(&mut self) -> std::thread::Result<()> {
if let Some(runtime) = self.notifier_runtime.take() {
info!("RPC Notifier runtime - shutting down");
@@ -736,7 +966,12 @@ pub(crate) mod tests {
),
)),
);
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
subscriptions.add_account_subscription(
alice.pubkey(),
Some(CommitmentConfig::recent()),
sub_id.clone(),
subscriber,
);

assert!(subscriptions
.subscriptions
@@ -1185,4 +1420,143 @@ pub(crate) mod tests {
assert_eq!(subscriptions.len(), (num_keys - 1) as usize);
assert!(subscriptions.get(&0).is_none());
}

#[test]
#[serial]
fn test_gossip_separate_account_notifications() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(100);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let alice = Keypair::new();

let (subscriber0, _id_receiver, transport_receiver0) =
Subscriber::new_test("accountNotification");
let (subscriber1, _id_receiver, transport_receiver1) =
Subscriber::new_test("accountNotification");
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore_bank(
blockstore,
bank_forks.read().unwrap().get(1).unwrap().clone(),
1,
),
)),
);
let sub_id0 = SubscriptionId::Number(0 as u64);
subscriptions.add_account_subscription(
alice.pubkey(),
Some(CommitmentConfig::single_gossip()),
sub_id0.clone(),
subscriber0,
);

assert!(subscriptions
.subscriptions
.gossip_account_subscriptions
.read()
.unwrap()
.contains_key(&alice.pubkey()));

let tx = system_transaction::create_account(
&mint_keypair,
&alice,
blockhash,
1,
16,
&solana_budget_program::id(),
);

// Add the transaction to the 1st bank and then freeze the bank
let bank1 = bank_forks.write().unwrap().get(1).cloned().unwrap();
bank1.process_transaction(&tx).unwrap();
bank1.freeze();

// Add the same transaction to the unfrozen 2nd bank
bank_forks
.write()
.unwrap()
.get(2)
.unwrap()
.process_transaction(&tx)
.unwrap();

// First, notify the unfrozen bank first to queue pending notification
subscriptions.notify_gossip_subscribers(2);

// Now, notify the frozen bank and ensure its notifications are processed
subscriptions.notify_gossip_subscribers(1);

let (response, _) = robust_poll_or_panic(transport_receiver0);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_account_subscription(&sub_id0);

let sub_id1 = SubscriptionId::Number(1 as u64);
subscriptions.add_account_subscription(
alice.pubkey(),
Some(CommitmentConfig::single_gossip()),
sub_id1.clone(),
subscriber1,
);

subscriptions.notify_frozen(2);
let (response, _) = robust_poll_or_panic(transport_receiver1);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 2 },
"value": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
},
"subscription": 1,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_account_subscription(&sub_id1);

assert!(!subscriptions
.subscriptions
.gossip_account_subscriptions
.read()
.unwrap()
.contains_key(&alice.pubkey()));
}
}
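The ordering logic in the diff above is easy to lose in the noise: a `Gossip(slot)` notification is only flushed once the corresponding bank is frozen, otherwise it is parked in `pending_gossip_notifications` and replayed on the matching `Frozen(slot)` event, and new roots prune anything stale. A minimal standalone sketch of just that ordering (names and types here are illustrative, not the crate's API):

```rust
use std::collections::HashSet;

// Illustrative event type mirroring the three notification entries involved.
enum Event {
    Gossip(u64),
    Frozen(u64),
    Root(u64),
}

// Park gossip notifications for unfrozen slots and flush them once frozen.
fn drive(events: Vec<Event>, is_frozen: impl Fn(u64) -> bool, mut notify: impl FnMut(u64)) {
    let mut pending: HashSet<u64> = HashSet::new();
    for event in events {
        match event {
            Event::Gossip(slot) if is_frozen(slot) => notify(slot),
            Event::Gossip(slot) => {
                // Bank not frozen yet; defer until the Frozen(slot) event arrives.
                pending.insert(slot);
            }
            Event::Frozen(slot) => {
                if pending.remove(&slot) {
                    notify(slot);
                }
            }
            Event::Root(root) => {
                // Anything at or below the new root can never be notified again.
                pending.retain(|&s| s > root);
            }
        }
    }
}
```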
core/src/send_transaction_service.rs (new file, 377 lines)
@@ -0,0 +1,377 @@
use crate::cluster_info::ClusterInfo;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};

/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day

pub struct SendTransactionService {
thread: JoinHandle<()>,
sender: Mutex<Sender<TransactionInfo>>,
send_socket: UdpSocket,
tpu_address: SocketAddr,
}

struct TransactionInfo {
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_slot: Slot,
}

#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
rooted: u64,
expired: u64,
retried: u64,
failed: u64,
retained: u64,
}

impl SendTransactionService {
pub fn new(
cluster_info: &Arc<ClusterInfo>,
bank_forks: &Arc<RwLock<BankForks>>,
exit: &Arc<AtomicBool>,
) -> Self {
let (sender, receiver) = channel::<TransactionInfo>();
let tpu_address = cluster_info.my_contact_info().tpu;

let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address, exit.clone());
Self {
thread,
sender: Mutex::new(sender),
send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
tpu_address,
}
}

fn retry_thread(
receiver: Receiver<TransactionInfo>,
bank_forks: Arc<RwLock<BankForks>>,
tpu_address: SocketAddr,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let mut last_status_check = Instant::now();
let mut transactions = HashMap::new();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

Builder::new()
.name("send-tx-svc".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}

if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
transactions.insert(transaction_info.signature, transaction_info);
} else {
datapoint_warn!("send_transaction_service-queue-overflow");
}
}

if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
if !transactions.is_empty() {
datapoint_info!(
"send_transaction_service-queue-size",
("len", transactions.len(), i64)
);
let bank_forks = bank_forks.read().unwrap();
let root_bank = bank_forks.root_bank();
let working_bank = bank_forks.working_bank();

let _result = Self::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
}
last_status_check = Instant::now();
}
})
.unwrap()
}

fn process_transactions(
working_bank: &Arc<Bank>,
root_bank: &Arc<Bank>,
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
transactions: &mut HashMap<Signature, TransactionInfo>,
) -> ProcessTransactionsResult {
let mut result = ProcessTransactionsResult::default();

transactions.retain(|signature, transaction_info| {
if root_bank.has_signature(signature) {
info!("Transaction is rooted: {}", signature);
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
false
} else {
match working_bank.get_signature_status_slot(signature) {
None => {
// Transaction is unknown to the working bank, it might have been
// dropped or landed in another fork. Re-send it
info!("Retrying transaction: {}", signature);
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
&tpu_address,
&transaction_info.wire_transaction,
);
true
}
Some((_slot, status)) => {
if status.is_err() {
info!("Dropping failed transaction: {}", signature);
result.failed += 1;
inc_new_counter_info!("send_transaction_service-failed", 1);
false
} else {
result.retained += 1;
true
}
}
}
}
});

result
}

fn send_transaction(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}

pub fn send(&self, signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) {
inc_new_counter_info!("send_transaction_service-enqueue", 1, 1);
Self::send_transaction(&self.send_socket, &self.tpu_address, &wire_transaction);

self.sender
.lock()
.unwrap()
.send(TransactionInfo {
signature,
wire_transaction,
last_valid_slot,
})
.unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
}

pub fn join(self) -> thread::Result<()> {
self.thread.join()
}
}

#[cfg(test)]
mod test {
use super::*;
use crate::rpc::tests::new_bank_forks;
use solana_sdk::{pubkey::Pubkey, signature::Signer};

#[test]
fn service_exit() {
let cluster_info = Arc::new(ClusterInfo::default());
let bank_forks = new_bank_forks().0;
let exit = Arc::new(AtomicBool::new(false));

let send_tranaction_service =
SendTransactionService::new(&cluster_info, &bank_forks, &exit);

exit.store(true, Ordering::Relaxed);
send_tranaction_service.join().unwrap();
}

#[test]
fn process_transactions() {
solana_logger::setup();

let (bank_forks, mint_keypair, _voting_keypair) = new_bank_forks();
let cluster_info = ClusterInfo::default();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_address = cluster_info.my_contact_info().tpu;

let root_bank = Arc::new(Bank::new_from_parent(
&bank_forks.read().unwrap().working_bank(),
&Pubkey::default(),
1,
));
let rooted_signature = root_bank
.transfer(1, &mint_keypair, &mint_keypair.pubkey())
.unwrap();

let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));

let non_rooted_signature = working_bank
.transfer(2, &mint_keypair, &mint_keypair.pubkey())
.unwrap();

let failed_signature = {
let blockhash = working_bank.last_blockhash();
let transaction = solana_sdk::system_transaction::transfer(
&mint_keypair,
&Pubkey::default(),
1,
blockhash,
);
let signature = transaction.signatures[0];
working_bank.process_transaction(&transaction).unwrap_err();
signature
};

let mut transactions = HashMap::new();

info!("Expired transactions are dropped..");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: root_bank.slot() - 1,
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
expired: 1,
..ProcessTransactionsResult::default()
}
);

info!("Rooted transactions are dropped...");
transactions.insert(
rooted_signature,
TransactionInfo {
signature: rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
rooted: 1,
..ProcessTransactionsResult::default()
}
);

info!("Failed transactions are dropped...");
transactions.insert(
failed_signature,
TransactionInfo {
signature: failed_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
failed: 1,
..ProcessTransactionsResult::default()
}
);

info!("Non-rooted transactions are kept...");
transactions.insert(
non_rooted_signature,
TransactionInfo {
signature: non_rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retained: 1,
..ProcessTransactionsResult::default()
}
);
transactions.clear();

info!("Unknown transactions are retried...");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retried: 1,
..ProcessTransactionsResult::default()
}
);
}
}
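For orientation, the service above is constructed once per node and fed wire transactions together with the slot after which they expire; roughly how a caller drives it, sketched only from the signatures shown in this file (the module path and the surrounding setup are assumed, not taken from this diff):

```rust
// Sketch: wiring up the retry service. The import paths mirror the new file above;
// `crate::send_transaction_service` is an assumed module path for it.
use crate::{cluster_info::ClusterInfo, send_transaction_service::SendTransactionService};
use solana_ledger::bank_forks::BankForks;
use solana_sdk::{clock::Slot, signature::Signature};
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, RwLock,
};

fn run_service_example(
    cluster_info: Arc<ClusterInfo>,
    bank_forks: Arc<RwLock<BankForks>>,
    signature: Signature,
    wire_transaction: Vec<u8>,
    last_valid_slot: Slot,
) {
    let exit = Arc::new(AtomicBool::new(false));
    let service = SendTransactionService::new(&cluster_info, &bank_forks, &exit);

    // Enqueue the transaction; it is sent immediately and then re-sent by the
    // retry thread until it is rooted, fails, or last_valid_slot passes.
    service.send(signature, wire_transaction, last_valid_slot);

    // On shutdown, flip the exit flag and wait for the retry thread.
    exit.store(true, Ordering::Relaxed);
    service.join().unwrap();
}
```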
@@ -590,6 +590,8 @@ impl ServeRepair {
);
if let Some(packet) = packet {
res.packets.push(packet);
} else {
break;
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
@@ -864,6 +866,8 @@ mod tests {
// Should not panic.
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
// Giving no nonce after UNLOCK_NONCE_SLOT should return empty
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, None);
}

fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
@@ -902,40 +906,47 @@ mod tests {

// For a orphan request for `slot + num_slots - 1`, we should return the highest shreds
// from slots in the range [slot, slot + num_slots - 1]
let rv: Vec<_> = ServeRepair::run_orphan(
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots - 1,
5,
nonce,
)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
);

// Verify responses
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
})
.collect();
assert_eq!(rv, expected);
if Shred::is_nonce_unlocked(slot + num_slots - 1) && nonce.is_none() {
// If a nonce is expected but not provided, there should be no
// response
assert!(rv.is_none());
} else {
// Verify responses
let rv: Vec<_> = rv
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
})
.collect();
assert_eq!(rv, expected);
}
}

Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
@@ -8,6 +8,7 @@ use crate::{
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
fetch_stage::FetchStage,
poh_recorder::{PohRecorder, WorkingBankEntry},
rpc_subscriptions::RpcSubscriptions,
sigverify::TransactionSigVerifier,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
};
@@ -44,6 +45,7 @@ impl Tpu {
tpu_forwards_sockets: Vec<UdpSocket>,
broadcast_sockets: Vec<UdpSocket>,
sigverify_disabled: bool,
subscriptions: &Arc<RpcSubscriptions>,
transaction_status_sender: Option<TransactionStatusSender>,
blockstore: &Arc<Blockstore>,
broadcast_type: &BroadcastStageType,
@@ -79,6 +81,7 @@ impl Tpu {
&poh_recorder,
vote_tracker,
bank_forks,
subscriptions.clone(),
);

let banking_stage = BankingStage::new(
@@ -3,6 +3,7 @@ use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionSta
use solana_runtime::{
bank::{Bank, HashAgeKind},
nonce_utils,
transaction_utils::OrderedIterator,
};
use solana_transaction_status::TransactionStatusMeta;
use std::{
@@ -50,25 +51,39 @@ impl TransactionStatusService {
let TransactionStatusBatch {
bank,
transactions,
iteration_order,
statuses,
balances,
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;

let slot = bank.slot();
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in transactions
.iter()
.zip(statuses)
.zip(balances.pre_balances)
.zip(balances.post_balances)
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
OrderedIterator::new(&transactions, iteration_order.as_deref())
.zip(statuses)
.zip(balances.pre_balances)
.zip(balances.post_balances)
{
if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
let fee_calculator = match hash_age_kind {
let (fee_calculator, hash_kind) = match hash_age_kind.clone() {
Some(HashAgeKind::DurableNonce(_, account)) => {
nonce_utils::fee_calculator_of(&account)
info!("nonce_account: {:?}", account);
(nonce_utils::fee_calculator_of(&account), "durable_nonce")
}
_ => bank.get_fee_calculator(&transaction.message().recent_blockhash),
_ => (
bank.get_fee_calculator(&transaction.message().recent_blockhash),
"recent_blockhash",
),
};
if fee_calculator.is_none() {
error!(
"{:?} {:?} fee_calculator: {:?}",
transaction.signatures[0],
hash_kind,
fee_calculator.is_some()
);
info!("{:?}", status);
}
.expect("FeeCalculator must exist");
let fee_calculator = fee_calculator.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
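The hunk above boils down to one decision: durable-nonce transactions take their fee schedule from the nonce account, everything else from the bank's entry for the recent blockhash, and a missing calculator is now logged before the `expect`. Condensed into a helper, using only the calls already present in the hunk (import paths assumed from the surrounding file, error handling reduced to an `Option`):

```rust
// Sketch only: mirrors the fee-calculator selection in the hunk above.
use solana_runtime::{
    bank::{Bank, HashAgeKind},
    nonce_utils,
};
use solana_sdk::{fee_calculator::FeeCalculator, transaction::Transaction};

fn resolve_fee_calculator(
    bank: &Bank,
    transaction: &Transaction,
    hash_age_kind: &Option<HashAgeKind>,
) -> Option<FeeCalculator> {
    match hash_age_kind {
        // Durable-nonce transactions carry their fee schedule in the nonce account.
        Some(HashAgeKind::DurableNonce(_, account)) => nonce_utils::fee_calculator_of(account),
        // Everything else is priced by the bank entry for the recent blockhash.
        _ => bank.get_fee_calculator(&transaction.message().recent_blockhash),
    }
}
```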
@@ -259,6 +259,7 @@ pub mod tests {
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;

#[ignore]
#[test]
#[serial]
fn test_tvu_exit() {
@@ -30,7 +30,7 @@ use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{self, BankForksInfo},
create_new_tmp_ledger,
hardened_unpack::open_genesis_config,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
};
@@ -81,6 +81,7 @@ pub struct ValidatorConfig {
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
pub no_rocksdb_compaction: bool,
pub max_genesis_archive_unpacked_size: u64,
}

impl Default for ValidatorConfig {
@@ -107,6 +108,7 @@ impl Default for ValidatorConfig {
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
no_rocksdb_compaction: false,
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
}
}
}
@@ -247,6 +249,7 @@ impl Validator {
block_commitment_cache.clone(),
));

let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
@@ -269,6 +272,7 @@ impl Validator {
storage_state.clone(),
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
@@ -388,7 +392,7 @@ impl Validator {
(None, None)
};

wait_for_supermajority(config, &bank, &cluster_info);
wait_for_supermajority(config, &bank, &cluster_info, rpc_override_health_check);

let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
@@ -470,6 +474,7 @@ impl Validator {
node.sockets.tpu_forwards,
node.sockets.broadcast,
config.dev_sigverify_disabled,
&subscriptions,
transaction_status_sender,
&blockstore,
&config.broadcast_stage_type,
@@ -578,7 +583,8 @@ fn new_banks_from_blockstore(
LeaderScheduleCache,
Option<(Slot, Hash)>,
) {
let genesis_config = open_genesis_config(blockstore_path);
let genesis_config =
open_genesis_config(blockstore_path, config.max_genesis_archive_unpacked_size);

// This needs to be limited otherwise the state in the VoteAccount data
// grows too large
@@ -642,7 +648,12 @@ fn new_banks_from_blockstore(
)
}

fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Bank,
cluster_info: &ClusterInfo,
rpc_override_health_check: Arc<AtomicBool>,
) {
if config.wait_for_supermajority != Some(bank.slot()) {
return;
}
@@ -657,8 +668,13 @@ fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &
if gossip_stake_percent >= 80 {
break;
}
// The normal RPC health checks don't apply as the node is waiting, so feign health to
// prevent load balancers from removing the node from their list of candidates during a
// manual restart.
rpc_override_health_check.store(true, Ordering::Relaxed);
sleep(Duration::new(1, 0));
}
rpc_override_health_check.store(false, Ordering::Relaxed);
}

pub struct TestValidator {
@@ -783,15 +799,6 @@ fn report_target_features() {
process::exit(1);
}
}
#[target_feature(enable = "avx2")]
{
if is_x86_feature_detected!("avx2") {
info!("AVX2 detected");
} else {
error!("Your machine does not have AVX2 support, please rebuild from source on your machine");
process::exit(1);
}
}
}
}
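The `rpc_override_health_check` flag introduced above is just a shared `AtomicBool`: it is raised while the node deliberately idles at the supermajority wait and lowered once the wait ends. The RPC side that consumes it is not part of this hunk; purely to make the flag's purpose concrete, a hypothetical health handler would consult it roughly like this (illustrative only, not the crate's actual health-check code):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

// Hypothetical health check: report "ok" while the override is set, even though
// the node is intentionally not making progress yet.
fn health_status(override_health_check: &Arc<AtomicBool>, caught_up: bool) -> &'static str {
    if override_health_check.load(Ordering::Relaxed) || caught_up {
        "ok"
    } else {
        "behind"
    }
}
```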
@@ -5,7 +5,7 @@ use solana_core::cluster_info
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@@ -426,36 +426,35 @@ fn network_run_pull(
.map(|f| f.filter.bits.len() as usize / 8)
.sum::<usize>();
bytes += serialized_size(&caller_info).unwrap() as usize;
let filters = filters
let filters: Vec<_> = filters
.into_iter()
.map(|f| (caller_info.clone(), f))
.collect();
let rsp = network
let rsp: Vec<_> = network
.get(&to)
.map(|node| {
let mut rsp = vec![];
rsp.append(
&mut node
.lock()
.unwrap()
.process_pull_requests(filters, now)
.into_iter()
.flatten()
.collect(),
);
let rsp = node
.lock()
.unwrap()
.generate_pull_responses(&filters)
.into_iter()
.flatten()
.collect();
node.lock().unwrap().process_pull_requests(filters, now);
rsp
})
.unwrap();
bytes += serialized_size(&rsp).unwrap() as usize;
msgs += rsp.len();
network.get(&from).map(|node| {
node.lock()
.unwrap()
.mark_pull_request_creation_time(&from, now);
overhead += node
.lock()
.unwrap()
.process_pull_response(&from, &timeouts, rsp, now);
let mut node = node.lock().unwrap();
node.mark_pull_request_creation_time(&from, now);
let mut stats = ProcessPullStats::default();
let (vers, vers_expired_timeout) =
node.filter_pull_responses(&timeouts, rsp, now, &mut stats);
node.process_pull_responses(&from, vers, vers_expired_timeout, now, &mut stats);
overhead += stats.failed_insert;
overhead += stats.failed_timeout;
});
(bytes, msgs, overhead)
})

@@ -147,7 +147,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");

// test invalid get_account_info request
let client = reqwest::blocking::Client::new();
@@ -167,7 +167,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");

// test invalid get_account_info request
let client = reqwest::blocking::Client::new();
@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.1.13"
version = "1.1.24"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -5,12 +5,16 @@ cd "$(dirname "$0")"

if [[ -n $CI_TAG ]]; then
LATEST_SOLANA_RELEASE_VERSION=$CI_TAG
else
elif [[ -z $CI_PULL_REQUEST ]]; then
LATEST_SOLANA_RELEASE_VERSION=$(\
curl -sSfL https://api.github.com/repos/solana-labs/solana/releases/latest \
| grep -m 1 tag_name \
| sed -ne 's/^ *"tag_name": "\([^"]*\)",$/\1/p' \
)
else
# Don't bother the `api.github.com` on pull requests to avoid getting rate
# limited
LATEST_SOLANA_RELEASE_VERSION=unknown-version
fi

set -x
@@ -27,10 +27,12 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
* [getFees](jsonrpc-api.md#getfees)
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getIdentity](jsonrpc-api.md#getidentity)
* [getInflation](jsonrpc-api.md#getinflation)
* [getInflationGovernor](jsonrpc-api.md#getinflationgovernor)
* [getInflationRate](jsonrpc-api.md#getinflationrate)
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
@@ -40,6 +42,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getSlot](jsonrpc-api.md#getslot)
* [getSlotLeader](jsonrpc-api.md#getslotleader)
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
* [getStakeActivation](jsonrpc-api.md#getstakeactivation)
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
@@ -50,6 +53,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [simulateTransaction](jsonrpc-api.md#simulatetransaction)
* [setLogFilter](jsonrpc-api.md#setlogfilter)
* [validatorExit](jsonrpc-api.md#validatorexit)
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
@@ -304,6 +308,7 @@ The result field will be an object with the following fields:
* `rewards: <array>` - an array of JSON objects containing:
  * `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
  * `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
* `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available

#### Example:

@@ -352,8 +357,9 @@ Returns a list of confirmed blocks
#### Results:

The result field will be an array of u64 integers listing confirmed blocks
between start_slot and either end_slot, if provided, or latest confirmed block,
inclusive.
between `start_slot` and either `end_slot`, if provided, or latest confirmed block,
inclusive. Max range allowed is 500,000 slots.

#### Example:

@@ -367,7 +373,8 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m

### getConfirmedSignaturesForAddress

Returns a list of all the confirmed signatures for transactions involving an address, within a specified Slot range. Max range allowed is 10_000 Slots.
Returns a list of all the confirmed signatures for transactions involving an
address, within a specified Slot range. Max range allowed is 10,000 Slots

#### Parameters:

@@ -445,6 +452,7 @@ Returns information about the current epoch
The result field will be an object with the following fields:

* `absoluteSlot: <u64>`, the current slot
* `blockHeight: <u64>`, the current block height
* `epoch: <u64>`, the current epoch
* `slotIndex: <u64>`, the current slot relative to the start of the current epoch
* `slotsInEpoch: <u64>`, the number of slots in this epoch
@@ -456,7 +464,7 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"absoluteSlot":166598,"epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
{"jsonrpc":"2.0","result":{"absoluteSlot":166598,"blockHeight": 166500, "epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
```

### getEpochSchedule
@@ -493,7 +501,8 @@ Returns the fee calculator associated with the query blockhash, or `null` if the

#### Parameters:

* `blockhash: <string>`, query blockhash as a Base58 encoded string
* `<string>` - query blockhash as a Base58 encoded string
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

@@ -541,6 +550,34 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
```

### getFees

Returns a recent block hash from the ledger, a fee schedule that can be used to
compute the cost of submitting a transaction using it, and the last slot in
which the blockhash will be valid.

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:

* `blockhash: <string>` - a Hash as base-58 encoded string
* `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
* `lastValidSlot: <u64>` - last slot in which a blockhash will be valid

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFees"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"lamportsPerSignature":5000},"lastValidSlot":297}},"id":1}
```
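As a concrete reading of that response: under the fee schedule in use here, the cost of a transaction is simply `lamportsPerSignature` multiplied by the number of signatures it carries, and the returned blockhash stops being accepted after `lastValidSlot`. A small worked sketch of that arithmetic, using the values from the sample response above (the two-signer transaction is an assumed example):

```rust
fn main() {
    let lamports_per_signature: u64 = 5_000; // feeCalculator.lamportsPerSignature
    let last_valid_slot: u64 = 297;          // lastValidSlot from the response
    let num_signatures: u64 = 2;             // e.g. a transaction with two signers

    let fee = lamports_per_signature * num_signatures;
    println!("fee: {} lamports, submit before slot {}", fee, last_valid_slot);
}
```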
### getFirstAvailableBlock
|
||||
|
||||
Returns the slot of the lowest confirmed block that has not been purged from the ledger
|
||||
@ -608,33 +645,59 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"identity": "2r1F4iWqVcb8M1DbAjQuFpebkQHY9hcVU4WuW2DJBppN"},"id":1}
|
||||
```
|
||||
|
||||
### getInflation
|
||||
### getInflationGovernor
|
||||
|
||||
Returns the inflation configuration of the cluster
|
||||
Returns the current inflation governor
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an Inflation object with the following fields:
|
||||
The result field will be a JSON object with the following fields:
|
||||
|
||||
* `initial: <f64>`, the initial inflation percentage from time 0
|
||||
* `terminal: <f64>`, terminal inflation percentage
|
||||
* `taper: <f64>`, rate per year at which inflation is lowered
|
||||
* `foundation: <f64>`, percentage of total inflation allocated to the foundation
|
||||
* `foundationTerm: <f64>`, duration of foundation pool inflation in years
|
||||
* `storage: <f64>`, percentage of total inflation allocated to storage rewards
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflation"}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationGovernor"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"taper":0.15,"terminal":0.015},"id":1}
|
||||
```
|
||||
|
||||
### getInflationRate
|
||||
|
||||
Returns the specific inflation values for a particular epoch
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - (optional) Epoch, default is the current epoch
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be a JSON object with the following fields:
|
||||
|
||||
* `total: <f64>`, total inflation
|
||||
* `validator: <f64>`, inflation allocated to validators
|
||||
* `foundation: <f64>`, inflation allocated to the foundation
|
||||
* `epoch: <f64>`, epoch for which these values are valid
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationRate"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"epoch":100,"foundation":0.001,"total":0.149,"validator":0.148},"id":1}
|
||||
```
|
||||
|
||||
### getLargestAccounts
|
||||
@ -768,7 +831,7 @@ An RpcResponse containing a JSON object consisting of a string blockhash and Fee
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"lamportsPerSignature":5000}}},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureStatuses
|
||||
@ -882,6 +945,41 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":1024,"id":1}
|
||||
```
|
||||
|
||||
### getStakeActivation
|
||||
|
||||
Returns epoch activation information for a stake account
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Pubkey of stake account to query, as base-58 encoded string
|
||||
* `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
* (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* (optional) `epoch: <u64>` - epoch for which to calculate activation details. If parameter not provided, defaults to current epoch.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be a JSON object with the following fields:
|
||||
|
||||
* `state: <string` - the stake account's activation state, one of: `active`, `inactive`, `activating`, `deactivating`
|
||||
* `active: <u64>` - stake active during the epoch
|
||||
* `inactive: <u64>` - stake inactive during the epoch
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStakeActivation", "params": ["CYRJWqiSjLitBAcRxPvWpgX3s5TvmN2SuRY3eEYypFvT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"active":197717120,"inactive":0,"state":"active"},"id":1}
|
||||
|
||||
// Request with Epoch
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStakeActivation", "params": ["CYRJWqiSjLitBAcRxPvWpgX3s5TvmN2SuRY3eEYypFvT", {"epoch": 4}]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"active":124429280,"inactive":73287840,"state":"activating"},"id":1}
|
||||
```
|
||||
|
||||
### getStoragePubkeysForSlot
|
||||
|
||||
Returns the storage Pubkeys for a particular slot
|
||||
@ -969,7 +1067,7 @@ The result will be an RpcResponse JSON object with `value` equal to a JSON objec
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getCirculatingSupply"}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSupply"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z],total:1016000}},"id":1}
|
||||
```
|
||||
@ -1016,7 +1114,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.13"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.24"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@ -1098,11 +1196,20 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
|
||||
### sendTransaction
|
||||
|
||||
Creates new transaction
|
||||
Submits a signed transaction to the cluster for processing.
|
||||
|
||||
Before submitting, the following preflight checks are performed:
|
||||
1. The transaction signatures are verified
|
||||
2. The transaction is simulated against the latest max confirmed bank
|
||||
and an error is returned if either check fails. Preflight checks may be disabled if
|
||||
desired.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - fully-signed Transaction, as base-58 encoded string
|
||||
* `<object>` - (optional) Configuration object containing the following field:
|
||||
* `skipPreflight: <bool>` - if true, skip the preflight transaction checks (default: false)
|
||||
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -1112,10 +1219,38 @@ Creates new transaction
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
|
||||
{"jsonrpc":"2.0","result":"2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb","id":1}
|
||||
```
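The optional configuration object described above is passed as a second entry in `params`. As a sketch, reusing the transaction string from the request above with preflight checks disabled:

```bash
// Request (preflight checks skipped via the optional config object)
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT", {"skipPreflight": true}]}' http://localhost:8899
```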
|
||||
|
||||
### simulateTransaction
|
||||
|
||||
Simulate sending a transaction
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed.
|
||||
* `<object>` - (optional) Configuration object containing the following field:
|
||||
* `sigVerify: <bool>` - if true the transaction signatures will be verified (default: false)
|
||||
|
||||
#### Results:
|
||||
|
||||
An RpcResponse containing a TransactionStatus object
|
||||
The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:
|
||||
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `logs: <array | null>` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"confirmations":0,"err":null,"slot":218,"status":{"Ok":null}}},"id":1}
|
||||
```
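If signature verification is wanted during simulation, the optional configuration object is passed the same way. A sketch reusing the transaction string from the request above:

```bash
// Request (signature verification enabled via the optional config object)
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT", {"sigVerify": true}]}' http://localhost:8899
```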
|
||||
|
||||
### setLogFilter
|
||||
@ -1168,7 +1303,7 @@ After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
|
||||
|
||||
* Submit subscription requests to the websocket using the methods below
|
||||
* Multiple subscriptions may be active at once
|
||||
* Many subscriptions take the optional [`commitment` parameter](jsonrpc-api.md#configuring-state-commitment), defining how . For subscriptions, if commitment is unspecified, the default value is `recent`.
|
||||
* Many subscriptions take the optional [`commitment` parameter](jsonrpc-api.md#configuring-state-commitment), defining how finalized a change should be to trigger a notification. For subscriptions, if commitment is unspecified, the default value is `"single"`.
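As a minimal sketch of this flow (assuming the `wscat` websocket client is installed, and that the PubSub endpoint is the RPC port plus one, e.g. 8900 for a local validator):

```bash
// Connect to the PubSub websocket, then type subscription requests interactively
wscat -c ws://localhost:8900/
// For example, paste:
// {"jsonrpc":"2.0", "id":1, "method":"slotSubscribe"}
```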
|
||||
|
||||
### accountSubscribe
|
||||
|
||||
@ -1179,8 +1314,6 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
* `<string>` - account Pubkey, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<number>` - Subscription id \(needed to unsubscribe\)
|
||||
@ -1191,7 +1324,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
|
||||
|
||||
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", 15]}
|
||||
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"commitment": "single"}]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
@ -1200,7 +1333,25 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":28},"subscription":0}}
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": {
|
||||
"slot": 5199307
|
||||
},
|
||||
"value": {
|
||||
"data": "9qRxMDwy1ntDhBBoiy4Na9uDLbRTSzUS989mpwz",
|
||||
"executable": false,
|
||||
"lamports": 33594,
|
||||
"owner": "H9oaJujXETwkmjyweuqKPFtk2no4SumoU9A3hi3dC8U6",
|
||||
"rentEpoch": 635
|
||||
}
|
||||
},
|
||||
"subscription": 23784
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### accountUnsubscribe
|
||||
@ -1234,8 +1385,6 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
* `<string>` - program\_id Pubkey, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<integer>` - Subscription id \(needed to unsubscribe\)
|
||||
@ -1244,9 +1393,9 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV"]}
|
||||
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq"]}
|
||||
|
||||
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV", 15]}
|
||||
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq", {"commitment": "single"}]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
@ -1254,12 +1403,30 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
* `<string>` - account Pubkey, as base-58 encoded string
|
||||
* `<object>` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
|
||||
|
||||
```bash
|
||||
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd","rentEpoch":28}],"subscription":0}}
|
||||
```
|
||||
```bash
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "programNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": {
|
||||
"slot": 5208469
|
||||
},
|
||||
"value": {
|
||||
"pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq"
|
||||
"account": {
|
||||
"data": "9qRxMDwy1ntDhBBoiy4Na9uDLbRTSzUS989m",
|
||||
"executable": false,
|
||||
"lamports": 33594,
|
||||
"owner": "7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq",
|
||||
"rentEpoch": 636
|
||||
}
|
||||
}
|
||||
},
|
||||
"subscription": 24040
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### programUnsubscribe
|
||||
|
||||
@ -1304,7 +1471,7 @@ Subscribe to a transaction signature to receive notification when the transactio
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
|
||||
|
||||
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", 15]}
|
||||
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", {"commitment": "max"}]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
@ -1313,7 +1480,21 @@ Subscribe to a transaction signature to receive notification when the transactio
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": {"err": null}, "subscription":0}}
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": {
|
||||
"slot": 5207624
|
||||
},
|
||||
"value": {
|
||||
"err": null
|
||||
}
|
||||
},
|
||||
"subscription": 24006
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### signatureUnsubscribe
|
||||
@ -1363,7 +1544,18 @@ None
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "slotNotification", "params": {"result":{"parent":75,"root":44,"slot":76},"subscription":0}}
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "slotNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"parent": 75,
|
||||
"root": 44,
|
||||
"slot": 76
|
||||
},
|
||||
"subscription": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### slotUnsubscribe
|
||||
@ -1415,7 +1607,14 @@ None
|
||||
The result is the latest root slot number.
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "rootNotification", "params": {"result":42,"subscription":0}}
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "rootNotification",
|
||||
"params": {
|
||||
"result": 42,
|
||||
"subscription": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### rootUnsubscribe
|
||||
@ -1439,3 +1638,57 @@ Unsubscribe from root notifications
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
||||
|
||||
### voteSubscribe
|
||||
|
||||
Subscribe to receive notification anytime a new vote is observed in gossip.
|
||||
These votes are pre-consensus; therefore there is no guarantee these votes will
|
||||
enter the ledger.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<integer>` - subscription id \(needed to unsubscribe\)
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"voteSubscribe"}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
```
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
The result is the latest vote, containing its hash, a list of voted slots, and an optional timestamp.
|
||||
|
||||
```bash
|
||||
{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","slots":[1,2],"timestamp":null},"subscription":0}}
|
||||
```
|
||||
|
||||
### voteUnsubscribe
|
||||
|
||||
Unsubscribe from vote notifications
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<integer>` - subscription id to cancel
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - unsubscribe success message
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"voteUnsubscribe", "params":[0]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
||||
|
@ -6,7 +6,7 @@ Solana takes a very different approach, which it calls _Proof of History_ or _Po
|
||||
|
||||
Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms.
|
||||
|
||||
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
|
||||
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.245.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
|
||||
|
||||
## Relationship to VDFs
|
||||
|
||||
|
@ -44,6 +44,8 @@ $ solana-validator \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
The `--trusted-validator` is operated by Solana
|
||||
|
||||
|
||||
## Testnet
|
||||
* Testnet is where we stress test recent release features on a live
|
||||
@ -72,16 +74,21 @@ $ solana-validator \
|
||||
--identity ~/validator-keypair.json \
|
||||
--vote-account ~/vote-account-keypair.json \
|
||||
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
|
||||
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \
|
||||
--no-untrusted-rpc \
|
||||
--ledger ~/validator-ledger \
|
||||
--rpc-port 8899 \
|
||||
--dynamic-port-range 8000-8010 \
|
||||
--entrypoint 35.203.170.30:8001 \
|
||||
--expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
|
||||
--expected-shred-version 56096 \
|
||||
--expected-shred-version 62235 \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
The identities of the `--trusted-validator`s are:
|
||||
* `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana)
|
||||
* `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One
|
||||
|
||||
## Mainnet Beta
|
||||
A permissionless, persistent cluster for early token holders and launch partners.
|
||||
Currently smart contracts, rewards, and inflation are disabled.
|
||||
@ -114,6 +121,8 @@ $ solana-validator \
|
||||
--dynamic-port-range 8000-8010 \
|
||||
--entrypoint mainnet-beta.solana.com:8001 \
|
||||
--expected-genesis-hash 5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d \
|
||||
--expected-shred-version 54208 \
|
||||
--expected-shred-version 64864 \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
All four `--trusted-validator`s are operated by Solana
|
||||
|
@ -6,9 +6,9 @@ Solana is an open source project implementing a new, high-performance, permissio
|
||||
|
||||
## Why Solana?
|
||||
|
||||
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
|
||||
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.245.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
|
||||
|
||||
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.1078)
|
||||
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.241.1078)
|
||||
|
||||
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
|
||||
|
||||
|
@ -71,7 +71,7 @@ A Block-Merkle is the Merkle Root of all the Entry-Merkles sequenced in the bloc
|
||||
|
||||
A Bank-Hash is the hash of the concatenation of the Block-Merkle and Accounts-Hash
|
||||
|
||||
<img alt="Bank Hash Diagram" src="img/spv-bank-hash.svg" class="center"/>
|
||||

|
||||
|
||||
An Accounts-Hash is the hash of the concatenation of the state hashes of each
|
||||
account modified during the current slot.
|
||||
|
@ -7,6 +7,25 @@ experience for most people who are new or experienced with using crypto wallets.
|
||||
currently the easiest and fastest way to get set up with a new wallet on Solana.
|
||||
The app is free and getting your wallet set up only takes a few minutes.
|
||||
|
||||
### Trust Wallet Security
|
||||
|
||||
Tokens held in Trust Wallet are only as secure as the device on which the app is
|
||||
installed. Anyone who is able to unlock your phone or tablet may be able to
|
||||
use the Trust Wallet app and transfer your tokens. To improve security,
|
||||
you can add a passcode to the Trust Wallet application.
|
||||
To add a Trust Wallet passcode, open the app and go to
|
||||
Settings -> Security -> Passcode.
|
||||
|
||||
If someone gains access to your Trust Wallet application, they can access your
|
||||
recovery seed phrase.
|
||||
Anyone who has access to your seed phrase will be able to recreate
|
||||
your Trust Wallet keys on a different device. From there, they could
|
||||
sign transactions from that device rather than on your own phone or tablet.
|
||||
The seed phrase is displayed when a new wallet is created and it can also be
|
||||
viewed at any later time in the app by following these steps:
|
||||
- Go to Settings -> Wallets
|
||||
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"
|
||||
|
||||
{% page-ref page="trust-wallet.md" %}
|
||||
|
||||
## Ledger Live with Ledger Nano S
|
||||
|
@ -59,7 +59,7 @@ some interface for signing transactions.
|
||||
A hardware wallet, such as the
|
||||
[Ledger hardware wallet](https://www.ledger.com/), offers a great blend of
|
||||
security and convenience for cryptocurrencies. It effectively automates the
|
||||
process of offline signing while retaining nearly all the convenience of an FS
|
||||
wallet.
|
||||
process of offline signing while retaining nearly all the convenience of a file
|
||||
system wallet.
|
||||
|
||||
{% page-ref page="../hardware-wallet/README.md" %}
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-dos"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -13,10 +13,10 @@ clap = "2.33.0"
|
||||
log = "0.4.8"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.3.0"
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-download-utils"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Download Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,8 +14,8 @@ console = "0.10.0"
|
||||
indicatif = "0.14.0"
|
||||
log = "0.4.8"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
tar = "0.4.26"
|
||||
|
||||
[lib]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -6,7 +6,7 @@ VERSION=$PERF_LIBS_VERSION-1
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [[ ! -f target/perf-libs/.$VERSION ]]; then
|
||||
if [[ $VERSION != "$(cat target/perf-libs/.version 2> /dev/null)" ]]; then
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
echo Note: Performance libraries are only available for Linux
|
||||
exit 0
|
||||
@ -17,6 +17,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rm -rf target/perf-libs
|
||||
mkdir -p target/perf-libs
|
||||
(
|
||||
set -x
|
||||
@ -35,7 +36,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
|
||||
mkdir -p ~/.cache
|
||||
mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
|
||||
fi
|
||||
touch .$VERSION
|
||||
echo "$VERSION" > .version
|
||||
)
|
||||
|
||||
# Setup symlinks so the perf-libs/ can be found from all binaries run out of
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,13 +10,13 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.13" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.1.13" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.24" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.24" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.24" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.1.24" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -18,36 +18,35 @@ use log::*;
|
||||
use solana_runtime::bank::{Bank, EnteredEpochCallback};
|
||||
|
||||
pub fn get_inflation(operating_mode: OperatingMode, epoch: Epoch) -> Option<Inflation> {
|
||||
let past_epoch_inflation = get_inflation_for_epoch(operating_mode, epoch.saturating_sub(1));
|
||||
let epoch_inflation = get_inflation_for_epoch(operating_mode, epoch);
|
||||
|
||||
if epoch_inflation != past_epoch_inflation || epoch == 0 {
|
||||
Some(epoch_inflation)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_inflation_for_epoch(operating_mode: OperatingMode, epoch: Epoch) -> Inflation {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => {
|
||||
if epoch == 0 {
|
||||
Some(Inflation::default())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
OperatingMode::Development => Inflation::default(),
|
||||
OperatingMode::Preview => {
|
||||
if epoch == 0 {
|
||||
// No inflation at epoch 0
|
||||
Some(Inflation::new_disabled())
|
||||
} else if epoch == 44 {
|
||||
Some(Inflation::default())
|
||||
if epoch >= 44 {
|
||||
Inflation::default()
|
||||
} else {
|
||||
None
|
||||
Inflation::new_disabled()
|
||||
}
|
||||
}
|
||||
OperatingMode::Stable => {
|
||||
if epoch == 0 {
|
||||
// No inflation at epoch 0
|
||||
Some(Inflation::new_disabled())
|
||||
} else if epoch == std::u64::MAX {
|
||||
if epoch == std::u64::MAX {
|
||||
// Inflation starts
|
||||
//
|
||||
// The epoch of std::u64::MAX - 1 is a placeholder and is expected to be reduced in
|
||||
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in
|
||||
// a future hard fork.
|
||||
Some(Inflation::default())
|
||||
Inflation::default()
|
||||
} else {
|
||||
None
|
||||
// No inflation from epoch 0
|
||||
Inflation::new_disabled()
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -72,9 +71,7 @@ pub fn get_programs(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(
|
||||
}
|
||||
}
|
||||
OperatingMode::Stable => {
|
||||
if epoch == std::u64::MAX - 1 {
|
||||
// The epoch of std::u64::MAX - 1 is a placeholder and is expected to be reduced in
|
||||
// a future hard fork.
|
||||
if epoch == 34 {
|
||||
Some(vec![solana_bpf_loader_program!()])
|
||||
} else if epoch == std::u64::MAX {
|
||||
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in a
|
||||
@ -163,7 +160,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_softlaunch_programs() {
|
||||
assert_eq!(get_programs(OperatingMode::Stable, 1), None);
|
||||
assert!(get_programs(OperatingMode::Stable, std::u64::MAX - 1).is_some());
|
||||
assert!(get_programs(OperatingMode::Stable, 34).is_some());
|
||||
assert!(get_programs(OperatingMode::Stable, std::u64::MAX).is_some());
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -15,13 +15,14 @@ chrono = "0.4"
|
||||
serde = "1.0.105"
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.24" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.24" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@ -6,7 +6,10 @@ use solana_clap_utils::{
|
||||
input_validators::{is_pubkey_or_keypair, is_rfc3339_datetime, is_valid_percentage},
|
||||
};
|
||||
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
|
||||
use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick};
|
||||
use solana_ledger::{
|
||||
blockstore::create_new_ledger, blockstore_db::AccessType,
|
||||
hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, poh::compute_hashes_per_tick,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock,
|
||||
@ -121,6 +124,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
timing::duration_as_us(&PohConfig::default().target_tick_duration);
|
||||
let default_ticks_per_slot = &clock::DEFAULT_TICKS_PER_SLOT.to_string();
|
||||
let default_operating_mode = "stable";
|
||||
let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string();
|
||||
|
||||
let matches = App::new(crate_name!())
|
||||
.about(crate_description!())
|
||||
@ -327,6 +331,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
"Selects the features that will be enabled for the cluster"
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("max_genesis_archive_unpacked_size")
|
||||
.long("max-genesis-archive-unpacked-size")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value(&default_genesis_archive_unpacked_size)
|
||||
.help(
|
||||
"maximum total uncompressed file size of created genesis archive",
|
||||
),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let faucet_lamports = value_t!(matches, "faucet_lamports", u64).unwrap_or(0);
|
||||
@ -513,6 +527,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
}
|
||||
}
|
||||
|
||||
let max_genesis_archive_unpacked_size =
|
||||
value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64);
|
||||
|
||||
let issued_lamports = genesis_config
|
||||
.accounts
|
||||
.iter()
|
||||
@ -521,7 +538,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
|
||||
add_genesis_accounts(&mut genesis_config, issued_lamports - faucet_lamports);
|
||||
|
||||
create_new_ledger(&ledger_path, &genesis_config)?;
|
||||
solana_logger::setup();
|
||||
create_new_ledger(
|
||||
&ledger_path,
|
||||
&genesis_config,
|
||||
max_genesis_archive_unpacked_size,
|
||||
AccessType::PrimaryOnly,
|
||||
)?;
|
||||
|
||||
println!("{}", genesis_config);
|
||||
Ok(())
|
||||
|
@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-core = { path = "../core", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-core = { path = "../core", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
|
||||
|
||||
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -24,11 +24,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-client = { path = "../client", version = "1.1.13" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-client = { path = "../client", version = "1.1.24" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
semver = "0.9.0"
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,10 +13,10 @@ bs58 = "0.3.0"
|
||||
clap = "2.33"
|
||||
dirs = "2.0.2"
|
||||
num_cpus = "1.12.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.1.13" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.1.24" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
tiny-bip39 = "0.7.0"
|
||||
|
||||
[[bin]]
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.13"
|
||||
version = "1.1.24"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -12,21 +12,25 @@ homepage = "https://solana.com/"
|
||||
bs58 = "0.3.0"
|
||||
clap = "2.33.0"
|
||||
histogram = "*"
|
||||
log = { version = "0.4.8" }
|
||||
serde_json = "1.0.48"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
|
||||
solana-cli = { path = "../cli", version = "1.1.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.13" }
|
||||
solana-logger = { path = "../logger", version = "1.1.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.24" }
|
||||
solana-cli = { path = "../cli", version = "1.1.24" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.24" }
|
||||
solana-logger = { path = "../logger", version = "1.1.24" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.24" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.24" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.24" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.24" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "1.0"
|
||||
|
||||
[target."cfg(unix)".dependencies]
|
||||
signal-hook = "0.1.15"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -8,12 +8,13 @@ use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blockstore::Blockstore,
|
||||
blockstore_db::{self, Column, Database},
|
||||
blockstore_db::{self, AccessType, Column, Database},
|
||||
blockstore_processor::{BankForksInfo, ProcessOptions},
|
||||
hardened_unpack::open_genesis_config,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
rooted_slot_iterator::RootedSlotIterator,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::Slot, genesis_config::GenesisConfig, native_token::lamports_to_sol, pubkey::Pubkey,
|
||||
shred_version::compute_shred_version,
|
||||
@ -28,8 +29,11 @@ use std::{
|
||||
path::{Path, PathBuf},
|
||||
process::{exit, Command, Stdio},
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use log::*;
|
||||
|
||||
#[derive(PartialEq)]
|
||||
enum LedgerOutputMethod {
|
||||
Print,
|
||||
@ -494,8 +498,8 @@ fn analyze_storage(database: &Database) -> Result<(), String> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn open_blockstore(ledger_path: &Path) -> Blockstore {
|
||||
match Blockstore::open(ledger_path) {
|
||||
fn open_blockstore(ledger_path: &Path, access_type: AccessType) -> Blockstore {
|
||||
match Blockstore::open_with_access_type(ledger_path, access_type) {
|
||||
Ok(blockstore) => blockstore,
|
||||
Err(err) => {
|
||||
eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
|
||||
@ -504,8 +508,8 @@ fn open_blockstore(ledger_path: &Path) -> Blockstore {
|
||||
}
|
||||
}
|
||||
|
||||
fn open_database(ledger_path: &Path) -> Database {
|
||||
match Database::open(&ledger_path.join("rocksdb")) {
|
||||
fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
|
||||
match Database::open(&ledger_path.join("rocksdb"), access_type) {
|
||||
Ok(database) => database,
|
||||
Err(err) => {
|
||||
eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
|
||||
@ -528,33 +532,69 @@ fn load_bank_forks(
|
||||
ledger_path: &PathBuf,
|
||||
genesis_config: &GenesisConfig,
|
||||
process_options: ProcessOptions,
|
||||
access_type: AccessType,
|
||||
) -> bank_forks_utils::LoadResult {
|
||||
let blockstore = open_blockstore(&ledger_path, access_type);
|
||||
let snapshot_path = ledger_path.clone().join(if blockstore.is_primary_access() {
|
||||
"snapshot"
|
||||
} else {
|
||||
"snapshot.ledger-tool"
|
||||
});
|
||||
let snapshot_config = if arg_matches.is_present("no_snapshot") {
|
||||
None
|
||||
} else {
|
||||
Some(SnapshotConfig {
|
||||
snapshot_interval_slots: 0, // Value doesn't matter
|
||||
snapshot_package_output_path: ledger_path.clone(),
|
||||
snapshot_path: ledger_path.clone().join("snapshot"),
|
||||
snapshot_path,
|
||||
})
|
||||
};
|
||||
let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") {
|
||||
if !blockstore.is_primary_access() {
|
||||
// Be defensive, when default account dir is explicitly specified, it's still possible
|
||||
// to wipe the dir possibly shared by the running validator!
|
||||
eprintln!("Error: custom accounts path is not supported under secondary access");
|
||||
exit(1);
|
||||
}
|
||||
account_paths.split(',').map(PathBuf::from).collect()
|
||||
} else {
|
||||
} else if blockstore.is_primary_access() {
|
||||
vec![ledger_path.join("accounts")]
|
||||
} else {
|
||||
let non_primary_accounts_path = ledger_path.join("accounts.ledger-tool");
|
||||
warn!(
|
||||
"Default accounts path is switched aligning with Blockstore's secondary access: {:?}",
|
||||
non_primary_accounts_path
|
||||
);
|
||||
vec![non_primary_accounts_path]
|
||||
};
|
||||
|
||||
bank_forks_utils::load(
|
||||
&genesis_config,
|
||||
&open_blockstore(&ledger_path),
|
||||
&blockstore,
|
||||
account_paths,
|
||||
snapshot_config.as_ref(),
|
||||
process_options,
|
||||
)
|
||||
}
|
||||
|
||||
fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> GenesisConfig {
|
||||
let max_genesis_archive_unpacked_size =
|
||||
value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64);
|
||||
open_genesis_config(ledger_path, max_genesis_archive_unpacked_size)
|
||||
}
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn main() {
|
||||
// Ignore SIGUSR1 to prevent long-running calls being killed by logrotate
|
||||
// in warehouse deployments
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// `register()` is unsafe because the action is called in a signal handler
|
||||
// with the usual caveats. So long as this action body stays empty, we'll
|
||||
// be fine
|
||||
unsafe { signal_hook::register(signal_hook::SIGUSR1, || {}) }.unwrap();
|
||||
}
|
||||
|
||||
const DEFAULT_ROOT_COUNT: &str = "1";
|
||||
solana_logger::setup_with_default("solana=info");
|
||||
|
||||
@ -586,6 +626,13 @@ fn main() {
|
||||
.multiple(true)
|
||||
.takes_value(true)
|
||||
.help("Add a hard fork at this slot");
|
||||
let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string();
|
||||
let max_genesis_archive_unpacked_size_arg = Arg::with_name("max_genesis_archive_unpacked_size")
|
||||
.long("max-genesis-archive-unpacked-size")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value(&default_genesis_archive_unpacked_size)
|
||||
.help("maximum total uncompressed size of unpacked genesis archive");
|
||||
|
||||
let matches = App::new(crate_name!())
|
||||
.about(crate_description!())
|
||||
@ -635,15 +682,18 @@ fn main() {
|
||||
.subcommand(
|
||||
SubCommand::with_name("genesis")
|
||||
.about("Prints the ledger's genesis config")
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("genesis-hash")
|
||||
.about("Prints the ledger's genesis hash")
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("shred-version")
|
||||
.about("Prints the ledger's shred hash")
|
||||
.arg(&hard_forks_arg)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("bounds")
|
||||
@ -667,6 +717,7 @@ fn main() {
|
||||
.arg(&account_paths_arg)
|
||||
.arg(&halt_at_slot_arg)
|
||||
.arg(&hard_forks_arg)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
.arg(
|
||||
Arg::with_name("skip_poh_verify")
|
||||
.long("skip-poh-verify")
|
||||
@ -680,6 +731,7 @@ fn main() {
|
||||
.arg(&account_paths_arg)
|
||||
.arg(&halt_at_slot_arg)
|
||||
.arg(&hard_forks_arg)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
.arg(
|
||||
Arg::with_name("include_all_votes")
|
||||
.long("include-all-votes")
|
||||
@ -698,6 +750,7 @@ fn main() {
|
||||
.arg(&no_snapshot_arg)
|
||||
.arg(&account_paths_arg)
|
||||
.arg(&hard_forks_arg)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
.arg(
|
||||
Arg::with_name("snapshot_slot")
|
||||
.index(1)
|
||||
@ -713,6 +766,17 @@ fn main() {
|
||||
.takes_value(true)
|
||||
.help("Output directory for the snapshot"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("warp_slot")
|
||||
.required(false)
|
||||
.long("warp-slot")
|
||||
.takes_value(true)
|
||||
.value_name("WARP_SLOT")
|
||||
.validator(is_slot)
|
||||
.help("After loading the snapshot slot warp the ledger to WARP_SLOT, \
|
||||
which could be a slot in a galaxy far far away"),
|
||||
)
|
||||
|
||||
).subcommand(
|
||||
SubCommand::with_name("accounts")
|
||||
.about("Print account contents after processing in the ledger")
|
||||
@ -726,6 +790,7 @@ fn main() {
|
||||
.takes_value(false)
|
||||
.help("Include sysvars too"),
|
||||
)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
).subcommand(
|
||||
SubCommand::with_name("capitalization")
|
||||
.about("Print capitalization (aka, total suppy)")
|
||||
@ -733,17 +798,7 @@ fn main() {
|
||||
.arg(&account_paths_arg)
|
||||
.arg(&halt_at_slot_arg)
|
||||
.arg(&hard_forks_arg)
|
||||
).subcommand(
|
||||
SubCommand::with_name("prune")
|
||||
.about("Prune the ledger at the block height")
|
||||
.arg(
|
||||
Arg::with_name("slot_list")
|
||||
.long("slot-list")
|
||||
.value_name("FILENAME")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.help("The location of the YAML file with a list of rollback slot heights and hashes"),
|
||||
)
|
||||
.arg(&max_genesis_archive_unpacked_size_arg)
|
||||
).subcommand(
|
||||
SubCommand::with_name("purge")
|
||||
.about("Purge the ledger at the block height")
|
||||
@ -753,14 +808,14 @@ fn main() {
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.help("Start slot to purge from."),
|
||||
.help("Start slot to purge from (inclusive)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("end_slot")
|
||||
.index(2)
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.help("Optional ending slot to stop purging."),
|
||||
.required(true)
|
||||
.help("Ending slot to stop purging (inclusive)"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
@ -810,16 +865,19 @@ fn main() {
|
||||
("print", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path),
|
||||
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
|
||||
starting_slot,
|
||||
LedgerOutputMethod::Print,
|
||||
);
|
||||
}
|
||||
("genesis", Some(_arg_matches)) => {
|
||||
println!("{}", open_genesis_config(&ledger_path));
|
||||
("genesis", Some(arg_matches)) => {
|
||||
println!("{}", open_genesis_config_by(&ledger_path, arg_matches));
|
||||
}
|
||||
("genesis-hash", Some(_arg_matches)) => {
|
||||
println!("{}", open_genesis_config(&ledger_path).hash());
|
||||
("genesis-hash", Some(arg_matches)) => {
|
||||
println!(
|
||||
"{}",
|
||||
open_genesis_config_by(&ledger_path, arg_matches).hash()
|
||||
);
|
||||
}
|
||||
("shred-version", Some(arg_matches)) => {
|
||||
let process_options = ProcessOptions {
|
||||
@ -828,8 +886,14 @@ fn main() {
|
||||
poh_verify: false,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let genesis_config = open_genesis_config(&ledger_path);
|
||||
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
|
||||
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
|
||||
match load_bank_forks(
|
||||
arg_matches,
|
||||
&ledger_path,
|
||||
&genesis_config,
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
) {
|
||||
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
let bank_info = &bank_forks_info[0];
|
||||
let bank = bank_forks[bank_info.bank_slot].clone();
|
||||
@ -850,7 +914,7 @@ fn main() {
|
||||
}
|
||||
("slot", Some(arg_matches)) => {
|
||||
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
|
||||
let blockstore = open_blockstore(&ledger_path);
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
|
||||
for slot in slots {
|
||||
println!("Slot {}", slot);
|
||||
if let Err(err) = output_slot(&blockstore, slot, &LedgerOutputMethod::Print) {
|
||||
@ -861,14 +925,14 @@ fn main() {
|
||||
("json", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path),
|
||||
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
|
||||
starting_slot,
|
||||
LedgerOutputMethod::Json,
|
||||
);
|
||||
}
|
||||
("set-dead-slot", Some(arg_matches)) => {
|
||||
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
|
||||
let blockstore = open_blockstore(&ledger_path);
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
|
||||
for slot in slots {
|
||||
match blockstore.set_dead_slot(slot) {
|
||||
Ok(_) => println!("Slot {} dead", slot),
|
||||
@ -883,13 +947,17 @@ fn main() {
|
||||
poh_verify: !arg_matches.is_present("skip_poh_verify"),
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
println!("{}", open_genesis_config(&ledger_path).hash());
|
||||
println!(
|
||||
"genesis hash: {}",
|
||||
open_genesis_config_by(&ledger_path, arg_matches).hash()
|
||||
);
|
||||
|
||||
load_bank_forks(
|
||||
arg_matches,
|
||||
&ledger_path,
|
||||
&open_genesis_config(&ledger_path),
|
||||
&open_genesis_config_by(&ledger_path, arg_matches),
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Ledger verification failed: {:?}", err);
|
||||
@ -910,8 +978,9 @@ fn main() {
match load_bank_forks(
arg_matches,
&ledger_path,
&open_genesis_config(&ledger_path),
&open_genesis_config_by(&ledger_path, arg_matches),
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let dot = graph_forks(
@ -944,6 +1013,7 @@ fn main() {
("create-snapshot", Some(arg_matches)) => {
let snapshot_slot = value_t_or_exit!(arg_matches, "snapshot_slot", Slot);
let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
let warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok();

let process_options = ProcessOptions {
dev_halt_at_slot: Some(snapshot_slot),
@ -951,15 +1021,35 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, _bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
});
let bank = bank_forks
.get(snapshot_slot)
.unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
})
.clone();

let bank = if let Some(warp_slot) = warp_slot {
Arc::new(Bank::warp_from_parent(
&bank,
bank.collector_id(),
warp_slot,
))
} else {
bank
};

println!("Creating a snapshot of slot {}", bank.slot());
assert!(bank.is_complete());
bank.squash();

let temp_dir = tempfile::TempDir::new().unwrap_or_else(|err| {
@ -983,7 +1073,8 @@ fn main() {
snapshot_utils::archive_snapshot_package(&package).map(|ok| {
println!(
"Successfully created snapshot for slot {}: {:?}",
snapshot_slot, package.tar_output_file
bank.slot(),
package.tar_output_file
);
println!(
"Shred version: {}",
@ -1014,9 +1105,15 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let include_sysvars = arg_matches.is_present("include_sysvars");
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let slot = dev_halt_at_slot.unwrap_or_else(|| {
if bank_forks_info.len() > 1 {
@ -1064,8 +1161,14 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let slot = dev_halt_at_slot.unwrap_or_else(|| {
if bank_forks_info.len() > 1 {
@ -1135,50 +1238,13 @@ fn main() {
}
("purge", Some(arg_matches)) => {
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
let end_slot = value_t!(arg_matches, "end_slot", Slot);
let end_slot = end_slot.map_or(None, Some);
let blockstore = open_blockstore(&ledger_path);
let end_slot = value_t_or_exit!(arg_matches, "end_slot", Slot);
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
blockstore.purge_slots(start_slot, end_slot);
}
("prune", Some(arg_matches)) => {
if let Some(prune_file_path) = arg_matches.value_of("slot_list") {
let blockstore = open_blockstore(&ledger_path);
let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> =
serde_yaml::from_reader(prune_file).unwrap();

let iter =
RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot");

let potential_hashes: Vec<_> = iter
.filter_map(|(slot, _meta)| {
let blockhash = blockstore
.get_slot_entries(slot, 0)
.unwrap()
.last()
.unwrap()
.hash
.to_string();

slot_hashes.get(&slot).and_then(|hash| {
if *hash == blockhash {
Some((slot, blockhash))
} else {
None
}
})
})
.collect();

let (target_slot, target_hash) = potential_hashes
.last()
.expect("Failed to find a valid slot");
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
blockstore.prune(*target_slot);
}
blockstore.purge_from_next_slots(start_slot, end_slot);
}
("list-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(&ledger_path);
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
@ -1231,7 +1297,9 @@ fn main() {
});
}
("bounds", Some(arg_matches)) => {
match open_blockstore(&ledger_path).slot_meta_iterator(0) {
match open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary)
.slot_meta_iterator(0)
{
Ok(metas) => {
let all = arg_matches.is_present("all");
@ -1257,15 +1325,20 @@ fn main() {
}
}
}
("analyze-storage", _) => match analyze_storage(&open_database(&ledger_path)) {
Ok(()) => {
println!("Ok.");
("analyze-storage", _) => {
match analyze_storage(&open_database(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
)) {
Ok(()) => {
println!("Ok.");
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
},
}
("", _) => {
eprintln!("{}", matches.usage());
exit(1);
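Note on the ledger-tool hunks above: the recurring change is that every blockstore or database open now takes an explicit AccessType. Read-only subcommands (shred-version, slot, json, verify, graph, create-snapshot, list-roots, bounds, analyze-storage) pass AccessType::TryPrimaryThenSecondary, while mutating ones (set-dead-slot, purge) pass AccessType::PrimaryOnly; open_genesis_config(&ledger_path) is likewise replaced by open_genesis_config_by(&ledger_path, arg_matches), and load_bank_forks gains the access-type argument. The helper bodies are not shown in this diff, so the sketch below only illustrates the try-primary-then-fall-back-to-secondary idea; Db, open_primary, open_secondary, and open_blockstore_sketch are hypothetical stand-ins, not the solana-ledger API.

// Illustrative sketch only (not code from this diff): the AccessType variants
// mirror the ones used above, everything else is a made-up stand-in.
#[derive(Clone, Copy, Debug)]
enum AccessType {
    // Read-write handle; required by mutating commands such as purge and set-dead-slot.
    PrimaryOnly,
    // Prefer the read-write handle, but fall back to a read-only (secondary) handle
    // if another process (e.g. a running validator) already holds the primary one.
    TryPrimaryThenSecondary,
}

struct Db {
    read_only: bool,
}

fn open_primary(path: &str) -> Result<Db, String> {
    // Pretend the primary handle is unavailable whenever a LOCK file exists.
    if std::path::Path::new(path).join("LOCK").exists() {
        Err("primary handle already held".to_string())
    } else {
        Ok(Db { read_only: false })
    }
}

fn open_secondary(_path: &str) -> Result<Db, String> {
    Ok(Db { read_only: true })
}

fn open_blockstore_sketch(path: &str, access_type: AccessType) -> Db {
    let result = match access_type {
        AccessType::PrimaryOnly => open_primary(path),
        AccessType::TryPrimaryThenSecondary => {
            open_primary(path).or_else(|_| open_secondary(path))
        }
    };
    result.unwrap_or_else(|err| {
        eprintln!("Failed to open ledger at {}: {}", path, err);
        std::process::exit(1)
    })
}

fn main() {
    // Read-only style command (slot, json, bounds, ...): fall back if needed.
    let ro = open_blockstore_sketch("test-ledger", AccessType::TryPrimaryThenSecondary);
    println!("read path opened, read_only = {}", ro.read_only);

    // Mutating command (purge, set-dead-slot): must hold the primary handle.
    let rw = open_blockstore_sketch("test-ledger", AccessType::PrimaryOnly);
    println!("write path opened, read_only = {}", rw.read_only);
}

The remaining hunks below are the solana-ledger Cargo.toml, where the crate version and its intra-workspace path dependencies move from 1.1.13 to 1.1.24.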
@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.1.13"
version = "1.1.24"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -28,19 +28,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
regex = "1.3.6"
serde = "1.0.105"
serde_bytes = "0.11.3"
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-measure = { path = "../measure", version = "1.1.13" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.24" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.24" }
solana-logger = { path = "../logger", version = "1.1.24" }
solana-measure = { path = "../measure", version = "1.1.24" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.24" }
solana-metrics = { path = "../metrics", version = "1.1.24" }
solana-perf = { path = "../perf", version = "1.1.24" }
ed25519-dalek = "1.0.0-pre.3"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.24" }
solana-runtime = { path = "../runtime", version = "1.1.24" }
solana-sdk = { path = "../sdk", version = "1.1.24" }
solana-stake-program = { path = "../programs/stake", version = "1.1.24" }
solana-vote-program = { path = "../programs/vote", version = "1.1.24" }
symlink = "0.1.0"
tar = "0.4.26"
thiserror = "1.0"
@ -57,7 +57,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
solana-budget-program = { path = "../programs/budget", version = "1.1.24" }

[lib]
crate-type = ["lib"]
Some files were not shown because too many files have changed in this diff.