Compare commits
371 Commits
banking-be...v1.9.13
SHA1
3ac7e043a7
db43c5d46d
48dc4b3bb2
b0543a1ae6
8b0576d954
5c3e1967e6
06ebed861e
52bd1658cc
07a6b597d0
857a541ddb
87fee49ed7
3ed915dcc9
3875bc91ab
e0f5fb887b
49952e05cf
4a100fbe3b
607a98e9d0
86a97563b5
4e5d9885da
714cf0eff2
babba3b0ff
8c59fa5a73
de694402ca
f56b25ac29
2137008532
4a4e560299
66f85a0703
3bee925967
44109c0cd4
83281fe3ff
2dd5b76986
0e6a849fc7
db9826c93f
46a02b7b4a
83fb17c77b
e6f6a8a1b4
6b71570f48
34631669a4
a400fd4558
f42f094dd0
ef9ffffcaa
61fea1d2a7
023ab1c427
f4f0b64af4
84c57dd0a8
450404f800
8413700a2f
215c708599
c02c73fa5f
68d846c7a9
08d6b9850d
4ebeb33602
1f4ad0d1e8
b2b92d7f5c
02f8651a9c
0fdbec9735
f629c71849
43e562142f
c3098e99d1
421ad42b12
08cc140d4a
2120ef5808
c08af09aaa
8b12749f02
e343a17ce9
3fd78ac6ea
41bbc11a46
68934353f2
92543a3f92
a514aff819
8d8525e4fc
2c1cec4e2c
7d0a0a26bb
73016d3ed2
9b1cb5c1b7
94f4748a34
8963724ed6
65df58c64a
380c5da2d0
7d488a6ed8
159cfdae25
1c3d09ed21
2c8cfdb3f3
85570ac207
054b95cbe1
b67a5bb3b9
3e3fb4e296
f66d8551e9
a5cb10666c
76384758d8
4eca26ae50
2d144afec5
781609b27a
5a5244ecf8
2e60f95ab9
55179524bd
4a0785ddcd
4698fbc036
70f76b450e
d64eebb799
71211e0d90
320fbd63c5
0fe00bab7d
00630d9c1b
d05b5b0902
5c69af607d
df16a37ab5
432eafd730
41142a7d76
8047601a7b
85856a73aa
c3890ada8e
ceb253ce90
dd6c365bd9
9ea025315e
c43cef79b5
2605724aa3
539f303eb7
f7091811d4
15ef1827bf
85fef67213
90a70d9b5b
643442e830
69e207ca58
fb8db79e63
237347847b
4706790c20
04281734e5
a98ca9037d
12e40a40f5
c715bc93cf
3aa3cd8852
f83cb74509
6c47a98945
4dfbb4347c
28fc733894
93b44d8a4c
2804204f80
4d891043d1
74498650bc
af3b307734
2368e09d89
6fca541847
15e9cedc0d
d68a40396c
b0e0410003
d1174f677e
cf88542254
99c55dbec3
bc412d51d6
87c3e71bb8
d0cf5bb721
fb54991901
9995a54be7
d9a5f714e1
620a80b581
b354dae249
af7ed83285
8bc4cc90d2
39a4cc95dc
187ed6a387
91bc44931f
35ca3182ba
24345d8e63
bf45f5b88e
2ddb5b27c1
7f10fd6a21
a0a881594a
e9e35fd7bd
66b94b86a9
59f406d78a
dbf9a32883
37e9076db0
f77ea5f324
c9df037dae
2b87d99479
2546ef4ad6
96ae795758
9bddb4e437
4079f12a3e
e121b94524
a7623ad18c
054e475c6c
7a421fe602
2ef0b85829
a6b7a3b7ff
9d69f2b324
4f82a4ba1f
ed0b30efcc
4ee6bc9a93
676c43b9d2
b1d8296498
34984ed16e
f4d1577337
58dcc451a9
f0695ef6d9
41b0d6cca3
ae77a52c97
133314e58c
cb49ae21b4
a9ebba5643
8ce65878da
a4ca18a54d
7cb147fdcd
2d693be9fa
50e716fc80
1f00926874
662c6be51e
9761f5b67f
7b1da62763
2f97fee71a
3ae674dd28
8214bc9db4
1132def37c
7267ebaaf2
4be6e52a4f
e7348243b4
fc0c74d722
687cd4779e
b28d7050ab
6d72acfd6d
840ec0686e
ba0188a36d
05b9a2f203
8578429c4d
87f4a1f4b6
17411f9b4c
fb0e5adc7e
f4ded6fb6b
f89bf7b939
c99aed4abf
edfd8c1717
09dbf069e8
9764d4349b
d84b994451
185f52b712
3b59f67562
7d2589e2ac
77558c315d
464d533da3
f8bf478fde
35fb47d1ce
5bd27dd175
794f28d9ab
d7a673f7f5
b3fa1288aa
3e4e2e9113
fd4754e5a9
0a9460ed8b
478c641cb5
735f000952
264bb903a3
7c5d3e5874
70d5b6aeaf
ca451ea23e
113d261a2c
c6ab915668
d5c0ffc11f
6a2b62de62
4645be3e52
7efd0391e9
6a556c5adb
0cd45400ca
531f36c571
9c9d3e8b6b
74b98c2dd4
9fb67f9b07
401c542d2a
14ed446923
adc584ee22
810ca36eae
16f821ea8c
584e9bfbe7
3ad4c3306c
be0bcd85ed
8708186760
8f3e37c174
7d61935bf1
a70eb098f4
f31593bfbe
8f26c71964
9fbaaa5102
78e7913352
f58b87befe
1a2823b875
75fe0d3ecf
c296a6c9ed
57e5406476
4f57c4a4fe
c4b3b2865d
f58c375b1f
bf41c53f11
e3a4b98432
91657ba8fe
35ee48bec9
02cfa85214
02be3a6568
b20fae5a09
e572678176
f4521002b9
0c5a2bcd5a
c25d16bf0d
301e38044a
bfa6302985
b66e2ae353
3967dc8685
569c83295d
a462c58594
7dba8bb49f
c907d4444d
b4c847557b
de48347078
9f173d3717
dcd76e484f
2246135654
41ea597256
fb955bd4ec
5c3fbb384f
a056fd88cb
2f1816d1db
2cd2f3ba7b
135dfdbf1e
fad4bfdf2a
a9d4728c35
3977bcde63
cf2a9de19c
5e2b12aee5
6c329e2fd3
0376045c7d
c1f54c22ed
0576d133ad
9956afb2bd
01941cf3de
4b63d51e3e
5bf4445ae6
7782d34bbf
2c4765e75a
e71ea19e60
ed0040d555
da9e6826ac
68fc72a7f4
2a6bb2b954
ef51778c78
abecf292a3
a31660815f
539ad4bea6
85f601993f
b0754cc575
effd0b2547
8836069719
2698a5c705
dd157fd47f
8cacf82cb8
8ee5fbc5c0
f2a6b94e5c
ef970bb14a
cabd851904
2d2ef59550
b7b56d5016
18e3a635b4
2b4347d502
87accd16d8
0e969015fc
46935c022e
8a7106bc08
89d2f34a03
b3fa1e4550
58c755e1d4
60085305b4
b4c8e095bd
3e28ffa884
@@ -12,7 +12,8 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
eval "$(ci/sbf-tools-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes
.gitignore (vendored, 1 line changed)
@@ -4,6 +4,7 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/

**/*.rs.bk
.cargo
@@ -113,3 +113,10 @@ pull_request_rules:
ignore_conflicts: true
branches:
- v1.9

commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
conditions:
- author=@core-contributors
Cargo.lock (generated, 932 lines changed)
File diff suppressed because it is too large
Cargo.toml (18 lines changed)
@@ -1,17 +1,16 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-streamer",
"bench-tps",
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"bench-streamer",
"bench-tps",
"bucket_map",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -26,6 +25,8 @@ members = [
"validator",
"genesis",
"genesis-utils",
"geyser-plugin-interface",
"geyser-plugin-manager",
"gossip",
"install",
"keygen",
@@ -46,7 +47,11 @@ members = [
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/ed25519-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/stake",
@@ -65,7 +70,6 @@ members = [
"tokens",
"transaction-dos",
"transaction-status",
"account-decoder",
"upload-perf",
"net-utils",
"version",
@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.9.0"
version = "1.9.13"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.9.0"
@@ -5,7 +5,7 @@ use {
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id},
parse_token::{parse_token, spl_token_ids},
parse_vote::parse_vote,
},
inflector::Inflector,
@@ -21,7 +21,6 @@ lazy_static! {
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
@@ -31,7 +30,9 @@ lazy_static! {
);
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
for spl_token_id in spl_token_ids() {
m.insert(spl_token_id, ParsableAccount::SplToken);
}
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
@@ -15,16 +15,31 @@ use {
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id() -> Pubkey {
fn spl_token_id() -> Pubkey {
Pubkey::new_from_array(spl_token::id().to_bytes())
}

// Returns all known SPL Token program ids
pub fn spl_token_ids() -> Vec<Pubkey> {
vec![spl_token_id()]
}

// Check if the provided program id as a known SPL Token program id
pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool {
*program_id == spl_token_id()
}

// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
}

// The program id of the `spl_token_native_mint` account
pub fn spl_token_native_mint_program_id() -> Pubkey {
spl_token_id()
}

// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::new_from_array(pubkey.to_bytes())
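A hedged usage sketch of the new id check above (illustrative only; the only names taken from this hunk are `is_known_spl_token_id` and the `solana_account_decoder::parse_token` module, everything else is made up for the example):

```rust
// Illustrative sketch, not code from this repository.
use solana_account_decoder::parse_token::is_known_spl_token_id;
use solana_sdk::pubkey::Pubkey;

// Callers no longer compare against a single TOKEN_PROGRAM_ID; they ask
// whether the owner is any known SPL Token program id.
fn looks_like_token_account(owner: &Pubkey) -> bool {
    is_known_spl_token_id(owner)
}
```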
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.9.0"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
clap = "2.33.1"

[package.metadata.docs.rs]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.9.0"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,24 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-core = { path = "../core", version = "=1.9.13" }
solana-faucet = { path = "../faucet", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -23,6 +23,7 @@ use {
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::parse_token::spl_token_instruction,
std::{
cmp::min,
net::SocketAddr,
process::exit,
sync::{
@@ -156,24 +157,30 @@ fn make_close_message(
fn make_close_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_closed_seed: Arc<AtomicU64>,
max_created: Arc<AtomicU64>,
max_closed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
.filter_map(|_| {
let program_id = if spl_token {
inline_spl_token::id()
} else {
system_program::id()
};
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
let max_created_seed = max_created.load(Ordering::Relaxed);
let max_closed_seed = max_closed.load(Ordering::Relaxed);
if max_closed_seed >= max_created_seed {
return None;
}
let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_instruction(
Some(spl_token_instruction(
spl_token::instruction::close_account(
&spl_token::id(),
&spl_token_pubkey(&address),
@@ -182,16 +189,16 @@ fn make_close_message(
&[],
)
.unwrap(),
)
))
} else {
system_instruction::transfer_with_seed(
Some(system_instruction::transfer_with_seed(
&address,
&base_keypair.pubkey(),
seed,
&program_id,
&keypair.pubkey(),
balance,
)
))
}
})
.collect();
@@ -211,6 +218,7 @@ fn run_accounts_bench(
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
reclaim_accounts: bool,
) {
assert!(num_instructions > 0);
let client =
@@ -350,6 +358,7 @@ fn run_accounts_bench(
let message = make_close_message(
payer_keypairs[0],
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
1,
min_balance,
@@ -372,7 +381,7 @@ fn run_accounts_bench(
}

count += 1;
if last_log.elapsed().as_millis() > 3000 {
if last_log.elapsed().as_millis() > 3000 || count >= iterations {
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
@@ -387,6 +396,83 @@ fn run_accounts_bench(
}
}
executor.close();

if reclaim_accounts {
let executor = TransactionExecutor::new(entrypoint_addr);
loop {
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);

if latest_blockhash.elapsed().as_millis() > 10_000 {
blockhash = client.get_latest_blockhash().expect("blockhash");
latest_blockhash = Instant::now();
}
message.recent_blockhash = blockhash;
let fee = client
.get_fee_for_message(&message)
.expect("get_fee_for_message");

let sigs_len = executor.num_outstanding();
if sigs_len < batch_size && max_closed_seed < max_created_seed {
let num_to_close = min(
batch_size - sigs_len,
(max_created_seed - max_closed_seed) as usize,
);
if num_to_close >= payer_keypairs.len() {
info!("closing {} accounts", num_to_close);
let chunk_size = num_to_close / payer_keypairs.len();
info!("{:?} chunk_size", chunk_size);
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
.into_par_iter()
.filter_map(|_| {
let message = make_close_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
num_instructions,
min_balance,
mint.is_some(),
);
if message.instructions.is_empty() {
return None;
}
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Some(Transaction::new(&signers, message, blockhash))
})
.collect();
balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_closed += (num_instructions * new_ids.len()) as u64;
}
}
}
} else {
let _ = executor.drain_cleared();
}
count += 1;
if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
info!(
"total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_closed, tx_sent_count, count, balances
);
last_log = Instant::now();
}

if max_closed_seed >= max_created_seed {
break;
}
if executor.num_outstanding() >= batch_size {
sleep(Duration::from_millis(500));
}
}
executor.close();
}
}

fn main() {
@@ -462,7 +548,7 @@ fn main() {
.long("iterations")
.takes_value(true)
.value_name("NUM")
.help("Number of iterations to make"),
.help("Number of iterations to make. 0 = unlimited iterations."),
)
.arg(
Arg::with_name("check_gossip")
@@ -475,6 +561,12 @@ fn main() {
.takes_value(true)
.help("Mint address to initialize account"),
)
.arg(
Arg::with_name("reclaim_accounts")
.long("reclaim-accounts")
.takes_value(false)
.help("Reclaim accounts after session ends; incompatible with --iterations 0"),
)
.get_matches();

let skip_gossip = !matches.is_present("check_gossip");
@@ -556,6 +648,7 @@ fn main() {
lamports,
num_instructions,
mint,
matches.is_present("reclaim_accounts"),
);
}

@@ -564,18 +657,24 @@ pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_measure::measure::Measure,
solana_sdk::poh_config::PohConfig,
solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
solana_test_validator::TestValidator,
spl_token::{
solana_program::program_pack::Pack,
state::{Account, Mint},
},
};

#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default();
let validator_config = ValidatorConfig::default_for_test();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
@@ -605,6 +704,108 @@ pub mod test {
maybe_lamports,
num_instructions,
None,
false,
);
start.stop();
info!("{}", start);
}

#[test]
fn test_create_then_reclaim_spl_token_accounts() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

// Created funder
let funder = Keypair::new();
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
let signature = rpc_client
.request_airdrop_with_blockhash(
&funder.pubkey(),
sol_to_lamports(1.0),
&latest_blockhash,
)
.unwrap();
rpc_client
.confirm_transaction_with_spinner(
&signature,
&latest_blockhash,
CommitmentConfig::confirmed(),
)
.unwrap();

// Create Mint
let spl_mint_keypair = Keypair::new();
let spl_mint_len = Mint::get_packed_len();
let spl_mint_rent = rpc_client
.get_minimum_balance_for_rent_exemption(spl_mint_len)
.unwrap();
let transaction = Transaction::new_signed_with_payer(
&[
system_instruction::create_account(
&funder.pubkey(),
&spl_mint_keypair.pubkey(),
spl_mint_rent,
spl_mint_len as u64,
&inline_spl_token::id(),
),
spl_token_instruction(
spl_token::instruction::initialize_mint(
&spl_token::id(),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
None,
2,
)
.unwrap(),
),
],
Some(&funder.pubkey()),
&[&funder, &spl_mint_keypair],
latest_blockhash,
);
let _sig = rpc_client
.send_and_confirm_transaction(&transaction)
.unwrap();

let account_len = Account::get_packed_len();
let minimum_balance = rpc_client
.get_minimum_balance_for_rent_exemption(account_len)
.unwrap();

let iterations = 5;
let batch_size = 100;
let close_nth_batch = 0;
let num_instructions = 4;
let mut start = Measure::start("total accounts run");
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
run_accounts_bench(
test_validator
.rpc_url()
.replace("http://", "")
.parse()
.unwrap(),
faucet_addr,
&[&keypair0, &keypair1, &keypair2],
iterations,
Some(account_len as u64),
batch_size,
close_nth_batch,
Some(minimum_balance),
num_instructions,
Some(spl_mint_keypair.pubkey()),
true,
);
start.stop();
info!("{}", start);
@@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>

# Solana AccountsDb Plugin Interface

This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.

The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.

The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
external PostgreSQL databases.

More information about Solana is available in the [Solana documentation](https://docs.solana.com/).

Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
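The deleted README above describes the plugin contract in prose. A minimal sketch of such a plugin crate follows; it is an assumption-laden illustration (the trait path and the `name()` method are inferred from the README and the `accountsdb_plugin_interface` module name in this diff, not copied from the crate), not the project's reference implementation:

```rust
// Hypothetical cdylib plugin crate, per the deleted README above.
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin;

#[derive(Debug, Default)]
struct NoopPlugin;

impl AccountsDbPlugin for NoopPlugin {
    fn name(&self) -> &'static str {
        "noop-plugin"
    }
    // Account-update and slot-status callbacks would be overridden here;
    // the defaults are assumed to be no-ops.
}

/// C entry point named in the README: the validator loads the cdylib and
/// calls `_create_plugin()` to obtain the plugin trait object.
#[no_mangle]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(NoopPlugin::default());
    Box::into_raw(plugin)
}
```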
@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;
@@ -1,31 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +0,0 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod slot_status_notifier;
pub mod slot_status_observer;
pub mod transaction_notifier;
@@ -1,39 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[lib]
crate-type = ["cdylib", "rlib"]

[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"

[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,5 +0,0 @@
This is an example implementing the AccountsDb plugin for PostgreSQL database.
Please see the `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.

To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.
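As a usage note for the scripts mentioned above, a typical (hypothetical) way to apply them is with `psql`; the user, host, port and database name below are placeholders, not values defined by this repository:

```sh
# Hypothetical example; substitute your own connection details.
psql -U solana -h localhost -p 5433 -d solana_plugin -f scripts/create_schema.sql

# To tear the schema objects back down later:
psql -U solana -h localhost -p 5433 -d solana_plugin -f scripts/drop_schema.sql
```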
@@ -1,183 +0,0 @@
/**
 * This plugin implementation for PostgreSQL requires the following tables
 */
-- The table storing accounts

CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);

-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status VARCHAR(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);

-- Types for Transactions

Create TYPE "TransactionErrorCode" AS ENUM (
'AccountInUse',
'AccountLoadedTwice',
'AccountNotFound',
'ProgramAccountNotFound',
'InsufficientFundsForFee',
'InvalidAccountForFee',
'AlreadyProcessed',
'BlockhashNotFound',
'InstructionError',
'CallChainTooDeep',
'MissingSignatureForFee',
'InvalidAccountIndex',
'SignatureFailure',
'InvalidProgramForExecution',
'SanitizeFailure',
'ClusterMaintenance',
'AccountBorrowOutstanding',
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount'
);

CREATE TYPE "TransactionError" AS (
error_code "TransactionErrorCode",
error_detail VARCHAR(256)
);

CREATE TYPE "CompiledInstruction" AS (
program_id_index SMALLINT,
accounts SMALLINT[],
data BYTEA
);

CREATE TYPE "InnerInstructions" AS (
index SMALLINT,
instructions "CompiledInstruction"[]
);

CREATE TYPE "TransactionTokenBalance" AS (
account_index SMALLINT,
mint VARCHAR(44),
ui_token_amount DOUBLE PRECISION,
owner VARCHAR(44)
);

Create TYPE "RewardType" AS ENUM (
'Fee',
'Rent',
'Staking',
'Voting'
);

CREATE TYPE "Reward" AS (
pubkey VARCHAR(44),
lamports BIGINT,
post_balance BIGINT,
reward_type "RewardType",
commission SMALLINT
);

CREATE TYPE "TransactionStatusMeta" AS (
error "TransactionError",
fee BIGINT,
pre_balances BIGINT[],
post_balances BIGINT[],
inner_instructions "InnerInstructions"[],
log_messages TEXT[],
pre_token_balances "TransactionTokenBalance"[],
post_token_balances "TransactionTokenBalance"[],
rewards "Reward"[]
);

CREATE TYPE "TransactionMessageHeader" AS (
num_required_signatures SMALLINT,
num_readonly_signed_accounts SMALLINT,
num_readonly_unsigned_accounts SMALLINT
);

CREATE TYPE "TransactionMessage" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[]
);

CREATE TYPE "AddressMapIndexes" AS (
writable SMALLINT[],
readonly SMALLINT[]
);

CREATE TYPE "TransactionMessageV0" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_map_indexes "AddressMapIndexes"[]
);

CREATE TYPE "MappedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);

CREATE TYPE "MappedMessage" AS (
message "TransactionMessageV0",
mapped_addresses "MappedAddresses"
);

-- The table storing transactions
CREATE TABLE transaction (
slot BIGINT NOT NULL,
signature BYTEA NOT NULL,
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_mapped_message "MappedMessage",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",
updated_on TIMESTAMP NOT NULL,
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);

/**
 * The following is for keeping historical data for accounts and is not required for plugin to work.
 */
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);

CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);

CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;

$audit_account_update$ LANGUAGE plpgsql;

CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
@@ -1,25 +0,0 @@
/**
 * Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
 */

DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;

DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "MappedMessage" CASCADE;
DROP TYPE "MappedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "AddressMapIndexes" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;
@@ -1,802 +0,0 @@
|
||||
# This a reference configuration file for the PostgreSQL database version 14.
|
||||
|
||||
# -----------------------------
|
||||
# PostgreSQL configuration file
|
||||
# -----------------------------
|
||||
#
|
||||
# This file consists of lines of the form:
|
||||
#
|
||||
# name = value
|
||||
#
|
||||
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
|
||||
# "#" anywhere on a line. The complete list of parameter names and allowed
|
||||
# values can be found in the PostgreSQL documentation.
|
||||
#
|
||||
# The commented-out settings shown in this file represent the default values.
|
||||
# Re-commenting a setting is NOT sufficient to revert it to the default value;
|
||||
# you need to reload the server.
|
||||
#
|
||||
# This file is read on server startup and when the server receives a SIGHUP
|
||||
# signal. If you edit the file on a running system, you have to SIGHUP the
|
||||
# server for the changes to take effect, run "pg_ctl reload", or execute
|
||||
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
|
||||
# require a server shutdown and restart to take effect.
|
||||
#
|
||||
# Any parameter can also be given as a command-line option to the server, e.g.,
|
||||
# "postgres -c log_connections=on". Some parameters can be changed at run time
|
||||
# with the "SET" SQL command.
|
||||
#
|
||||
# Memory units: B = bytes Time units: us = microseconds
|
||||
# kB = kilobytes ms = milliseconds
|
||||
# MB = megabytes s = seconds
|
||||
# GB = gigabytes min = minutes
|
||||
# TB = terabytes h = hours
|
||||
# d = days
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# FILE LOCATIONS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# The default values of these variables are driven from the -D command-line
|
||||
# option or PGDATA environment variable, represented here as ConfigDir.
|
||||
|
||||
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
|
||||
# (change requires restart)
|
||||
|
||||
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
|
||||
# (change requires restart)
|
||||
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
|
||||
# (change requires restart)
|
||||
|
||||
# If external_pid_file is not explicitly set, no extra PID file is written.
|
||||
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CONNECTIONS AND AUTHENTICATION
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Connection Settings -
|
||||
|
||||
#listen_addresses = 'localhost' # what IP address(es) to listen on;
|
||||
# comma-separated list of addresses;
|
||||
# defaults to 'localhost'; use '*' for all
|
||||
# (change requires restart)
|
||||
listen_addresses = '*'
|
||||
port = 5433 # (change requires restart)
|
||||
max_connections = 200 # (change requires restart)
|
||||
#superuser_reserved_connections = 3 # (change requires restart)
|
||||
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
|
||||
# (change requires restart)
|
||||
#unix_socket_group = '' # (change requires restart)
|
||||
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
|
||||
# (change requires restart)
|
||||
#bonjour = off # advertise server via Bonjour
|
||||
# (change requires restart)
|
||||
#bonjour_name = '' # defaults to the computer name
|
||||
# (change requires restart)
|
||||
|
||||
# - TCP settings -
|
||||
# see "man tcp" for details
|
||||
|
||||
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
|
||||
# 0 selects the system default
|
||||
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
|
||||
# 0 selects the system default
|
||||
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
|
||||
# 0 selects the system default
|
||||
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
|
||||
# 0 selects the system default
|
||||
|
||||
#client_connection_check_interval = 0 # time between checks for client
|
||||
# disconnection while running queries;
|
||||
# 0 for never
|
||||
|
||||
# - Authentication -
|
||||
|
||||
#authentication_timeout = 1min # 1s-600s
|
||||
#password_encryption = scram-sha-256 # scram-sha-256 or md5
|
||||
#db_user_namespace = off
|
||||
|
||||
# GSSAPI using Kerberos
|
||||
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
|
||||
#krb_caseins_users = off
|
||||
|
||||
# - SSL -
|
||||
|
||||
ssl = on
|
||||
#ssl_ca_file = ''
|
||||
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
|
||||
#ssl_crl_file = ''
|
||||
#ssl_crl_dir = ''
|
||||
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
|
||||
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
|
||||
#ssl_prefer_server_ciphers = on
|
||||
#ssl_ecdh_curve = 'prime256v1'
|
||||
#ssl_min_protocol_version = 'TLSv1.2'
|
||||
#ssl_max_protocol_version = ''
|
||||
#ssl_dh_params_file = ''
|
||||
#ssl_passphrase_command = ''
|
||||
#ssl_passphrase_command_supports_reload = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# RESOURCE USAGE (except WAL)
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Memory -
|
||||
|
||||
shared_buffers = 1GB # min 128kB
|
||||
# (change requires restart)
|
||||
#huge_pages = try # on, off, or try
|
||||
# (change requires restart)
|
||||
#huge_page_size = 0 # zero for system default
|
||||
# (change requires restart)
|
||||
#temp_buffers = 8MB # min 800kB
|
||||
#max_prepared_transactions = 0 # zero disables the feature
|
||||
# (change requires restart)
|
||||
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
|
||||
# you actively intend to use prepared transactions.
|
||||
#work_mem = 4MB # min 64kB
|
||||
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
|
||||
#maintenance_work_mem = 64MB # min 1MB
|
||||
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
|
||||
#logical_decoding_work_mem = 64MB # min 64kB
|
||||
#max_stack_depth = 2MB # min 100kB
|
||||
#shared_memory_type = mmap # the default is the first option
|
||||
# supported by the operating system:
|
||||
# mmap
|
||||
# sysv
|
||||
# windows
|
||||
# (change requires restart)
|
||||
dynamic_shared_memory_type = posix # the default is the first option
|
||||
# supported by the operating system:
|
||||
# posix
|
||||
# sysv
|
||||
# windows
|
||||
# mmap
|
||||
# (change requires restart)
|
||||
#min_dynamic_shared_memory = 0MB # (change requires restart)
|
||||
|
||||
# - Disk -
|
||||
|
||||
#temp_file_limit = -1 # limits per-process temp file space
|
||||
# in kilobytes, or -1 for no limit
|
||||
|
||||
# - Kernel Resources -
|
||||
|
||||
#max_files_per_process = 1000 # min 64
|
||||
# (change requires restart)
|
||||
|
||||
# - Cost-Based Vacuum Delay -
|
||||
|
||||
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
|
||||
#vacuum_cost_page_hit = 1 # 0-10000 credits
|
||||
#vacuum_cost_page_miss = 2 # 0-10000 credits
|
||||
#vacuum_cost_page_dirty = 20 # 0-10000 credits
|
||||
#vacuum_cost_limit = 200 # 1-10000 credits
|
||||
|
||||
# - Background Writer -
|
||||
|
||||
#bgwriter_delay = 200ms # 10-10000ms between rounds
|
||||
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
|
||||
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
|
||||
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
|
||||
|
||||
# - Asynchronous Behavior -
|
||||
|
||||
#backend_flush_after = 0 # measured in pages, 0 disables
|
||||
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
|
||||
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
|
||||
#max_worker_processes = 8 # (change requires restart)
|
||||
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
|
||||
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
|
||||
#max_parallel_workers = 8 # maximum number of max_worker_processes that
|
||||
# can be used in parallel operations
|
||||
#parallel_leader_participation = on
|
||||
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# WRITE-AHEAD LOG
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Settings -
|
||||
|
||||
wal_level = minimal # minimal, replica, or logical
|
||||
# (change requires restart)
|
||||
fsync = off # flush data to disk for crash safety
|
||||
# (turning this off can cause
|
||||
# unrecoverable data corruption)
|
||||
synchronous_commit = off # synchronization level;
|
||||
# off, local, remote_write, remote_apply, or on
|
||||
#wal_sync_method = fsync # the default is the first option
|
||||
# supported by the operating system:
|
||||
# open_datasync
|
||||
# fdatasync (default on Linux and FreeBSD)
|
||||
# fsync
|
||||
# fsync_writethrough
|
||||
# open_sync
|
||||
full_page_writes = off # recover from partial page writes
|
||||
#wal_log_hints = off # also do full page writes of non-critical updates
|
||||
# (change requires restart)
|
||||
#wal_compression = off # enable compression of full-page writes
|
||||
#wal_init_zero = on # zero-fill new WAL files
|
||||
#wal_recycle = on # recycle WAL files
|
||||
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
|
||||
# (change requires restart)
|
||||
#wal_writer_delay = 200ms # 1-10000 milliseconds
|
||||
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
|
||||
#wal_skip_threshold = 2MB
|
||||
|
||||
#commit_delay = 0 # range 0-100000, in microseconds
|
||||
#commit_siblings = 5 # range 1-1000
|
||||
|
||||
# - Checkpoints -
|
||||
|
||||
#checkpoint_timeout = 5min # range 30s-1d
|
||||
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
|
||||
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
|
||||
#checkpoint_warning = 30s # 0 disables
|
||||
max_wal_size = 1GB
|
||||
min_wal_size = 80MB
|
||||
|
||||
# - Archiving -
|
||||
|
||||
#archive_mode = off # enables archiving; off, on, or always
|
||||
# (change requires restart)
|
||||
#archive_command = '' # command to use to archive a logfile segment
|
||||
# placeholders: %p = path of file to archive
|
||||
# %f = file name only
|
||||
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
|
||||
#archive_timeout = 0 # force a logfile segment switch after this
|
||||
# number of seconds; 0 disables
|
||||
|
||||
# - Archive Recovery -
|
||||
|
||||
# These are only used in recovery mode.
|
||||
|
||||
#restore_command = '' # command to use to restore an archived logfile segment
|
||||
# placeholders: %p = path of file to restore
|
||||
# %f = file name only
|
||||
# e.g. 'cp /mnt/server/archivedir/%f %p'
|
||||
#archive_cleanup_command = '' # command to execute at every restartpoint
|
||||
#recovery_end_command = '' # command to execute at completion of recovery
|
||||
|
||||
# - Recovery Target -
|
||||
|
||||
# Set these only when performing a targeted recovery.
|
||||
|
||||
#recovery_target = '' # 'immediate' to end recovery as soon as a
|
||||
# consistent state is reached
|
||||
# (change requires restart)
|
||||
#recovery_target_name = '' # the named restore point to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_time = '' # the time stamp up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
|
||||
# (change requires restart)
|
||||
#recovery_target_inclusive = on # Specifies whether to stop:
|
||||
# just after the specified recovery target (on)
|
||||
# just before the recovery target (off)
|
||||
# (change requires restart)
|
||||
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
|
||||
# (change requires restart)
|
||||
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# REPLICATION
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Sending Servers -
|
||||
|
||||
# Set these on the primary and on any standby that will send replication data.
|
||||
|
||||
max_wal_senders = 0 # max number of walsender processes
|
||||
# (change requires restart)
|
||||
#max_replication_slots = 10 # max number of replication slots
|
||||
# (change requires restart)
|
||||
#wal_keep_size = 0 # in megabytes; 0 disables
|
||||
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
|
||||
#wal_sender_timeout = 60s # in milliseconds; 0 disables
|
||||
#track_commit_timestamp = off # collect timestamp of transaction commit
|
||||
# (change requires restart)
|
||||
|
||||
# - Primary Server -
|
||||
|
||||
# These settings are ignored on a standby server.
|
||||
|
||||
#synchronous_standby_names = '' # standby servers that provide sync rep
|
||||
# method to choose sync standbys, number of sync standbys,
|
||||
# and comma-separated list of application_name
|
||||
# from standby(s); '*' = all
|
||||
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
|
||||
|
||||
# - Standby Servers -
|
||||
|
||||
# These settings are ignored on a primary server.
|
||||
|
||||
#primary_conninfo = '' # connection string to sending server
|
||||
#primary_slot_name = '' # replication slot on sending server
|
||||
#promote_trigger_file = '' # file name whose presence ends recovery
|
||||
#hot_standby = on # "off" disallows queries during recovery
|
||||
# (change requires restart)
|
||||
#max_standby_archive_delay = 30s # max delay before canceling queries
|
||||
# when reading WAL from archive;
|
||||
# -1 allows indefinite delay
|
||||
#max_standby_streaming_delay = 30s # max delay before canceling queries
|
||||
# when reading streaming WAL;
|
||||
# -1 allows indefinite delay
|
||||
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
|
||||
# is not set
|
||||
#wal_receiver_status_interval = 10s # send replies at least this often
|
||||
# 0 disables
|
||||
#hot_standby_feedback = off # send info from standby to prevent
|
||||
# query conflicts
|
||||
#wal_receiver_timeout = 60s # time that receiver waits for
|
||||
# communication from primary
|
||||
# in milliseconds; 0 disables
|
||||
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
|
||||
# retrieve WAL after a failed attempt
|
||||
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
|
||||
|
||||
# - Subscribers -
|
||||
|
||||
# These settings are ignored on a publisher.
|
||||
|
||||
#max_logical_replication_workers = 4 # taken from max_worker_processes
|
||||
# (change requires restart)
|
||||
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# QUERY TUNING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Planner Method Configuration -
|
||||
|
||||
#enable_async_append = on
|
||||
#enable_bitmapscan = on
|
||||
#enable_gathermerge = on
|
||||
#enable_hashagg = on
|
||||
#enable_hashjoin = on
|
||||
#enable_incremental_sort = on
|
||||
#enable_indexscan = on
|
||||
#enable_indexonlyscan = on
|
||||
#enable_material = on
|
||||
#enable_memoize = on
|
||||
#enable_mergejoin = on
|
||||
#enable_nestloop = on
|
||||
#enable_parallel_append = on
|
||||
#enable_parallel_hash = on
|
||||
#enable_partition_pruning = on
|
||||
#enable_partitionwise_join = off
|
||||
#enable_partitionwise_aggregate = off
|
||||
#enable_seqscan = on
|
||||
#enable_sort = on
|
||||
#enable_tidscan = on
|
||||
|
||||
# - Planner Cost Constants -
|
||||
|
||||
#seq_page_cost = 1.0 # measured on an arbitrary scale
|
||||
#random_page_cost = 4.0 # same scale as above
|
||||
#cpu_tuple_cost = 0.01 # same scale as above
|
||||
#cpu_index_tuple_cost = 0.005 # same scale as above
|
||||
#cpu_operator_cost = 0.0025 # same scale as above
|
||||
#parallel_setup_cost = 1000.0 # same scale as above
|
||||
#parallel_tuple_cost = 0.1 # same scale as above
|
||||
#min_parallel_table_scan_size = 8MB
|
||||
#min_parallel_index_scan_size = 512kB
|
||||
#effective_cache_size = 4GB
|
||||
|
||||
#jit_above_cost = 100000 # perform JIT compilation if available
|
||||
# and query more expensive than this;
|
||||
# -1 disables
|
||||
#jit_inline_above_cost = 500000 # inline small functions if query is
|
||||
# more expensive than this; -1 disables
|
||||
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
|
||||
# query is more expensive than this;
|
||||
# -1 disables
|
||||
|
||||
# - Genetic Query Optimizer -
|
||||
|
||||
#geqo = on
|
||||
#geqo_threshold = 12
|
||||
#geqo_effort = 5 # range 1-10
|
||||
#geqo_pool_size = 0 # selects default based on effort
|
||||
#geqo_generations = 0 # selects default based on effort
|
||||
#geqo_selection_bias = 2.0 # range 1.5-2.0
|
||||
#geqo_seed = 0.0 # range 0.0-1.0
|
||||
|
||||
# - Other Planner Options -
|
||||
|
||||
#default_statistics_target = 100 # range 1-10000
|
||||
#constraint_exclusion = partition # on, off, or partition
|
||||
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
|
||||
#from_collapse_limit = 8
|
||||
#jit = on # allow JIT compilation
|
||||
#join_collapse_limit = 8 # 1 disables collapsing of explicit
|
||||
# JOIN clauses
|
||||
#plan_cache_mode = auto # auto, force_generic_plan or
|
||||
# force_custom_plan
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# REPORTING AND LOGGING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Where to Log -
|
||||
|
||||
#log_destination = 'stderr' # Valid values are combinations of
|
||||
# stderr, csvlog, syslog, and eventlog,
|
||||
# depending on platform. csvlog
|
||||
# requires logging_collector to be on.
|
||||
|
||||
# This is used when logging to stderr:
|
||||
#logging_collector = off # Enable capturing of stderr and csvlog
|
||||
# into log files. Required to be on for
|
||||
# csvlogs.
|
||||
# (change requires restart)
|
||||
|
||||
# These are only used if logging_collector is on:
|
||||
#log_directory = 'log' # directory where log files are written,
|
||||
# can be absolute or relative to PGDATA
|
||||
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
|
||||
# can include strftime() escapes
|
||||
#log_file_mode = 0600 # creation mode for log files,
|
||||
# begin with 0 to use octal notation
|
||||
#log_rotation_age = 1d # Automatic rotation of logfiles will
|
||||
# happen after that time. 0 disables.
|
||||
#log_rotation_size = 10MB # Automatic rotation of logfiles will
|
||||
# happen after that much log output.
|
||||
# 0 disables.
|
||||
#log_truncate_on_rotation = off # If on, an existing log file with the
|
||||
# same name as the new log file will be
|
||||
# truncated rather than appended to.
|
||||
# But such truncation only occurs on
|
||||
# time-driven rotation, not on restarts
|
||||
# or size-driven rotation. Default is
|
||||
# off, meaning append to existing files
|
||||
# in all cases.
|
||||
|
||||
# These are relevant when logging to syslog:
|
||||
#syslog_facility = 'LOCAL0'
|
||||
#syslog_ident = 'postgres'
|
||||
#syslog_sequence_numbers = on
|
||||
#syslog_split_messages = on
|
||||
|
||||
# This is only relevant when logging to eventlog (Windows):
|
||||
# (change requires restart)
|
||||
#event_source = 'PostgreSQL'
|
||||
|
||||
# - When to Log -
|
||||
|
||||
#log_min_messages = warning # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# info
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
# log
|
||||
# fatal
|
||||
# panic
|
||||
|
||||
#log_min_error_statement = error # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# info
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
# log
|
||||
# fatal
|
||||
# panic (effectively off)
|
||||
|
||||
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
|
||||
# and their durations, > 0 logs only
|
||||
# statements running at least this number
|
||||
# of milliseconds
|
||||
|
||||
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
|
||||
# and their durations, > 0 logs only a sample of
|
||||
# statements running at least this number
|
||||
# of milliseconds;
|
||||
# sample fraction is determined by log_statement_sample_rate
|
||||
|
||||
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
|
||||
# log_min_duration_sample to be logged;
|
||||
# 1.0 logs all such statements, 0.0 never logs
|
||||
|
||||
|
||||
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
|
||||
# are logged regardless of their duration; 1.0 logs all
|
||||
# statements from all transactions, 0.0 never logs
|
||||
|
||||
# - What to Log -
|
||||
|
||||
#debug_print_parse = off
|
||||
#debug_print_rewritten = off
|
||||
#debug_print_plan = off
|
||||
#debug_pretty_print = on
|
||||
#log_autovacuum_min_duration = -1 # log autovacuum activity;
|
||||
# -1 disables, 0 logs all actions and
|
||||
# their durations, > 0 logs only
|
||||
# actions running at least this number
|
||||
# of milliseconds.
|
||||
#log_checkpoints = off
|
||||
#log_connections = off
|
||||
#log_disconnections = off
|
||||
#log_duration = off
|
||||
#log_error_verbosity = default # terse, default, or verbose messages
|
||||
#log_hostname = off
|
||||
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
|
||||
# %a = application name
|
||||
# %u = user name
|
||||
# %d = database name
|
||||
# %r = remote host and port
|
||||
# %h = remote host
|
||||
# %b = backend type
|
||||
# %p = process ID
|
||||
# %P = process ID of parallel group leader
|
||||
# %t = timestamp without milliseconds
|
||||
# %m = timestamp with milliseconds
|
||||
# %n = timestamp with milliseconds (as a Unix epoch)
|
||||
# %Q = query ID (0 if none or not computed)
|
||||
# %i = command tag
|
||||
# %e = SQL state
|
||||
# %c = session ID
|
||||
# %l = session line number
|
||||
# %s = session start timestamp
|
||||
# %v = virtual transaction ID
|
||||
# %x = transaction ID (0 if none)
|
||||
# %q = stop here in non-session
|
||||
# processes
|
||||
# %% = '%'
|
||||
# e.g. '<%u%%%d> '
|
||||
#log_lock_waits = off # log lock waits >= deadlock_timeout
|
||||
#log_recovery_conflict_waits = off # log standby recovery conflict waits
|
||||
# >= deadlock_timeout
|
||||
#log_parameter_max_length = -1 # when logging statements, limit logged
|
||||
# bind-parameter values to N bytes;
|
||||
# -1 means print in full, 0 disables
|
||||
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
|
||||
# bind-parameter values to N bytes;
|
||||
# -1 means print in full, 0 disables
|
||||
#log_statement = 'none' # none, ddl, mod, all
|
||||
#log_replication_commands = off
|
||||
#log_temp_files = -1 # log temporary files equal or larger
|
||||
# than the specified size in kilobytes;
|
||||
# -1 disables, 0 logs all temp files
|
||||
log_timezone = 'Etc/UTC'
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# PROCESS TITLE
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
cluster_name = '14/main' # added to process titles if nonempty
|
||||
# (change requires restart)
|
||||
#update_process_title = on
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# STATISTICS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Query and Index Statistics Collector -
|
||||
|
||||
#track_activities = on
|
||||
#track_activity_query_size = 1024 # (change requires restart)
|
||||
#track_counts = on
|
||||
#track_io_timing = off
|
||||
#track_wal_io_timing = off
|
||||
#track_functions = none # none, pl, all
|
||||
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
|
||||
|
||||
|
||||
# - Monitoring -
|
||||
|
||||
#compute_query_id = auto
|
||||
#log_statement_stats = off
|
||||
#log_parser_stats = off
|
||||
#log_planner_stats = off
|
||||
#log_executor_stats = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# AUTOVACUUM
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#autovacuum = on # Enable autovacuum subprocess? 'on'
|
||||
# requires track_counts to also be on.
|
||||
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
|
||||
# (change requires restart)
|
||||
#autovacuum_naptime = 1min # time between autovacuum runs
|
||||
#autovacuum_vacuum_threshold = 50 # min number of row updates before
|
||||
# vacuum
|
||||
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
|
||||
# before vacuum; -1 disables insert
|
||||
# vacuums
|
||||
#autovacuum_analyze_threshold = 50 # min number of row updates before
|
||||
# analyze
|
||||
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
|
||||
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
|
||||
# size before insert vacuum
|
||||
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
|
||||
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
|
||||
# before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
|
||||
# autovacuum, in milliseconds;
|
||||
# -1 means use vacuum_cost_delay
|
||||
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
|
||||
# autovacuum, -1 means use
|
||||
# vacuum_cost_limit
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CLIENT CONNECTION DEFAULTS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Statement Behavior -
|
||||
|
||||
#client_min_messages = notice # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# log
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
#search_path = '"$user", public' # schema names
|
||||
#row_security = on
|
||||
#default_table_access_method = 'heap'
|
||||
#default_tablespace = '' # a tablespace name, '' uses the default
|
||||
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
|
||||
#temp_tablespaces = '' # a list of tablespace names, '' uses
|
||||
# only default tablespace
|
||||
#check_function_bodies = on
|
||||
#default_transaction_isolation = 'read committed'
|
||||
#default_transaction_read_only = off
|
||||
#default_transaction_deferrable = off
|
||||
#session_replication_role = 'origin'
|
||||
#statement_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#lock_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#vacuum_freeze_table_age = 150000000
|
||||
#vacuum_freeze_min_age = 50000000
|
||||
#vacuum_failsafe_age = 1600000000
|
||||
#vacuum_multixact_freeze_table_age = 150000000
|
||||
#vacuum_multixact_freeze_min_age = 5000000
|
||||
#vacuum_multixact_failsafe_age = 1600000000
|
||||
#bytea_output = 'hex' # hex, escape
|
||||
#xmlbinary = 'base64'
|
||||
#xmloption = 'content'
|
||||
#gin_pending_list_limit = 4MB
|
||||
|
||||
# - Locale and Formatting -
|
||||
|
||||
datestyle = 'iso, mdy'
|
||||
#intervalstyle = 'postgres'
|
||||
timezone = 'Etc/UTC'
|
||||
#timezone_abbreviations = 'Default' # Select the set of available time zone
|
||||
# abbreviations. Currently, there are
|
||||
# Default
|
||||
# Australia (historical usage)
|
||||
# India
|
||||
# You can create your own file in
|
||||
# share/timezonesets/.
|
||||
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
|
||||
# selects precise output mode
|
||||
#client_encoding = sql_ascii # actually, defaults to database
|
||||
# encoding
|
||||
|
||||
# These settings are initialized by initdb, but they can be changed.
|
||||
lc_messages = 'C.UTF-8' # locale for system error message
|
||||
# strings
|
||||
lc_monetary = 'C.UTF-8' # locale for monetary formatting
|
||||
lc_numeric = 'C.UTF-8' # locale for number formatting
|
||||
lc_time = 'C.UTF-8' # locale for time formatting
|
||||
|
||||
# default configuration for text search
|
||||
default_text_search_config = 'pg_catalog.english'
|
||||
|
||||
# - Shared Library Preloading -
|
||||
|
||||
#local_preload_libraries = ''
|
||||
#session_preload_libraries = ''
|
||||
#shared_preload_libraries = '' # (change requires restart)
|
||||
#jit_provider = 'llvmjit' # JIT library to use
|
||||
|
||||
# - Other Defaults -
|
||||
|
||||
#dynamic_library_path = '$libdir'
|
||||
#extension_destdir = '' # prepend path when loading extensions
|
||||
# and shared objects (added by Debian)
|
||||
#gin_fuzzy_search_limit = 0
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# LOCK MANAGEMENT
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#deadlock_timeout = 1s
|
||||
#max_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_relation = -2 # negative values mean
|
||||
# (max_pred_locks_per_transaction
|
||||
# / -max_pred_locks_per_relation) - 1
|
||||
#max_pred_locks_per_page = 2 # min 0
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# VERSION AND PLATFORM COMPATIBILITY
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Previous PostgreSQL Versions -
|
||||
|
||||
#array_nulls = on
|
||||
#backslash_quote = safe_encoding # on, off, or safe_encoding
|
||||
#escape_string_warning = on
|
||||
#lo_compat_privileges = off
|
||||
#quote_all_identifiers = off
|
||||
#standard_conforming_strings = on
|
||||
#synchronize_seqscans = on
|
||||
|
||||
# - Other Platforms and Clients -
|
||||
|
||||
#transform_null_equals = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# ERROR HANDLING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#exit_on_error = off # terminate session on any error?
|
||||
#restart_after_crash = on # reinitialize after backend crash?
|
||||
#data_sync_retry = off # retry or panic on failure to fsync
|
||||
# data?
|
||||
# (change requires restart)
|
||||
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CONFIG FILE INCLUDES
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# These options allow settings to be loaded from files other than the
|
||||
# default postgresql.conf. Note that these are directives, not variable
|
||||
# assignments, so they can usefully be given more than once.
|
||||
|
||||
include_dir = 'conf.d' # include files ending in '.conf' from
|
||||
# a directory, e.g., 'conf.d'
|
||||
#include_if_exists = '...' # include file only if it exists
|
||||
#include = '...' # include file
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CUSTOMIZED OPTIONS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# Add settings for extensions here
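# Illustrative examples only (added; not part of the original file). Custom GUCs
# like these take effect only if the matching extension/module is loaded, e.g. via
# shared_preload_libraries or session_preload_libraries above:
#auto_explain.log_min_duration = '3s'
#pg_stat_statements.track = all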
|
@@ -1,74 +0,0 @@
|
||||
use {log::*, std::collections::HashSet};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AccountsSelector {
|
||||
pub accounts: HashSet<Vec<u8>>,
|
||||
pub owners: HashSet<Vec<u8>>,
|
||||
pub select_all_accounts: bool,
|
||||
}
|
||||
|
||||
impl AccountsSelector {
|
||||
pub fn default() -> Self {
|
||||
AccountsSelector {
|
||||
accounts: HashSet::default(),
|
||||
owners: HashSet::default(),
|
||||
select_all_accounts: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(accounts: &[String], owners: &[String]) -> Self {
|
||||
info!(
|
||||
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
|
||||
accounts, owners
|
||||
);
|
||||
|
||||
let select_all_accounts = accounts.iter().any(|key| key == "*");
|
||||
if select_all_accounts {
|
||||
return AccountsSelector {
|
||||
accounts: HashSet::default(),
|
||||
owners: HashSet::default(),
|
||||
select_all_accounts,
|
||||
};
|
||||
}
|
||||
let accounts = accounts
|
||||
.iter()
|
||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
||||
.collect();
|
||||
let owners = owners
|
||||
.iter()
|
||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
||||
.collect();
|
||||
AccountsSelector {
|
||||
accounts,
|
||||
owners,
|
||||
select_all_accounts,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
|
||||
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
|
||||
}
|
||||
|
||||
/// Check if any account is of interest at all
|
||||
pub fn is_enabled(&self) -> bool {
|
||||
self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_create_accounts_selector() {
|
||||
AccountsSelector::new(
|
||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
||||
&[],
|
||||
);
|
||||
|
||||
AccountsSelector::new(
|
||||
&[],
|
||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
||||
);
|
||||
}
|
||||
}
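As a usage illustration (added here, not part of the original module), the sketch below shows how the selector behaves when only `owners` is configured; the base58 key is the same placeholder used in the tests above.

#[cfg(test)]
mod selector_usage_sketch {
    use super::*;

    #[test]
    fn owners_only_selection() {
        let owner = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string();
        let selector = AccountsSelector::new(&[], &[owner.clone()]);

        // An owner filter is present, so the selector reports itself as enabled.
        assert!(selector.is_enabled());

        // Selection matches on the raw owner bytes, not on the account's own pubkey.
        let owner_bytes = bs58::decode(&owner).into_vec().unwrap();
        assert!(selector.is_account_selected(&[0u8; 32], &owner_bytes));
        assert!(!selector.is_account_selected(&[0u8; 32], &[1u8; 32]));
    }
}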
|
@@ -1,437 +0,0 @@
|
||||
use solana_measure::measure::Measure;
|
||||
/// Main entry for the PostgreSQL plugin
|
||||
use {
|
||||
crate::{
|
||||
accounts_selector::AccountsSelector,
|
||||
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
|
||||
transaction_selector::TransactionSelector,
|
||||
},
|
||||
bs58,
|
||||
log::*,
|
||||
serde_derive::{Deserialize, Serialize},
|
||||
serde_json,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
|
||||
ReplicaTransactionInfoVersions, Result, SlotStatus,
|
||||
},
|
||||
solana_metrics::*,
|
||||
std::{fs::File, io::Read},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AccountsDbPluginPostgres {
|
||||
client: Option<ParallelPostgresClient>,
|
||||
accounts_selector: Option<AccountsSelector>,
|
||||
transaction_selector: Option<TransactionSelector>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for AccountsDbPluginPostgres {
|
||||
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct AccountsDbPluginPostgresConfig {
|
||||
pub host: Option<String>,
|
||||
pub user: Option<String>,
|
||||
pub port: Option<u16>,
|
||||
pub connection_str: Option<String>,
|
||||
pub threads: Option<usize>,
|
||||
pub batch_size: Option<usize>,
|
||||
pub panic_on_db_errors: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsDbPluginPostgresError {
|
||||
#[error("Error connecting to the backend data store. Error message: ({msg})")]
|
||||
DataStoreConnectionError { msg: String },
|
||||
|
||||
#[error("Error preparing data store schema. Error message: ({msg})")]
|
||||
DataSchemaError { msg: String },
|
||||
|
||||
#[error("Error preparing data store schema. Error message: ({msg})")]
|
||||
ConfigurationError { msg: String },
|
||||
}
|
||||
|
||||
impl AccountsDbPlugin for AccountsDbPluginPostgres {
|
||||
fn name(&self) -> &'static str {
|
||||
"AccountsDbPluginPostgres"
|
||||
}
|
||||
|
||||
/// Do initialization for the PostgreSQL plugin.
|
||||
///
|
||||
/// # Format of the config file:
|
||||
/// * The `accounts_selector` section allows the user to control account selection.
|
||||
/// "accounts_selector" : {
|
||||
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
|
||||
/// }
|
||||
/// or:
|
||||
/// "accounts_selector" = {
|
||||
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
|
||||
/// }
|
||||
/// Accounts satisfying either the accounts condition or the owners condition will be selected.
|
||||
/// When only owners is specified,
|
||||
/// all accounts belonging to the owners will be streamed.
|
||||
/// The accounts field supports a wildcard to select all accounts:
|
||||
/// "accounts_selector" : {
|
||||
/// "accounts" : \["*"\],
|
||||
/// }
|
||||
/// * "host", optional, specifies the PostgreSQL server.
|
||||
/// * "user", optional, specifies the PostgreSQL user.
|
||||
/// * "port", optional, specifies the PostgreSQL server's port.
|
||||
/// * "connection_str", optional, the custom PostgreSQL connection string.
|
||||
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
|
||||
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
|
||||
/// `host` and `user` must be given.
|
||||
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
|
||||
/// maintains a PostgreSQL connection to the server. The default is '10'.
|
||||
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
|
||||
/// from restoring a snapshot. The default is '10'.
|
||||
/// * "panic_on_db_errors", optional, contols if to panic when there are errors replicating data to the
|
||||
/// PostgreSQL database. The default is 'false'.
|
||||
/// * "transaction_selector", optional, controls if and what transaction to store. If this field is missing
|
||||
/// None of the transction is stored.
|
||||
/// "transaction_selector" : {
|
||||
/// "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
|
||||
/// }
|
||||
/// The `mentions` field supports a wildcard to select all transactions or all 'vote' transactions:
|
||||
/// For example, to select all transactions:
|
||||
/// "transaction_selector" : {
|
||||
/// "mentions" : \["*"\],
|
||||
/// }
|
||||
/// To select all vote transactions:
|
||||
/// "transaction_selector" : {
|
||||
/// "mentions" : \["all_votes"\],
|
||||
/// }
|
||||
/// # Examples
|
||||
///
|
||||
/// {
|
||||
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
|
||||
/// "host": "host_foo",
|
||||
/// "user": "solana",
|
||||
/// "threads": 10,
|
||||
/// "accounts_selector" : {
|
||||
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
|
||||
/// }
|
||||
/// }
|
||||
|
||||
fn on_load(&mut self, config_file: &str) -> Result<()> {
|
||||
solana_logger::setup_with_default("info");
|
||||
info!(
|
||||
"Loading plugin {:?} from config_file {:?}",
|
||||
self.name(),
|
||||
config_file
|
||||
);
|
||||
let mut file = File::open(config_file)?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
|
||||
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
|
||||
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
|
||||
self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));
|
||||
|
||||
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
|
||||
serde_json::from_str(&contents);
|
||||
match result {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::ConfigFileReadError {
|
||||
msg: format!(
|
||||
"The config file is not in the JSON format expected: {:?}",
|
||||
err
|
||||
),
|
||||
})
|
||||
}
|
||||
Ok(config) => {
|
||||
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
|
||||
self.client = Some(client);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn on_unload(&mut self) {
|
||||
info!("Unloading plugin: {:?}", self.name());
|
||||
|
||||
match &mut self.client {
|
||||
None => {}
|
||||
Some(client) => {
|
||||
client.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: ReplicaAccountInfoVersions,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<()> {
|
||||
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
|
||||
match account {
|
||||
ReplicaAccountInfoVersions::V0_0_1(account) => {
|
||||
let mut measure_select =
|
||||
Measure::start("accountsdb-plugin-postgres-update-account-select");
|
||||
if let Some(accounts_selector) = &self.accounts_selector {
|
||||
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
|
||||
return Ok(());
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
measure_select.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-select-us",
|
||||
measure_select.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
debug!(
|
||||
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
bs58::encode(account.owner).into_string(),
|
||||
slot,
|
||||
self.accounts_selector.as_ref().unwrap()
|
||||
);
|
||||
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database."
|
||||
.to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let mut measure_update =
|
||||
Measure::start("accountsdb-plugin-postgres-update-account-client");
|
||||
let result = { client.update_account(account, slot, is_startup) };
|
||||
measure_update.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-client-us",
|
||||
measure_update.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
||||
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
measure_all.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-main-us",
|
||||
measure_all.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<()> {
|
||||
info!("Updating slot {:?} at with status {:?}", slot, status);
|
||||
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let result = client.update_slot_status(slot, parent, status);
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
||||
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<()> {
|
||||
info!("Notifying the end of startup for accounts notifications");
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let result = client.notify_end_of_startup();
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
||||
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn notify_transaction(
|
||||
&mut self,
|
||||
transaction_info: ReplicaTransactionInfoVersions,
|
||||
slot: u64,
|
||||
) -> Result<()> {
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => match transaction_info {
|
||||
ReplicaTransactionInfoVersions::V0_0_1(transaction_info) => {
|
||||
if let Some(transaction_selector) = &self.transaction_selector {
|
||||
if !transaction_selector.is_transaction_selected(
|
||||
transaction_info.is_vote,
|
||||
transaction_info.transaction.message().account_keys_iter(),
|
||||
) {
|
||||
return Ok(());
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let result = client.log_transaction_info(transaction_info, slot);
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
||||
msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if the plugin is interested in account data
|
||||
/// Default is true; if the plugin is not interested in
/// account data, return false.
|
||||
fn account_data_notifications_enabled(&self) -> bool {
|
||||
self.accounts_selector
|
||||
.as_ref()
|
||||
.map_or_else(|| false, |selector| selector.is_enabled())
|
||||
}
|
||||
|
||||
/// Check if the plugin is interested in transaction data
|
||||
fn transaction_notifications_enabled(&self) -> bool {
|
||||
self.transaction_selector
|
||||
.as_ref()
|
||||
.map_or_else(|| false, |selector| selector.is_enabled())
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountsDbPluginPostgres {
|
||||
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
|
||||
let accounts_selector = &config["accounts_selector"];
|
||||
|
||||
if accounts_selector.is_null() {
|
||||
AccountsSelector::default()
|
||||
} else {
|
||||
let accounts = &accounts_selector["accounts"];
|
||||
let accounts: Vec<String> = if accounts.is_array() {
|
||||
accounts
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::default()
|
||||
};
|
||||
let owners = &accounts_selector["owners"];
|
||||
let owners: Vec<String> = if owners.is_array() {
|
||||
owners
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::default()
|
||||
};
|
||||
AccountsSelector::new(&accounts, &owners)
|
||||
}
|
||||
}
|
||||
|
||||
fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector {
|
||||
let transaction_selector = &config["transaction_selector"];
|
||||
|
||||
if transaction_selector.is_null() {
|
||||
TransactionSelector::default()
|
||||
} else {
|
||||
let accounts = &transaction_selector["mentions"];
|
||||
let accounts: Vec<String> = if accounts.is_array() {
|
||||
accounts
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::default()
|
||||
};
|
||||
TransactionSelector::new(&accounts)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
#[allow(improper_ctypes_definitions)]
|
||||
/// # Safety
|
||||
///
|
||||
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
|
||||
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
|
||||
let plugin = AccountsDbPluginPostgres::new();
|
||||
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
|
||||
Box::into_raw(plugin)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use {super::*, serde_json};
|
||||
|
||||
#[test]
|
||||
fn test_accounts_selector_from_config() {
|
||||
let config = "{\"accounts_selector\" : { \
|
||||
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
|
||||
}}";
|
||||
|
||||
let config: serde_json::Value = serde_json::from_str(config).unwrap();
|
||||
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
|
||||
}
|
||||
}
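A companion sketch (added; not in the original file) for the transaction-selector path: it mirrors the accounts-selector test above and uses the wildcard `mentions` form documented in the `on_load` comments.

#[cfg(test)]
pub(crate) mod transaction_selector_config_tests {
    use {super::*, serde_json};

    #[test]
    fn test_transaction_selector_from_config() {
        // Wildcard form documented above: select every transaction.
        let config = "{\"transaction_selector\" : { \
            \"mentions\" : [\"*\"] \
        }}";

        let config: serde_json::Value = serde_json::from_str(config).unwrap();
        AccountsDbPluginPostgres::create_transaction_selector_from_config(&config);
    }
}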
|
@@ -1,4 +0,0 @@
|
||||
pub mod accounts_selector;
|
||||
pub mod accountsdb_plugin_postgres;
|
||||
pub mod postgres_client;
|
||||
pub mod transaction_selector;
|
@@ -1,905 +0,0 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
mod postgres_client_transaction;
|
||||
|
||||
/// A concurrent implementation for writing accounts into PostgreSQL in parallel.
|
||||
use {
|
||||
crate::accountsdb_plugin_postgres::{
|
||||
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
|
||||
},
|
||||
chrono::Utc,
|
||||
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
|
||||
log::*,
|
||||
postgres::{Client, NoTls, Statement},
|
||||
postgres_client_transaction::LogTransactionRequest,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_sdk::timing::AtomicInterval,
|
||||
std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
},
|
||||
tokio_postgres::types,
|
||||
};
|
||||
|
||||
/// The maximum number of asynchronous requests allowed in the channel, to avoid excessive
/// memory usage. The downside is that calls made after this threshold is reached can block.
|
||||
const MAX_ASYNC_REQUESTS: usize = 40960;
|
||||
const DEFAULT_POSTGRES_PORT: u16 = 5432;
|
||||
const DEFAULT_THREADS_COUNT: usize = 100;
|
||||
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
|
||||
const ACCOUNT_COLUMN_COUNT: usize = 9;
|
||||
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
|
||||
|
||||
struct PostgresSqlClientWrapper {
|
||||
client: Client,
|
||||
update_account_stmt: Statement,
|
||||
bulk_account_insert_stmt: Statement,
|
||||
update_slot_with_parent_stmt: Statement,
|
||||
update_slot_without_parent_stmt: Statement,
|
||||
update_transaction_log_stmt: Statement,
|
||||
}
|
||||
|
||||
pub struct SimplePostgresClient {
|
||||
batch_size: usize,
|
||||
pending_account_updates: Vec<DbAccountInfo>,
|
||||
client: Mutex<PostgresSqlClientWrapper>,
|
||||
}
|
||||
|
||||
struct PostgresClientWorker {
|
||||
client: SimplePostgresClient,
|
||||
/// Indicates whether account notifications during startup are done.
|
||||
is_startup_done: bool,
|
||||
}
|
||||
|
||||
impl Eq for DbAccountInfo {}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub struct DbAccountInfo {
|
||||
pub pubkey: Vec<u8>,
|
||||
pub lamports: i64,
|
||||
pub owner: Vec<u8>,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: i64,
|
||||
pub data: Vec<u8>,
|
||||
pub slot: i64,
|
||||
pub write_version: i64,
|
||||
}
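// Note (added for clarity): lamports, rent_epoch, slot and write_version are held as
// i64 because PostgreSQL's BIGINT is a signed 64-bit integer; the validator's u64
// values are converted with `as i64` casts (see the ReadableAccountInfo impls below
// and update_slot_status).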
|
||||
|
||||
pub(crate) fn abort() -> ! {
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
// standard error is usually redirected to a log file, cry for help on standard output as
|
||||
// well
|
||||
eprintln!("Validator process aborted. The validator log may contain further details");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
panic!("process::exit(1) is intercepted for friendly test failure...");
|
||||
}
|
||||
|
||||
impl DbAccountInfo {
|
||||
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
|
||||
let data = account.data().to_vec();
|
||||
Self {
|
||||
pubkey: account.pubkey().to_vec(),
|
||||
lamports: account.lamports() as i64,
|
||||
owner: account.owner().to_vec(),
|
||||
executable: account.executable(),
|
||||
rent_epoch: account.rent_epoch() as i64,
|
||||
data,
|
||||
slot: slot as i64,
|
||||
write_version: account.write_version(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ReadableAccountInfo: Sized {
|
||||
fn pubkey(&self) -> &[u8];
|
||||
fn owner(&self) -> &[u8];
|
||||
fn lamports(&self) -> i64;
|
||||
fn executable(&self) -> bool;
|
||||
fn rent_epoch(&self) -> i64;
|
||||
fn data(&self) -> &[u8];
|
||||
fn write_version(&self) -> i64;
|
||||
}
|
||||
|
||||
impl ReadableAccountInfo for DbAccountInfo {
|
||||
fn pubkey(&self) -> &[u8] {
|
||||
&self.pubkey
|
||||
}
|
||||
|
||||
fn owner(&self) -> &[u8] {
|
||||
&self.owner
|
||||
}
|
||||
|
||||
fn lamports(&self) -> i64 {
|
||||
self.lamports
|
||||
}
|
||||
|
||||
fn executable(&self) -> bool {
|
||||
self.executable
|
||||
}
|
||||
|
||||
fn rent_epoch(&self) -> i64 {
|
||||
self.rent_epoch
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
&self.data
|
||||
}
|
||||
|
||||
fn write_version(&self) -> i64 {
|
||||
self.write_version
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
|
||||
fn pubkey(&self) -> &[u8] {
|
||||
self.pubkey
|
||||
}
|
||||
|
||||
fn owner(&self) -> &[u8] {
|
||||
self.owner
|
||||
}
|
||||
|
||||
fn lamports(&self) -> i64 {
|
||||
self.lamports as i64
|
||||
}
|
||||
|
||||
fn executable(&self) -> bool {
|
||||
self.executable
|
||||
}
|
||||
|
||||
fn rent_epoch(&self) -> i64 {
|
||||
self.rent_epoch as i64
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
self.data
|
||||
}
|
||||
|
||||
fn write_version(&self) -> i64 {
|
||||
self.write_version as i64
|
||||
}
|
||||
}
|
||||
|
||||
pub trait PostgresClient {
|
||||
fn join(&mut self) -> thread::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError>;
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError>;
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
|
||||
|
||||
fn log_transaction(
|
||||
&mut self,
|
||||
transaction_log_info: LogTransactionRequest,
|
||||
) -> Result<(), AccountsDbPluginError>;
|
||||
}
|
||||
|
||||
impl SimplePostgresClient {
|
||||
fn connect_to_db(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Client, AccountsDbPluginError> {
|
||||
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
|
||||
|
||||
let connection_str = if let Some(connection_str) = &config.connection_str {
|
||||
connection_str.clone()
|
||||
} else {
|
||||
if config.host.is_none() || config.user.is_none() {
|
||||
let msg = format!(
|
||||
"\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
|
||||
config.connection_str, config.host, config.user
|
||||
);
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::ConfigurationError { msg },
|
||||
)));
|
||||
}
|
||||
format!(
|
||||
"host={} user={} port={}",
|
||||
config.host.as_ref().unwrap(),
|
||||
config.user.as_ref().unwrap(),
|
||||
port
|
||||
)
|
||||
};
|
||||
|
||||
match Client::connect(&connection_str, NoTls) {
|
||||
Err(err) => {
|
||||
let msg = format!(
|
||||
"Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
|
||||
err, connection_str
|
||||
);
|
||||
error!("{}", msg);
|
||||
Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
|
||||
)))
|
||||
}
|
||||
Ok(client) => Ok(client),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_bulk_account_insert_statement(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let batch_size = config
|
||||
.batch_size
|
||||
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
|
||||
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
|
||||
for j in 0..batch_size {
|
||||
let row = j * ACCOUNT_COLUMN_COUNT;
|
||||
let val_str = format!(
|
||||
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
|
||||
row + 1,
|
||||
row + 2,
|
||||
row + 3,
|
||||
row + 4,
|
||||
row + 5,
|
||||
row + 6,
|
||||
row + 7,
|
||||
row + 8,
|
||||
row + 9,
|
||||
);
|
||||
|
||||
if j == 0 {
|
||||
stmt = format!("{} {}", &stmt, val_str);
|
||||
} else {
|
||||
stmt = format!("{}, {}", &stmt, val_str);
|
||||
}
|
||||
}
|
||||
|
||||
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
|
||||
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
|
||||
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
|
||||
|
||||
stmt = format!("{} {}", stmt, handle_conflict);
|
||||
|
||||
info!("{}", stmt);
|
||||
let bulk_stmt = client.prepare(&stmt);
|
||||
|
||||
match bulk_stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(update_account_stmt) => Ok(update_account_stmt),
|
||||
}
|
||||
}
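// For illustration only (added; not part of the original source): with a batch_size
// of 2, the statement assembled above has this shape:
//
//   INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable,
//       rent_epoch, data, write_version, updated_on) VALUES
//   ($1, $2, $3, $4, $5, $6, $7, $8, $9), ($10, $11, $12, $13, $14, $15, $16, $17, $18)
//   ON CONFLICT (pubkey) DO UPDATE SET ... WHERE acct.slot < excluded.slot OR
//       (acct.slot = excluded.slot AND acct.write_version < excluded.write_version)
//
// i.e. one group of ACCOUNT_COLUMN_COUNT (9) placeholders per buffered account,
// followed by a single conflict clause that only overwrites rows carrying an older
// slot/write_version.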
|
||||
|
||||
fn build_single_account_upsert_statement(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
|
||||
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
|
||||
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
|
||||
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
|
||||
|
||||
let stmt = client.prepare(stmt);
|
||||
|
||||
match stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(update_account_stmt) => Ok(update_account_stmt),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_slot_upsert_statement_with_parent(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
|
||||
VALUES ($1, $2, $3, $4) \
|
||||
ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
|
||||
|
||||
let stmt = client.prepare(stmt);
|
||||
|
||||
match stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(stmt) => Ok(stmt),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_slot_upsert_statement_without_parent(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let stmt = "INSERT INTO slot (slot, status, updated_on) \
|
||||
VALUES ($1, $2, $3) \
|
||||
ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
|
||||
|
||||
let stmt = client.prepare(stmt);
|
||||
|
||||
match stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(stmt) => Ok(stmt),
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal function for updating or inserting a single account
|
||||
fn upsert_account_internal(
|
||||
account: &DbAccountInfo,
|
||||
statement: &Statement,
|
||||
client: &mut Client,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
let lamports = account.lamports() as i64;
|
||||
let rent_epoch = account.rent_epoch() as i64;
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
let result = client.query(
|
||||
statement,
|
||||
&[
|
||||
&account.pubkey(),
|
||||
&account.slot,
|
||||
&account.owner(),
|
||||
&lamports,
|
||||
&account.executable(),
|
||||
&rent_epoch,
|
||||
&account.data(),
|
||||
&account.write_version(),
|
||||
&updated_on,
|
||||
],
|
||||
);
|
||||
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{}", msg);
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update or insert a single account
|
||||
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let statement = &client.update_account_stmt;
|
||||
let client = &mut client.client;
|
||||
Self::upsert_account_internal(account, statement, client)
|
||||
}
|
||||
|
||||
/// Insert accounts in batches to reduce network overhead
|
||||
fn insert_accounts_in_batch(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
self.pending_account_updates.push(account);
|
||||
|
||||
if self.pending_account_updates.len() == self.batch_size {
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
|
||||
|
||||
let mut values: Vec<&(dyn types::ToSql + Sync)> =
|
||||
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
for j in 0..self.batch_size {
|
||||
let account = &self.pending_account_updates[j];
|
||||
|
||||
values.push(&account.pubkey);
|
||||
values.push(&account.slot);
|
||||
values.push(&account.owner);
|
||||
values.push(&account.lamports);
|
||||
values.push(&account.executable);
|
||||
values.push(&account.rent_epoch);
|
||||
values.push(&account.data);
|
||||
values.push(&account.write_version);
|
||||
values.push(&updated_on);
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-prepare-values-us",
|
||||
measure.as_us() as usize,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let result = client
|
||||
.client
|
||||
.query(&client.bulk_account_insert_stmt, &values);
|
||||
|
||||
self.pending_account_updates.clear();
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{}", msg);
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-us",
|
||||
measure.as_us() as usize,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-count",
|
||||
self.batch_size,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Flush any leftover buffered accounts that were not processed in the last full batch
|
||||
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
if self.pending_account_updates.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let statement = &client.update_account_stmt;
|
||||
let client = &mut client.client;
|
||||
|
||||
for account in self.pending_account_updates.drain(..) {
|
||||
Self::upsert_account_internal(&account, statement, client)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
info!("Creating SimplePostgresClient...");
|
||||
let mut client = Self::connect_to_db(config)?;
|
||||
let bulk_account_insert_stmt =
|
||||
Self::build_bulk_account_insert_statement(&mut client, config)?;
|
||||
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
|
||||
|
||||
let update_slot_with_parent_stmt =
|
||||
Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
|
||||
let update_slot_without_parent_stmt =
|
||||
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
|
||||
let update_transaction_log_stmt =
|
||||
Self::build_transaction_info_upsert_statement(&mut client, config)?;
|
||||
|
||||
let batch_size = config
|
||||
.batch_size
|
||||
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
|
||||
info!("Created SimplePostgresClient.");
|
||||
Ok(Self {
|
||||
batch_size,
|
||||
pending_account_updates: Vec::with_capacity(batch_size),
|
||||
client: Mutex::new(PostgresSqlClientWrapper {
|
||||
client,
|
||||
update_account_stmt,
|
||||
bulk_account_insert_stmt,
|
||||
update_slot_with_parent_stmt,
|
||||
update_slot_without_parent_stmt,
|
||||
update_transaction_log_stmt,
|
||||
}),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PostgresClient for SimplePostgresClient {
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
trace!(
|
||||
"Updating account {} with owner {} at slot {}",
|
||||
bs58::encode(account.pubkey()).into_string(),
|
||||
bs58::encode(account.owner()).into_string(),
|
||||
account.slot,
|
||||
);
|
||||
if !is_startup {
|
||||
return self.upsert_account(&account);
|
||||
}
|
||||
self.insert_accounts_in_batch(account)
|
||||
}
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
info!("Updating slot {:?} at with status {:?}", slot, status);
|
||||
|
||||
let slot = slot as i64; // postgres only supports i64
|
||||
let parent = parent.map(|parent| parent as i64);
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
let status_str = status.as_str();
|
||||
let client = self.client.get_mut().unwrap();
|
||||
|
||||
let result = match parent {
|
||||
Some(parent) => client.client.execute(
|
||||
&client.update_slot_with_parent_stmt,
|
||||
&[&slot, &parent, &status_str, &updated_on],
|
||||
),
|
||||
None => client.client.execute(
|
||||
&client.update_slot_without_parent_stmt,
|
||||
&[&slot, &status_str, &updated_on],
|
||||
),
|
||||
};
|
||||
|
||||
match result {
|
||||
Err(err) => {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{:?}", msg);
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
|
||||
}
|
||||
Ok(rows) => {
|
||||
assert_eq!(1, rows, "Expected one rows to be updated a time");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
self.flush_buffered_writes()
|
||||
}
|
||||
|
||||
fn log_transaction(
|
||||
&mut self,
|
||||
transaction_log_info: LogTransactionRequest,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
self.log_transaction_impl(transaction_log_info)
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateAccountRequest {
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
}
|
||||
|
||||
struct UpdateSlotRequest {
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
slot_status: SlotStatus,
|
||||
}
|
||||
|
||||
#[warn(clippy::large_enum_variant)]
|
||||
enum DbWorkItem {
|
||||
UpdateAccount(Box<UpdateAccountRequest>),
|
||||
UpdateSlot(Box<UpdateSlotRequest>),
|
||||
LogTransaction(Box<LogTransactionRequest>),
|
||||
}
|
||||
|
||||
impl PostgresClientWorker {
|
||||
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
let result = SimplePostgresClient::new(&config);
|
||||
match result {
|
||||
Ok(client) => Ok(PostgresClientWorker {
|
||||
client,
|
||||
is_startup_done: false,
|
||||
}),
|
||||
Err(err) => {
|
||||
error!("Error in creating SimplePostgresClient: {}", err);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn do_work(
|
||||
&mut self,
|
||||
receiver: Receiver<DbWorkItem>,
|
||||
exit_worker: Arc<AtomicBool>,
|
||||
is_startup_done: Arc<AtomicBool>,
|
||||
startup_done_count: Arc<AtomicUsize>,
|
||||
panic_on_db_errors: bool,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
while !exit_worker.load(Ordering::Relaxed) {
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
|
||||
let work = receiver.recv_timeout(Duration::from_millis(500));
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-worker-recv-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
match work {
|
||||
Ok(work) => match work {
|
||||
DbWorkItem::UpdateAccount(request) => {
|
||||
if let Err(err) = self
|
||||
.client
|
||||
.update_account(request.account, request.is_startup)
|
||||
{
|
||||
error!("Failed to update account: ({})", err);
|
||||
if panic_on_db_errors {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
DbWorkItem::UpdateSlot(request) => {
|
||||
if let Err(err) = self.client.update_slot_status(
|
||||
request.slot,
|
||||
request.parent,
|
||||
request.slot_status,
|
||||
) {
|
||||
error!("Failed to update slot: ({})", err);
|
||||
if panic_on_db_errors {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
DbWorkItem::LogTransaction(transaction_log_info) => {
|
||||
self.client.log_transaction(*transaction_log_info)?;
|
||||
}
|
||||
},
|
||||
Err(err) => match err {
|
||||
RecvTimeoutError::Timeout => {
|
||||
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
|
||||
if let Err(err) = self.client.notify_end_of_startup() {
|
||||
error!("Error in notifying end of startup: ({})", err);
|
||||
if panic_on_db_errors {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
self.is_startup_done = true;
|
||||
startup_done_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
error!("Error in receiving the item {:?}", err);
|
||||
if panic_on_db_errors {
|
||||
abort();
|
||||
}
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
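// Note (added for clarity): the worker loop above uses the 500 ms recv_timeout as an
// idle hook. Once the plugin sets is_startup_done and the channel goes quiet, each
// worker flushes its buffered startup accounts once via notify_end_of_startup() and
// increments startup_done_count, which ParallelPostgresClient::notify_end_of_startup()
// below polls until it equals initialized_worker_count.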
|
||||
pub struct ParallelPostgresClient {
|
||||
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
|
||||
exit_worker: Arc<AtomicBool>,
|
||||
is_startup_done: Arc<AtomicBool>,
|
||||
startup_done_count: Arc<AtomicUsize>,
|
||||
initialized_worker_count: Arc<AtomicUsize>,
|
||||
sender: Sender<DbWorkItem>,
|
||||
last_report: AtomicInterval,
|
||||
}
|
||||
|
||||
impl ParallelPostgresClient {
|
||||
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
info!("Creating ParallelPostgresClient...");
|
||||
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
|
||||
let exit_worker = Arc::new(AtomicBool::new(false));
|
||||
let mut workers = Vec::default();
|
||||
let is_startup_done = Arc::new(AtomicBool::new(false));
|
||||
let startup_done_count = Arc::new(AtomicUsize::new(0));
|
||||
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
|
||||
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
|
||||
for i in 0..worker_count {
|
||||
let cloned_receiver = receiver.clone();
|
||||
let exit_clone = exit_worker.clone();
|
||||
let is_startup_done_clone = is_startup_done.clone();
|
||||
let startup_done_count_clone = startup_done_count.clone();
|
||||
let initialized_worker_count_clone = initialized_worker_count.clone();
|
||||
let config = config.clone();
|
||||
let worker = Builder::new()
|
||||
.name(format!("worker-{}", i))
|
||||
.spawn(move || -> Result<(), AccountsDbPluginError> {
|
||||
let panic_on_db_errors = *config
|
||||
.panic_on_db_errors
|
||||
.as_ref()
|
||||
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
|
||||
let result = PostgresClientWorker::new(config);
|
||||
|
||||
match result {
|
||||
Ok(mut worker) => {
|
||||
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
|
||||
worker.do_work(
|
||||
cloned_receiver,
|
||||
exit_clone,
|
||||
is_startup_done_clone,
|
||||
startup_done_count_clone,
|
||||
panic_on_db_errors,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Error when making connection to database: ({})", err);
|
||||
if panic_on_db_errors {
|
||||
abort();
|
||||
}
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
workers.push(worker);
|
||||
}
|
||||
|
||||
info!("Created ParallelPostgresClient.");
|
||||
Ok(Self {
|
||||
last_report: AtomicInterval::default(),
|
||||
workers,
|
||||
exit_worker,
|
||||
is_startup_done,
|
||||
startup_done_count,
|
||||
initialized_worker_count,
|
||||
sender,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
self.exit_worker.store(true, Ordering::Relaxed);
|
||||
while !self.workers.is_empty() {
|
||||
let worker = self.workers.pop();
|
||||
if worker.is_none() {
|
||||
break;
|
||||
}
|
||||
let worker = worker.unwrap();
|
||||
let result = worker.join().unwrap();
|
||||
if result.is_err() {
|
||||
error!("The worker thread has failed: {:?}", result);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_account(
|
||||
&mut self,
|
||||
account: &ReplicaAccountInfo,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
if self.last_report.should_update(30000) {
|
||||
datapoint_debug!(
|
||||
"postgres-plugin-stats",
|
||||
("message-queue-length", self.sender.len() as i64, i64),
|
||||
);
|
||||
}
|
||||
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
|
||||
let wrk_item = DbWorkItem::UpdateAccount(Box::new(UpdateAccountRequest {
|
||||
account: DbAccountInfo::new(account, slot),
|
||||
is_startup,
|
||||
}));
|
||||
|
||||
measure.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-posgres-create-work-item-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
|
||||
|
||||
if let Err(err) = self.sender.send(wrk_item) {
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
||||
msg: format!(
|
||||
"Failed to update the account {:?}, error: {:?}",
|
||||
bs58::encode(account.pubkey()).into_string(),
|
||||
err
|
||||
),
|
||||
});
|
||||
}
|
||||
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-posgres-send-msg-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
if let Err(err) = self
|
||||
.sender
|
||||
.send(DbWorkItem::UpdateSlot(Box::new(UpdateSlotRequest {
|
||||
slot,
|
||||
parent,
|
||||
slot_status: status,
|
||||
})))
|
||||
{
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError {
|
||||
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
info!("Notifying the end of startup");
|
||||
// Ensure all items in the queue has been received by the workers
|
||||
while !self.sender.is_empty() {
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
self.is_startup_done.store(true, Ordering::Relaxed);
|
||||
|
||||
// Wait for all worker threads to be done with flushing
|
||||
while self.startup_done_count.load(Ordering::Relaxed)
|
||||
!= self.initialized_worker_count.load(Ordering::Relaxed)
|
||||
{
|
||||
info!(
|
||||
"Startup done count: {}, good worker thread count: {}",
|
||||
self.startup_done_count.load(Ordering::Relaxed),
|
||||
self.initialized_worker_count.load(Ordering::Relaxed)
|
||||
);
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
|
||||
info!("Done with notifying the end of startup");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PostgresClientBuilder {}
|
||||
|
||||
impl PostgresClientBuilder {
|
||||
pub fn build_pararallel_postgres_client(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
|
||||
ParallelPostgresClient::new(config)
|
||||
}
|
||||
|
||||
pub fn build_simple_postgres_client(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
|
||||
SimplePostgresClient::new(config)
|
||||
}
|
||||
}
|
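Illustrative sketch only, not part of this diff: the client above fans work out to worker threads over a bounded channel and then drains the queue before exiting. The following standalone Rust sketch shows that same pattern with plain types, assuming only the crossbeam-channel crate; the item type and channel capacity here are made up for the example.

// Sketch of the bounded-channel worker pattern used by ParallelPostgresClient.
use {
    crossbeam_channel::{bounded, RecvTimeoutError},
    std::{
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc,
        },
        thread::Builder,
        time::Duration,
    },
};

fn main() {
    let (sender, receiver) = bounded::<String>(1024);
    let exit = Arc::new(AtomicBool::new(false));

    let exit_clone = exit.clone();
    let worker = Builder::new()
        .name("worker-0".to_string())
        .spawn(move || {
            // Poll with a timeout so the exit flag is re-checked periodically,
            // mirroring PostgresClientWorker::do_work above.
            while !exit_clone.load(Ordering::Relaxed) {
                match receiver.recv_timeout(Duration::from_millis(500)) {
                    Ok(item) => println!("processed: {}", item),
                    Err(RecvTimeoutError::Timeout) => continue,
                    Err(err) => {
                        eprintln!("receive error: {:?}", err);
                        break;
                    }
                }
            }
        })
        .unwrap();

    sender.send("account update".to_string()).unwrap();

    // Drain the queue, then signal exit and join, mirroring
    // notify_end_of_startup() and join() above.
    while !sender.is_empty() {
        std::thread::sleep(Duration::from_millis(10));
    }
    exit.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}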
File diff suppressed because it is too large
@@ -1,194 +0,0 @@
/// The transaction selector is responsible for filtering transactions
/// in the plugin framework.
use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet};

pub(crate) struct TransactionSelector {
    pub mentioned_addresses: HashSet<Vec<u8>>,
    pub select_all_transactions: bool,
    pub select_all_vote_transactions: bool,
}

#[allow(dead_code)]
impl TransactionSelector {
    pub fn default() -> Self {
        Self {
            mentioned_addresses: HashSet::default(),
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Create a selector based on the mentioned addresses
    /// To select all transactions use ["*"] or ["all"]
    /// To select all vote transactions, use ["all_votes"]
    /// To select transactions mentioning specific addresses use ["<pubkey1>", "<pubkey2>", ...]
    pub fn new(mentioned_addresses: &[String]) -> Self {
        info!(
            "Creating TransactionSelector from addresses: {:?}",
            mentioned_addresses
        );

        let select_all_transactions = mentioned_addresses
            .iter()
            .any(|key| key == "*" || key == "all");
        if select_all_transactions {
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions,
                select_all_vote_transactions: true,
            };
        }
        let select_all_vote_transactions = mentioned_addresses.iter().any(|key| key == "all_votes");
        if select_all_vote_transactions {
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions,
                select_all_vote_transactions: true,
            };
        }

        let mentioned_addresses = mentioned_addresses
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();

        Self {
            mentioned_addresses,
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Check if a transaction is of interest.
    pub fn is_transaction_selected(
        &self,
        is_vote: bool,
        mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>,
    ) -> bool {
        if !self.is_enabled() {
            return false;
        }

        if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) {
            return true;
        }
        for address in mentioned_addresses {
            if self.mentioned_addresses.contains(address.as_ref()) {
                return true;
            }
        }
        false
    }

    /// Check if any transaction is of interest at all
    pub fn is_enabled(&self) -> bool {
        self.select_all_transactions
            || self.select_all_vote_transactions
            || !self.mentioned_addresses.is_empty()
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_select_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&[pubkey1.to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_transaction_using_wildcard() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["*".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_transaction_all() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["all".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_vote_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["all_votes".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_no_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&[]);

        assert!(!selector.is_enabled());

        let addresses = [pubkey1];

        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
    }
}
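Illustrative sketch only, not part of this diff: the selector above stores bs58-decoded pubkeys as byte vectors and matches a transaction when any mentioned address is in that set. A minimal standalone sketch of that matching rule, with plain byte vectors standing in for Solana Pubkeys:

// Sketch of the address-matching rule used by TransactionSelector.
use std::collections::HashSet;

fn select(watched: &HashSet<Vec<u8>>, tx_addresses: &[Vec<u8>]) -> bool {
    tx_addresses.iter().any(|addr| watched.contains(addr))
}

fn main() {
    // In the real selector these are bs58-decoded pubkey strings from the config.
    let watched: HashSet<Vec<u8>> = [vec![1u8; 32]].into_iter().collect();

    assert!(select(&watched, &[vec![1u8; 32], vec![2u8; 32]]));
    assert!(!select(&watched, &[vec![2u8; 32]]));
    println!("selection behaves as in the tests above");
}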
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
|
||||
log = "0.4.14"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.1"
|
||||
solana-core = { path = "../core", version = "=1.9.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.0" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.0" }
|
||||
solana-poh = { path = "../poh", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-core = { path = "../core", version = "=1.9.13" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.13" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.13" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.13" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.13" }
|
||||
solana-poh = { path = "../poh", version = "=1.9.13" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -11,9 +11,10 @@ use {
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_perf::packet::to_packets_chunked,
|
||||
solana_perf::packet::to_packet_batches,
|
||||
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
|
||||
solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
||||
@@ -212,14 +213,19 @@ fn main() {
|
||||
bank.clear_signatures();
|
||||
}
|
||||
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
|
||||
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(
|
||||
&bank,
|
||||
&blockstore,
|
||||
None,
|
||||
Some(leader_schedule_cache.clone()),
|
||||
);
|
||||
let cluster_info = ClusterInfo::new(
|
||||
Node::new_localhost().info,
|
||||
Arc::new(Keypair::new()),
|
||||
@@ -332,6 +338,7 @@ fn main() {
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
assert!(poh_recorder.lock().unwrap().bank().is_some());
|
||||
if bank.slot() > 32 {
|
||||
leader_schedule_cache.set_root(&bank);
|
||||
bank_forks.set_root(root, &AbsRequestSender::default(), None);
|
||||
root += 1;
|
||||
}
|
||||
@@ -364,7 +371,7 @@ fn main() {
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
||||
tx.signatures[0] = Signature::new(&sig[0..64]);
|
||||
}
|
||||
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
|
||||
}
|
||||
|
||||
start += chunk_len;
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-client"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
description = "Solana banks client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,16 +12,17 @@ edition = "2021"
|
||||
[dependencies]
|
||||
borsh = "0.9.1"
|
||||
futures = "0.3"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
tarpc = { version = "0.26.2", features = ["full"] }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.9.13" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.9.13" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
73
banks-client/src/error.rs
Normal file
@@ -0,0 +1,73 @@
use {
    solana_sdk::{transaction::TransactionError, transport::TransportError},
    std::io,
    tarpc::client::RpcError,
    thiserror::Error,
};

/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
    #[error("client error: {0}")]
    ClientError(&'static str),

    #[error(transparent)]
    Io(#[from] io::Error),

    #[error(transparent)]
    RpcError(#[from] RpcError),

    #[error("transport transaction error: {0}")]
    TransactionError(#[from] TransactionError),

    #[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
    SimulationError {
        err: TransactionError,
        logs: Vec<String>,
        units_consumed: u64,
    },
}

impl BanksClientError {
    pub fn unwrap(&self) -> TransactionError {
        match self {
            BanksClientError::TransactionError(err)
            | BanksClientError::SimulationError { err, .. } => err.clone(),
            _ => panic!("unexpected transport error"),
        }
    }
}

impl From<BanksClientError> for io::Error {
    fn from(err: BanksClientError) -> Self {
        match err {
            BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
            BanksClientError::Io(err) => err,
            BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
            BanksClientError::TransactionError(err) => {
                Self::new(io::ErrorKind::Other, err.to_string())
            }
            BanksClientError::SimulationError { err, .. } => {
                Self::new(io::ErrorKind::Other, err.to_string())
            }
        }
    }
}

impl From<BanksClientError> for TransportError {
    fn from(err: BanksClientError) -> Self {
        match err {
            BanksClientError::ClientError(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::Io(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::RpcError(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::TransactionError(err) => Self::TransactionError(err),
            BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
        }
    }
}
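Illustrative sketch only, not part of this diff: BanksClientError relies on thiserror plus manual From impls so that `?` can move errors into io::Error or TransportError. The same pattern in miniature, assuming only the thiserror crate; the enum and function names here are invented for the example:

// Sketch of the thiserror + From conversion pattern used by BanksClientError.
use {std::io, thiserror::Error};

#[derive(Error, Debug)]
pub enum MiniClientError {
    #[error("client error: {0}")]
    ClientError(&'static str),

    #[error(transparent)]
    Io(#[from] io::Error),
}

impl From<MiniClientError> for io::Error {
    fn from(err: MiniClientError) -> Self {
        match err {
            // Preserve the original io::Error, stringify everything else,
            // just as the BanksClientError conversion above does.
            MiniClientError::Io(err) => err,
            other => Self::new(io::ErrorKind::Other, other.to_string()),
        }
    }
}

// With the From impl in place, `?` converts errors across the two types.
fn read_config() -> Result<String, MiniClientError> {
    Err(MiniClientError::ClientError("config not present"))
}

fn load() -> io::Result<String> {
    Ok(read_config()?)
}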
@@ -5,11 +5,12 @@
|
||||
//! but they are undocumented, may change over time, and are generally more
|
||||
//! cumbersome to use.
|
||||
|
||||
pub use crate::error::BanksClientError;
|
||||
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||
use {
|
||||
borsh::BorshDeserialize,
|
||||
futures::{future::join_all, Future, FutureExt},
|
||||
solana_banks_interface::{BanksRequest, BanksResponse},
|
||||
futures::{future::join_all, Future, FutureExt, TryFutureExt},
|
||||
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
|
||||
solana_program::{
|
||||
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||
rent::Rent, sysvar::Sysvar,
|
||||
@@ -22,7 +23,7 @@ use {
|
||||
transaction::{self, Transaction},
|
||||
transport,
|
||||
},
|
||||
std::io::{self, Error, ErrorKind},
|
||||
std::io,
|
||||
tarpc::{
|
||||
client::{self, NewClient, RequestDispatch},
|
||||
context::{self, Context},
|
||||
@@ -33,6 +34,8 @@ use {
|
||||
tokio_serde::formats::Bincode,
|
||||
};
|
||||
|
||||
mod error;
|
||||
|
||||
// This exists only for backward compatibility
|
||||
pub trait BanksClientExt {}
|
||||
|
||||
@@ -58,7 +61,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
self.inner.send_transaction_with_context(ctx, transaction)
|
||||
self.inner
|
||||
.send_transaction_with_context(ctx, transaction)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
@@ -73,6 +79,8 @@ impl BanksClient {
|
||||
#[allow(deprecated)]
|
||||
self.inner
|
||||
.get_fees_with_commitment_and_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_transaction_status_with_context(
|
||||
@@ -82,6 +90,8 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
self.inner
|
||||
.get_transaction_status_with_context(ctx, signature)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_slot_with_context(
|
||||
@@ -89,7 +99,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_slot_with_context(ctx, commitment)
|
||||
self.inner
|
||||
.get_slot_with_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_block_height_with_context(
|
||||
@@ -97,7 +110,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_block_height_with_context(ctx, commitment)
|
||||
self.inner
|
||||
.get_block_height_with_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_commitment_and_context(
|
||||
@@ -108,6 +124,24 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
|
||||
self.inner
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_preflight_and_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
|
||||
{
|
||||
self.inner
|
||||
.process_transaction_with_preflight_and_commitment_and_context(
|
||||
ctx,
|
||||
transaction,
|
||||
commitment,
|
||||
)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_account_with_commitment_and_context(
|
||||
@@ -118,6 +152,8 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
self.inner
|
||||
.get_account_with_commitment_and_context(ctx, address, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Send a transaction and return immediately. The server will resend the
|
||||
@@ -148,9 +184,13 @@ impl BanksClient {
|
||||
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
self.get_account(T::id()).map(|result| {
|
||||
let sysvar = result?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
|
||||
.ok_or(BanksClientError::ClientError("Sysvar not present"))
|
||||
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
|
||||
from_account::<T, _>(&sysvar)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
|
||||
.ok_or(BanksClientError::ClientError(
|
||||
"Failed to deserialize sysvar",
|
||||
))
|
||||
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
|
||||
})
|
||||
}
|
||||
|
||||
@@ -164,7 +204,8 @@ impl BanksClient {
|
||||
/// method to get both a blockhash and the blockhash's last valid slot.
|
||||
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
|
||||
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
|
||||
self.get_latest_blockhash()
|
||||
#[allow(deprecated)]
|
||||
self.get_fees().map(|result| Ok(result?.1))
|
||||
}
|
||||
|
||||
/// Send a transaction and return after the transaction has been rejected or
|
||||
@@ -178,11 +219,60 @@ impl BanksClient {
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map(|result| match result? {
|
||||
None => {
|
||||
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
|
||||
}
|
||||
None => Err(BanksClientError::ClientError(
|
||||
"invalid blockhash or fee-payer",
|
||||
)),
|
||||
Some(transaction_result) => Ok(transaction_result?),
|
||||
})
|
||||
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
|
||||
}
|
||||
|
||||
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
|
||||
/// after the transaction has been rejected or reached the given level of commitment.
|
||||
pub fn process_transaction_with_preflight_and_commitment(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
let mut ctx = context::current();
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
self.process_transaction_with_preflight_and_commitment_and_context(
|
||||
ctx,
|
||||
transaction,
|
||||
commitment,
|
||||
)
|
||||
.map(|result| match result? {
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: None,
|
||||
simulation_details: _,
|
||||
} => Err(BanksClientError::ClientError(
|
||||
"invalid blockhash or fee-payer",
|
||||
)),
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: Some(simulation_details),
|
||||
} => Err(BanksClientError::SimulationError {
|
||||
err,
|
||||
logs: simulation_details.logs,
|
||||
units_consumed: simulation_details.units_consumed,
|
||||
}),
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: Some(result),
|
||||
simulation_details: _,
|
||||
} => result.map_err(Into::into),
|
||||
})
|
||||
}
|
||||
|
||||
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
|
||||
/// after the transaction has been finalized or rejected.
|
||||
pub fn process_transaction_with_preflight(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.process_transaction_with_preflight_and_commitment(
|
||||
transaction,
|
||||
CommitmentLevel::default(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Send a transaction and return until the transaction has been finalized or rejected.
|
||||
@@ -255,10 +345,12 @@ impl BanksClient {
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
self.get_account(address).map(|result| {
|
||||
let account =
|
||||
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
|
||||
let account = result?
|
||||
.ok_or(BanksClientError::ClientError("Account not found"))
|
||||
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
|
||||
T::unpack_from_slice(&account.data)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
|
||||
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
|
||||
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
|
||||
})
|
||||
}
|
||||
|
||||
@@ -269,9 +361,8 @@ impl BanksClient {
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
self.get_account(address).map(|result| {
|
||||
let account =
|
||||
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
|
||||
T::try_from_slice(&account.data)
|
||||
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
|
||||
T::try_from_slice(&account.data).map_err(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -330,7 +421,8 @@ impl BanksClient {
|
||||
.map(|result| {
|
||||
result?
|
||||
.map(|x| x.0)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
|
||||
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
|
||||
.map_err(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -348,6 +440,8 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
|
||||
self.inner
|
||||
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_fee_for_message_with_commitment_and_context(
|
||||
@@ -358,6 +452,8 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
|
||||
self.inner
|
||||
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -399,7 +495,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_banks_server_transfer_via_server() -> io::Result<()> {
|
||||
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
|
||||
// This test shows the preferred way to interact with BanksServer.
|
||||
// It creates a runtime explicitly (no globals via tokio macros) and calls
|
||||
// `runtime.block_on()` just once, to run all the async code.
|
||||
@@ -432,7 +528,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_banks_server_transfer_via_client() -> io::Result<()> {
|
||||
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
|
||||
// The caller may not want to hold the connection open until the transaction
|
||||
// is processed (or blockhash expires). In this test, we verify the
|
||||
// server-side functionality is available to the client.
|
||||
|
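Illustrative sketch only, not part of this diff: the test comments above describe the preferred way to drive BanksClient, creating a runtime explicitly (no globals via tokio macros) and calling block_on just once. A minimal standalone sketch of that pattern, assuming the tokio crate with its runtime features enabled; the async function here is a stand-in, not a real BanksClient call:

// Sketch of the explicit-runtime, single block_on pattern.
use tokio::runtime::Runtime;

async fn transfer() -> Result<u64, String> {
    // Stand-in for the async client calls made inside the real tests.
    Ok(42)
}

fn main() -> Result<(), String> {
    let runtime = Runtime::new().map_err(|err| err.to_string())?;
    // All async work happens inside this single block_on call.
    let lamports = runtime.block_on(transfer())?;
    println!("transferred {} lamports", lamports);
    Ok(())
}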
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-interface"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
description = "Solana banks RPC interface"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -11,8 +11,8 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0.130", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
tarpc = { version = "0.26.2", features = ["full"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -30,6 +30,19 @@ pub struct TransactionStatus {
    pub confirmation_status: Option<TransactionConfirmationStatus>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
    pub logs: Vec<String>,
    pub units_consumed: u64,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
    pub result: Option<transaction::Result<()>>,
    pub simulation_details: Option<TransactionSimulationDetails>,
}

#[tarpc::service]
pub trait Banks {
    async fn send_transaction_with_context(transaction: Transaction);
@@ -44,6 +57,10 @@ pub trait Banks {
        -> Option<TransactionStatus>;
    async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
    async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
    async fn process_transaction_with_preflight_and_commitment_and_context(
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> BanksTransactionResultWithSimulation;
    async fn process_transaction_with_commitment_and_context(
        transaction: Transaction,
        commitment: CommitmentLevel,
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-server"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
description = "Solana banks server"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,11 +12,11 @@ edition = "2021"
|
||||
[dependencies]
|
||||
bincode = "1.3.3"
|
||||
futures = "0.3"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
|
||||
tarpc = { version = "0.26.2", features = ["full"] }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.9.13" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.13" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
tokio-stream = "0.1"
|
||||
|
@@ -2,9 +2,14 @@ use {
|
||||
bincode::{deserialize, serialize},
|
||||
futures::{future, prelude::stream::StreamExt},
|
||||
solana_banks_interface::{
|
||||
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
|
||||
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
|
||||
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
|
||||
},
|
||||
solana_runtime::{
|
||||
bank::{Bank, TransactionSimulationResult},
|
||||
bank_forks::BankForks,
|
||||
commitment::BlockCommitmentCache,
|
||||
},
|
||||
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
|
||||
solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
@@ -15,7 +20,7 @@ use {
|
||||
message::{Message, SanitizedMessage},
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction},
|
||||
transaction::{self, SanitizedTransaction, Transaction},
|
||||
},
|
||||
solana_send_transaction_service::{
|
||||
send_transaction_service::{SendTransactionService, TransactionInfo},
|
||||
@@ -35,7 +40,7 @@ use {
|
||||
tarpc::{
|
||||
context::Context,
|
||||
serde_transport::tcp,
|
||||
server::{self, Channel, Incoming},
|
||||
server::{self, incoming::Incoming, Channel},
|
||||
transport::{self, channel::UnboundedChannel},
|
||||
ClientMessage, Response,
|
||||
},
|
||||
@@ -242,6 +247,47 @@ impl Banks for BanksServer {
|
||||
self.bank(commitment).block_height()
|
||||
}
|
||||
|
||||
async fn process_transaction_with_preflight_and_commitment_and_context(
|
||||
self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> BanksTransactionResultWithSimulation {
|
||||
let sanitized_transaction =
|
||||
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
|
||||
Err(err) => {
|
||||
return BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: None,
|
||||
};
|
||||
}
|
||||
Ok(tx) => tx,
|
||||
};
|
||||
if let TransactionSimulationResult {
|
||||
result: Err(err),
|
||||
logs,
|
||||
post_simulation_accounts: _,
|
||||
units_consumed,
|
||||
} = self
|
||||
.bank(commitment)
|
||||
.simulate_transaction_unchecked(sanitized_transaction)
|
||||
{
|
||||
return BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: Some(TransactionSimulationDetails {
|
||||
logs,
|
||||
units_consumed,
|
||||
}),
|
||||
};
|
||||
}
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: self
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.await,
|
||||
simulation_details: None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_transaction_with_commitment_and_context(
|
||||
self,
|
||||
_: Context,
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -10,11 +10,11 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -2,8 +2,8 @@
|
||||
use {
|
||||
clap::{crate_description, crate_name, App, Arg},
|
||||
solana_streamer::{
|
||||
packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE},
|
||||
streamer::{receiver, PacketReceiver},
|
||||
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
|
||||
streamer::{receiver, PacketBatchReceiver},
|
||||
},
|
||||
std::{
|
||||
cmp::max,
|
||||
@@ -20,19 +20,19 @@ use {
|
||||
|
||||
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
||||
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut msgs = Packets::default();
|
||||
msgs.packets.resize(10, Packet::default());
|
||||
for w in msgs.packets.iter_mut() {
|
||||
let mut packet_batch = PacketBatch::default();
|
||||
packet_batch.packets.resize(10, Packet::default());
|
||||
for w in packet_batch.packets.iter_mut() {
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(addr);
|
||||
}
|
||||
let msgs = Arc::new(msgs);
|
||||
let packet_batch = Arc::new(packet_batch);
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let mut num = 0;
|
||||
for p in &msgs.packets {
|
||||
for p in &packet_batch.packets {
|
||||
let a = p.meta.addr();
|
||||
assert!(p.meta.size <= PACKET_DATA_SIZE);
|
||||
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
||||
@@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
||||
})
|
||||
}
|
||||
|
||||
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
|
||||
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let timer = Duration::new(1, 0);
|
||||
if let Ok(msgs) = r.recv_timeout(timer) {
|
||||
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
|
||||
if let Ok(packet_batch) = r.recv_timeout(timer) {
|
||||
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -81,7 +81,7 @@ fn main() -> Result<()> {
|
||||
|
||||
let mut read_channels = Vec::new();
|
||||
let mut read_threads = Vec::new();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let recycler = PacketBatchRecycler::default();
|
||||
for _ in 0..num_sockets {
|
||||
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
|
||||
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,23 +14,23 @@ log = "0.4.14"
|
||||
rayon = "1.5.1"
|
||||
serde_json = "1.0.72"
|
||||
serde_yaml = "0.8.21"
|
||||
solana-core = { path = "../core", version = "=1.9.0" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.9.0" }
|
||||
solana-client = { path = "../client", version = "=1.9.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-core = { path = "../core", version = "=1.9.13" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.9.13" }
|
||||
solana-client = { path = "../client", version = "=1.9.13" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.13" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.13" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.13" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.1"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.9.13" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -21,7 +21,7 @@ pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
||||
|
||||
fn main() {
|
||||
solana_logger::setup_with_default("solana=info");
|
||||
solana_metrics::set_panic_hook("bench-tps");
|
||||
solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
|
||||
|
||||
let matches = cli::build_args(solana_version::version!()).get_matches();
|
||||
let cli_config = cli::extract_args(&matches);
|
||||
|
@@ -31,7 +31,7 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: make_identical_validator_configs(
|
||||
&ValidatorConfig::default(),
|
||||
&ValidatorConfig::default_for_test(),
|
||||
NUM_NODES,
|
||||
),
|
||||
native_instruction_processors,
|
||||
|
32
bloom/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
||||
[package]
|
||||
name = "solana-bloom"
|
||||
version = "1.9.13"
|
||||
description = "Solana bloom filter"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-bloom"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bv = { version = "0.11.1", features = ["serde"] }
|
||||
fnv = "1.0.7"
|
||||
rand = "0.7.0"
|
||||
serde = { version = "1.0.133", features = ["rc"] }
|
||||
rayon = "1.5.1"
|
||||
serde_derive = "1.0.103"
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.13" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
log = "0.4.14"
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
name = "solana_bloom"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
||||
[build-dependencies]
|
||||
rustc_version = "0.4"
|
@@ -5,7 +5,7 @@ use {
|
||||
bv::BitVec,
|
||||
fnv::FnvHasher,
|
||||
rand::Rng,
|
||||
solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex},
|
||||
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
|
||||
solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::Signature,
|
1
bloom/build.rs
Symbolic link
@@ -0,0 +1 @@
|
||||
../frozen-abi/build.rs
|
@@ -101,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
}
|
||||
}
|
||||
fn pos(&self, key: &T, k: u64) -> u64 {
|
||||
key.hash_at_index(k) % self.bits.len()
|
||||
key.hash_at_index(k).wrapping_rem(self.bits.len())
|
||||
}
|
||||
pub fn clear(&mut self) {
|
||||
self.bits = BitVec::new_fill(false, self.bits.len());
|
||||
@@ -111,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
for k in &self.keys {
|
||||
let pos = self.pos(key, *k);
|
||||
if !self.bits.get(pos) {
|
||||
self.num_bits_set += 1;
|
||||
self.num_bits_set = self.num_bits_set.saturating_add(1);
|
||||
self.bits.set(pos, true);
|
||||
}
|
||||
}
|
||||
@@ -164,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {
|
||||
|
||||
impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
|
||||
let pos = key.hash_at_index(hash_index) % self.num_bits;
|
||||
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
|
||||
// Divide by 64 to figure out which of the
|
||||
// AtomicU64 bit chunks we need to modify.
|
||||
let index = pos >> 6;
|
||||
let index = pos.wrapping_shr(6);
|
||||
// (pos & 63) is equivalent to mod 64 so that we can find
|
||||
// the index of the bit within the AtomicU64 to modify.
|
||||
let mask = 1u64 << (pos & 63);
|
||||
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
|
||||
(index as usize, mask)
|
||||
}
|
||||
|
||||
pub fn add(&self, key: &T) {
|
||||
/// Adds an item to the bloom filter and returns true if the item
|
||||
/// was not in the filter before.
|
||||
pub fn add(&self, key: &T) -> bool {
|
||||
let mut added = false;
|
||||
for k in &self.keys {
|
||||
let (index, mask) = self.pos(key, *k);
|
||||
self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
added = added || prev_val & mask == 0u64;
|
||||
}
|
||||
added
|
||||
}
|
||||
|
||||
pub fn contains(&self, key: &T) -> bool {
|
||||
@@ -189,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn clear_for_tests(&mut self) {
|
||||
self.bits.iter().for_each(|bit| {
|
||||
bit.store(0u64, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
@@ -320,7 +331,9 @@ mod test {
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.num_bits, 6168);
|
||||
assert_eq!(bloom.bits.len(), 97);
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
let bloom: Bloom<Hash> = bloom.into();
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.bits.len(), 6168);
|
||||
@@ -362,7 +375,9 @@ mod test {
|
||||
}
|
||||
// Round trip, re-inserting the same hash values.
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
||||
@@ -380,7 +395,9 @@ mod test {
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
assert_eq!(bloom.num_bits, 9731);
|
||||
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
|
||||
more_hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
more_hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
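Illustrative sketch only, not part of this diff: the AtomicBloom change above computes a word index with a shift by 6 and a bit mask from the low 6 bits, and add() now reports whether the bit was newly set. A standalone sketch of that arithmetic on a plain AtomicU64 array; the numbers are taken from the test values above, the function name is invented:

// Sketch of the AtomicU64 index/mask arithmetic used by AtomicBloom::pos()/add().
use std::sync::atomic::{AtomicU64, Ordering};

fn set_bit(bits: &[AtomicU64], num_bits: u64, hash: u64) -> bool {
    let pos = hash.wrapping_rem(num_bits);
    // Divide by 64 to pick the AtomicU64 word, mod 64 to pick the bit within it.
    let index = pos.wrapping_shr(6) as usize;
    let mask = 1u64.wrapping_shl((pos & 63) as u32);
    let prev = bits[index].fetch_or(mask, Ordering::Relaxed);
    // True when the bit was not set before, matching the new add() return value.
    prev & mask == 0
}

fn main() {
    let num_bits = 9731u64;
    let bits: Vec<AtomicU64> = (0..(num_bits + 63) / 64).map(|_| AtomicU64::new(0)).collect();

    assert!(set_bit(&bits, num_bits, 12345)); // newly inserted
    assert!(!set_bit(&bits, num_bits, 12345)); // already present
    println!("bit-set semantics match the AtomicBloom change above");
}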
5
bloom/src/lib.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
|
||||
pub mod bloom;
|
||||
|
||||
#[macro_use]
|
||||
extern crate solana_frozen_abi_macro;
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bucket-map"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
description = "solana-bucket-map"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-bucket-map"
|
||||
@@ -12,11 +12,11 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
memmap2 = "0.5.0"
|
||||
log = { version = "0.4.11" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.13" }
|
||||
rand = "0.7.0"
|
||||
fs_extra = "1.2.0"
|
||||
tempfile = "3.2.0"
|
||||
|
@@ -9,5 +9,8 @@ for a in "$@"; do
|
||||
fi
|
||||
done
|
||||
|
||||
set -x
|
||||
set -ex
|
||||
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
|
||||
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
|
||||
fi
|
||||
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"
|
||||
|
@@ -102,6 +102,8 @@ command_step() {
|
||||
command: "$2"
|
||||
timeout_in_minutes: $3
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=solana"
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -168,7 +170,7 @@ all_test_steps() {
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "bpf-dumps.tar.bz2"
|
||||
agents:
|
||||
- "queue=default"
|
||||
- "queue=solana"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
@@ -221,11 +223,26 @@ EOF
|
||||
- command: "scripts/build-downstream-projects.sh"
|
||||
name: "downstream-projects"
|
||||
timeout_in_minutes: 30
|
||||
agents:
|
||||
- "queue=solana"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"downstream-projects skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Wasm support
|
||||
if affects \
|
||||
^ci/test-wasm.sh \
|
||||
^ci/test-stable.sh \
|
||||
^sdk/ \
|
||||
; then
|
||||
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
|
||||
else
|
||||
annotate --style info \
|
||||
"wasm skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Benches...
|
||||
if affects \
|
||||
.rs$ \
|
||||
@@ -243,7 +260,15 @@ EOF
|
||||
|
||||
command_step "local-cluster" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||
50
|
||||
40
|
||||
|
||||
command_step "local-cluster-flakey" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
|
||||
10
|
||||
|
||||
command_step "local-cluster-slow" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
|
||||
30
|
||||
}
|
||||
|
||||
pull_or_push_steps() {
|
||||
|
@@ -16,6 +16,11 @@ steps:
|
||||
- command: "ci/publish-crate.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 240
|
||||
timeout_in_minutes: 360
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build-aarch64-apple-darwin"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball (aarch64-apple-darwin)"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
FROM solanalabs/rust:1.56.1
|
||||
FROM solanalabs/rust:1.57.0
|
||||
ARG date
|
||||
|
||||
RUN set -x \
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/rust-version.sh to pick up the new image tag
|
||||
FROM rust:1.56.1
|
||||
FROM rust:1.57.0
|
||||
|
||||
# Add Google Protocol Buffers for Libra's metrics library.
|
||||
ENV PROTOC_VERSION 3.8.0
|
||||
@@ -11,6 +11,7 @@ RUN set -x \
|
||||
&& apt-get install apt-transport-https \
|
||||
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
|
||||
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
|
||||
&& curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
|
||||
&& apt update \
|
||||
&& apt install -y \
|
||||
buildkite-agent \
|
||||
@@ -19,15 +20,20 @@ RUN set -x \
|
||||
lcov \
|
||||
libudev-dev \
|
||||
mscgen \
|
||||
nodejs \
|
||||
net-tools \
|
||||
rsync \
|
||||
sudo \
|
||||
golang \
|
||||
unzip \
|
||||
\
|
||||
&& apt remove -y libcurl4-openssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& node --version \
|
||||
&& npm --version \
|
||||
&& rustup component add rustfmt \
|
||||
&& rustup component add clippy \
|
||||
&& rustup target add wasm32-unknown-unknown \
|
||||
&& cargo install cargo-audit \
|
||||
&& cargo install mdbook \
|
||||
&& cargo install mdbook-linkcheck \
|
||||
|
16
ci/env.sh
@@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
|
||||
elif [[ -n $BUILDKITE ]]; then
|
||||
export CI_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
||||
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
|
||||
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
|
||||
fi
|
||||
export CI_COMMIT=$BUILDKITE_COMMIT
|
||||
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
||||
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
||||
@@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
|
||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_PULL_REQUEST=
|
||||
fi
|
||||
export CI_OS_NAME=linux
|
||||
|
||||
case "$(uname -s)" in
|
||||
Linux)
|
||||
export CI_OS_NAME=linux
|
||||
;;
|
||||
Darwin)
|
||||
export CI_OS_NAME=osx
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
||||
# The solana-secondary pipeline should use the slug of the pipeline that
|
||||
# triggered it
|
||||
|
@@ -39,7 +39,11 @@ fi
|
||||
|
||||
case "$CI_OS_NAME" in
|
||||
osx)
|
||||
TARGET=x86_64-apple-darwin
|
||||
_cputype="$(uname -m)"
|
||||
if [[ $_cputype = arm64 ]]; then
|
||||
_cputype=aarch64
|
||||
fi
|
||||
TARGET=${_cputype}-apple-darwin
|
||||
;;
|
||||
linux)
|
||||
TARGET=x86_64-unknown-linux-gnu
|
||||
@@ -146,7 +150,7 @@ elif [[ -n $BUILDKITE ]]; then
|
||||
cat > release.solana.com-install <<EOF
|
||||
SOLANA_RELEASE=$CHANNEL_OR_TAG
|
||||
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
|
||||
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
|
||||
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
|
||||
EOF
|
||||
cat install/solana-install-init.sh >> release.solana.com-install
|
||||
|
||||
|
@@ -27,6 +27,8 @@ steps+=(test-stable-perf)
|
||||
steps+=(test-downstream-builds)
|
||||
steps+=(test-bench)
|
||||
steps+=(test-local-cluster)
|
||||
steps+=(test-local-cluster-flakey)
|
||||
steps+=(test-local-cluster-slow)
|
||||
|
||||
step_index=0
|
||||
if [[ -n "$1" ]]; then
|
||||
|
@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.56.1
stable_version=1.57.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-11-30
nightly_version=2021-12-03
fi
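Aside (an assumption about how this snippet is consumed, not something stated in the diff): because the environment variables are checked first, a caller can pin a different toolchain for a single run without editing the file, for example:

    export RUST_STABLE_VERSION=1.58.0       # hypothetical override; wins over the 1.57.0 default above
    export RUST_NIGHTLY_VERSION=2021-12-10  # hypothetical nightly override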
24
ci/sbf-tools-info.sh
Executable file
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#

here="$(dirname "$0")"

SBF_TOOLS_VERSION=unknown

cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
if [[ ${version} != '' ]]; then
SBF_TOOLS_VERSION="${version}"
else
echo '--- unable to parse SBF_TOOLS_VERSION'
fi
else
echo "--- '${cargo_build_bpf_main}' not present"
fi

echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"
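Since the script states that its stdout may be eval-ed, a typical consumer would look like the following (illustrative sketch, not part of this diff):

    eval "$(ci/sbf-tools-info.sh)"              # defines SBF_TOOLS_VERSION in the calling shell
    echo "using sbf-tools ${SBF_TOOLS_VERSION}" # prints "unknown" if the version could not be parsed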
1
ci/test-local-cluster-flakey.sh
Symbolic link
@@ -0,0 +1 @@
test-stable.sh

1
ci/test-local-cluster-slow.sh
Symbolic link
@@ -0,0 +1 @@
test-stable.sh
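The two entries above record new symlinks whose target is test-stable.sh; recreating them by hand would look like this (illustrative only):

    ln -s test-stable.sh ci/test-local-cluster-flakey.sh
    ln -s test-stable.sh ci/test-local-cluster-slow.sh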
@@ -100,7 +100,30 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-wasm)
_ node --version
_ npm --version
for dir in sdk/{program,}; do
if [[ -r "$dir"/package.json ]]; then
pushd "$dir"
_ npm install
_ npm test
popd
fi
done
exit 0
;;
*)
1
ci/test-wasm.sh
Symbolic link
@@ -0,0 +1 @@
test-stable.sh
@@ -19,13 +19,24 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
set -x
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"

)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
)
}
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.9.0"
version = "1.9.13"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -19,12 +19,12 @@ Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }

[package.metadata.docs.rs]
@@ -46,6 +46,8 @@ use {
},
};

static CHECK_MARK: Emoji = Emoji("✅ ", "");
static CROSS_MARK: Emoji = Emoji("❌ ", "");
static WARNING: Emoji = Emoji("⚠️", "!");

#[derive(PartialEq, Debug)]
@@ -99,7 +101,7 @@ impl OutputFormat {
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
#[serde(skip_serializing, skip_deserializing)]
pub use_lamports_unit: bool,
}
@@ -391,19 +393,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
"- ".into()
" - ".into()
} else if v == max_v {
format!("{:>8} ( 0)", v)
format!("{:>9} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>8} ", v)
format!("{:>9} ", v)
}
}

writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
if validator.delinquent {
WARNING.to_string()
} else {
@@ -417,19 +419,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
}
@@ -439,13 +441,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",
@@ -2523,6 +2525,172 @@ impl fmt::Display for CliGossipNodes {
|
||||
impl QuietDisplay for CliGossipNodes {}
|
||||
impl VerboseDisplay for CliGossipNodes {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPing {
|
||||
pub source_pubkey: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fixed_blockhash: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
pub blockhash_from_cluster: bool,
|
||||
pub pings: Vec<CliPingData>,
|
||||
pub transaction_stats: CliPingTxStats,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub confirmation_stats: Option<CliPingConfirmationStats>,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliPing {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln_name_value(f, "Source Account:", &self.source_pubkey)?;
|
||||
if let Some(fixed_blockhash) = &self.fixed_blockhash {
|
||||
let blockhash_origin = if self.blockhash_from_cluster {
|
||||
"fetched from cluster"
|
||||
} else {
|
||||
"supplied from cli arguments"
|
||||
};
|
||||
writeln!(
|
||||
f,
|
||||
"Fixed blockhash is used: {} ({})",
|
||||
fixed_blockhash, blockhash_origin
|
||||
)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
for ping in &self.pings {
|
||||
write!(f, "{}", ping)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
writeln!(f, "--- transaction statistics ---")?;
|
||||
write!(f, "{}", self.transaction_stats)?;
|
||||
if let Some(confirmation_stats) = &self.confirmation_stats {
|
||||
write!(f, "{}", confirmation_stats)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPing {}
|
||||
impl VerboseDisplay for CliPing {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingData {
|
||||
pub success: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub signature: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ms: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
pub print_timestamp: bool,
|
||||
pub timestamp: String,
|
||||
pub sequence: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub lamports: Option<u64>,
|
||||
}
|
||||
impl fmt::Display for CliPingData {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let (mark, msg) = if let Some(signature) = &self.signature {
|
||||
if self.success {
|
||||
(
|
||||
CHECK_MARK,
|
||||
format!(
|
||||
"{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
self.lamports.unwrap(),
|
||||
self.sequence,
|
||||
self.ms.unwrap(),
|
||||
signature
|
||||
),
|
||||
)
|
||||
} else if let Some(error) = &self.error {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
self.sequence, error, signature
|
||||
),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Confirmation timeout: seq={:<3} signature={}",
|
||||
self.sequence, signature
|
||||
),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
(
|
||||
CROSS_MARK,
|
||||
format!(
|
||||
"Submit failed: seq={:<3} error={:?}",
|
||||
self.sequence,
|
||||
self.error.as_ref().unwrap(),
|
||||
),
|
||||
)
|
||||
};
|
||||
|
||||
writeln!(
|
||||
f,
|
||||
"{}{}{}",
|
||||
if self.print_timestamp {
|
||||
&self.timestamp
|
||||
} else {
|
||||
""
|
||||
},
|
||||
mark,
|
||||
msg
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPingData {}
|
||||
impl VerboseDisplay for CliPingData {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingTxStats {
|
||||
pub num_transactions: u32,
|
||||
pub num_transaction_confirmed: u32,
|
||||
}
|
||||
impl fmt::Display for CliPingTxStats {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
|
||||
self.num_transactions,
|
||||
self.num_transaction_confirmed,
|
||||
(100.
|
||||
- f64::from(self.num_transaction_confirmed) / f64::from(self.num_transactions)
|
||||
* 100.)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliPingTxStats {}
|
||||
impl VerboseDisplay for CliPingTxStats {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliPingConfirmationStats {
|
||||
pub min: f64,
|
||||
pub mean: f64,
|
||||
pub max: f64,
|
||||
pub std_dev: f64,
|
||||
}
|
||||
impl fmt::Display for CliPingConfirmationStats {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
|
||||
self.min, self.mean, self.max, self.std_dev,
|
||||
)
|
||||
}
|
||||
}
|
||||
impl QuietDisplay for CliPingConfirmationStats {}
|
||||
impl VerboseDisplay for CliPingConfirmationStats {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {
|
||||
|
@@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
} else {
"-"
},
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
if message.is_writable(index) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -26,29 +26,29 @@ semver = "1.0.4"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.72"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.9.0" }
|
||||
solana-client = { path = "../client", version = "=1.9.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
|
||||
solana_rbpf = "=0.2.16"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.9.13" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.9.13" }
|
||||
solana-client = { path = "../client", version = "=1.9.13" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.9.13" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.13" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
|
||||
solana_rbpf = "=0.2.24"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.30"
|
||||
tiny-bip39 = "0.8.2"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
|
||||
tempfile = "3.2.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -98,10 +98,7 @@ pub fn get_fee_for_messages(
) -> Result<u64, CliError> {
Ok(messages
.iter()
.map(|message| {
println!("msg {:?}", message.recent_blockhash);
rpc_client.get_fee_for_message(message)
})
.map(|message| rpc_client.get_fee_for_message(message))
.collect::<Result<Vec<_>, _>>()?
.iter()
.sum())

132
cli/src/cli.rs
@@ -83,7 +83,6 @@ pub enum CliCommand {
|
||||
filter: RpcTransactionLogsFilter,
|
||||
},
|
||||
Ping {
|
||||
lamports: u64,
|
||||
interval: Duration,
|
||||
count: Option<u64>,
|
||||
timeout: Duration,
|
||||
@@ -298,7 +297,13 @@ pub enum CliCommand {
|
||||
authorized_voter: Option<Pubkey>,
|
||||
authorized_withdrawer: Pubkey,
|
||||
commission: u8,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
ShowVoteAccount {
|
||||
pubkey: Pubkey,
|
||||
@@ -310,19 +315,32 @@ pub enum CliCommand {
|
||||
destination_account_pubkey: Pubkey,
|
||||
withdraw_authority: SignerIndex,
|
||||
withdraw_amount: SpendAmount,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
CloseVoteAccount {
|
||||
vote_account_pubkey: Pubkey,
|
||||
destination_account_pubkey: Pubkey,
|
||||
withdraw_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
VoteAuthorize {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_authorized_pubkey: Pubkey,
|
||||
vote_authorize: VoteAuthorize,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
authorized: SignerIndex,
|
||||
new_authorized: Option<SignerIndex>,
|
||||
},
|
||||
@@ -330,13 +348,25 @@ pub enum CliCommand {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
VoteUpdateCommission {
|
||||
vote_account_pubkey: Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
},
|
||||
// Wallet Commands
|
||||
Address,
|
||||
@@ -942,7 +972,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::LiveSlots => process_live_slots(config),
|
||||
CliCommand::Logs { filter } => process_logs(config, filter),
|
||||
CliCommand::Ping {
|
||||
lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
@@ -951,7 +980,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_ping(
|
||||
&rpc_client,
|
||||
config,
|
||||
*lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
@@ -1384,7 +1412,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
authorized_voter,
|
||||
authorized_withdrawer,
|
||||
commission,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
ref nonce_account,
|
||||
nonce_authority,
|
||||
memo,
|
||||
fee_payer,
|
||||
} => process_create_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
@@ -1394,7 +1428,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
authorized_voter,
|
||||
*authorized_withdrawer,
|
||||
*commission,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
blockhash_query,
|
||||
nonce_account.as_ref(),
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::ShowVoteAccount {
|
||||
pubkey: vote_account_pubkey,
|
||||
@@ -1412,7 +1452,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
withdraw_authority,
|
||||
withdraw_amount,
|
||||
destination_account_pubkey,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
ref nonce_account,
|
||||
nonce_authority,
|
||||
memo,
|
||||
fee_payer,
|
||||
} => process_withdraw_from_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
@@ -1420,13 +1466,20 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*withdraw_authority,
|
||||
*withdraw_amount,
|
||||
destination_account_pubkey,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
blockhash_query,
|
||||
nonce_account.as_ref(),
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::CloseVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority,
|
||||
destination_account_pubkey,
|
||||
memo,
|
||||
fee_payer,
|
||||
} => process_close_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
@@ -1434,12 +1487,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*withdraw_authority,
|
||||
destination_account_pubkey,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority,
|
||||
memo,
|
||||
fee_payer,
|
||||
authorized,
|
||||
new_authorized,
|
||||
} => process_vote_authorize(
|
||||
@@ -1450,33 +1510,63 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*vote_authorize,
|
||||
*authorized,
|
||||
*new_authorized,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account,
|
||||
withdraw_authority,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority,
|
||||
memo,
|
||||
fee_payer,
|
||||
} => process_vote_update_validator(
|
||||
&rpc_client,
|
||||
config,
|
||||
vote_account_pubkey,
|
||||
*new_identity_account,
|
||||
*withdraw_authority,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority,
|
||||
memo,
|
||||
fee_payer,
|
||||
} => process_vote_update_commission(
|
||||
&rpc_client,
|
||||
config,
|
||||
vote_account_pubkey,
|
||||
*commission,
|
||||
*withdraw_authority,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
),
|
||||
|
||||
// Wallet Commands
|
||||
@@ -1975,7 +2065,13 @@ mod tests {
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
authorized_withdrawer: bob_pubkey,
|
||||
commission: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
|
||||
let result = process_command(&config);
|
||||
@@ -2006,7 +2102,13 @@ mod tests {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_authorized_pubkey,
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
@@ -2019,7 +2121,13 @@ mod tests {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
@@ -2195,7 +2303,13 @@ mod tests {
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
authorized_withdrawer: bob_pubkey,
|
||||
commission: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
|
||||
assert!(process_command(&config).is_err());
|
||||
@@ -2204,7 +2318,13 @@ mod tests {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_authorized_pubkey: bob_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
@@ -2214,7 +2334,13 @@ mod tests {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 1,
|
||||
withdraw_authority: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@@ -4,7 +4,7 @@ use {
|
||||
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
|
||||
},
|
||||
clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand},
|
||||
console::{style, Emoji},
|
||||
console::style,
|
||||
serde::{Deserialize, Serialize},
|
||||
solana_clap_utils::{
|
||||
input_parsers::*,
|
||||
@@ -15,7 +15,7 @@ use {
|
||||
solana_cli_output::{
|
||||
display::{
|
||||
build_balance_message, format_labeled_address, new_spinner_progress_bar,
|
||||
println_name_value, println_transaction, unix_timestamp_to_string, writeln_name_value,
|
||||
println_transaction, unix_timestamp_to_string, writeln_name_value,
|
||||
},
|
||||
*,
|
||||
},
|
||||
@@ -43,13 +43,13 @@ use {
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
nonce::State as NonceState,
|
||||
pubkey::{self, Pubkey},
|
||||
pubkey::Pubkey,
|
||||
rent::Rent,
|
||||
rpc_port::DEFAULT_RPC_PORT_STR,
|
||||
signature::Signature,
|
||||
slot_history,
|
||||
stake::{self, state::StakeState},
|
||||
system_instruction, system_program,
|
||||
system_instruction,
|
||||
sysvar::{
|
||||
self,
|
||||
slot_history::SlotHistory,
|
||||
@@ -74,9 +74,6 @@ use {
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
static CHECK_MARK: Emoji = Emoji("✅ ", "");
|
||||
static CROSS_MARK: Emoji = Emoji("❌ ", "");
|
||||
|
||||
pub trait ClusterQuerySubCommands {
|
||||
fn cluster_query_subcommands(self) -> Self;
|
||||
}
|
||||
@@ -262,15 +259,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(false)
|
||||
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value("1")
|
||||
.validator(is_amount)
|
||||
.help("Number of lamports to transfer for each transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("timeout")
|
||||
.short("t")
|
||||
@@ -515,7 +503,6 @@ pub fn parse_cluster_ping(
|
||||
default_signer: &DefaultSigner,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let lamports = value_t_or_exit!(matches, "lamports", u64);
|
||||
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
|
||||
let count = if matches.is_present("count") {
|
||||
Some(value_t_or_exit!(matches, "count", u64))
|
||||
@@ -527,7 +514,6 @@ pub fn parse_cluster_ping(
|
||||
let print_timestamp = matches.is_present("print_timestamp");
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
@@ -1358,40 +1344,34 @@ pub fn process_get_transaction_count(rpc_client: &RpcClient, _config: &CliConfig
|
||||
pub fn process_ping(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
lamports: u64,
|
||||
interval: &Duration,
|
||||
count: &Option<u64>,
|
||||
timeout: &Duration,
|
||||
fixed_blockhash: &Option<Hash>,
|
||||
print_timestamp: bool,
|
||||
) -> ProcessResult {
|
||||
println_name_value("Source Account:", &config.signers[0].pubkey().to_string());
|
||||
println!();
|
||||
|
||||
let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
|
||||
ctrlc::set_handler(move || {
|
||||
let _ = signal_sender.send(());
|
||||
})
|
||||
.expect("Error setting Ctrl-C handler");
|
||||
|
||||
let mut cli_pings = vec![];
|
||||
|
||||
let mut submit_count = 0;
|
||||
let mut confirmed_count = 0;
|
||||
let mut confirmation_time: VecDeque<u64> = VecDeque::with_capacity(1024);
|
||||
|
||||
let mut blockhash = rpc_client.get_latest_blockhash()?;
|
||||
let mut blockhash_transaction_count = 0;
|
||||
let mut lamports = 0;
|
||||
let mut blockhash_acquired = Instant::now();
|
||||
let mut blockhash_from_cluster = false;
|
||||
if let Some(fixed_blockhash) = fixed_blockhash {
|
||||
let blockhash_origin = if *fixed_blockhash != Hash::default() {
|
||||
if *fixed_blockhash != Hash::default() {
|
||||
blockhash = *fixed_blockhash;
|
||||
"supplied from cli arguments"
|
||||
} else {
|
||||
"fetched from cluster"
|
||||
};
|
||||
println!(
|
||||
"Fixed blockhash is used: {} ({})",
|
||||
blockhash, blockhash_origin
|
||||
);
|
||||
blockhash_from_cluster = true;
|
||||
}
|
||||
}
|
||||
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
|
||||
let now = Instant::now();
|
||||
@@ -1399,15 +1379,12 @@ pub fn process_ping(
|
||||
// Fetch a new blockhash every minute
|
||||
let new_blockhash = rpc_client.get_new_latest_blockhash(&blockhash)?;
|
||||
blockhash = new_blockhash;
|
||||
blockhash_transaction_count = 0;
|
||||
lamports = 0;
|
||||
blockhash_acquired = Instant::now();
|
||||
}
|
||||
|
||||
let seed =
|
||||
&format!("{}{}", blockhash_transaction_count, blockhash)[0..pubkey::MAX_SEED_LEN];
|
||||
let to = Pubkey::create_with_seed(&config.signers[0].pubkey(), seed, &system_program::id())
|
||||
.unwrap();
|
||||
blockhash_transaction_count += 1;
|
||||
let to = config.signers[0].pubkey();
|
||||
lamports += 1;
|
||||
|
||||
let build_message = |lamports| {
|
||||
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
|
||||
@@ -1430,11 +1407,7 @@ pub fn process_ping(
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_micros();
|
||||
if print_timestamp {
|
||||
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
|
||||
};
|
||||
|
||||
match rpc_client.send_transaction(&tx) {
|
||||
@@ -1448,35 +1421,51 @@ pub fn process_ping(
|
||||
Ok(()) => {
|
||||
let elapsed_time_millis = elapsed_time.as_millis() as u64;
|
||||
confirmation_time.push_back(elapsed_time_millis);
|
||||
println!(
|
||||
"{}{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
timestamp(),
|
||||
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
|
||||
);
|
||||
let cli_ping_data = CliPingData {
|
||||
success: true,
|
||||
signature: Some(signature.to_string()),
|
||||
ms: Some(elapsed_time_millis),
|
||||
error: None,
|
||||
timestamp: timestamp(),
|
||||
print_timestamp,
|
||||
sequence: seq,
|
||||
lamports: Some(lamports),
|
||||
};
|
||||
eprint!("{}", cli_ping_data);
|
||||
cli_pings.push(cli_ping_data);
|
||||
confirmed_count += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}{}Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err,
|
||||
signature
|
||||
);
|
||||
let cli_ping_data = CliPingData {
|
||||
success: false,
|
||||
signature: Some(signature.to_string()),
|
||||
ms: None,
|
||||
error: Some(err.to_string()),
|
||||
timestamp: timestamp(),
|
||||
print_timestamp,
|
||||
sequence: seq,
|
||||
lamports: None,
|
||||
};
|
||||
eprint!("{}", cli_ping_data);
|
||||
cli_pings.push(cli_ping_data);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if elapsed_time >= *timeout {
|
||||
println!(
|
||||
"{}{}Confirmation timeout: seq={:<3} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
signature
|
||||
);
|
||||
let cli_ping_data = CliPingData {
|
||||
success: false,
|
||||
signature: Some(signature.to_string()),
|
||||
ms: None,
|
||||
error: None,
|
||||
timestamp: timestamp(),
|
||||
print_timestamp,
|
||||
sequence: seq,
|
||||
lamports: None,
|
||||
};
|
||||
eprint!("{}", cli_ping_data);
|
||||
cli_pings.push(cli_ping_data);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1490,13 +1479,18 @@ pub fn process_ping(
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}{}Submit failed: seq={:<3} error={:?}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err
|
||||
);
|
||||
let cli_ping_data = CliPingData {
|
||||
success: false,
|
||||
signature: None,
|
||||
ms: None,
|
||||
error: Some(err.to_string()),
|
||||
timestamp: timestamp(),
|
||||
print_timestamp,
|
||||
sequence: seq,
|
||||
lamports: None,
|
||||
};
|
||||
eprint!("{}", cli_ping_data);
|
||||
cli_pings.push(cli_ping_data);
|
||||
}
|
||||
}
|
||||
submit_count += 1;
|
||||
@@ -1506,28 +1500,34 @@ pub fn process_ping(
|
||||
}
|
||||
}
|
||||
|
||||
println!();
|
||||
println!("--- transaction statistics ---");
|
||||
println!(
|
||||
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
|
||||
submit_count,
|
||||
confirmed_count,
|
||||
(100. - f64::from(confirmed_count) / f64::from(submit_count) * 100.)
|
||||
);
|
||||
if !confirmation_time.is_empty() {
|
||||
let transaction_stats = CliPingTxStats {
|
||||
num_transactions: submit_count,
|
||||
num_transaction_confirmed: confirmed_count,
|
||||
};
|
||||
let confirmation_stats = if !confirmation_time.is_empty() {
|
||||
let samples: Vec<f64> = confirmation_time.iter().map(|t| *t as f64).collect();
|
||||
let dist = criterion_stats::Distribution::from(samples.into_boxed_slice());
|
||||
let mean = dist.mean();
|
||||
println!(
|
||||
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
|
||||
dist.min(),
|
||||
Some(CliPingConfirmationStats {
|
||||
min: dist.min(),
|
||||
mean,
|
||||
dist.max(),
|
||||
dist.std_dev(Some(mean))
|
||||
);
|
||||
}
|
||||
max: dist.max(),
|
||||
std_dev: dist.std_dev(Some(mean)),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok("".to_string())
|
||||
let cli_ping = CliPing {
|
||||
source_pubkey: config.signers[0].pubkey().to_string(),
|
||||
fixed_blockhash: fixed_blockhash.map(|_| blockhash.to_string()),
|
||||
blockhash_from_cluster,
|
||||
pings: cli_pings,
|
||||
transaction_stats,
|
||||
confirmation_stats,
|
||||
};
|
||||
|
||||
Ok(config.output_format.formatted_string(&cli_ping))
|
||||
}
|
||||
|
||||
pub fn parse_logs(
|
||||
@@ -2128,7 +2128,7 @@ pub fn process_calculate_rent(
|
||||
timing::years_as_slots(1.0, &seconds_per_tick, clock::DEFAULT_TICKS_PER_SLOT);
|
||||
let slots_per_epoch = epoch_schedule.slots_per_epoch as f64;
|
||||
let years_per_epoch = slots_per_epoch / slots_per_year;
|
||||
let (lamports_per_epoch, _) = rent.due(0, data_length, years_per_epoch);
|
||||
let lamports_per_epoch = rent.due(0, data_length, years_per_epoch).lamports();
|
||||
let cli_rent_calculation = CliRentCalculation {
|
||||
lamports_per_byte_year: rent.lamports_per_byte_year,
|
||||
lamports_per_epoch,
|
||||
@@ -2304,7 +2304,6 @@ mod tests {
|
||||
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports: 1,
|
||||
interval: Duration::from_secs(1),
|
||||
count: Some(2),
|
||||
timeout: Duration::from_secs(3),
|
||||
|
@@ -5,7 +5,7 @@ use {
|
||||
},
|
||||
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
|
||||
console::style,
|
||||
serde::{Deserialize, Serialize},
|
||||
serde::{Deserialize, Deserializer, Serialize, Serializer},
|
||||
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
|
||||
solana_cli_output::{QuietDisplay, VerboseDisplay},
|
||||
solana_client::{client_error::ClientError, rpc_client::RpcClient},
|
||||
@@ -23,6 +23,7 @@ use {
|
||||
cmp::Ordering,
|
||||
collections::{HashMap, HashSet},
|
||||
fmt,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
},
|
||||
};
|
||||
@@ -45,7 +46,7 @@ pub enum FeatureCliCommand {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
|
||||
pub enum CliFeatureStatus {
|
||||
Inactive,
|
||||
@@ -53,7 +54,29 @@ pub enum CliFeatureStatus {
|
||||
Active(Slot),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
impl PartialOrd for CliFeatureStatus {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for CliFeatureStatus {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match (self, other) {
|
||||
(Self::Inactive, Self::Inactive) => Ordering::Equal,
|
||||
(Self::Inactive, _) => Ordering::Greater,
|
||||
(_, Self::Inactive) => Ordering::Less,
|
||||
(Self::Pending, Self::Pending) => Ordering::Equal,
|
||||
(Self::Pending, _) => Ordering::Greater,
|
||||
(_, Self::Pending) => Ordering::Less,
|
||||
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
|
||||
self_active_slot.cmp(other_active_slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFeature {
|
||||
pub id: String,
|
||||
@@ -62,11 +85,28 @@ pub struct CliFeature {
|
||||
pub status: CliFeatureStatus,
|
||||
}
|
||||
|
||||
impl PartialOrd for CliFeature {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for CliFeature {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match self.status.cmp(&other.status) {
|
||||
Ordering::Equal => self.id.cmp(&other.id),
|
||||
ordering => ordering,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFeatures {
|
||||
pub features: Vec<CliFeature>,
|
||||
pub feature_activation_allowed: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
|
||||
#[serde(skip)]
|
||||
pub inactive: bool,
|
||||
}
|
||||
@@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures {
|
||||
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
|
||||
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
|
||||
CliFeatureStatus::Active(activation_slot) =>
|
||||
style(format!("active since slot {}", activation_slot)).green(),
|
||||
style(format!("active since slot {:>9}", activation_slot)).green(),
|
||||
},
|
||||
feature.description,
|
||||
)?;
|
||||
}
|
||||
|
||||
if let Some(feature_sets) = &self.cluster_feature_sets {
|
||||
write!(f, "{}", feature_sets)?;
|
||||
}
|
||||
|
||||
if self.inactive && !self.feature_activation_allowed {
|
||||
writeln!(
|
||||
f,
|
||||
@@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures {
|
||||
impl QuietDisplay for CliFeatures {}
|
||||
impl VerboseDisplay for CliFeatures {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliClusterFeatureSets {
|
||||
pub tool_feature_set: u32,
|
||||
pub feature_sets: Vec<CliFeatureSet>,
|
||||
#[serde(skip)]
|
||||
pub stake_allowed: bool,
|
||||
#[serde(skip)]
|
||||
pub rpc_allowed: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliClusterFeatureSets {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let mut tool_feature_set_matches_cluster = false;
|
||||
|
||||
let software_versions_title = "Software Version";
|
||||
let feature_set_title = "Feature Set";
|
||||
let stake_percent_title = "Stake";
|
||||
let rpc_percent_title = "RPC";
|
||||
let mut max_software_versions_len = software_versions_title.len();
|
||||
let mut max_feature_set_len = feature_set_title.len();
|
||||
let mut max_stake_percent_len = stake_percent_title.len();
|
||||
let mut max_rpc_percent_len = rpc_percent_title.len();
|
||||
|
||||
let feature_sets: Vec<_> = self
|
||||
.feature_sets
|
||||
.iter()
|
||||
.map(|feature_set_info| {
|
||||
let me = if self.tool_feature_set == feature_set_info.feature_set {
|
||||
tool_feature_set_matches_cluster = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
let software_versions: Vec<_> = feature_set_info
|
||||
.software_versions
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect();
|
||||
let software_versions = software_versions.join(", ");
|
||||
let feature_set = if feature_set_info.feature_set == 0 {
|
||||
"unknown".to_string()
|
||||
} else {
|
||||
feature_set_info.feature_set.to_string()
|
||||
};
|
||||
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
|
||||
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
|
||||
|
||||
max_software_versions_len = max_software_versions_len.max(software_versions.len());
|
||||
max_feature_set_len = max_feature_set_len.max(feature_set.len());
|
||||
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
|
||||
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
|
||||
|
||||
(
|
||||
software_versions,
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
me,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
if !tool_feature_set_matches_cluster {
|
||||
writeln!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
|
||||
.bold())?;
|
||||
} else {
|
||||
if !self.stake_allowed {
|
||||
write!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the stake must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
)?;
|
||||
}
|
||||
if !self.rpc_allowed {
|
||||
write!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the RPC nodes must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
)?;
|
||||
}
|
||||
}
|
||||
writeln!(
|
||||
f,
|
||||
"\n\n{}",
|
||||
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
|
||||
max_software_versions_len,
|
||||
software_versions_title,
|
||||
max_feature_set_len,
|
||||
feature_set_title,
|
||||
max_stake_percent_len,
|
||||
stake_percent_title,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent_title,
|
||||
))
|
||||
.bold(),
|
||||
)?;
|
||||
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
|
||||
writeln!(
|
||||
f,
|
||||
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
|
||||
max_software_versions_len,
|
||||
software_versions,
|
||||
max_feature_set_len,
|
||||
feature_set,
|
||||
max_stake_percent_len,
|
||||
stake_percent,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent,
|
||||
if me { "<-- me" } else { "" },
|
||||
)?;
|
||||
}
|
||||
writeln!(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliClusterFeatureSets {}
|
||||
impl VerboseDisplay for CliClusterFeatureSets {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFeatureSet {
|
||||
software_versions: Vec<CliVersion>,
|
||||
feature_set: u32,
|
||||
stake_percent: f64,
|
||||
rpc_percent: f32,
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Ord, PartialOrd)]
|
||||
struct CliVersion(Option<semver::Version>);
|
||||
|
||||
impl fmt::Display for CliVersion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let s = match &self.0 {
|
||||
None => "unknown".to_string(),
|
||||
Some(version) => version.to_string(),
|
||||
};
|
||||
write!(f, "{}", s)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for CliVersion {
|
||||
type Err = semver::Error;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let version_option = if s == "unknown" {
|
||||
None
|
||||
} else {
|
||||
Some(semver::Version::from_str(s)?)
|
||||
};
|
||||
Ok(CliVersion(version_option))
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for CliVersion {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for CliVersion {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s: &str = Deserialize::deserialize(deserializer)?;
|
||||
CliVersion::from_str(s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait FeatureSubCommands {
|
||||
fn feature_subcommands(self) -> Self;
|
||||
}
|
||||
@@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
|
||||
}
|
||||
|
||||
// Feature activation is only allowed when 95% of the active stake is on the current feature set
|
||||
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
|
||||
fn feature_activation_allowed(
|
||||
rpc_client: &RpcClient,
|
||||
quiet: bool,
|
||||
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
|
||||
let my_feature_set = solana_version::Version::default().feature_set;
|
||||
|
||||
let feature_set_stats = feature_set_stats(rpc_client)?;
|
||||
@@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
)
|
||||
.unwrap_or((false, false));
|
||||
|
||||
if !quiet {
|
||||
if feature_set_stats.get(&my_feature_set).is_none() {
|
||||
println!(
|
||||
"{}",
|
||||
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
|
||||
.bold());
|
||||
} else {
|
||||
if !stake_allowed {
|
||||
print!(
|
||||
"\n{}",
|
||||
style("To activate features the stake must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
);
|
||||
}
|
||||
if !rpc_allowed {
|
||||
print!(
|
||||
"\n{}",
|
||||
style("To activate features the RPC nodes must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
);
|
||||
}
|
||||
}
|
||||
println!(
|
||||
"\n\n{}",
|
||||
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
|
||||
);
|
||||
|
||||
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
|
||||
feature_set_stats.sort_by(|l, r| {
|
||||
match l.1.software_versions[0]
|
||||
.cmp(&r.1.software_versions[0])
|
||||
let cluster_feature_sets = if quiet {
|
||||
None
|
||||
} else {
|
||||
let mut feature_sets = feature_set_stats
|
||||
.into_iter()
|
||||
.map(
|
||||
|(
|
||||
feature_set,
|
||||
FeatureSetStatsEntry {
|
||||
stake_percent,
|
||||
rpc_nodes_percent: rpc_percent,
|
||||
software_versions,
|
||||
},
|
||||
)| {
|
||||
CliFeatureSet {
|
||||
software_versions: software_versions.into_iter().map(CliVersion).collect(),
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
}
|
||||
},
|
||||
)
|
||||
.collect::<Vec<_>>();
|
||||
feature_sets.sort_by(|l, r| {
|
||||
match l.software_versions[0]
|
||||
.cmp(&r.software_versions[0])
|
||||
.reverse()
|
||||
{
|
||||
Ordering::Equal => {
|
||||
match l
|
||||
.1
|
||||
.stake_percent
|
||||
.partial_cmp(&r.1.stake_percent)
|
||||
.partial_cmp(&r.stake_percent)
|
||||
.unwrap()
|
||||
.reverse()
|
||||
{
|
||||
Ordering::Equal => {
|
||||
l.1.rpc_nodes_percent
|
||||
.partial_cmp(&r.1.rpc_nodes_percent)
|
||||
.unwrap()
|
||||
.reverse()
|
||||
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
|
||||
}
|
||||
o => o,
|
||||
}
|
||||
@@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
o => o,
|
||||
}
|
||||
});
|
||||
Some(CliClusterFeatureSets {
|
||||
tool_feature_set: my_feature_set,
|
||||
feature_sets,
|
||||
stake_allowed,
|
||||
rpc_allowed,
|
||||
})
|
||||
};
|
||||
|
||||
let software_versions_title = "Software Version";
|
||||
let feature_set_title = "Feature Set";
|
||||
let stake_percent_title = "Stake";
|
||||
let rpc_percent_title = "RPC";
|
||||
let mut stats_output = Vec::new();
|
||||
let mut max_software_versions_len = software_versions_title.len();
|
||||
let mut max_feature_set_len = feature_set_title.len();
|
||||
let mut max_stake_percent_len = stake_percent_title.len();
|
||||
let mut max_rpc_percent_len = rpc_percent_title.len();
|
||||
for (
|
||||
feature_set,
|
||||
FeatureSetStatsEntry {
|
||||
stake_percent,
|
||||
rpc_nodes_percent,
|
||||
software_versions,
|
||||
},
|
||||
) in feature_set_stats.into_iter()
|
||||
{
|
||||
let me = feature_set == my_feature_set;
|
||||
let feature_set = if feature_set == 0 {
|
||||
"unknown".to_string()
|
||||
} else {
|
||||
feature_set.to_string()
|
||||
};
|
||||
let stake_percent = format!("{:.2}%", stake_percent);
|
||||
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
|
||||
|
||||
let mut has_unknown = false;
|
||||
let mut software_versions = software_versions
|
||||
.iter()
|
||||
.filter_map(|v| {
|
||||
if v.is_none() {
|
||||
has_unknown = true;
|
||||
}
|
||||
v.as_ref()
|
||||
})
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>();
|
||||
if has_unknown {
|
||||
software_versions.push("unknown".to_string());
|
||||
}
|
||||
let software_versions = software_versions.join(", ");
|
||||
max_software_versions_len = max_software_versions_len.max(software_versions.len());
|
||||
|
||||
max_feature_set_len = max_feature_set_len.max(feature_set.len());
|
||||
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
|
||||
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
|
||||
|
||||
stats_output.push((
|
||||
software_versions,
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
me,
|
||||
));
|
||||
}
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
|
||||
max_software_versions_len,
|
||||
software_versions_title,
|
||||
max_feature_set_len,
|
||||
feature_set_title,
|
||||
max_stake_percent_len,
|
||||
stake_percent_title,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent_title,
|
||||
))
|
||||
.bold(),
|
||||
);
|
||||
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
|
||||
println!(
|
||||
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
|
||||
max_software_versions_len,
|
||||
software_versions,
|
||||
max_feature_set_len,
|
||||
feature_set,
|
||||
max_stake_percent_len,
|
||||
stake_percent,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent,
|
||||
if me { "<-- me" } else { "" },
|
||||
);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
Ok(stake_allowed && rpc_allowed)
|
||||
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
|
||||
}
|
||||
|
||||
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
|
||||
@@ -550,10 +691,14 @@ fn process_status(
|
||||
});
|
||||
}
|
||||
|
||||
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
|
||||
features.sort_unstable();
|
||||
|
||||
let (feature_activation_allowed, cluster_feature_sets) =
|
||||
feature_activation_allowed(rpc_client, features.len() <= 1)?;
|
||||
let feature_set = CliFeatures {
|
||||
features,
|
||||
feature_activation_allowed,
|
||||
cluster_feature_sets,
|
||||
inactive,
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&feature_set))
|
||||
@@ -577,7 +722,7 @@ fn process_activate(
|
||||
}
|
||||
}
|
||||
|
||||
if !feature_activation_allowed(rpc_client, false)? {
|
||||
if !feature_activation_allowed(rpc_client, false)?.0 {
|
||||
match force {
|
||||
ForceActivation::Almost =>
|
||||
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),
|
||||
|
@@ -1997,10 +1997,7 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::e
&program_data,
Some(verifier::check),
Config {
reject_unresolved_syscalls: true,
verify_mul64_imm_nonzero: false,
verify_shift32_imm: true,
reject_section_virtual_address_file_offset_mismatch: true,
reject_broken_elfs: true,
..Config::default()
},
register_syscalls(&mut invoke_context).unwrap(),
@@ -16,6 +16,7 @@ use {
pub enum SpendAmount {
All,
Some(u64),
RentExempt,
}

impl Default for SpendAmount {
@@ -90,6 +91,7 @@ where
0,
from_pubkey,
fee_pubkey,
0,
build_message,
)?;
Ok((message, spend))
@@ -97,6 +99,12 @@ where
let from_balance = rpc_client
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
let data = rpc_client.get_account_data(from_pubkey)?;
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
} else {
0
};
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
rpc_client,
amount,
@@ -104,6 +112,7 @@ where
from_balance,
from_pubkey,
fee_pubkey,
from_rent_exempt_minimum,
build_message,
)?;
if from_pubkey == fee_pubkey {
@@ -140,6 +149,7 @@ fn resolve_spend_message<F>(
from_balance: u64,
from_pubkey: &Pubkey,
fee_pubkey: &Pubkey,
from_rent_exempt_minimum: u64,
build_message: F,
) -> Result<(Message, SpendAndFee), CliError>
where
@@ -176,5 +186,20 @@ where
},
))
}
SpendAmount::RentExempt => {
let mut lamports = if from_pubkey == fee_pubkey {
from_balance.saturating_sub(fee)
} else {
from_balance
};
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
Ok((
build_message(lamports),
SpendAndFee {
spend: lamports,
fee,
},
))
}
}
}
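A condensed sketch of the arithmetic in the new `SpendAmount::RentExempt` arm above: the spend is the sender's balance, minus the fee when the sender is also the fee payer, minus the rent-exempt minimum, all with saturating subtraction. Names and example numbers here are illustrative only:

```rust
fn rent_exempt_spendable(
    from_balance: u64,
    fee: u64,
    rent_exempt_minimum: u64,
    from_is_fee_payer: bool,
) -> u64 {
    // Only deduct the fee when the sender also pays it, mirroring the
    // `from_pubkey == fee_pubkey` check in the diff.
    let after_fee = if from_is_fee_payer {
        from_balance.saturating_sub(fee)
    } else {
        from_balance
    };
    // Leave enough behind to keep the sender rent-exempt.
    after_fee.saturating_sub(rent_exempt_minimum)
}

fn main() {
    // 1 SOL balance, 5_000-lamport fee; 890_880 lamports is roughly the
    // rent-exempt minimum for a zero-data account under default rent parameters.
    let spend = rent_exempt_spendable(1_000_000_000, 5_000, 890_880, true);
    assert_eq!(spend, 1_000_000_000 - 5_000 - 890_880);
}
```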
@@ -1384,7 +1384,13 @@ pub fn process_stake_authorize(
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
check_current_authority(&authorized.staker, &authority.pubkey())?;
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
}
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;
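The `.or_else` chain in the staker branch above checks the withdrawer authority first and only falls back to the staker check if that fails, so the error that reaches the caller names the staker key as the expected signer. A generic sketch of that fallback pattern (helper names are made up for illustration):

```rust
fn check(expected: &str, provided: &str) -> Result<(), String> {
    if expected == provided {
        Ok(())
    } else {
        Err(format!("expected {}, got {}", expected, provided))
    }
}

fn authorize(withdrawer: &str, staker: &str, authority: &str) -> Result<(), String> {
    // Try the withdrawer first; if it fails, fall back to the staker,
    // so the surfaced error comes from the second check.
    check(withdrawer, authority).or_else(|_| check(staker, authority))
}

fn main() {
    assert!(authorize("W", "S", "W").is_ok());
    assert!(authorize("W", "S", "S").is_ok());
    assert_eq!(authorize("W", "S", "X").unwrap_err(), "expected S, got X");
}
```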
@@ -1,23 +1,29 @@
use {
solana_client::rpc_client::RpcClient,
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, pubkey::Pubkey},
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig},
std::{thread::sleep, time::Duration},
};

pub fn check_recent_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client
.get_balance_with_commitment(pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
#[macro_export]
macro_rules! check_balance {
($expected_balance:expr, $client:expr, $pubkey:expr) => {
(0..5).for_each(|tries| {
let balance = $client
.get_balance_with_commitment($pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == $expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, $expected_balance);
}
std::thread::sleep(std::time::Duration::from_millis(500));
});
};
($expected_balance:expr, $client:expr, $pubkey:expr,) => {
check_balance!($expected_balance, $client, $pubkey)
};
}

pub fn check_ready(rpc_client: &RpcClient) {
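A hedged usage sketch for the new `check_balance!` macro defined above, mirroring the setup of the CLI integration tests further down (the module paths and helper calls are the ones those tests use; this is not part of the diff). The macro polls up to five times, sleeping 500ms between attempts, and only asserts on the final attempt:

```rust
use {
    solana_cli::{
        check_balance,
        cli::{request_and_confirm_airdrop, CliConfig},
    },
    solana_client::rpc_client::RpcClient,
    solana_faucet::faucet::run_local_faucet,
    solana_sdk::{
        commitment_config::CommitmentConfig,
        native_token::sol_to_lamports,
        signature::{Keypair, Signer},
    },
    solana_streamer::socket::SocketAddrSpace,
    solana_test_validator::TestValidator,
};

#[test]
fn balance_macro_example() {
    // Spin up a local validator and faucet, as the tests below do.
    let mint_keypair = Keypair::new();
    let mint_pubkey = mint_keypair.pubkey();
    let faucet_addr = run_local_faucet(mint_keypair, None);
    let test_validator =
        TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
    let rpc_client =
        RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

    // Fund a fresh keypair and verify the balance with the new macro.
    let payer = Keypair::new();
    request_and_confirm_airdrop(
        &rpc_client,
        &CliConfig::recent_for_tests(),
        &payer.pubkey(),
        sol_to_lamports(1.0),
    )
    .unwrap();
    check_balance!(sol_to_lamports(1.0), &rpc_client, &payer.pubkey());
}
```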
@@ -291,8 +291,12 @@ pub fn process_set_validator_info(
// Check existence of validator-info account
let balance = rpc_client.get_balance(&info_pubkey).unwrap_or(0);

let lamports =
rpc_client.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let data_len = ValidatorInfo::max_space() + ConfigKeys::serialized_size(keys.clone());
let lamports = rpc_client.get_minimum_balance_for_rent_exemption(data_len as usize)?;

let signers = if balance == 0 {
if info_pubkey != info_keypair.pubkey() {
@@ -308,10 +312,7 @@ pub fn process_set_validator_info(
};

let build_message = |lamports| {
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let keys = keys.clone();
if balance == 0 {
println!(
"Publishing info for Validator {:?}",
cli/src/vote.rs: 987 lines changed (file diff suppressed because it is too large)
@@ -462,18 +462,27 @@ pub fn process_show_account(

let mut account_string = config.output_format.formatted_string(&cli_account);

if config.output_format == OutputFormat::Display
|| config.output_format == OutputFormat::DisplayVerbose
{
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
match config.output_format {
OutputFormat::Json | OutputFormat::JsonCompact => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(account_string.as_bytes())?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
}
}
OutputFormat::Display | OutputFormat::DisplayVerbose => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
OutputFormat::DisplayQuiet => (),
}

Ok(account_string)
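The test diffs that follow replace hard-coded lamport amounts with `sol_to_lamports(...)` calls and `FeeStructure`-derived fees. As a quick reference for reading those balance checks, 1 SOL is 10^9 lamports, so the converted amounts stay exact integers (a small self-contained check, not part of the diff):

```rust
use solana_sdk::native_token::{sol_to_lamports, LAMPORTS_PER_SOL};

fn main() {
    assert_eq!(LAMPORTS_PER_SOL, 1_000_000_000);
    assert_eq!(sol_to_lamports(1.0), 1_000_000_000);
    assert_eq!(sol_to_lamports(2000.0), 2_000_000_000_000);
    // The ONE_SIG_FEE constant used in the seed-account test (0.000005 SOL)
    // works out to 5_000 lamports, which is why balances appear as
    // expressions like `sol_to_lamports(4.0) - fee_one_sig`.
    println!("42 SOL airdrop = {} lamports", sol_to_lamports(42.0));
}
```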
@@ -1,8 +1,10 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -14,6 +16,7 @@ use {
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
native_token::sol_to_lamports,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
system_program,
|
||||
@@ -73,10 +76,14 @@ fn full_battery_tests(
|
||||
&rpc_client,
|
||||
&config_payer,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
2000,
|
||||
sol_to_lamports(2000.0),
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_balance!(
|
||||
sol_to_lamports(2000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
|
||||
let mut config_nonce = CliConfig::recent_for_tests();
|
||||
config_nonce.json_rpc_url = json_rpc_url;
|
||||
@@ -108,12 +115,16 @@ fn full_battery_tests(
|
||||
seed,
|
||||
nonce_authority: optional_authority,
|
||||
memo: None,
|
||||
amount: SpendAmount::Some(1000),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1000.0)),
|
||||
};
|
||||
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(1000, &rpc_client, &nonce_account);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(1000.0), &rpc_client, &nonce_account);
|
||||
|
||||
// Get nonce
|
||||
config_payer.signers.pop();
|
||||
@@ -161,12 +172,16 @@ fn full_battery_tests(
|
||||
nonce_authority: index,
|
||||
memo: None,
|
||||
destination_account_pubkey: payee_pubkey,
|
||||
lamports: 100,
|
||||
lamports: sol_to_lamports(100.0),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(900, &rpc_client, &nonce_account);
|
||||
check_recent_balance(100, &rpc_client, &payee_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(900.0), &rpc_client, &nonce_account);
|
||||
check_balance!(sol_to_lamports(100.0), &rpc_client, &payee_pubkey);
|
||||
|
||||
// Show nonce account
|
||||
config_payer.command = CliCommand::ShowNonceAccount {
|
||||
@@ -208,17 +223,22 @@ fn full_battery_tests(
|
||||
nonce_authority: 1,
|
||||
memo: None,
|
||||
destination_account_pubkey: payee_pubkey,
|
||||
lamports: 100,
|
||||
lamports: sol_to_lamports(100.0),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(800, &rpc_client, &nonce_account);
|
||||
check_recent_balance(200, &rpc_client, &payee_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(800.0), &rpc_client, &nonce_account);
|
||||
check_balance!(sol_to_lamports(200.0), &rpc_client, &payee_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::redundant_closure)]
|
||||
fn test_create_account_with_seed() {
|
||||
const ONE_SIG_FEE: f64 = 0.000005;
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
@@ -241,19 +261,27 @@ fn test_create_account_with_seed() {
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
42,
|
||||
sol_to_lamports(42.0),
|
||||
)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
4242,
|
||||
sol_to_lamports(4242.0),
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4242, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(42.0),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4242.0),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_address);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -263,7 +291,7 @@ fn test_create_account_with_seed() {
|
||||
let seed = authority_pubkey.to_string()[0..32].to_string();
|
||||
let nonce_address =
|
||||
Pubkey::create_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &nonce_address);
|
||||
check_balance!(0, &rpc_client, &nonce_address);
|
||||
|
||||
let mut creator_config = CliConfig::recent_for_tests();
|
||||
creator_config.json_rpc_url = test_validator.rpc_url();
|
||||
@@ -273,13 +301,21 @@ fn test_create_account_with_seed() {
|
||||
seed: Some(seed),
|
||||
nonce_authority: Some(authority_pubkey),
|
||||
memo: None,
|
||||
amount: SpendAmount::Some(241),
|
||||
amount: SpendAmount::Some(sol_to_lamports(241.0)),
|
||||
};
|
||||
process_command(&creator_config).unwrap();
|
||||
check_recent_balance(241, &rpc_client, &nonce_address);
|
||||
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_address);
|
||||
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(42.0),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4001.0 - ONE_SIG_FEE),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_address);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -299,7 +335,7 @@ fn test_create_account_with_seed() {
|
||||
authority_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&authority_config).unwrap_err();
|
||||
authority_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(10.0)),
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -325,7 +361,7 @@ fn test_create_account_with_seed() {
|
||||
submit_config.json_rpc_url = test_validator.rpc_url();
|
||||
submit_config.signers = vec![&authority_presigner];
|
||||
submit_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(10.0)),
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -344,8 +380,16 @@ fn test_create_account_with_seed() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&submit_config).unwrap();
|
||||
check_recent_balance(241, &rpc_client, &nonce_address);
|
||||
check_recent_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(10, &rpc_client, &to_address);
|
||||
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(32.0 - ONE_SIG_FEE),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4001.0 - ONE_SIG_FEE),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(10.0), &rpc_client, &to_address);
|
||||
}
|
||||
|
@@ -1,3 +1,4 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
serde_json::Value,
|
||||
solana_cli::{
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::cli::{process_command, CliCommand, CliConfig},
|
||||
solana_client::rpc_client::RpcClient,
|
||||
solana_faucet::faucet::run_local_faucet,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
native_token::sol_to_lamports,
|
||||
signature::{Keypair, Signer},
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
@@ -22,7 +24,7 @@ fn test_cli_request_airdrop() {
|
||||
bob_config.json_rpc_url = test_validator.rpc_url();
|
||||
bob_config.command = CliCommand::Airdrop {
|
||||
pubkey: None,
|
||||
lamports: 50,
|
||||
lamports: sol_to_lamports(50.0),
|
||||
};
|
||||
let keypair = Keypair::new();
|
||||
bob_config.signers = vec![&keypair];
|
||||
@@ -36,5 +38,5 @@ fn test_cli_request_airdrop() {
|
||||
let balance = rpc_client
|
||||
.get_balance(&bob_config.signers[0].pubkey())
|
||||
.unwrap();
|
||||
assert_eq!(balance, 50);
|
||||
assert_eq!(balance, sol_to_lamports(50.0));
|
||||
}
|
||||
|
@@ -1,10 +1,12 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
#![allow(clippy::redundant_closure)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
stake::StakeAuthorizationIndexed,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -16,6 +18,7 @@ use {
|
||||
solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
commitment_config::CommitmentConfig,
|
||||
fee::FeeStructure,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
@@ -59,7 +62,13 @@ fn test_stake_delegation_force() {
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer,
|
||||
commission: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
@@ -144,7 +153,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
let stake_address = Pubkey::create_with_seed(
|
||||
&config_validator.signers[0].pubkey(),
|
||||
@@ -233,7 +242,7 @@ fn test_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
// Create stake account
|
||||
config_validator.signers.push(&stake_keypair);
|
||||
@@ -327,7 +336,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
@@ -336,7 +345,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
|
||||
// Create stake account
|
||||
config_validator.signers.push(&stake_keypair);
|
||||
@@ -868,14 +877,15 @@ fn test_stake_authorize() {
|
||||
#[test]
|
||||
fn test_stake_authorize_with_fee_payer() {
|
||||
solana_logger::setup();
|
||||
const SIG_FEE: u64 = 42;
|
||||
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
|
||||
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
let test_validator = TestValidator::with_custom_fees(
|
||||
mint_pubkey,
|
||||
SIG_FEE,
|
||||
1,
|
||||
Some(faucet_addr),
|
||||
SocketAddrSpace::Unspecified,
|
||||
);
|
||||
@@ -904,14 +914,14 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config.signers[0].pubkey());
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 5_000_000).unwrap();
|
||||
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &payer_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 5_000_000).unwrap();
|
||||
check_balance!(5_000_000, &rpc_client, &payer_pubkey);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 5_000_000).unwrap();
|
||||
check_balance!(5_000_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -926,7 +936,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
amount: SpendAmount::Some(1_000_000),
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
@@ -937,8 +947,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
|
||||
|
||||
// Assign authority with separate fee payer
|
||||
config.signers = vec![&default_signer, &payer_keypair];
|
||||
@@ -962,10 +971,10 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance has not changed, despite submitting the TX
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
|
||||
// `config_payer` however has paid `config`'s authority sig
|
||||
// and `config_payer`'s fee sig
|
||||
check_recent_balance(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey);
|
||||
check_balance!(5_000_000 - fee_two_sig, &rpc_client, &payer_pubkey);
|
||||
|
||||
// Assign authority with offline fee payer
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
@@ -1013,10 +1022,10 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config`'s balance again has not changed
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
|
||||
// `config_offline` however has paid 1 sig due to being both authority
|
||||
// and fee payer
|
||||
check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
|
||||
check_balance!(5_000_000 - fee_one_sig, &rpc_client, &offline_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1050,12 +1059,17 @@ fn test_stake_split() {
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
|
||||
.unwrap();
|
||||
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&config,
|
||||
&config.signers[0].pubkey(),
|
||||
50_000_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance!(50_000_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
|
||||
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let minimum_stake_balance = rpc_client
|
||||
@@ -1082,7 +1096,7 @@ fn test_stake_split() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
@@ -1102,7 +1116,7 @@ fn test_stake_split() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1116,7 +1130,7 @@ fn test_stake_split() {
|
||||
|
||||
// Nonced offline split
|
||||
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &split_account.pubkey());
|
||||
check_balance!(0, &rpc_client, &split_account.pubkey());
|
||||
config_offline.signers.push(&split_account);
|
||||
config_offline.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey,
|
||||
@@ -1156,12 +1170,12 @@ fn test_stake_split() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
8 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
2 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&split_account.pubkey(),
|
||||
@@ -1199,12 +1213,12 @@ fn test_stake_set_lockup() {
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 5_000_000)
|
||||
.unwrap();
|
||||
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
|
||||
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let minimum_stake_balance = rpc_client
|
||||
@@ -1238,7 +1252,12 @@ fn test_stake_set_lockup() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
@@ -1371,7 +1390,7 @@ fn test_stake_set_lockup() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1467,10 +1486,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 200_000)
|
||||
.unwrap();
|
||||
check_recent_balance(200_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(200_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
@@ -1547,7 +1566,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &stake_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &stake_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1566,7 +1585,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
config_offline.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: true,
|
||||
@@ -1585,7 +1604,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
config.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
@@ -1601,7 +1620,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(42, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1661,7 +1680,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
process_command(&config).unwrap();
|
||||
let seed_address =
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &seed_address);
|
||||
check_balance!(50_000, &rpc_client, &seed_address);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
#![allow(clippy::redundant_closure)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -14,6 +16,8 @@ use {
|
||||
solana_faucet::faucet::run_local_faucet,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
fee::FeeStructure,
|
||||
native_token::sol_to_lamports,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
|
||||
@@ -26,6 +30,8 @@ use {
|
||||
#[test]
|
||||
fn test_transfer() {
|
||||
solana_logger::setup();
|
||||
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
|
||||
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
@@ -49,15 +55,16 @@ fn test_transfer() {
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(5.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
// Plain ole transfer
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -73,12 +80,16 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(10, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(4.0) - fee_one_sig,
|
||||
&rpc_client,
|
||||
&sender_pubkey
|
||||
);
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Plain ole transfer, failure due to InsufficientFundsForSpendAndFee
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(49_989),
|
||||
amount: SpendAmount::Some(sol_to_lamports(4.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -94,8 +105,12 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(10, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(4.0) - fee_one_sig,
|
||||
&rpc_client,
|
||||
&sender_pubkey
|
||||
);
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
|
||||
|
||||
let mut offline = CliConfig::recent_for_tests();
|
||||
offline.json_rpc_url = String::default();
|
||||
@@ -105,13 +120,14 @@ fn test_transfer() {
|
||||
process_command(&offline).unwrap_err();
|
||||
|
||||
let offline_pubkey = offline.signers[0].pubkey();
|
||||
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, 50).unwrap();
|
||||
check_recent_balance(50, &rpc_client, &offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, sol_to_lamports(1.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &offline_pubkey);
|
||||
|
||||
// Offline transfer
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
offline.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.5)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -133,7 +149,7 @@ fn test_transfer() {
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.5)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -149,8 +165,12 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(39, &rpc_client, &offline_pubkey);
|
||||
check_recent_balance(20, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(0.5) - fee_one_sig,
|
||||
&rpc_client,
|
||||
&offline_pubkey
|
||||
);
|
||||
check_balance!(sol_to_lamports(1.5), &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Create nonce account
|
||||
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
@@ -166,7 +186,11 @@ fn test_transfer() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(4.0) - fee_one_sig - fee_two_sig - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -181,7 +205,7 @@ fn test_transfer() {
|
||||
// Nonced transfer
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -200,8 +224,12 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(30, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(3.0) - 2 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
check_balance!(sol_to_lamports(2.5), &rpc_client, &recipient_pubkey);
|
||||
let new_nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
&rpc_client,
|
||||
&nonce_account.pubkey(),
|
||||
@@ -221,7 +249,11 @@ fn test_transfer() {
|
||||
new_authority: offline_pubkey,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(3.0) - 3 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -236,7 +268,7 @@ fn test_transfer() {
|
||||
// Offline, nonced transfer
|
||||
offline.signers = vec![&default_offline_signer];
|
||||
offline.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.4)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -257,7 +289,7 @@ fn test_transfer() {
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.4)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -276,13 +308,18 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(28, &rpc_client, &offline_pubkey);
|
||||
check_recent_balance(40, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(0.1) - 2 * fee_one_sig,
|
||||
&rpc_client,
|
||||
&offline_pubkey
|
||||
);
|
||||
check_balance!(sol_to_lamports(2.9), &rpc_client, &recipient_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_multisession_signing() {
|
||||
solana_logger::setup();
|
||||
let fee = FeeStructure::default().get_max_fee(2, 0);
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
@@ -305,19 +342,27 @@ fn test_transfer_multisession_signing() {
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_from_signer.pubkey(),
|
||||
43,
|
||||
sol_to_lamports(43.0),
|
||||
)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
3,
|
||||
sol_to_lamports(1.0) + 2 * fee,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(43, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_recent_balance(3, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(43.0),
|
||||
&rpc_client,
|
||||
&offline_from_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0) + 2 * fee,
|
||||
&rpc_client,
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -331,7 +376,7 @@ fn test_transfer_multisession_signing() {
|
||||
fee_payer_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&fee_payer_config).unwrap_err();
|
||||
fee_payer_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
@@ -362,7 +407,7 @@ fn test_transfer_multisession_signing() {
|
||||
from_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&from_config).unwrap_err();
|
||||
from_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
@@ -390,7 +435,7 @@ fn test_transfer_multisession_signing() {
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.signers = vec![&fee_payer_presigner, &from_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
@@ -407,14 +452,23 @@ fn test_transfer_multisession_signing() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
check_recent_balance(1, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_recent_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_recent_balance(42, &rpc_client, &to_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0),
|
||||
&rpc_client,
|
||||
&offline_from_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0) + fee,
|
||||
&rpc_client,
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(42.0), &rpc_client, &to_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_all() {
|
||||
solana_logger::setup();
|
||||
let fee = FeeStructure::default().get_max_fee(1, 0);
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
@@ -437,9 +491,9 @@ fn test_transfer_all() {
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 500_000).unwrap();
|
||||
check_balance!(500_000, &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -461,8 +515,8 @@ fn test_transfer_all() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(49_999, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(0, &rpc_client, &sender_pubkey);
|
||||
check_balance!(500_000 - fee, &rpc_client, &recipient_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -491,8 +545,8 @@ fn test_transfer_unfunded_recipient() {
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -521,6 +575,7 @@ fn test_transfer_unfunded_recipient() {
|
||||
#[test]
|
||||
fn test_transfer_with_seed() {
|
||||
solana_logger::setup();
|
||||
let fee = FeeStructure::default().get_max_fee(1, 0);
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
@@ -551,17 +606,19 @@ fn test_transfer_with_seed() {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 1).unwrap();
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, 50_000).unwrap();
|
||||
check_recent_balance(1, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(50_000, &rpc_client, &derived_address);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(1.0))
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, sol_to_lamports(5.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &derived_address);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
// Transfer with seed
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(50_000),
|
||||
amount: SpendAmount::Some(sol_to_lamports(5.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -577,7 +634,7 @@ fn test_transfer_with_seed() {
|
||||
derived_address_program_id: Some(derived_address_program_id),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(50_000, &rpc_client, &recipient_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &derived_address);
|
||||
check_balance!(sol_to_lamports(1.0) - fee, &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey);
|
||||
check_balance!(0, &rpc_client, &derived_address);
|
||||
}
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::check_recent_balance,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
blockhash_query::{self, BlockhashQuery},
|
||||
rpc_client::RpcClient,
|
||||
@@ -12,7 +14,7 @@ use {
|
||||
solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
commitment_config::CommitmentConfig,
|
||||
signature::{Keypair, Signer},
|
||||
signature::{Keypair, NullSigner, Signer},
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
solana_test_validator::TestValidator,
|
||||
@@ -49,7 +51,13 @@ fn test_vote_authorize_and_withdraw() {
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: config.signers[0].pubkey(),
|
||||
commission: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let vote_account = rpc_client
|
||||
@@ -62,12 +70,12 @@ fn test_vote_authorize_and_withdraw() {
|
||||
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
|
||||
.unwrap()
|
||||
.max(1);
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Transfer in some more SOL
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(1_000),
|
||||
amount: SpendAmount::Some(10_000),
|
||||
to: vote_account_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -83,8 +91,8 @@ fn test_vote_authorize_and_withdraw() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let expected_balance = expected_balance + 1_000;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
let expected_balance = expected_balance + 10_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer
|
||||
let first_withdraw_authority = Keypair::new();
|
||||
@@ -93,7 +101,13 @@ fn test_vote_authorize_and_withdraw() {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: first_withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
@@ -112,7 +126,13 @@ fn test_vote_authorize_and_withdraw() {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 1,
|
||||
new_authorized: Some(1),
|
||||
};
|
||||
@@ -126,7 +146,13 @@ fn test_vote_authorize_and_withdraw() {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 1,
|
||||
new_authorized: Some(2),
|
||||
};
|
||||
@@ -144,14 +170,20 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(100),
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let expected_balance = expected_balance - 100;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_recent_balance(100, &rpc_client, &destination_account);
|
||||
let expected_balance = expected_balance - 1_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(1_000, &rpc_client, &destination_account);
|
||||
|
||||
// Re-assign validator identity
|
||||
let new_identity_keypair = Keypair::new();
|
||||
@@ -160,7 +192,13 @@ fn test_vote_authorize_and_withdraw() {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
@@ -172,8 +210,281 @@ fn test_vote_authorize_and_withdraw() {
|
||||
withdraw_authority: 1,
|
||||
destination_account_pubkey: destination_account,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
|
||||
check_recent_balance(expected_balance, &rpc_client, &destination_account);
|
||||
check_balance!(0, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &destination_account);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_offline_vote_authorize_and_withdraw() {
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
let test_validator =
|
||||
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
|
||||
|
||||
let rpc_client =
|
||||
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config_payer = CliConfig::recent_for_tests();
|
||||
config_payer.json_rpc_url = test_validator.rpc_url();
|
||||
config_payer.signers = vec![&default_signer];
|
||||
|
||||
let mut config_offline = CliConfig::recent_for_tests();
|
||||
config_offline.json_rpc_url = String::default();
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
let offline_keypair = Keypair::new();
|
||||
config_offline.signers = vec![&offline_keypair];
|
||||
// Verify that we cannot reach the cluster
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&config_payer,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance!(100_000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&config_offline,
|
||||
&config_offline.signers[0].pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
|
||||
// Create vote account with specific withdrawer
|
||||
let vote_account_keypair = Keypair::new();
|
||||
let vote_account_pubkey = vote_account_keypair.pubkey();
|
||||
config_payer.signers = vec![&default_signer, &vote_account_keypair];
|
||||
config_payer.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: offline_keypair.pubkey(),
|
||||
commission: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let vote_account = rpc_client
|
||||
.get_account(&vote_account_keypair.pubkey())
|
||||
.unwrap();
|
||||
let vote_state: VoteStateVersions = vote_account.state().unwrap();
|
||||
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
|
||||
assert_eq!(authorized_withdrawer, offline_keypair.pubkey());
|
||||
let expected_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
|
||||
.unwrap()
|
||||
.max(1);
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Transfer in some more SOL
|
||||
config_payer.signers = vec![&default_signer];
|
||||
config_payer.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10_000),
|
||||
to: vote_account_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
allow_unfunded_recipient: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
derived_address_seed: None,
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let expected_balance = expected_balance + 10_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer, offline
|
||||
let withdraw_authority = Keypair::new();
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
assert!(sign_only.has_all_signers());
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[0].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&offline_presigner];
|
||||
config_payer.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let vote_account = rpc_client
|
||||
.get_account(&vote_account_keypair.pubkey())
|
||||
.unwrap();
|
||||
let vote_state: VoteStateVersions = vote_account.state().unwrap();
|
||||
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
|
||||
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
|
||||
|
||||
// Withdraw from vote account offline
|
||||
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
let fee_payer_null_signer = NullSigner::new(&default_signer.pubkey());
|
||||
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
|
||||
config_offline.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[1].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&default_signer, &offline_presigner];
|
||||
config_payer.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let expected_balance = expected_balance - 1_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(1_000, &rpc_client, &destination_account);
|
||||
|
||||
// Re-assign validator identity offline
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
let new_identity_keypair = Keypair::new();
|
||||
let new_identity_null_signer = NullSigner::new(&new_identity_keypair.pubkey());
|
||||
config_offline.signers = vec![
|
||||
&fee_payer_null_signer,
|
||||
&withdraw_authority,
|
||||
&new_identity_null_signer,
|
||||
];
|
||||
config_offline.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_offline).unwrap();
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[1].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&default_signer, &offline_presigner, &new_identity_keypair];
|
||||
config_payer.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
|
||||
// Close vote account offline. Must use WithdrawFromVoteAccount and specify amount, since
|
||||
// CloseVoteAccount requires RpcClient
|
||||
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
|
||||
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
|
||||
config_offline.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(expected_balance),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_offline).unwrap();
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sig_response);
|
||||
let offline_presigner = sign_only
|
||||
.presigner_of(&config_offline.signers[1].pubkey())
|
||||
.unwrap();
|
||||
config_payer.signers = vec![&default_signer, &offline_presigner];
|
||||
config_payer.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(expected_balance),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
check_balance!(0, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &destination_account);
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client-test"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
description = "Solana RPC Test"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -8,26 +8,29 @@ license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-client-test"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
serde_json = "1.0.72"
|
||||
serial_test = "0.5.1"
|
||||
solana-client = { path = "../client", version = "=1.9.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-client = { path = "../client", version = "=1.9.13" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.13" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.13" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.13" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.13" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.13" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.13" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.9.13" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
systemstat = "0.1.10"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -4,11 +4,16 @@ use {
|
||||
solana_client::{
|
||||
pubsub_client::PubsubClient,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
|
||||
RpcProgramAccountsConfig,
|
||||
},
|
||||
rpc_response::SlotInfo,
|
||||
},
|
||||
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
|
||||
solana_rpc::{
|
||||
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
|
||||
rpc::create_test_transactions_and_populate_blockstore,
|
||||
rpc_pubsub_service::{PubSubConfig, PubSubService},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
},
|
||||
@@ -20,7 +25,7 @@ use {
|
||||
},
|
||||
solana_sdk::{
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentConfig,
|
||||
commitment_config::{CommitmentConfig, CommitmentLevel},
|
||||
native_token::sol_to_lamports,
|
||||
pubkey::Pubkey,
|
||||
rpc_port,
|
||||
@@ -29,11 +34,14 @@ use {
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
solana_test_validator::TestValidator,
|
||||
solana_transaction_status::{
|
||||
ConfirmedBlockWithOptionalMetadata, TransactionDetails, UiTransactionEncoding,
|
||||
},
|
||||
std::{
|
||||
collections::HashSet,
|
||||
net::{IpAddr, SocketAddr},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
@@ -119,9 +127,10 @@ fn test_account_subscription() {
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bob = Keypair::new();
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -194,6 +203,105 @@ fn test_account_subscription() {
|
||||
assert_eq!(errors, [].to_vec());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_block_subscription() {
|
||||
// setup BankForks
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair: alice,
|
||||
..
|
||||
} = create_genesis_config(10_000);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
|
||||
// setup Blockstore
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
let blockstore = Arc::new(blockstore);
|
||||
|
||||
// populate ledger with test txs
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let keypair1 = Keypair::new();
|
||||
let keypair2 = Keypair::new();
|
||||
let keypair3 = Keypair::new();
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
|
||||
bank.transfer(rent_exempt_amount, &alice, &keypair2.pubkey())
|
||||
.unwrap();
|
||||
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
|
||||
vec![&alice, &keypair1, &keypair2, &keypair3],
|
||||
0,
|
||||
bank,
|
||||
blockstore.clone(),
|
||||
max_complete_transaction_status_slot,
|
||||
);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
// setup RpcSubscriptions && PubSubService
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
blockstore.clone(),
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
));
|
||||
let pubsub_addr = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
|
||||
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
|
||||
);
|
||||
let pub_cfg = PubSubConfig {
|
||||
enable_block_subscription: true,
|
||||
..PubSubConfig::default()
|
||||
};
|
||||
let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr);
|
||||
|
||||
std::thread::sleep(Duration::from_millis(400));
|
||||
|
||||
// setup PubsubClient
|
||||
let (mut client, receiver) = PubsubClient::block_subscribe(
|
||||
&format!("ws://0.0.0.0:{}/", pubsub_addr.port()),
|
||||
RpcBlockSubscribeFilter::All,
|
||||
Some(RpcBlockSubscribeConfig {
|
||||
commitment: Some(CommitmentConfig {
|
||||
commitment: CommitmentLevel::Confirmed,
|
||||
}),
|
||||
encoding: Some(UiTransactionEncoding::Json),
|
||||
transaction_details: Some(TransactionDetails::Signatures),
|
||||
show_rewards: None,
|
||||
}),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// trigger Gossip notification
|
||||
let slot = bank_forks.read().unwrap().highest_slot();
|
||||
subscriptions.notify_gossip_subscribers(slot);
|
||||
let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
|
||||
match maybe_actual {
|
||||
Ok(actual) => {
|
||||
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
|
||||
let block = ConfirmedBlockWithOptionalMetadata::from(complete_block).configure(
|
||||
UiTransactionEncoding::Json,
|
||||
TransactionDetails::Signatures,
|
||||
false,
|
||||
);
|
||||
assert_eq!(actual.value.slot, slot);
|
||||
assert!(block.eq(&actual.value.block.unwrap()));
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("unexpected websocket receive timeout");
|
||||
assert_eq!(Some(e), None);
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
trigger.cancel();
|
||||
client.shutdown().unwrap();
|
||||
pubsub_service.close().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_program_subscription() {
|
||||
@@ -215,9 +323,10 @@ fn test_program_subscription() {
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bob = Keypair::new();
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -300,9 +409,10 @@ fn test_root_subscription() {
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -350,8 +460,10 @@ fn test_slot_subscription() {
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let optimistically_confirmed_bank =
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
optimistically_confirmed_bank,
|
||||
|
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "1.9.0"
+version = "1.9.13"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -23,15 +23,15 @@ semver = "1.0.4"
 serde = "1.0.130"
 serde_derive = "1.0.103"
 serde_json = "1.0.72"
-solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
-solana-faucet = { path = "../faucet", version = "=1.9.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
-solana-measure = { path = "../measure", version = "=1.9.0" }
-solana-sdk = { path = "../sdk", version = "=1.9.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
-solana-version = { path = "../version", version = "=1.9.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
+solana-faucet = { path = "../faucet", version = "=1.9.13" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
+solana-measure = { path = "../measure", version = "=1.9.13" }
+solana-sdk = { path = "../sdk", version = "=1.9.13" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
+solana-version = { path = "../version", version = "=1.9.13" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@@ -40,7 +40,7 @@ url = "2.2.2"
 [dev-dependencies]
 assert_matches = "1.5.0"
 jsonrpc-http-server = "18.0.0"
-solana-logger = { path = "../logger", version = "=1.9.0" }
+solana-logger = { path = "../logger", version = "=1.9.13" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -37,14 +37,14 @@ impl HttpSender {
     ///
     /// The URL is an HTTP URL, usually for port 8899, as in
     /// "http://localhost:8899". The sender has a default timeout of 30 seconds.
-    pub fn new(url: String) -> Self {
+    pub fn new<U: ToString>(url: U) -> Self {
         Self::new_with_timeout(url, Duration::from_secs(30))
     }

     /// Create an HTTP RPC sender.
     ///
     /// The URL is an HTTP URL, usually for port 8899.
-    pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
+    pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
         // `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the
         // request to a different tokio thread to avoid this
         let client = Arc::new(
@@ -58,7 +58,7 @@ impl HttpSender {

         Self {
             client,
-            url,
+            url: url.to_string(),
             request_id: AtomicU64::new(0),
             stats: RwLock::new(RpcTransportStats::default()),
         }
@@ -75,13 +75,13 @@ pub struct MockSender {
 /// from [`RpcRequest`] to a JSON [`Value`] response, Any entries in this map
 /// override the default behavior for the given request.
 impl MockSender {
-    pub fn new(url: String) -> Self {
+    pub fn new<U: ToString>(url: U) -> Self {
         Self::new_with_mocks(url, Mocks::default())
     }

-    pub fn new_with_mocks(url: String, mocks: Mocks) -> Self {
+    pub fn new_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
         Self {
-            url,
+            url: url.to_string(),
             mocks: RwLock::new(mocks),
         }
     }
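Since these senders back the `RpcClient` constructors, which pick up the same `<U: ToString>` bound further down in this diff, the practical effect is that callers no longer need to hand over an owned `String` for the URL. A minimal sketch (the local URL is a placeholder):

use solana_client::rpc_client::RpcClient;

fn main() {
    // A &str literal now works directly; an owned String is still accepted.
    let _client = RpcClient::new("http://localhost:8899");
    let _same = RpcClient::new(String::from("http://localhost:8899"));
}
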
|
||||
|
@@ -1,12 +1,13 @@
|
||||
use {
|
||||
crate::{
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig,
|
||||
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
|
||||
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
|
||||
RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
|
||||
RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_response::{
|
||||
Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult,
|
||||
SlotInfo, SlotUpdate,
|
||||
Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
|
||||
RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
|
||||
},
|
||||
},
|
||||
log::*,
|
||||
@@ -173,6 +174,12 @@ pub type SignatureSubscription = (
|
||||
Receiver<RpcResponse<RpcSignatureResult>>,
|
||||
);
|
||||
|
||||
pub type PubsubBlockClientSubscription = PubsubClientSubscription<RpcResponse<RpcBlockUpdate>>;
|
||||
pub type BlockSubscription = (
|
||||
PubsubBlockClientSubscription,
|
||||
Receiver<RpcResponse<RpcBlockUpdate>>,
|
||||
);
|
||||
|
||||
pub type PubsubProgramClientSubscription = PubsubClientSubscription<RpcResponse<RpcKeyedAccount>>;
|
||||
pub type ProgramSubscription = (
|
||||
PubsubProgramClientSubscription,
|
||||
@@ -185,6 +192,9 @@ pub type AccountSubscription = (
|
||||
Receiver<RpcResponse<UiAccount>>,
|
||||
);
|
||||
|
||||
pub type PubsubVoteClientSubscription = PubsubClientSubscription<RpcVote>;
|
||||
pub type VoteSubscription = (PubsubVoteClientSubscription, Receiver<RpcVote>);
|
||||
|
||||
pub type PubsubRootClientSubscription = PubsubClientSubscription<Slot>;
|
||||
pub type RootSubscription = (PubsubRootClientSubscription, Receiver<Slot>);
|
||||
|
||||
@@ -266,6 +276,45 @@ impl PubsubClient {
         Ok((result, receiver))
     }

+    pub fn block_subscribe(
+        url: &str,
+        filter: RpcBlockSubscribeFilter,
+        config: Option<RpcBlockSubscribeConfig>,
+    ) -> Result<BlockSubscription, PubsubClientError> {
+        let url = Url::parse(url)?;
+        let socket = connect_with_retry(url)?;
+        let (sender, receiver) = channel();
+
+        let socket = Arc::new(RwLock::new(socket));
+        let socket_clone = socket.clone();
+        let exit = Arc::new(AtomicBool::new(false));
+        let exit_clone = exit.clone();
+        let body = json!({
+            "jsonrpc":"2.0",
+            "id":1,
+            "method":"blockSubscribe",
+            "params":[filter, config]
+        })
+        .to_string();
+
+        let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?;
+
+        let t_cleanup = std::thread::spawn(move || {
+            Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
+        });
+
+        let result = PubsubClientSubscription {
+            message_type: PhantomData,
+            operation: "block",
+            socket,
+            subscription_id,
+            t_cleanup: Some(t_cleanup),
+            exit,
+        };
+
+        Ok((result, receiver))
+    }
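A rough usage sketch of the new subscription, mirroring the `test_block_subscription` test earlier in this diff. The websocket URL and port are placeholders, and a real caller would replace the unwraps with error handling:

use solana_client::{
    pubsub_client::PubsubClient,
    rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter},
};

fn main() {
    // Subscribe to every block the node notifies about, with default options.
    let (mut subscription, receiver) = PubsubClient::block_subscribe(
        "ws://127.0.0.1:8900/",
        RpcBlockSubscribeFilter::All,
        Some(RpcBlockSubscribeConfig::default()),
    )
    .unwrap();

    // Each message is an RpcResponse<RpcBlockUpdate>: the slot plus, when
    // available, the block contents.
    if let Ok(update) = receiver.recv() {
        println!("slot {}: block present: {}", update.value.slot, update.value.block.is_some());
    }

    subscription.shutdown().unwrap();
}
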
|
||||
|
||||
pub fn logs_subscribe(
|
||||
url: &str,
|
||||
filter: RpcTransactionLogsFilter,
|
||||
@@ -346,6 +395,39 @@ impl PubsubClient {
|
||||
Ok((result, receiver))
|
||||
}
|
||||
|
||||
pub fn vote_subscribe(url: &str) -> Result<VoteSubscription, PubsubClientError> {
|
||||
let url = Url::parse(url)?;
|
||||
let socket = connect_with_retry(url)?;
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let socket = Arc::new(RwLock::new(socket));
|
||||
let socket_clone = socket.clone();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let exit_clone = exit.clone();
|
||||
let body = json!({
|
||||
"jsonrpc":"2.0",
|
||||
"id":1,
|
||||
"method":"voteSubscribe",
|
||||
})
|
||||
.to_string();
|
||||
let subscription_id = PubsubVoteClientSubscription::send_subscribe(&socket_clone, body)?;
|
||||
|
||||
let t_cleanup = std::thread::spawn(move || {
|
||||
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
|
||||
});
|
||||
|
||||
let result = PubsubClientSubscription {
|
||||
message_type: PhantomData,
|
||||
operation: "vote",
|
||||
socket,
|
||||
subscription_id,
|
||||
t_cleanup: Some(t_cleanup),
|
||||
exit,
|
||||
};
|
||||
|
||||
Ok((result, receiver))
|
||||
}
|
||||
|
||||
pub fn root_subscribe(url: &str) -> Result<RootSubscription, PubsubClientError> {
|
||||
let url = Url::parse(url)?;
|
||||
let socket = connect_with_retry(url)?;
|
||||
|
@@ -163,7 +163,7 @@ impl RpcClient {
|
||||
/// `RpcSender`. Most applications should use one of the other constructors,
|
||||
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
|
||||
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
|
||||
fn new_sender<T: RpcSender + Send + Sync + 'static>(
|
||||
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(
|
||||
sender: T,
|
||||
config: RpcClientConfig,
|
||||
) -> Self {
|
||||
@@ -191,7 +191,7 @@ impl RpcClient {
|
||||
/// let url = "http://localhost:8899".to_string();
|
||||
/// let client = RpcClient::new(url);
|
||||
/// ```
|
||||
pub fn new(url: String) -> Self {
|
||||
pub fn new<U: ToString>(url: U) -> Self {
|
||||
Self::new_with_commitment(url, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
@@ -214,7 +214,7 @@ impl RpcClient {
|
||||
/// let commitment_config = CommitmentConfig::processed();
|
||||
/// let client = RpcClient::new_with_commitment(url, commitment_config);
|
||||
/// ```
|
||||
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
|
||||
pub fn new_with_commitment<U: ToString>(url: U, commitment_config: CommitmentConfig) -> Self {
|
||||
Self::new_sender(
|
||||
HttpSender::new(url),
|
||||
RpcClientConfig::with_commitment(commitment_config),
|
||||
@@ -240,7 +240,7 @@ impl RpcClient {
|
||||
/// let timeout = Duration::from_secs(1);
|
||||
/// let client = RpcClient::new_with_timeout(url, timeout);
|
||||
/// ```
|
||||
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
|
||||
pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
|
||||
Self::new_sender(
|
||||
HttpSender::new_with_timeout(url, timeout),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
@@ -269,8 +269,8 @@ impl RpcClient {
|
||||
/// commitment_config,
|
||||
/// );
|
||||
/// ```
|
||||
pub fn new_with_timeout_and_commitment(
|
||||
url: String,
|
||||
pub fn new_with_timeout_and_commitment<U: ToString>(
|
||||
url: U,
|
||||
timeout: Duration,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> Self {
|
||||
@@ -312,8 +312,8 @@ impl RpcClient {
|
||||
/// confirm_transaction_initial_timeout,
|
||||
/// );
|
||||
/// ```
|
||||
pub fn new_with_timeouts_and_commitment(
|
||||
url: String,
|
||||
pub fn new_with_timeouts_and_commitment<U: ToString>(
|
||||
url: U,
|
||||
timeout: Duration,
|
||||
commitment_config: CommitmentConfig,
|
||||
confirm_transaction_initial_timeout: Duration,
|
||||
@@ -347,7 +347,7 @@ impl RpcClient {
|
||||
/// let url = "fails".to_string();
|
||||
/// let successful_client = RpcClient::new_mock(url);
|
||||
/// ```
|
||||
pub fn new_mock(url: String) -> Self {
|
||||
pub fn new_mock<U: ToString>(url: U) -> Self {
|
||||
Self::new_sender(
|
||||
MockSender::new(url),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
@@ -381,7 +381,7 @@ impl RpcClient {
|
||||
/// let url = "succeeds".to_string();
|
||||
/// let client = RpcClient::new_mock_with_mocks(url, mocks);
|
||||
/// ```
|
||||
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
|
||||
pub fn new_mock_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
|
||||
Self::new_sender(
|
||||
MockSender::new_with_mocks(url, mocks),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
@@ -1329,7 +1329,7 @@ impl RpcClient {
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn get_highest_snapshot_slot(&self) -> ClientResult<RpcSnapshotSlotInfo> {
|
||||
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
|
||||
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
|
||||
#[allow(deprecated)]
|
||||
self.get_snapshot_slot().map(|full| RpcSnapshotSlotInfo {
|
||||
full,
|
||||
@@ -4747,7 +4747,7 @@ impl RpcClient {
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<(Hash, u64)> {
|
||||
let (blockhash, last_valid_block_height) =
|
||||
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
|
||||
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
|
||||
let Fees {
|
||||
blockhash,
|
||||
last_valid_block_height,
|
||||
@@ -4781,7 +4781,7 @@ impl RpcClient {
|
||||
blockhash: &Hash,
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<bool> {
|
||||
let result = if self.get_node_version()? < semver::Version::new(1, 8, 0) {
|
||||
let result = if self.get_node_version()? < semver::Version::new(1, 9, 0) {
|
||||
self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)?
|
||||
.value
|
||||
.is_some()
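The bumped gates above all follow the same compatibility pattern: query the node's version once and fall back to the deprecated RPC when the node predates the call that replaced it. Schematically, a sketch of the pattern rather than the library code itself:

use semver::Version;

// `node_version` stands in for the result of get_node_version(); the two
// closures stand in for the new and legacy RPC calls being gated.
fn call_with_fallback<T>(
    node_version: Version,
    new_rpc: impl FnOnce() -> T,
    legacy_rpc: impl FnOnce() -> T,
) -> T {
    if node_version < Version::new(1, 9, 0) {
        legacy_rpc() // e.g. the deprecated snapshot or fee-based blockhash calls
    } else {
        new_rpc() // e.g. getHighestSnapshotSlot, getLatestBlockhash
    }
}

fn main() {
    let v = Version::new(1, 8, 5);
    assert_eq!(call_with_fallback(v, || "new", || "legacy"), "legacy");
}
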
|
||||
|
@@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig {
     pub enable_received_notification: Option<bool>,
 }

+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum RpcBlockSubscribeFilter {
+    All,
+    MentionsAccountOrProgram(String),
+}
+
+#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcBlockSubscribeConfig {
+    #[serde(flatten)]
+    pub commitment: Option<CommitmentConfig>,
+    pub encoding: Option<UiTransactionEncoding>,
+    pub transaction_details: Option<TransactionDetails>,
+    pub show_rewards: Option<bool>,
+}
+
 #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcSignaturesForAddressConfig {
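For reference, a subscription restricted to blocks that mention one address, at confirmed commitment, could assemble the new `RpcBlockSubscribeFilter` and `RpcBlockSubscribeConfig` like this (the address is just the vote program id, used as a stand-in):

use solana_client::rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_transaction_status::{TransactionDetails, UiTransactionEncoding};

fn main() {
    let filter = RpcBlockSubscribeFilter::MentionsAccountOrProgram(
        "Vote111111111111111111111111111111111111111".to_string(),
    );
    let config = RpcBlockSubscribeConfig {
        commitment: Some(CommitmentConfig::confirmed()),
        encoding: Some(UiTransactionEncoding::Json),
        transaction_details: Some(TransactionDetails::Signatures),
        show_rewards: Some(false),
    };
    // These are the filter/config arguments PubsubClient::block_subscribe takes.
    drop((filter, config));
}
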
|
||||
|
@@ -9,9 +9,10 @@ use {
         transaction::{Result, TransactionError},
     },
     solana_transaction_status::{
-        ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus,
+        ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
     },
     std::{collections::HashMap, fmt, net::SocketAddr},
+    thiserror::Error,
 };

 pub type RpcResult<T> = client_error::Result<Response<T>>;
@@ -289,6 +290,8 @@ pub struct RpcIdentity {
 #[derive(Serialize, Deserialize, Clone, Debug)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcVote {
+    /// Vote account address, as base-58 encoded string
+    pub vote_pubkey: String,
     pub slots: Vec<Slot>,
     pub hash: String,
     pub timestamp: Option<UnixTimestamp>,
@@ -424,6 +427,20 @@ pub struct RpcInflationReward {
     pub commission: Option<u8>, // Vote account commission when the reward was credited
 }

+#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)]
+pub enum RpcBlockUpdateError {
+    #[error("block store error")]
+    BlockStoreError,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcBlockUpdate {
+    pub slot: Slot,
+    pub block: Option<UiConfirmedBlock>,
+    pub err: Option<RpcBlockUpdateError>,
+}
+
 impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
     fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
         let ConfirmedTransactionStatusWithSignature {
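Given the camelCase renaming, one notification value built from `RpcBlockUpdate` would serialize roughly like the following. This is a hedged sketch; the `block` payload is a full `UiConfirmedBlock` and is heavily abbreviated here:

use serde_json::json;

fn main() {
    // Approximate shape of a single block-subscription notification value.
    let example = json!({
        "slot": 112233,
        "block": {
            "blockhash": "<base-58 hash>",
            "previousBlockhash": "<base-58 hash>",
            "parentSlot": 112232,
            "signatures": ["<base-58 signature>"]
        },
        "err": null
    });
    println!("{example}");
}
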
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.9.0"
|
||||
version = "1.9.13"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-core"
|
||||
readme = "../README.md"
|
||||
@@ -26,7 +26,7 @@ fs_extra = "1.2.0"
|
||||
histogram = "0.6.9"
|
||||
itertools = "0.10.1"
|
||||
log = "0.4.14"
|
||||
lru = "0.7.0"
|
||||
lru = "0.7.1"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
raptorq = "1.6.4"
|
||||
@@ -34,30 +34,32 @@ rayon = "1.5.1"
|
||||
retain_mut = "0.1.5"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.0" }
|
||||
solana-client = { path = "../client", version = "=1.9.0" }
|
||||
solana-entry = { path = "../entry", version = "=1.9.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.0" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.0" }
|
||||
solana-poh = { path = "../poh", version = "=1.9.0" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.9.0" }
|
||||
solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.0" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" }
|
||||
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
|
||||
solana-bloom = { path = "../bloom", version = "=1.9.13" }
|
||||
solana-client = { path = "../client", version = "=1.9.13" }
|
||||
solana-entry = { path = "../entry", version = "=1.9.13" }
|
||||
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.9.13" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.13" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.13" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.13" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.13" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.13" }
|
||||
solana-poh = { path = "../poh", version = "=1.9.13" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.9.13" }
|
||||
solana-replica-lib = { path = "../replica-lib", version = "=1.9.13" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.13" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.13" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.13" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.13" }
|
||||
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.13" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
|
||||
tempfile = "3.2.0"
|
||||
thiserror = "1.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.13" }
|
||||
sys-info = "0.9.1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
trees = "0.4.2"
|
||||
@@ -71,9 +73,9 @@ matches = "0.1.9"
|
||||
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde_json = "1.0.72"
|
||||
serial_test = "0.5.1"
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.9.0" }
|
||||
solana-version = { path = "../version", version = "=1.9.0" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.9.13" }
|
||||
solana-version = { path = "../version", version = "=1.9.13" }
|
||||
static_assertions = "1.1.0"
|
||||
systemstat = "0.1.10"
|
||||
|
||||
|
@@ -10,6 +10,7 @@ use {
|
||||
rayon::prelude::*,
|
||||
solana_core::{
|
||||
banking_stage::{BankingStage, BankingStageStats},
|
||||
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
|
||||
qos_service::QosService,
|
||||
},
|
||||
solana_entry::entry::{next_hash, Entry},
|
||||
@@ -20,7 +21,7 @@ use {
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
},
|
||||
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
|
||||
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
|
||||
solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry},
|
||||
solana_runtime::{bank::Bank, cost_model::CostModel},
|
||||
solana_sdk::{
|
||||
@@ -70,18 +71,18 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let (exit, poh_recorder, poh_service, _signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
create_test_recorder(&bank, &blockstore, None, None);
|
||||
|
||||
let recorder = poh_recorder.lock().unwrap().recorder();
|
||||
|
||||
let tx = test_tx();
|
||||
let len = 4096;
|
||||
let chunk_size = 1024;
|
||||
let batches = to_packets_chunked(&vec![tx; len], chunk_size);
|
||||
let mut packets = VecDeque::new();
|
||||
let batches = to_packet_batches(&vec![tx; len], chunk_size);
|
||||
let mut packet_batches = VecDeque::new();
|
||||
for batch in batches {
|
||||
let batch_len = batch.packets.len();
|
||||
packets.push_back((batch, vec![0usize; batch_len], false));
|
||||
packet_batches.push_back((batch, vec![0usize; batch_len], false));
|
||||
}
|
||||
let (s, _r) = unbounded();
|
||||
// This tests the performance of buffering packets.
|
||||
@@ -91,13 +92,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
|
||||
&my_pubkey,
|
||||
std::u128::MAX,
|
||||
&poh_recorder,
|
||||
&mut packets,
|
||||
&mut packet_batches,
|
||||
None,
|
||||
&s,
|
||||
None::<Box<dyn Fn()>>,
|
||||
&BankingStageStats::default(),
|
||||
&recorder,
|
||||
&Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
|
||||
&mut LeaderSlotMetricsTracker::new(0),
|
||||
);
|
||||
});
|
||||
|
||||
@@ -174,7 +176,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
|
||||
// set cost tracker limits to MAX so it will not filter out TXs
|
||||
bank.write_cost_tracker()
|
||||
.unwrap()
|
||||
.set_limits(std::u64::MAX, std::u64::MAX);
|
||||
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
|
||||
|
||||
debug!("threads: {} txs: {}", num_threads, txes);
|
||||
|
||||
@@ -206,14 +208,14 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
|
||||
assert!(r.is_ok(), "sanity parallel execution");
|
||||
}
|
||||
bank.clear_signatures();
|
||||
let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
|
||||
let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
create_test_recorder(&bank, &blockstore, None, None);
|
||||
let cluster_info = ClusterInfo::new(
|
||||
Node::new_localhost().info,
|
||||
Arc::new(Keypair::new()),
|
||||
|
@@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
let slot = 0;
|
||||
let parent = 0;
|
||||
let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
|
||||
let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
|
||||
let (mut data_shreds, _) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
|
||||
let num_packets = data_shreds.len();
|
||||
|
||||
|
@@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
|
||||
);
|
||||
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
|
||||
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
|
||||
let data_shreds = shredder
|
||||
.entries_to_data_shreds(
|
||||
&Keypair::new(),
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // fec_set_offset
|
||||
&mut ProcessShredsStats::default(),
|
||||
)
|
||||
.0;
|
||||
let data_shreds = shredder.entries_to_data_shreds(
|
||||
&Keypair::new(),
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // fec_set_offset
|
||||
&mut ProcessShredsStats::default(),
|
||||
);
|
||||
assert!(data_shreds.len() >= num_shreds);
|
||||
data_shreds
|
||||
}
|
||||
@@ -76,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
|
||||
let entries = create_ticks(num_ticks, 0, Hash::default());
|
||||
bencher.iter(|| {
|
||||
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
|
||||
shredder.entries_to_shreds(&kp, &entries, true, 0);
|
||||
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
|
||||
})
|
||||
}
|
||||
|
||||
@@ -95,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
|
||||
// 1Mb
|
||||
bencher.iter(|| {
|
||||
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
|
||||
shredder.entries_to_shreds(&kp, &entries, true, 0);
|
||||
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
|
||||
})
|
||||
}
|
||||
|
||||
@@ -108,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
|
||||
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
|
||||
let entries = create_ticks(num_ticks, 0, Hash::default());
|
||||
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
|
||||
let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0;
|
||||
let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
|
||||
bencher.iter(|| {
|
||||
let raw = &mut Shredder::deshred(&data_shreds).unwrap();
|
||||
assert_ne!(raw.len(), 0);
|
||||
@@ -135,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
|
||||
Shredder::generate_coding_shreds(
|
||||
&data_shreds[..symbol_count],
|
||||
true, // is_last_in_slot
|
||||
0, // next_code_index
|
||||
)
|
||||
.len();
|
||||
})
|
||||
@@ -147,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
|
||||
let coding_shreds = Shredder::generate_coding_shreds(
|
||||
&data_shreds[..symbol_count],
|
||||
true, // is_last_in_slot
|
||||
0, // next_code_index
|
||||
);
|
||||
bencher.iter(|| {
|
||||
Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();
|
||||
|
@@ -1,4 +1,5 @@
|
||||
#![feature(test)]
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
|
||||
extern crate solana_core;
|
||||
extern crate test;
|
||||
@@ -8,7 +9,7 @@ use {
|
||||
log::*,
|
||||
rand::{thread_rng, Rng},
|
||||
solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
|
||||
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
|
||||
solana_perf::{packet::to_packet_batches, packet::PacketBatch, test_tx::test_tx},
|
||||
solana_sdk::{
|
||||
hash::Hash,
|
||||
signature::{Keypair, Signer},
|
||||
@@ -22,22 +23,21 @@ use {
|
||||
test::Bencher,
|
||||
};
|
||||
|
||||
#[bench]
|
||||
fn bench_packet_discard(bencher: &mut Bencher) {
|
||||
fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
|
||||
solana_logger::setup();
|
||||
let len = 30 * 1000;
|
||||
let chunk_size = 1024;
|
||||
let tx = test_tx();
|
||||
let mut batches = to_packets_chunked(&vec![tx; len], chunk_size);
|
||||
let mut batches = to_packet_batches(&vec![tx; len], chunk_size);
|
||||
|
||||
let mut total = 0;
|
||||
|
||||
let ips: Vec<_> = (0..10_000)
|
||||
let ips: Vec<_> = (0..num_ips)
|
||||
.into_iter()
|
||||
.map(|_| {
|
||||
let mut addr = [0u16; 8];
|
||||
thread_rng().fill(&mut addr);
|
||||
addr
|
||||
std::net::IpAddr::from(addr)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -52,29 +52,72 @@ fn bench_packet_discard(bencher: &mut Bencher) {
|
||||
|
||||
bencher.iter(move || {
|
||||
SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
|
||||
let mut num_packets = 0;
|
||||
for batch in batches.iter_mut() {
|
||||
for p in batch.packets.iter_mut() {
|
||||
p.meta.discard = false;
|
||||
if !p.meta.discard() {
|
||||
num_packets += 1;
|
||||
}
|
||||
p.meta.set_discard(false);
|
||||
}
|
||||
}
|
||||
assert_eq!(num_packets, 10_000);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_sigverify_stage(bencher: &mut Bencher) {
|
||||
solana_logger::setup();
|
||||
let (packet_s, packet_r) = channel();
|
||||
let (verified_s, verified_r) = unbounded();
|
||||
let verifier = TransactionSigVerifier::default();
|
||||
let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
|
||||
fn bench_packet_discard_many_senders(bencher: &mut Bencher) {
|
||||
run_bench_packet_discard(1000, bencher);
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
#[bench]
|
||||
fn bench_packet_discard_single_sender(bencher: &mut Bencher) {
|
||||
run_bench_packet_discard(1, bencher);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
|
||||
const SIZE: usize = 30 * 1000;
|
||||
const CHUNK_SIZE: usize = 1024;
|
||||
fn new_rand_addr<R: Rng>(rng: &mut R) -> std::net::IpAddr {
|
||||
let mut addr = [0u16; 8];
|
||||
rng.fill(&mut addr);
|
||||
std::net::IpAddr::from(addr)
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let mut batches = to_packet_batches(&vec![test_tx(); SIZE], CHUNK_SIZE);
|
||||
let spam_addr = new_rand_addr(&mut rng);
|
||||
for batch in batches.iter_mut() {
|
||||
for packet in batch.packets.iter_mut() {
|
||||
// One spam address, ~1000 unique addresses.
|
||||
packet.meta.addr = if rng.gen_ratio(1, 30) {
|
||||
new_rand_addr(&mut rng)
|
||||
} else {
|
||||
spam_addr
|
||||
}
|
||||
}
|
||||
}
|
||||
bencher.iter(move || {
|
||||
SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
|
||||
let mut num_packets = 0;
|
||||
for batch in batches.iter_mut() {
|
||||
for packet in batch.packets.iter_mut() {
|
||||
if !packet.meta.discard() {
|
||||
num_packets += 1;
|
||||
}
|
||||
packet.meta.set_discard(false);
|
||||
}
|
||||
}
|
||||
assert_eq!(num_packets, 10_000);
|
||||
});
|
||||
}
|
||||
|
||||
fn gen_batches(use_same_tx: bool) -> Vec<PacketBatch> {
|
||||
let len = 4096;
|
||||
let use_same_tx = true;
|
||||
let chunk_size = 1024;
|
||||
let mut batches = if use_same_tx {
|
||||
if use_same_tx {
|
||||
let tx = test_tx();
|
||||
to_packets_chunked(&vec![tx; len], chunk_size)
|
||||
to_packet_batches(&vec![tx; len], chunk_size)
|
||||
} else {
|
||||
let from_keypair = Keypair::new();
|
||||
let to_keypair = Keypair::new();
|
||||
@@ -89,15 +132,29 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
to_packets_chunked(&txs, chunk_size)
|
||||
};
|
||||
to_packet_batches(&txs, chunk_size)
|
||||
}
|
||||
}
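For orientation, the renamed helper used throughout these benches simply chunks a slice of transactions into `PacketBatch`es of at most the requested size, so the benchmarks keep the same batching as before. A small sketch:

use solana_perf::{packet::to_packet_batches, test_tx::test_tx};

fn main() {
    // 4096 transactions in chunks of 1024 -> 4 batches of 1024 packets each.
    let batches = to_packet_batches(&vec![test_tx(); 4096], 1024);
    assert_eq!(batches.len(), 4);
    assert!(batches.iter().all(|batch| batch.packets.len() == 1024));
}
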
|
||||
|
||||
trace!(
|
||||
"starting... generation took: {} ms batches: {}",
|
||||
duration_as_ms(&now.elapsed()),
|
||||
batches.len()
|
||||
);
|
||||
#[bench]
|
||||
fn bench_sigverify_stage(bencher: &mut Bencher) {
|
||||
solana_logger::setup();
|
||||
trace!("start");
|
||||
let (packet_s, packet_r) = channel();
|
||||
let (verified_s, verified_r) = unbounded();
|
||||
let verifier = TransactionSigVerifier::default();
|
||||
let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
|
||||
|
||||
let use_same_tx = true;
|
||||
bencher.iter(move || {
|
||||
let now = Instant::now();
|
||||
let mut batches = gen_batches(use_same_tx);
|
||||
trace!(
|
||||
"starting... generation took: {} ms batches: {}",
|
||||
duration_as_ms(&now.elapsed()),
|
||||
batches.len()
|
||||
);
|
||||
|
||||
let mut sent_len = 0;
|
||||
for _ in 0..batches.len() {
|
||||
if let Some(batch) = batches.pop() {
|
||||
@@ -113,7 +170,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
|
||||
received += v.packets.len();
|
||||
batches.push(v);
|
||||
}
|
||||
if received >= sent_len {
|
||||
if use_same_tx || received >= sent_len {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@@ -14,7 +14,7 @@ use {
|
||||
solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE},
|
||||
solana_measure::measure::Measure,
|
||||
solana_perf::{
|
||||
packet::{limited_deserialize, Packet, Packets},
|
||||
packet::{limited_deserialize, Packet, PacketBatch},
|
||||
recycler::Recycler,
|
||||
},
|
||||
solana_runtime::bank::Bank,
|
||||
@@ -23,7 +23,7 @@ use {
|
||||
pubkey::Pubkey,
|
||||
timing::timestamp,
|
||||
},
|
||||
solana_streamer::streamer::{self, PacketReceiver},
|
||||
solana_streamer::streamer::{self, PacketBatchReceiver},
|
||||
std::{
|
||||
collections::HashSet,
|
||||
net::UdpSocket,
|
||||
@@ -197,7 +197,7 @@ impl AncestorHashesService {
|
||||
/// Listen for responses to our ancestors hashes repair requests
|
||||
fn run_responses_listener(
|
||||
ancestor_hashes_request_statuses: Arc<DashMap<Slot, DeadSlotAncestorRequestStatus>>,
|
||||
response_receiver: PacketReceiver,
|
||||
response_receiver: PacketBatchReceiver,
|
||||
blockstore: Arc<Blockstore>,
|
||||
outstanding_requests: Arc<RwLock<OutstandingAncestorHashesRepairs>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
@@ -240,7 +240,7 @@ impl AncestorHashesService {
|
||||
/// Process messages from the network
|
||||
fn process_new_packets_from_channel(
|
||||
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
|
||||
response_receiver: &PacketReceiver,
|
||||
response_receiver: &PacketBatchReceiver,
|
||||
blockstore: &Blockstore,
|
||||
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
|
||||
stats: &mut AncestorHashesResponsesStats,
|
||||
@@ -249,17 +249,17 @@ impl AncestorHashesService {
|
||||
retryable_slots_sender: &RetryableSlotsSender,
|
||||
) -> Result<()> {
|
||||
let timeout = Duration::new(1, 0);
|
||||
let mut responses = vec![response_receiver.recv_timeout(timeout)?];
|
||||
let mut total_packets = responses[0].packets.len();
|
||||
let mut packet_batches = vec![response_receiver.recv_timeout(timeout)?];
|
||||
let mut total_packets = packet_batches[0].packets.len();
|
||||
|
||||
let mut dropped_packets = 0;
|
||||
while let Ok(more) = response_receiver.try_recv() {
|
||||
total_packets += more.packets.len();
|
||||
while let Ok(batch) = response_receiver.try_recv() {
|
||||
total_packets += batch.packets.len();
|
||||
if total_packets < *max_packets {
|
||||
// Drop the rest in the channel in case of DOS
|
||||
responses.push(more);
|
||||
packet_batches.push(batch);
|
||||
} else {
|
||||
dropped_packets += more.packets.len();
|
||||
dropped_packets += batch.packets.len();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -267,10 +267,10 @@ impl AncestorHashesService {
|
||||
stats.total_packets += total_packets;
|
||||
|
||||
let mut time = Measure::start("ancestor_hashes::handle_packets");
|
||||
for response in responses {
|
||||
Self::process_single_packets(
|
||||
for packet_batch in packet_batches {
|
||||
Self::process_packet_batch(
|
||||
ancestor_hashes_request_statuses,
|
||||
response,
|
||||
packet_batch,
|
||||
stats,
|
||||
outstanding_requests,
|
||||
blockstore,
|
||||
@@ -289,16 +289,16 @@ impl AncestorHashesService {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_single_packets(
|
||||
fn process_packet_batch(
|
||||
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
|
||||
packets: Packets,
|
||||
packet_batch: PacketBatch,
|
||||
stats: &mut AncestorHashesResponsesStats,
|
||||
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
|
||||
blockstore: &Blockstore,
|
||||
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
|
||||
retryable_slots_sender: &RetryableSlotsSender,
|
||||
) {
|
||||
packets.packets.iter().for_each(|packet| {
|
||||
packet_batch.packets.iter().for_each(|packet| {
|
||||
let decision = Self::verify_and_process_ancestor_response(
|
||||
packet,
|
||||
ancestor_hashes_request_statuses,
|
||||
@@ -328,7 +328,7 @@ impl AncestorHashesService {
|
||||
blockstore: &Blockstore,
|
||||
) -> Option<(Slot, DuplicateAncestorDecision)> {
|
||||
let from_addr = packet.meta.addr();
|
||||
limited_deserialize(&packet.data[..packet.meta.size - SIZE_OF_NONCE])
|
||||
limited_deserialize(&packet.data[..packet.meta.size.saturating_sub(SIZE_OF_NONCE)])
|
||||
.ok()
|
||||
.and_then(|ancestor_hashes_response| {
|
||||
// Verify the response
|
||||
@@ -871,7 +871,7 @@ mod test {
|
||||
t_listen: JoinHandle<()>,
|
||||
exit: Arc<AtomicBool>,
|
||||
responder_info: ContactInfo,
|
||||
response_receiver: PacketReceiver,
|
||||
response_receiver: PacketBatchReceiver,
|
||||
correct_bank_hashes: HashMap<Slot, Hash>,
|
||||
}
|
||||
|
||||
@@ -1033,15 +1033,6 @@ mod test {
|
||||
is_frozen,
|
||||
);
|
||||
|
||||
/*{
|
||||
let w_bank_forks = bank_forks.write().unwrap();
|
||||
assert!(w_bank_forks.get(dead_slot).is_none());
|
||||
let parent = w_bank_forks.get(dead_slot - 1).unwrap().clone();
|
||||
let dead_bank = Bank::new_from_parent(&parent, &Pubkey::default(), dead_slot);
|
||||
bank_forks.insert(dead_bank);
|
||||
|
||||
}*/
|
||||
|
||||
// Create slots [slot, slot + num_ancestors) with 5 shreds apiece
|
||||
let (shreds, _) = make_many_slot_entries(dead_slot, dead_slot, 5);
|
||||
blockstore
|
||||
@@ -1369,6 +1360,34 @@ mod test {
|
||||
assert!(ancestor_hashes_request_statuses.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_and_process_ancestor_responses_invalid_packet() {
|
||||
let bank0 = Bank::default_for_tests();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
|
||||
|
||||
let ManageAncestorHashesState {
|
||||
ancestor_hashes_request_statuses,
|
||||
outstanding_requests,
|
||||
..
|
||||
} = ManageAncestorHashesState::new(bank_forks);
|
||||
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
|
||||
// Create invalid packet with fewer bytes than the size of the nonce
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = 0;
|
||||
|
||||
assert!(AncestorHashesService::verify_and_process_ancestor_response(
|
||||
&packet,
|
||||
&ancestor_hashes_request_statuses,
|
||||
&mut AncestorHashesResponsesStats::default(),
|
||||
&outstanding_requests,
|
||||
&blockstore,
|
||||
)
|
||||
.is_none());
|
||||
}
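This new test exercises exactly the case the `saturating_sub` change earlier in this file protects against: a packet whose `meta.size` is smaller than the nonce. A tiny stand-alone illustration of the difference, with stand-in values for the real constants:

fn main() {
    let packet_size: usize = 0; // like the empty packet built in the test above
    let nonce_size: usize = 4;  // stand-in for SIZE_OF_NONCE

    // `packet_size - nonce_size` would underflow (panicking in debug builds),
    // taking the service down on a malformed packet; saturating_sub clamps the
    // slice bound to 0 so deserialization simply fails and the packet is ignored.
    assert_eq!(packet_size.saturating_sub(nonce_size), 0);
}
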
|
||||
|
||||
#[test]
|
||||
fn test_ancestor_hashes_service_manage_ancestor_hashes_after_replay_dump() {
|
||||
let dead_slot = MAX_ANCESTOR_RESPONSES as Slot;
|
||||
|
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff