Compare commits
122 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
b04ce80255 | ||
|
a788021181 | ||
|
553e9fb8cd | ||
|
f1bc7ec4fa | ||
|
581181e87f | ||
|
36e1f9fae8 | ||
|
f10ae394c8 | ||
|
f7905d369a | ||
|
ef079d202b | ||
|
b7efc2373c | ||
|
d3b50bc55b | ||
|
8fd3465f8a | ||
|
23b9e6eae3 | ||
|
fe1a977f9e | ||
|
5e538eff7c | ||
|
3efe4b5478 | ||
|
90e0d4fefe | ||
|
2e983fb39f | ||
|
527b20fbbd | ||
|
a0c4b4e5fc | ||
|
282315a721 | ||
|
b8198f8cc5 | ||
|
68ad2dcce1 | ||
|
e87c3421bc | ||
|
20754a7115 | ||
|
8a57ee181e | ||
|
4e6b5a9808 | ||
|
f24fbde43b | ||
|
47f60c7607 | ||
|
8b307ed409 | ||
|
cf21719a07 | ||
|
3157b464c4 | ||
|
2581db5748 | ||
|
634959b3ab | ||
|
03b21f2e9d | ||
|
cc5565b17e | ||
|
50beef0b15 | ||
|
06a54e1423 | ||
|
4d731ecd08 | ||
|
ee06789a66 | ||
|
2dabe1d706 | ||
|
3b1279a005 | ||
|
5c9f85f28d | ||
|
e12dd46ef3 | ||
|
c4fa03b478 | ||
|
9fb749deb7 | ||
|
bd48344de2 | ||
|
78e54f1d2c | ||
|
76a6576976 | ||
|
92ec1ae255 | ||
|
0d203728cc | ||
|
625773e5b8 | ||
|
a4cb1e45ae | ||
|
8aded2778e | ||
|
d940c5b1a3 | ||
|
1be045df94 | ||
|
86191911c7 | ||
|
8f852d8a6b | ||
|
68a439f8da | ||
|
e021832708 | ||
|
87b11aa187 | ||
|
7475a6f444 | ||
|
86ce650661 | ||
|
4dc5a53014 | ||
|
5e35cf3536 | ||
|
e8a8d1efb3 | ||
|
defd9238fa | ||
|
5f061dcea1 | ||
|
e6ee27a738 | ||
|
dd2d25d698 | ||
|
9096c3df02 | ||
|
9f94c2a9a0 | ||
|
34213da9f4 | ||
|
c3c4991c44 | ||
|
9d37a33dcd | ||
|
a04ca03fee | ||
|
64ce4a6203 | ||
|
7ac3c9ec76 | ||
|
7d91515e8d | ||
|
4e3f2c3d2d | ||
|
8b67ba6d3d | ||
|
c2ce68ab90 | ||
|
fe87cb1cd1 | ||
|
1c8f6a836a | ||
|
3d5ff7968e | ||
|
d6160f7744 | ||
|
5e9ce99abf | ||
|
ebd6fe7acb | ||
|
9e91a2c2fd | ||
|
899f57962a | ||
|
3176b00e57 | ||
|
08b9da8397 | ||
|
2bc21ecba2 | ||
|
5b2a65fab3 | ||
|
f5d56eabf3 | ||
|
af45efb62c | ||
|
f528cda832 | ||
|
eeef9f4e59 | ||
|
32124b59e9 | ||
|
aa9772f9c0 | ||
|
5f183bd773 | ||
|
2238e5001b | ||
|
79fa7ef55c | ||
|
07df827411 | ||
|
a259ff0e72 | ||
|
d7d3e767e7 | ||
|
6e8aa9af17 | ||
|
0236de7bc8 | ||
|
899bd1572a | ||
|
97ec4cd44e | ||
|
5500970a7e | ||
|
caea04d8d5 | ||
|
b1a90c3580 | ||
|
5bd4e38345 | ||
|
fddba08571 | ||
|
87963764fa | ||
|
b691a159dd | ||
|
5af1d48be8 | ||
|
3b3ec3313f | ||
|
be00246fb5 | ||
|
1d80ba9edf | ||
|
4bcf976ecd |
2
.buildkite/env/secrets.ejson
vendored
2
.buildkite/env/secrets.ejson
vendored
@@ -2,6 +2,6 @@
|
|||||||
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
|
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
|
||||||
"_comment": "These credentials are encrypted and pose no risk",
|
"_comment": "These credentials are encrypted and pose no risk",
|
||||||
"environment": {
|
"environment": {
|
||||||
"CODECOV_TOKEN": "EJ[1:KToenD1Sr3w82lHGxz1n+j3hwNlLk/1pYrjZHlvY6kE=:hN1Q25omtJ+4yYVn+qzIsPLKT3O6J9XN:DMLNLXi/pkWgvwF6gNIcNF222sgsRR9LnwLZYj0P0wGj7q6w8YQnd1Rskj+sRroI/z5pQg==]"
|
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -36,7 +36,4 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
|
|||||||
# `std:
|
# `std:
|
||||||
# "found possibly newer version of crate `std` which `xyz` depends on
|
# "found possibly newer version of crate `std` which `xyz` depends on
|
||||||
rm -rf target/bpfel-unknown-unknown
|
rm -rf target/bpfel-unknown-unknown
|
||||||
if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then
|
|
||||||
rm -rf target/release
|
|
||||||
fi
|
|
||||||
)
|
)
|
||||||
|
15
.travis.yml
15
.travis.yml
@@ -29,7 +29,6 @@ jobs:
|
|||||||
if: type IN (api, cron) OR tag IS present
|
if: type IN (api, cron) OR tag IS present
|
||||||
name: "macOS release artifacts"
|
name: "macOS release artifacts"
|
||||||
os: osx
|
os: osx
|
||||||
osx_image: xcode12
|
|
||||||
language: rust
|
language: rust
|
||||||
rust:
|
rust:
|
||||||
- stable
|
- stable
|
||||||
@@ -37,12 +36,8 @@ jobs:
|
|||||||
- source ci/rust-version.sh
|
- source ci/rust-version.sh
|
||||||
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
||||||
- readlink -f .
|
- readlink -f .
|
||||||
- brew install gnu-tar
|
|
||||||
- PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
|
|
||||||
- tar --version
|
|
||||||
script:
|
script:
|
||||||
- source ci/env.sh
|
- source ci/env.sh
|
||||||
- rustup set profile default
|
|
||||||
- ci/publish-tarball.sh
|
- ci/publish-tarball.sh
|
||||||
deploy:
|
deploy:
|
||||||
- provider: s3
|
- provider: s3
|
||||||
@@ -65,12 +60,6 @@ jobs:
|
|||||||
- <<: *release-artifacts
|
- <<: *release-artifacts
|
||||||
name: "Windows release artifacts"
|
name: "Windows release artifacts"
|
||||||
os: windows
|
os: windows
|
||||||
install:
|
|
||||||
- choco install openssl
|
|
||||||
- export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
|
|
||||||
- source ci/rust-version.sh
|
|
||||||
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
|
||||||
- readlink -f .
|
|
||||||
# Linux release artifacts are still built by ci/buildkite-secondary.yml
|
# Linux release artifacts are still built by ci/buildkite-secondary.yml
|
||||||
#- <<: *release-artifacts
|
#- <<: *release-artifacts
|
||||||
# name: "Linux release artifacts"
|
# name: "Linux release artifacts"
|
||||||
@@ -84,7 +73,7 @@ jobs:
|
|||||||
|
|
||||||
language: node_js
|
language: node_js
|
||||||
node_js:
|
node_js:
|
||||||
- "lts/*"
|
- "node"
|
||||||
|
|
||||||
cache:
|
cache:
|
||||||
directories:
|
directories:
|
||||||
@@ -127,7 +116,7 @@ jobs:
|
|||||||
if: type IN (push, pull_request) OR tag IS present
|
if: type IN (push, pull_request) OR tag IS present
|
||||||
language: node_js
|
language: node_js
|
||||||
node_js:
|
node_js:
|
||||||
- "lts/*"
|
- "node"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- docker
|
- docker
|
||||||
|
2963
Cargo.lock
generated
2963
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
26
Cargo.toml
26
Cargo.toml
@@ -1,8 +1,5 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
"accountsdb-plugin-interface",
|
|
||||||
"accountsdb-plugin-manager",
|
|
||||||
"accountsdb-plugin-postgres",
|
|
||||||
"accounts-cluster-bench",
|
"accounts-cluster-bench",
|
||||||
"bench-exchange",
|
"bench-exchange",
|
||||||
"bench-streamer",
|
"bench-streamer",
|
||||||
@@ -24,7 +21,6 @@ members = [
|
|||||||
"perf",
|
"perf",
|
||||||
"validator",
|
"validator",
|
||||||
"genesis",
|
"genesis",
|
||||||
"genesis-utils",
|
|
||||||
"gossip",
|
"gossip",
|
||||||
"install",
|
"install",
|
||||||
"keygen",
|
"keygen",
|
||||||
@@ -35,6 +31,7 @@ members = [
|
|||||||
"log-analyzer",
|
"log-analyzer",
|
||||||
"merkle-root-bench",
|
"merkle-root-bench",
|
||||||
"merkle-tree",
|
"merkle-tree",
|
||||||
|
"stake-o-matic",
|
||||||
"storage-bigtable",
|
"storage-bigtable",
|
||||||
"storage-proto",
|
"storage-proto",
|
||||||
"streamer",
|
"streamer",
|
||||||
@@ -42,19 +39,21 @@ members = [
|
|||||||
"metrics",
|
"metrics",
|
||||||
"net-shaper",
|
"net-shaper",
|
||||||
"notifier",
|
"notifier",
|
||||||
"poh",
|
|
||||||
"poh-bench",
|
"poh-bench",
|
||||||
"program-test",
|
"program-test",
|
||||||
|
"programs/secp256k1",
|
||||||
"programs/bpf_loader",
|
"programs/bpf_loader",
|
||||||
"programs/compute-budget",
|
"programs/budget",
|
||||||
"programs/config",
|
"programs/config",
|
||||||
"programs/exchange",
|
"programs/exchange",
|
||||||
"programs/ed25519",
|
"programs/failure",
|
||||||
"programs/secp256k1",
|
"programs/noop",
|
||||||
|
"programs/ownable",
|
||||||
"programs/stake",
|
"programs/stake",
|
||||||
|
"programs/vest",
|
||||||
"programs/vote",
|
"programs/vote",
|
||||||
"remote-wallet",
|
"remote-wallet",
|
||||||
"rpc",
|
"ramp-tps",
|
||||||
"runtime",
|
"runtime",
|
||||||
"runtime/store-tool",
|
"runtime/store-tool",
|
||||||
"sdk",
|
"sdk",
|
||||||
@@ -62,6 +61,7 @@ members = [
|
|||||||
"sdk/cargo-test-bpf",
|
"sdk/cargo-test-bpf",
|
||||||
"scripts",
|
"scripts",
|
||||||
"stake-accounts",
|
"stake-accounts",
|
||||||
|
"stake-monitor",
|
||||||
"sys-tuner",
|
"sys-tuner",
|
||||||
"tokens",
|
"tokens",
|
||||||
"transaction-status",
|
"transaction-status",
|
||||||
@@ -77,11 +77,3 @@ members = [
|
|||||||
exclude = [
|
exclude = [
|
||||||
"programs/bpf",
|
"programs/bpf",
|
||||||
]
|
]
|
||||||
|
|
||||||
# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
|
|
||||||
# dependency is supported on Apple M1. v2 of the feature resolver is needed to
|
|
||||||
# specify arch-specific features.
|
|
||||||
resolver = "2"
|
|
||||||
|
|
||||||
[profile.dev]
|
|
||||||
split-debuginfo = "unpacked"
|
|
||||||
|
15
README.md
15
README.md
@@ -1,6 +1,6 @@
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://solana.com">
|
<a href="https://solana.com">
|
||||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
<img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
|
||||||
</a>
|
</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
|
|||||||
$ rustup component add rustfmt
|
$ rustup component add rustfmt
|
||||||
```
|
```
|
||||||
|
|
||||||
Please make sure you are always using the latest stable rust version by running:
|
Please sure you are always using the latest stable rust version by running:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ rustup update
|
$ rustup update
|
||||||
@@ -32,12 +32,6 @@ $ sudo apt-get update
|
|||||||
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
|
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
|
||||||
```
|
```
|
||||||
|
|
||||||
On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ softwareupdate --install-rosetta
|
|
||||||
```
|
|
||||||
|
|
||||||
## **2. Download the source code.**
|
## **2. Download the source code.**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -51,6 +45,11 @@ $ cd solana
|
|||||||
$ cargo build
|
$ cargo build
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## **4. Run a minimal local cluster.**
|
||||||
|
```bash
|
||||||
|
$ ./run.sh
|
||||||
|
```
|
||||||
|
|
||||||
# Testing
|
# Testing
|
||||||
|
|
||||||
**Run the test suite:**
|
**Run the test suite:**
|
||||||
|
20
SECURITY.md
20
SECURITY.md
@@ -51,27 +51,13 @@ The following components are out of scope for the bounty program
|
|||||||
* Attacks that require social engineering
|
* Attacks that require social engineering
|
||||||
|
|
||||||
Eligibility:
|
Eligibility:
|
||||||
* The participant submitting the bug report shall follow the process outlined within this document
|
* The participant submitting the bug bounty shall follow the process outlined within this document
|
||||||
* Valid exploits can be eligible even if they are not successfully executed on the cluster
|
* Valid exploits can be eligible even if they are not successfully executed on the cluster
|
||||||
* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
|
* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
|
||||||
* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
|
* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
|
||||||
|
|
||||||
Payment of Bug Bounties:
|
Notes:
|
||||||
* Payments for eligible bug reports are distributed monthly.
|
* All locked tokens can be staked during the lockup period
|
||||||
* Bounties for all bug reports submitted in a given month are paid out in the middle of the
|
|
||||||
following month.
|
|
||||||
* The SOL/USD conversion rate used for payments is the market price at the end of
|
|
||||||
the last day of the month for the month in which the bug was submitted.
|
|
||||||
* The reference for this price is the Closing Price given by Coingecko.com on
|
|
||||||
that date given here:
|
|
||||||
https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
|
|
||||||
* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
|
|
||||||
payouts is the Close price on 2021-03-31 of $19.49. This applies to all bugs
|
|
||||||
submitted in March 2021, to be paid in mid-April 2021.
|
|
||||||
* Bug bounties are paid out in
|
|
||||||
[stake accounts](https://solana.com/staking) with a
|
|
||||||
[lockup](https://docs.solana.com/staking/stake-accounts#lockups)
|
|
||||||
expiring 12 months from the last day of the month in which the bug was submitted.
|
|
||||||
|
|
||||||
<a name="process"></a>
|
<a name="process"></a>
|
||||||
## Incident Response Process
|
## Incident Response Process
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "solana-account-decoder"
|
name = "solana-account-decoder"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
description = "Solana account decoder"
|
description = "Solana account decoder"
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
@@ -19,10 +19,11 @@ lazy_static = "1.4.0"
|
|||||||
serde = "1.0.122"
|
serde = "1.0.122"
|
||||||
serde_derive = "1.0.103"
|
serde_derive = "1.0.103"
|
||||||
serde_json = "1.0.56"
|
serde_json = "1.0.56"
|
||||||
solana-config-program = { path = "../programs/config", version = "=1.8.13" }
|
solana-config-program = { path = "../programs/config", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.13" }
|
solana-stake-program = { path = "../programs/stake", version = "=1.6.4" }
|
||||||
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
|
solana-vote-program = { path = "../programs/vote", version = "=1.6.4" }
|
||||||
|
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
zstd = "0.5.1"
|
zstd = "0.5.1"
|
||||||
|
|
||||||
|
@@ -17,10 +17,8 @@ pub mod validator_info;
|
|||||||
use {
|
use {
|
||||||
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
|
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
|
||||||
solana_sdk::{
|
solana_sdk::{
|
||||||
account::{ReadableAccount, WritableAccount},
|
account::ReadableAccount, account::WritableAccount, clock::Epoch,
|
||||||
clock::Epoch,
|
fee_calculator::FeeCalculator, pubkey::Pubkey,
|
||||||
fee_calculator::FeeCalculator,
|
|
||||||
pubkey::Pubkey,
|
|
||||||
},
|
},
|
||||||
std::{
|
std::{
|
||||||
io::{Read, Write},
|
io::{Read, Write},
|
||||||
@@ -30,7 +28,6 @@ use {
|
|||||||
|
|
||||||
pub type StringAmount = String;
|
pub type StringAmount = String;
|
||||||
pub type StringDecimals = String;
|
pub type StringDecimals = String;
|
||||||
pub const MAX_BASE58_BYTES: usize = 128;
|
|
||||||
|
|
||||||
/// A duplicate representation of an Account for pretty JSON serialization
|
/// A duplicate representation of an Account for pretty JSON serialization
|
||||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||||
@@ -51,7 +48,7 @@ pub enum UiAccountData {
|
|||||||
Binary(String, UiAccountEncoding),
|
Binary(String, UiAccountEncoding),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub enum UiAccountEncoding {
|
pub enum UiAccountEncoding {
|
||||||
Binary, // Legacy. Retained for RPC backwards compatibility
|
Binary, // Legacy. Retained for RPC backwards compatibility
|
||||||
@@ -63,53 +60,41 @@ pub enum UiAccountEncoding {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl UiAccount {
|
impl UiAccount {
|
||||||
fn encode_bs58<T: ReadableAccount>(
|
|
||||||
account: &T,
|
|
||||||
data_slice_config: Option<UiDataSliceConfig>,
|
|
||||||
) -> String {
|
|
||||||
if account.data().len() <= MAX_BASE58_BYTES {
|
|
||||||
bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
|
|
||||||
} else {
|
|
||||||
"error: data too large for bs58 encoding".to_string()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encode<T: ReadableAccount>(
|
pub fn encode<T: ReadableAccount>(
|
||||||
pubkey: &Pubkey,
|
pubkey: &Pubkey,
|
||||||
account: &T,
|
account: T,
|
||||||
encoding: UiAccountEncoding,
|
encoding: UiAccountEncoding,
|
||||||
additional_data: Option<AccountAdditionalData>,
|
additional_data: Option<AccountAdditionalData>,
|
||||||
data_slice_config: Option<UiDataSliceConfig>,
|
data_slice_config: Option<UiDataSliceConfig>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let data = match encoding {
|
let data = match encoding {
|
||||||
UiAccountEncoding::Binary => {
|
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
|
||||||
let data = Self::encode_bs58(account, data_slice_config);
|
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
|
||||||
UiAccountData::LegacyBinary(data)
|
),
|
||||||
}
|
UiAccountEncoding::Base58 => UiAccountData::Binary(
|
||||||
UiAccountEncoding::Base58 => {
|
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
|
||||||
let data = Self::encode_bs58(account, data_slice_config);
|
encoding,
|
||||||
UiAccountData::Binary(data, encoding)
|
),
|
||||||
}
|
|
||||||
UiAccountEncoding::Base64 => UiAccountData::Binary(
|
UiAccountEncoding::Base64 => UiAccountData::Binary(
|
||||||
base64::encode(slice_data(account.data(), data_slice_config)),
|
base64::encode(slice_data(&account.data(), data_slice_config)),
|
||||||
encoding,
|
encoding,
|
||||||
),
|
),
|
||||||
UiAccountEncoding::Base64Zstd => {
|
UiAccountEncoding::Base64Zstd => {
|
||||||
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
|
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
|
||||||
match encoder
|
match encoder
|
||||||
.write_all(slice_data(account.data(), data_slice_config))
|
.write_all(slice_data(&account.data(), data_slice_config))
|
||||||
.and_then(|()| encoder.finish())
|
.and_then(|()| encoder.finish())
|
||||||
{
|
{
|
||||||
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
|
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
|
||||||
Err(_) => UiAccountData::Binary(
|
Err(_) => UiAccountData::Binary(
|
||||||
base64::encode(slice_data(account.data(), data_slice_config)),
|
base64::encode(slice_data(&account.data(), data_slice_config)),
|
||||||
UiAccountEncoding::Base64,
|
UiAccountEncoding::Base64,
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
UiAccountEncoding::JsonParsed => {
|
UiAccountEncoding::JsonParsed => {
|
||||||
if let Ok(parsed_data) =
|
if let Ok(parsed_data) =
|
||||||
parse_account_data(pubkey, account.owner(), account.data(), additional_data)
|
parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
|
||||||
{
|
{
|
||||||
UiAccountData::Json(parsed_data)
|
UiAccountData::Json(parsed_data)
|
||||||
} else {
|
} else {
|
||||||
@@ -181,7 +166,7 @@ impl Default for UiFeeCalculator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct UiDataSliceConfig {
|
pub struct UiDataSliceConfig {
|
||||||
pub offset: usize,
|
pub offset: usize,
|
||||||
@@ -204,10 +189,8 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_sdk::account::{Account, AccountSharedData};
|
||||||
solana_sdk::account::{Account, AccountSharedData},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_slice_data() {
|
fn test_slice_data() {
|
||||||
@@ -241,7 +224,7 @@ mod test {
|
|||||||
fn test_base64_zstd() {
|
fn test_base64_zstd() {
|
||||||
let encoded_account = UiAccount::encode(
|
let encoded_account = UiAccount::encode(
|
||||||
&Pubkey::default(),
|
&Pubkey::default(),
|
||||||
&AccountSharedData::from(Account {
|
AccountSharedData::from(Account {
|
||||||
data: vec![0; 1024],
|
data: vec![0; 1024],
|
||||||
..Account::default()
|
..Account::default()
|
||||||
}),
|
}),
|
||||||
|
@@ -1,27 +1,25 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_bpf_loader::parse_bpf_upgradeable_loader,
|
||||||
parse_bpf_loader::parse_bpf_upgradeable_loader,
|
parse_config::parse_config,
|
||||||
parse_config::parse_config,
|
parse_nonce::parse_nonce,
|
||||||
parse_nonce::parse_nonce,
|
parse_stake::parse_stake,
|
||||||
parse_stake::parse_stake,
|
parse_sysvar::parse_sysvar,
|
||||||
parse_sysvar::parse_sysvar,
|
parse_token::{parse_token, spl_token_id_v2_0},
|
||||||
parse_token::{parse_token, spl_token_id},
|
parse_vote::parse_vote,
|
||||||
parse_vote::parse_vote,
|
|
||||||
},
|
|
||||||
inflector::Inflector,
|
|
||||||
serde_json::Value,
|
|
||||||
solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
|
|
||||||
std::collections::HashMap,
|
|
||||||
thiserror::Error,
|
|
||||||
};
|
};
|
||||||
|
use inflector::Inflector;
|
||||||
|
use serde_json::Value;
|
||||||
|
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
|
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
|
||||||
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
|
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
|
||||||
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
|
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
|
||||||
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
|
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
|
||||||
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
|
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
|
||||||
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
|
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
|
||||||
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
|
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
|
||||||
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
|
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
|
||||||
let mut m = HashMap::new();
|
let mut m = HashMap::new();
|
||||||
@@ -114,14 +112,12 @@ pub fn parse_account_data(
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_sdk::nonce::{
|
||||||
solana_sdk::nonce::{
|
state::{Data, Versions},
|
||||||
state::{Data, Versions},
|
State,
|
||||||
State,
|
|
||||||
},
|
|
||||||
solana_vote_program::vote_state::{VoteState, VoteStateVersions},
|
|
||||||
};
|
};
|
||||||
|
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_account_data() {
|
fn test_parse_account_data() {
|
||||||
|
@@ -1,11 +1,9 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
UiAccountData, UiAccountEncoding,
|
||||||
UiAccountData, UiAccountEncoding,
|
|
||||||
},
|
|
||||||
bincode::{deserialize, serialized_size},
|
|
||||||
solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
|
|
||||||
};
|
};
|
||||||
|
use bincode::{deserialize, serialized_size};
|
||||||
|
use solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey};
|
||||||
|
|
||||||
pub fn parse_bpf_upgradeable_loader(
|
pub fn parse_bpf_upgradeable_loader(
|
||||||
data: &[u8],
|
data: &[u8],
|
||||||
@@ -92,7 +90,9 @@ pub struct UiProgramData {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
|
use super::*;
|
||||||
|
use bincode::serialize;
|
||||||
|
use solana_sdk::pubkey::Pubkey;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_bpf_upgradeable_loader_accounts() {
|
fn test_parse_bpf_upgradeable_loader_accounts() {
|
||||||
|
@@ -1,19 +1,15 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
validator_info,
|
||||||
validator_info,
|
|
||||||
},
|
|
||||||
bincode::deserialize,
|
|
||||||
serde_json::Value,
|
|
||||||
solana_config_program::{get_config_data, ConfigKeys},
|
|
||||||
solana_sdk::{
|
|
||||||
pubkey::Pubkey,
|
|
||||||
stake::config::{self as stake_config, Config as StakeConfig},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
use bincode::deserialize;
|
||||||
|
use serde_json::Value;
|
||||||
|
use solana_config_program::{get_config_data, ConfigKeys};
|
||||||
|
use solana_sdk::pubkey::Pubkey;
|
||||||
|
use solana_stake_program::config::Config as StakeConfig;
|
||||||
|
|
||||||
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
|
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
|
||||||
let parsed_account = if pubkey == &stake_config::id() {
|
let parsed_account = if pubkey == &solana_stake_program::config::id() {
|
||||||
get_config_data(data)
|
get_config_data(data)
|
||||||
.ok()
|
.ok()
|
||||||
.and_then(|data| deserialize::<StakeConfig>(data).ok())
|
.and_then(|data| deserialize::<StakeConfig>(data).ok())
|
||||||
@@ -41,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
|
|||||||
where
|
where
|
||||||
T: serde::de::DeserializeOwned,
|
T: serde::de::DeserializeOwned,
|
||||||
{
|
{
|
||||||
let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
|
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
|
||||||
let keys = keys
|
let keys = keys
|
||||||
.iter()
|
.iter()
|
||||||
.map(|key| UiConfigKey {
|
.map(|key| UiConfigKey {
|
||||||
@@ -91,10 +87,11 @@ pub struct UiConfig<T> {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*, crate::validator_info::ValidatorInfo, serde_json::json,
|
use crate::validator_info::ValidatorInfo;
|
||||||
solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
|
use serde_json::json;
|
||||||
};
|
use solana_config_program::create_config_account;
|
||||||
|
use solana_sdk::account::ReadableAccount;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_config() {
|
fn test_parse_config() {
|
||||||
@@ -104,7 +101,11 @@ mod test {
|
|||||||
};
|
};
|
||||||
let stake_config_account = create_config_account(vec![], &stake_config, 10);
|
let stake_config_account = create_config_account(vec![], &stake_config, 10);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
|
parse_config(
|
||||||
|
&stake_config_account.data(),
|
||||||
|
&solana_stake_program::config::id()
|
||||||
|
)
|
||||||
|
.unwrap(),
|
||||||
ConfigAccountType::StakeConfig(UiStakeConfig {
|
ConfigAccountType::StakeConfig(UiStakeConfig {
|
||||||
warmup_cooldown_rate: 0.25,
|
warmup_cooldown_rate: 0.25,
|
||||||
slash_penalty: 50,
|
slash_penalty: 50,
|
||||||
@@ -124,7 +125,7 @@ mod test {
|
|||||||
10,
|
10,
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
|
parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
|
||||||
ConfigAccountType::ValidatorInfo(UiConfig {
|
ConfigAccountType::ValidatorInfo(UiConfig {
|
||||||
keys: vec![
|
keys: vec![
|
||||||
UiConfigKey {
|
UiConfigKey {
|
||||||
|
@@ -1,9 +1,7 @@
|
|||||||
use {
|
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
|
||||||
crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
instruction::InstructionError,
|
||||||
instruction::InstructionError,
|
nonce::{state::Versions, State},
|
||||||
nonce::{state::Versions, State},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
||||||
@@ -11,13 +9,7 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
|||||||
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
|
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
|
||||||
let nonce_state = nonce_state.convert_to_current();
|
let nonce_state = nonce_state.convert_to_current();
|
||||||
match nonce_state {
|
match nonce_state {
|
||||||
// This prevents parsing an allocated System-owned account with empty data of any non-zero
|
State::Uninitialized => Ok(UiNonceState::Uninitialized),
|
||||||
// length as `uninitialized` nonce. An empty account of the wrong length can never be
|
|
||||||
// initialized as a nonce account, and an empty account of the correct length may not be an
|
|
||||||
// uninitialized nonce account, since it can be assigned to another program.
|
|
||||||
State::Uninitialized => Err(ParseAccountError::from(
|
|
||||||
InstructionError::InvalidAccountData,
|
|
||||||
)),
|
|
||||||
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
|
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
|
||||||
authority: data.authority.to_string(),
|
authority: data.authority.to_string(),
|
||||||
blockhash: data.blockhash.to_string(),
|
blockhash: data.blockhash.to_string(),
|
||||||
@@ -44,16 +36,14 @@ pub struct UiNonceData {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
hash::Hash,
|
||||||
hash::Hash,
|
nonce::{
|
||||||
nonce::{
|
state::{Data, Versions},
|
||||||
state::{Data, Versions},
|
State,
|
||||||
State,
|
|
||||||
},
|
|
||||||
pubkey::Pubkey,
|
|
||||||
},
|
},
|
||||||
|
pubkey::Pubkey,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@@ -1,14 +1,10 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
StringAmount,
|
||||||
StringAmount,
|
|
||||||
},
|
|
||||||
bincode::deserialize,
|
|
||||||
solana_sdk::{
|
|
||||||
clock::{Epoch, UnixTimestamp},
|
|
||||||
stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
use bincode::deserialize;
|
||||||
|
use solana_sdk::clock::{Epoch, UnixTimestamp};
|
||||||
|
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
|
||||||
|
|
||||||
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
||||||
let stake_state: StakeState = deserialize(data)
|
let stake_state: StakeState = deserialize(data)
|
||||||
@@ -136,7 +132,8 @@ impl From<Delegation> for UiDelegation {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {super::*, bincode::serialize};
|
use super::*;
|
||||||
|
use bincode::serialize;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_stake() {
|
fn test_parse_stake() {
|
||||||
|
@@ -1,20 +1,18 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
StringAmount, UiFeeCalculator,
|
||||||
StringAmount, UiFeeCalculator,
|
};
|
||||||
},
|
use bincode::deserialize;
|
||||||
bincode::deserialize,
|
use bv::BitVec;
|
||||||
bv::BitVec,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
||||||
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
epoch_schedule::EpochSchedule,
|
||||||
epoch_schedule::EpochSchedule,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
rent::Rent,
|
||||||
rent::Rent,
|
slot_hashes::SlotHashes,
|
||||||
slot_hashes::SlotHashes,
|
slot_history::{self, SlotHistory},
|
||||||
slot_history::{self, SlotHistory},
|
stake_history::{StakeHistory, StakeHistoryEntry},
|
||||||
stake_history::{StakeHistory, StakeHistoryEntry},
|
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
|
||||||
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
|
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
|
||||||
@@ -214,12 +212,10 @@ pub struct UiStakeHistoryEntry {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
|
||||||
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
|
sysvar::recent_blockhashes::IterItem,
|
||||||
sysvar::recent_blockhashes::IterItem,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@@ -1,38 +1,36 @@
|
|||||||
use {
|
use crate::{
|
||||||
crate::{
|
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
StringAmount, StringDecimals,
|
||||||
StringAmount, StringDecimals,
|
|
||||||
},
|
|
||||||
solana_sdk::pubkey::Pubkey,
|
|
||||||
spl_token::{
|
|
||||||
solana_program::{
|
|
||||||
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
|
|
||||||
},
|
|
||||||
state::{Account, AccountState, Mint, Multisig},
|
|
||||||
},
|
|
||||||
std::str::FromStr,
|
|
||||||
};
|
};
|
||||||
|
use solana_sdk::pubkey::Pubkey;
|
||||||
|
use spl_token_v2_0::{
|
||||||
|
solana_program::{
|
||||||
|
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
|
||||||
|
},
|
||||||
|
state::{Account, AccountState, Mint, Multisig},
|
||||||
|
};
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
|
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
|
||||||
// solana_sdk::pubkey::Pubkey
|
// solana_sdk::pubkey::Pubkey
|
||||||
pub fn spl_token_id() -> Pubkey {
|
pub fn spl_token_id_v2_0() -> Pubkey {
|
||||||
Pubkey::new_from_array(spl_token::id().to_bytes())
|
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
||||||
// solana_sdk::pubkey::Pubkey
|
// solana_sdk::pubkey::Pubkey
|
||||||
pub fn spl_token_native_mint() -> Pubkey {
|
pub fn spl_token_v2_0_native_mint() -> Pubkey {
|
||||||
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
|
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
|
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
|
||||||
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
|
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
|
||||||
SplTokenPubkey::new_from_array(pubkey.to_bytes())
|
SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
|
// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
|
||||||
pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
|
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
|
||||||
Pubkey::new_from_array(pubkey.to_bytes())
|
Pubkey::from_str(&pubkey.to_string()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn parse_token(
|
pub fn parse_token(
|
||||||
|
@@ -1,11 +1,9 @@
|
|||||||
use {
|
use crate::{parse_account_data::ParseAccountError, StringAmount};
|
||||||
crate::{parse_account_data::ParseAccountError, StringAmount},
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
clock::{Epoch, Slot},
|
||||||
clock::{Epoch, Slot},
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
|
||||||
},
|
|
||||||
solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
|
|
||||||
};
|
};
|
||||||
|
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};
|
||||||
|
|
||||||
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
|
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
|
||||||
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
|
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
|
||||||
@@ -123,7 +121,8 @@ struct UiEpochCredits {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {super::*, solana_vote_program::vote_state::VoteStateVersions};
|
use super::*;
|
||||||
|
use solana_vote_program::vote_state::VoteStateVersions;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_vote() {
|
fn test_parse_vote() {
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-accounts-bench"
|
name = "solana-accounts-bench"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -11,11 +11,11 @@ publish = false
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
log = "0.4.11"
|
log = "0.4.11"
|
||||||
rayon = "1.5.0"
|
rayon = "1.5.0"
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
solana-measure = { path = "../measure", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
rand = "0.7.0"
|
rand = "0.7.0"
|
||||||
clap = "2.33.1"
|
clap = "2.33.1"
|
||||||
crossbeam-channel = "0.4"
|
crossbeam-channel = "0.4"
|
||||||
|
@@ -1,19 +1,15 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate log;
|
extern crate log;
|
||||||
use {
|
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||||
clap::{crate_description, crate_name, value_t, App, Arg},
|
use rayon::prelude::*;
|
||||||
rayon::prelude::*,
|
use solana_measure::measure::Measure;
|
||||||
solana_measure::measure::Measure,
|
use solana_runtime::{
|
||||||
solana_runtime::{
|
accounts::{create_test_accounts, update_accounts_bench, Accounts},
|
||||||
accounts::{create_test_accounts, update_accounts_bench, Accounts},
|
accounts_index::Ancestors,
|
||||||
accounts_db::AccountShrinkThreshold,
|
|
||||||
accounts_index::AccountSecondaryIndexes,
|
|
||||||
ancestors::Ancestors,
|
|
||||||
},
|
|
||||||
solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
|
|
||||||
std::{env, fs, path::PathBuf},
|
|
||||||
};
|
};
|
||||||
|
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
|
||||||
|
use std::{collections::HashSet, env, fs, path::PathBuf};
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
@@ -62,14 +58,8 @@ fn main() {
|
|||||||
if fs::remove_dir_all(path.clone()).is_err() {
|
if fs::remove_dir_all(path.clone()).is_err() {
|
||||||
println!("Warning: Couldn't remove {:?}", path);
|
println!("Warning: Couldn't remove {:?}", path);
|
||||||
}
|
}
|
||||||
let accounts = Accounts::new_with_config(
|
let accounts =
|
||||||
vec![path],
|
Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false);
|
||||||
&ClusterType::Testnet,
|
|
||||||
AccountSecondaryIndexes::default(),
|
|
||||||
false,
|
|
||||||
AccountShrinkThreshold::default(),
|
|
||||||
None,
|
|
||||||
);
|
|
||||||
println!("Creating {} accounts", num_accounts);
|
println!("Creating {} accounts", num_accounts);
|
||||||
let mut create_time = Measure::start("create accounts");
|
let mut create_time = Measure::start("create accounts");
|
||||||
let pubkeys: Vec<_> = (0..num_slots)
|
let pubkeys: Vec<_> = (0..num_slots)
|
||||||
@@ -93,19 +83,17 @@ fn main() {
|
|||||||
num_slots,
|
num_slots,
|
||||||
create_time
|
create_time
|
||||||
);
|
);
|
||||||
let mut ancestors = Vec::with_capacity(num_slots);
|
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
|
||||||
ancestors.push(0);
|
|
||||||
for i in 1..num_slots {
|
for i in 1..num_slots {
|
||||||
ancestors.push(i as u64);
|
ancestors.insert(i as u64, i - 1);
|
||||||
accounts.add_root(i as u64);
|
accounts.add_root(i as u64);
|
||||||
}
|
}
|
||||||
let ancestors = Ancestors::from(ancestors);
|
|
||||||
let mut elapsed = vec![0; iterations];
|
let mut elapsed = vec![0; iterations];
|
||||||
let mut elapsed_store = vec![0; iterations];
|
let mut elapsed_store = vec![0; iterations];
|
||||||
for x in 0..iterations {
|
for x in 0..iterations {
|
||||||
if clean {
|
if clean {
|
||||||
let mut time = Measure::start("clean");
|
let mut time = Measure::start("clean");
|
||||||
accounts.accounts_db.clean_accounts(None, false);
|
accounts.accounts_db.clean_accounts(None);
|
||||||
time.stop();
|
time.stop();
|
||||||
println!("{}", time);
|
println!("{}", time);
|
||||||
for slot in 0..num_slots {
|
for slot in 0..num_slots {
|
||||||
@@ -124,8 +112,6 @@ fn main() {
|
|||||||
solana_sdk::clock::Slot::default(),
|
solana_sdk::clock::Slot::default(),
|
||||||
&ancestors,
|
&ancestors,
|
||||||
None,
|
None,
|
||||||
false,
|
|
||||||
None,
|
|
||||||
);
|
);
|
||||||
time_store.stop();
|
time_store.stop();
|
||||||
if results != results_store {
|
if results != results_store {
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-accounts-cluster-bench"
|
name = "solana-accounts-cluster-bench"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -13,24 +13,22 @@ clap = "2.33.1"
|
|||||||
log = "0.4.11"
|
log = "0.4.11"
|
||||||
rand = "0.7.0"
|
rand = "0.7.0"
|
||||||
rayon = "1.4.1"
|
rayon = "1.4.1"
|
||||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.13" }
|
solana-account-decoder = { path = "../account-decoder", version = "=1.6.4" }
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-client = { path = "../client", version = "=1.8.13" }
|
solana-client = { path = "../client", version = "=1.6.4" }
|
||||||
solana-core = { path = "../core", version = "=1.8.13" }
|
solana-core = { path = "../core", version = "=1.6.4" }
|
||||||
solana-faucet = { path = "../faucet", version = "=1.8.13" }
|
solana-measure = { path = "../measure", version = "=1.6.4" }
|
||||||
solana-gossip = { path = "../gossip", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-net-utils = { path = "../net-utils", version = "=1.6.4" }
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
solana-faucet = { path = "../faucet", version = "=1.6.4" }
|
||||||
solana-net-utils = { path = "../net-utils", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-transaction-status = { path = "../transaction-status", version = "=1.6.4" }
|
||||||
solana-streamer = { path = "../streamer", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.13" }
|
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
|
||||||
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.13" }
|
solana-local-cluster = { path = "../local-cluster", version = "=1.6.4" }
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
@@ -1,38 +1,35 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
use {
|
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg};
|
||||||
clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
|
use log::*;
|
||||||
log::*,
|
use rand::{thread_rng, Rng};
|
||||||
rand::{thread_rng, Rng},
|
use rayon::prelude::*;
|
||||||
rayon::prelude::*,
|
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
|
||||||
solana_account_decoder::parse_token::spl_token_pubkey,
|
use solana_clap_utils::input_parsers::pubkey_of;
|
||||||
solana_clap_utils::input_parsers::pubkey_of,
|
use solana_client::rpc_client::RpcClient;
|
||||||
solana_client::rpc_client::RpcClient,
|
use solana_core::gossip_service::discover;
|
||||||
solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
|
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
|
||||||
solana_gossip::gossip_service::discover,
|
use solana_measure::measure::Measure;
|
||||||
solana_measure::measure::Measure,
|
use solana_runtime::inline_spl_token_v2_0;
|
||||||
solana_runtime::inline_spl_token,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
commitment_config::CommitmentConfig,
|
||||||
commitment_config::CommitmentConfig,
|
message::Message,
|
||||||
message::Message,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
rpc_port::DEFAULT_RPC_PORT,
|
||||||
rpc_port::DEFAULT_RPC_PORT,
|
signature::{read_keypair_file, Keypair, Signature, Signer},
|
||||||
signature::{read_keypair_file, Keypair, Signature, Signer},
|
system_instruction, system_program,
|
||||||
system_instruction, system_program,
|
timing::timestamp,
|
||||||
timing::timestamp,
|
transaction::Transaction,
|
||||||
transaction::Transaction,
|
};
|
||||||
},
|
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
use std::{
|
||||||
solana_transaction_status::parse_token::spl_token_instruction,
|
net::SocketAddr,
|
||||||
std::{
|
process::exit,
|
||||||
net::SocketAddr,
|
sync::{
|
||||||
process::exit,
|
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||||
sync::{
|
Arc, RwLock,
|
||||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
|
||||||
Arc, RwLock,
|
|
||||||
},
|
|
||||||
thread::{sleep, Builder, JoinHandle},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
},
|
},
|
||||||
|
thread::{sleep, Builder, JoinHandle},
|
||||||
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
// Create and close messages both require 2 signatures; if transaction construction changes, update
|
// Create and close messages both require 2 signatures; if transaction construction changes, update
|
||||||
@@ -58,7 +55,7 @@ pub fn airdrop_lamports(
|
|||||||
);
|
);
|
||||||
|
|
||||||
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
|
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||||
Ok(transaction) => {
|
Ok(transaction) => {
|
||||||
let mut tries = 0;
|
let mut tries = 0;
|
||||||
loop {
|
loop {
|
||||||
@@ -192,13 +189,14 @@ impl TransactionExecutor {
|
|||||||
let mut start = Measure::start("sig_status");
|
let mut start = Measure::start("sig_status");
|
||||||
let statuses: Vec<_> = sigs_w
|
let statuses: Vec<_> = sigs_w
|
||||||
.chunks(200)
|
.chunks(200)
|
||||||
.flat_map(|sig_chunk| {
|
.map(|sig_chunk| {
|
||||||
let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
|
let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
|
||||||
client
|
client
|
||||||
.get_signature_statuses(&only_sigs)
|
.get_signature_statuses(&only_sigs)
|
||||||
.expect("status fail")
|
.expect("status fail")
|
||||||
.value
|
.value
|
||||||
})
|
})
|
||||||
|
.flatten()
|
||||||
.collect();
|
.collect();
|
||||||
let mut num_cleared = 0;
|
let mut num_cleared = 0;
|
||||||
let start_len = sigs_w.len();
|
let start_len = sigs_w.len();
|
||||||
@@ -276,7 +274,7 @@ fn make_create_message(
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|_| {
|
.map(|_| {
|
||||||
let program_id = if mint.is_some() {
|
let program_id = if mint.is_some() {
|
||||||
inline_spl_token::id()
|
inline_spl_token_v2_0::id()
|
||||||
} else {
|
} else {
|
||||||
system_program::id()
|
system_program::id()
|
||||||
};
|
};
|
||||||
@@ -293,12 +291,12 @@ fn make_create_message(
|
|||||||
&program_id,
|
&program_id,
|
||||||
)];
|
)];
|
||||||
if let Some(mint_address) = mint {
|
if let Some(mint_address) = mint {
|
||||||
instructions.push(spl_token_instruction(
|
instructions.push(spl_token_v2_0_instruction(
|
||||||
spl_token::instruction::initialize_account(
|
spl_token_v2_0::instruction::initialize_account(
|
||||||
&spl_token::id(),
|
&spl_token_v2_0::id(),
|
||||||
&spl_token_pubkey(&to_pubkey),
|
&spl_token_v2_0_pubkey(&to_pubkey),
|
||||||
&spl_token_pubkey(&mint_address),
|
&spl_token_v2_0_pubkey(&mint_address),
|
||||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
|
||||||
)
|
)
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
));
|
));
|
||||||
@@ -324,7 +322,7 @@ fn make_close_message(
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|_| {
|
.map(|_| {
|
||||||
let program_id = if spl_token {
|
let program_id = if spl_token {
|
||||||
inline_spl_token::id()
|
inline_spl_token_v2_0::id()
|
||||||
} else {
|
} else {
|
||||||
system_program::id()
|
system_program::id()
|
||||||
};
|
};
|
||||||
@@ -332,12 +330,12 @@ fn make_close_message(
|
|||||||
let address =
|
let address =
|
||||||
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
||||||
if spl_token {
|
if spl_token {
|
||||||
spl_token_instruction(
|
spl_token_v2_0_instruction(
|
||||||
spl_token::instruction::close_account(
|
spl_token_v2_0::instruction::close_account(
|
||||||
&spl_token::id(),
|
&spl_token_v2_0::id(),
|
||||||
&spl_token_pubkey(&address),
|
&spl_token_v2_0_pubkey(&address),
|
||||||
&spl_token_pubkey(&keypair.pubkey()),
|
&spl_token_v2_0_pubkey(&keypair.pubkey()),
|
||||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
|
||||||
&[],
|
&[],
|
||||||
)
|
)
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
@@ -362,11 +360,11 @@ fn make_close_message(
|
|||||||
fn run_accounts_bench(
|
fn run_accounts_bench(
|
||||||
entrypoint_addr: SocketAddr,
|
entrypoint_addr: SocketAddr,
|
||||||
faucet_addr: SocketAddr,
|
faucet_addr: SocketAddr,
|
||||||
payer_keypairs: &[&Keypair],
|
keypair: &Keypair,
|
||||||
iterations: usize,
|
iterations: usize,
|
||||||
maybe_space: Option<u64>,
|
maybe_space: Option<u64>,
|
||||||
batch_size: usize,
|
batch_size: usize,
|
||||||
close_nth_batch: u64,
|
close_nth: u64,
|
||||||
maybe_lamports: Option<u64>,
|
maybe_lamports: Option<u64>,
|
||||||
num_instructions: usize,
|
num_instructions: usize,
|
||||||
mint: Option<Pubkey>,
|
mint: Option<Pubkey>,
|
||||||
@@ -375,7 +373,7 @@ fn run_accounts_bench(
|
|||||||
let client =
|
let client =
|
||||||
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
|
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
|
||||||
|
|
||||||
info!("Targeting {}", entrypoint_addr);
|
info!("Targetting {}", entrypoint_addr);
|
||||||
|
|
||||||
let mut last_blockhash = Instant::now();
|
let mut last_blockhash = Instant::now();
|
||||||
let mut last_log = Instant::now();
|
let mut last_log = Instant::now();
|
||||||
@@ -384,10 +382,7 @@ fn run_accounts_bench(
|
|||||||
let mut tx_sent_count = 0;
|
let mut tx_sent_count = 0;
|
||||||
let mut total_accounts_created = 0;
|
let mut total_accounts_created = 0;
|
||||||
let mut total_accounts_closed = 0;
|
let mut total_accounts_closed = 0;
|
||||||
let mut balances: Vec<_> = payer_keypairs
|
let mut balance = client.get_balance(&keypair.pubkey()).unwrap_or(0);
|
||||||
.iter()
|
|
||||||
.map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0))
|
|
||||||
.collect();
|
|
||||||
let mut last_balance = Instant::now();
|
let mut last_balance = Instant::now();
|
||||||
|
|
||||||
let default_max_lamports = 1000;
|
let default_max_lamports = 1000;
|
||||||
@@ -404,7 +399,7 @@ fn run_accounts_bench(
|
|||||||
max_closed: Arc::new(AtomicU64::default()),
|
max_closed: Arc::new(AtomicU64::default()),
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("Starting balance(s): {:?}", balances);
|
info!("Starting balance: {}", balance);
|
||||||
|
|
||||||
let executor = TransactionExecutor::new(entrypoint_addr);
|
let executor = TransactionExecutor::new(entrypoint_addr);
|
||||||
|
|
||||||
@@ -420,88 +415,69 @@ fn run_accounts_bench(
|
|||||||
.saturating_mul(NUM_SIGNATURES);
|
.saturating_mul(NUM_SIGNATURES);
|
||||||
let lamports = min_balance + fee;
|
let lamports = min_balance + fee;
|
||||||
|
|
||||||
for (i, balance) in balances.iter_mut().enumerate() {
|
if balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
||||||
if *balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
if let Ok(b) = client.get_balance(&keypair.pubkey()) {
|
||||||
if let Ok(b) = client.get_balance(&payer_keypairs[i].pubkey()) {
|
balance = b;
|
||||||
*balance = b;
|
}
|
||||||
}
|
last_balance = Instant::now();
|
||||||
last_balance = Instant::now();
|
if balance < lamports {
|
||||||
if *balance < lamports * 2 {
|
info!(
|
||||||
info!(
|
"Balance {} is less than needed: {}, doing aidrop...",
|
||||||
"Balance {} is less than needed: {}, doing aidrop...",
|
balance, lamports
|
||||||
balance, lamports
|
);
|
||||||
);
|
if !airdrop_lamports(&client, &faucet_addr, keypair, lamports * 100_000) {
|
||||||
if !airdrop_lamports(
|
warn!("failed airdrop, exiting");
|
||||||
&client,
|
return;
|
||||||
&faucet_addr,
|
|
||||||
payer_keypairs[i],
|
|
||||||
lamports * 100_000,
|
|
||||||
) {
|
|
||||||
warn!("failed airdrop, exiting");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create accounts
|
|
||||||
let sigs_len = executor.num_outstanding();
|
let sigs_len = executor.num_outstanding();
|
||||||
if sigs_len < batch_size {
|
if sigs_len < batch_size {
|
||||||
let num_to_create = batch_size - sigs_len;
|
let num_to_create = batch_size - sigs_len;
|
||||||
if num_to_create >= payer_keypairs.len() {
|
info!("creating {} new", num_to_create);
|
||||||
info!("creating {} new", num_to_create);
|
let txs: Vec<_> = (0..num_to_create)
|
||||||
let chunk_size = num_to_create / payer_keypairs.len();
|
.into_par_iter()
|
||||||
if chunk_size > 0 {
|
.map(|_| {
|
||||||
for (i, keypair) in payer_keypairs.iter().enumerate() {
|
let message = make_create_message(
|
||||||
let txs: Vec<_> = (0..chunk_size)
|
keypair,
|
||||||
.into_par_iter()
|
&base_keypair,
|
||||||
.map(|_| {
|
seed_tracker.max_created.clone(),
|
||||||
let message = make_create_message(
|
num_instructions,
|
||||||
keypair,
|
min_balance,
|
||||||
&base_keypair,
|
maybe_space,
|
||||||
seed_tracker.max_created.clone(),
|
mint,
|
||||||
num_instructions,
|
);
|
||||||
min_balance,
|
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
||||||
maybe_space,
|
Transaction::new(&signers, message, recent_blockhash.0)
|
||||||
mint,
|
})
|
||||||
);
|
.collect();
|
||||||
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
balance = balance.saturating_sub(lamports * txs.len() as u64);
|
||||||
Transaction::new(&signers, message, recent_blockhash.0)
|
info!("txs: {}", txs.len());
|
||||||
})
|
let new_ids = executor.push_transactions(txs);
|
||||||
.collect();
|
info!("ids: {}", new_ids.len());
|
||||||
balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
|
tx_sent_count += new_ids.len();
|
||||||
info!("txs: {}", txs.len());
|
total_accounts_created += num_instructions * new_ids.len();
|
||||||
let new_ids = executor.push_transactions(txs);
|
|
||||||
info!("ids: {}", new_ids.len());
|
|
||||||
tx_sent_count += new_ids.len();
|
|
||||||
total_accounts_created += num_instructions * new_ids.len();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if close_nth_batch > 0 {
|
if close_nth > 0 {
|
||||||
let num_batches_to_close =
|
let expected_closed = total_accounts_created as u64 / close_nth;
|
||||||
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
|
if expected_closed > total_accounts_closed {
|
||||||
let expected_closed = num_batches_to_close * batch_size as u64;
|
let txs: Vec<_> = (0..expected_closed - total_accounts_closed)
|
||||||
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
|
|
||||||
// Close every account we've created with seed between max_closed_seed..expected_closed
|
|
||||||
if max_closed_seed < expected_closed {
|
|
||||||
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
|
|
||||||
.into_par_iter()
|
.into_par_iter()
|
||||||
.map(|_| {
|
.map(|_| {
|
||||||
let message = make_close_message(
|
let message = make_close_message(
|
||||||
payer_keypairs[0],
|
keypair,
|
||||||
&base_keypair,
|
&base_keypair,
|
||||||
seed_tracker.max_closed.clone(),
|
seed_tracker.max_closed.clone(),
|
||||||
1,
|
1,
|
||||||
min_balance,
|
min_balance,
|
||||||
mint.is_some(),
|
mint.is_some(),
|
||||||
);
|
);
|
||||||
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
|
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
||||||
Transaction::new(&signers, message, recent_blockhash.0)
|
Transaction::new(&signers, message, recent_blockhash.0)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
|
balance = balance.saturating_sub(fee * txs.len() as u64);
|
||||||
info!("close txs: {}", txs.len());
|
info!("close txs: {}", txs.len());
|
||||||
let new_ids = executor.push_transactions(txs);
|
let new_ids = executor.push_transactions(txs);
|
||||||
info!("close ids: {}", new_ids.len());
|
info!("close ids: {}", new_ids.len());
|
||||||
@@ -516,8 +492,8 @@ fn run_accounts_bench(
|
|||||||
count += 1;
|
count += 1;
|
||||||
if last_log.elapsed().as_millis() > 3000 {
|
if last_log.elapsed().as_millis() > 3000 {
|
||||||
info!(
|
info!(
|
||||||
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
|
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance: {}",
|
||||||
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
|
total_accounts_created, total_accounts_closed, tx_sent_count, count, balance
|
||||||
);
|
);
|
||||||
last_log = Instant::now();
|
last_log = Instant::now();
|
||||||
}
|
}
|
||||||
@@ -568,7 +544,6 @@ fn main() {
|
|||||||
Arg::with_name("identity")
|
Arg::with_name("identity")
|
||||||
.long("identity")
|
.long("identity")
|
||||||
.takes_value(true)
|
.takes_value(true)
|
||||||
.multiple(true)
|
|
||||||
.value_name("FILE")
|
.value_name("FILE")
|
||||||
.help("keypair file"),
|
.help("keypair file"),
|
||||||
)
|
)
|
||||||
@@ -580,14 +555,14 @@ fn main() {
|
|||||||
.help("Number of transactions to send per batch"),
|
.help("Number of transactions to send per batch"),
|
||||||
)
|
)
|
||||||
.arg(
|
.arg(
|
||||||
Arg::with_name("close_nth_batch")
|
Arg::with_name("close_nth")
|
||||||
.long("close-frequency")
|
.long("close-frequency")
|
||||||
.takes_value(true)
|
.takes_value(true)
|
||||||
.value_name("BYTES")
|
.value_name("BYTES")
|
||||||
.help(
|
.help(
|
||||||
"Every `n` batches, create a batch of close transactions for
|
"Send close transactions after this many accounts created. \
|
||||||
the earliest remaining batch of accounts created.
|
Note: a `close-frequency` value near or below `batch-size` \
|
||||||
Note: Should be > 1 to avoid situations where the close \
|
may result in transaction-simulation errors, as the close \
|
||||||
transactions will be submitted before the corresponding \
|
transactions will be submitted before the corresponding \
|
||||||
create transactions have been confirmed",
|
create transactions have been confirmed",
|
||||||
),
|
),
|
||||||
@@ -640,7 +615,7 @@ fn main() {
|
|||||||
let space = value_t!(matches, "space", u64).ok();
|
let space = value_t!(matches, "space", u64).ok();
|
||||||
let lamports = value_t!(matches, "lamports", u64).ok();
|
let lamports = value_t!(matches, "lamports", u64).ok();
|
||||||
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
|
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
|
||||||
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
|
let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0);
|
||||||
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
|
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
|
||||||
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
|
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
|
||||||
if num_instructions == 0 || num_instructions > 500 {
|
if num_instructions == 0 || num_instructions > 500 {
|
||||||
@@ -650,30 +625,20 @@ fn main() {
|
|||||||
|
|
||||||
let mint = pubkey_of(&matches, "mint");
|
let mint = pubkey_of(&matches, "mint");
|
||||||
|
|
||||||
let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String)
|
let keypair =
|
||||||
.iter()
|
read_keypair_file(&value_t_or_exit!(matches, "identity", String)).expect("bad keypair");
|
||||||
.map(|keypair_string| {
|
|
||||||
read_keypair_file(keypair_string)
|
|
||||||
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string))
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
let mut payer_keypair_refs: Vec<&Keypair> = vec![];
|
|
||||||
for keypair in payer_keypairs.iter() {
|
|
||||||
payer_keypair_refs.push(keypair);
|
|
||||||
}
|
|
||||||
|
|
||||||
let rpc_addr = if !skip_gossip {
|
let rpc_addr = if !skip_gossip {
|
||||||
info!("Finding cluster entry: {:?}", entrypoint_addr);
|
info!("Finding cluster entry: {:?}", entrypoint_addr);
|
||||||
let (gossip_nodes, _validators) = discover(
|
let (gossip_nodes, _validators) = discover(
|
||||||
None, // keypair
|
None,
|
||||||
Some(&entrypoint_addr),
|
Some(&entrypoint_addr),
|
||||||
None, // num_nodes
|
None,
|
||||||
Duration::from_secs(60), // timeout
|
Some(60),
|
||||||
None, // find_node_by_pubkey
|
None,
|
||||||
Some(&entrypoint_addr), // find_node_by_gossip_addr
|
Some(&entrypoint_addr),
|
||||||
None, // my_gossip_addr
|
None,
|
||||||
0, // my_shred_version
|
0,
|
||||||
SocketAddrSpace::Unspecified,
|
|
||||||
)
|
)
|
||||||
.unwrap_or_else(|err| {
|
.unwrap_or_else(|err| {
|
||||||
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
|
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
|
||||||
@@ -690,11 +655,11 @@ fn main() {
|
|||||||
run_accounts_bench(
|
run_accounts_bench(
|
||||||
rpc_addr,
|
rpc_addr,
|
||||||
faucet_addr,
|
faucet_addr,
|
||||||
&payer_keypair_refs,
|
&keypair,
|
||||||
iterations,
|
iterations,
|
||||||
space,
|
space,
|
||||||
batch_size,
|
batch_size,
|
||||||
close_nth_batch,
|
close_nth,
|
||||||
lamports,
|
lamports,
|
||||||
num_instructions,
|
num_instructions,
|
||||||
mint,
|
mint,
|
||||||
@@ -703,15 +668,13 @@ fn main() {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub mod test {
|
pub mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_core::validator::ValidatorConfig;
|
||||||
solana_core::validator::ValidatorConfig,
|
use solana_local_cluster::{
|
||||||
solana_local_cluster::{
|
local_cluster::{ClusterConfig, LocalCluster},
|
||||||
local_cluster::{ClusterConfig, LocalCluster},
|
validator_configs::make_identical_validator_configs,
|
||||||
validator_configs::make_identical_validator_configs,
|
|
||||||
},
|
|
||||||
solana_sdk::poh_config::PohConfig,
|
|
||||||
};
|
};
|
||||||
|
use solana_sdk::poh_config::PohConfig;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_accounts_cluster_bench() {
|
fn test_accounts_cluster_bench() {
|
||||||
@@ -727,22 +690,22 @@ pub mod test {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
|
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
|
||||||
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
|
let cluster = LocalCluster::new(&mut config);
|
||||||
let iterations = 10;
|
let iterations = 10;
|
||||||
let maybe_space = None;
|
let maybe_space = None;
|
||||||
let batch_size = 100;
|
let batch_size = 100;
|
||||||
let close_nth_batch = 100;
|
let close_nth = 100;
|
||||||
let maybe_lamports = None;
|
let maybe_lamports = None;
|
||||||
let num_instructions = 2;
|
let num_instructions = 2;
|
||||||
let mut start = Measure::start("total accounts run");
|
let mut start = Measure::start("total accounts run");
|
||||||
run_accounts_bench(
|
run_accounts_bench(
|
||||||
cluster.entry_point_info.rpc,
|
cluster.entry_point_info.rpc,
|
||||||
faucet_addr,
|
faucet_addr,
|
||||||
&[&cluster.funding_keypair],
|
&cluster.funding_keypair,
|
||||||
iterations,
|
iterations,
|
||||||
maybe_space,
|
maybe_space,
|
||||||
batch_size,
|
batch_size,
|
||||||
close_nth_batch,
|
close_nth,
|
||||||
maybe_lamports,
|
maybe_lamports,
|
||||||
num_instructions,
|
num_instructions,
|
||||||
None,
|
None,
|
||||||
|
@@ -1,17 +0,0 @@
|
|||||||
[package]
|
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
|
||||||
edition = "2018"
|
|
||||||
name = "solana-accountsdb-plugin-interface"
|
|
||||||
description = "The Solana AccountsDb plugin interface."
|
|
||||||
version = "1.8.13"
|
|
||||||
repository = "https://github.com/solana-labs/solana"
|
|
||||||
license = "Apache-2.0"
|
|
||||||
homepage = "https://solana.com/"
|
|
||||||
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
log = "0.4.11"
|
|
||||||
thiserror = "1.0.29"
|
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
|
@@ -1,20 +0,0 @@
|
|||||||
<p align="center">
|
|
||||||
<a href="https://solana.com">
|
|
||||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
|
||||||
</a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
# Solana AccountsDb Plugin Interface
|
|
||||||
|
|
||||||
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
|
|
||||||
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.
|
|
||||||
|
|
||||||
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
|
|
||||||
instantiates the implementation of the interface.
|
|
||||||
|
|
||||||
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
|
|
||||||
external PostgreSQL databases.
|
|
||||||
|
|
||||||
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
|
|
||||||
|
|
||||||
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
|
|
@@ -1,143 +0,0 @@
|
|||||||
/// The interface for AccountsDb plugins. A plugin must implement
|
|
||||||
/// the AccountsDbPlugin trait to work with the runtime.
|
|
||||||
/// In addition, the dynamic library must export a "C" function _create_plugin which
|
|
||||||
/// creates the implementation of the plugin.
|
|
||||||
use {
|
|
||||||
std::{any::Any, error, io},
|
|
||||||
thiserror::Error,
|
|
||||||
};
|
|
||||||
|
|
||||||
impl Eq for ReplicaAccountInfo<'_> {}
|
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Debug)]
|
|
||||||
/// Information about an account being updated
|
|
||||||
pub struct ReplicaAccountInfo<'a> {
|
|
||||||
/// The Pubkey for the account
|
|
||||||
pub pubkey: &'a [u8],
|
|
||||||
|
|
||||||
/// The lamports for the account
|
|
||||||
pub lamports: u64,
|
|
||||||
|
|
||||||
/// The Pubkey of the owner program account
|
|
||||||
pub owner: &'a [u8],
|
|
||||||
|
|
||||||
/// This account's data contains a loaded program (and is now read-only)
|
|
||||||
pub executable: bool,
|
|
||||||
|
|
||||||
/// The epoch at which this account will next owe rent
|
|
||||||
pub rent_epoch: u64,
|
|
||||||
|
|
||||||
/// The data held in this account.
|
|
||||||
pub data: &'a [u8],
|
|
||||||
|
|
||||||
/// A global monotonically increasing atomic number, which can be used
|
|
||||||
/// to tell the order of the account update. For example, when an
|
|
||||||
/// account is updated in the same slot multiple times, the update
|
|
||||||
/// with higher write_version should supersede the one with lower
|
|
||||||
/// write_version.
|
|
||||||
pub write_version: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A wrapper to future-proof ReplicaAccountInfo handling.
|
|
||||||
/// If there were a change to the structure of ReplicaAccountInfo,
|
|
||||||
/// there would be new enum entry for the newer version, forcing
|
|
||||||
/// plugin implementations to handle the change.
|
|
||||||
pub enum ReplicaAccountInfoVersions<'a> {
|
|
||||||
V0_0_1(&'a ReplicaAccountInfo<'a>),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Errors returned by plugin calls
|
|
||||||
#[derive(Error, Debug)]
|
|
||||||
pub enum AccountsDbPluginError {
|
|
||||||
/// Error opening the configuration file; for example, when the file
|
|
||||||
/// is not found or when the validator process has no permission to read it.
|
|
||||||
#[error("Error opening config file. Error detail: ({0}).")]
|
|
||||||
ConfigFileOpenError(#[from] io::Error),
|
|
||||||
|
|
||||||
/// Error in reading the content of the config file or the content
|
|
||||||
/// is not in the expected format.
|
|
||||||
#[error("Error reading config file. Error message: ({msg})")]
|
|
||||||
ConfigFileReadError { msg: String },
|
|
||||||
|
|
||||||
/// Error when updating the account.
|
|
||||||
#[error("Error updating account. Error message: ({msg})")]
|
|
||||||
AccountsUpdateError { msg: String },
|
|
||||||
|
|
||||||
/// Error when updating the slot status
|
|
||||||
#[error("Error updating slot status. Error message: ({msg})")]
|
|
||||||
SlotStatusUpdateError { msg: String },
|
|
||||||
|
|
||||||
/// Any custom error defined by the plugin.
|
|
||||||
#[error("Plugin-defined custom error. Error message: ({0})")]
|
|
||||||
Custom(Box<dyn error::Error + Send + Sync>),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The current status of a slot
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum SlotStatus {
|
|
||||||
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
|
|
||||||
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
|
|
||||||
/// the fork the validator believes is most likely to finalize.
|
|
||||||
Processed,
|
|
||||||
|
|
||||||
/// The highest slot having reached max vote lockout.
|
|
||||||
Rooted,
|
|
||||||
|
|
||||||
/// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed.
|
|
||||||
Confirmed,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SlotStatus {
|
|
||||||
pub fn as_str(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
SlotStatus::Confirmed => "confirmed",
|
|
||||||
SlotStatus::Processed => "processed",
|
|
||||||
SlotStatus::Rooted => "rooted",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
|
|
||||||
|
|
||||||
/// Defines an AccountsDb plugin, to stream data from the runtime.
|
|
||||||
/// AccountsDb plugins must describe desired behavior for load and unload,
|
|
||||||
/// as well as how they will handle streamed data.
|
|
||||||
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
|
|
||||||
fn name(&self) -> &'static str;
|
|
||||||
|
|
||||||
/// The callback called when a plugin is loaded by the system,
|
|
||||||
/// used for doing whatever initialization is required by the plugin.
|
|
||||||
/// The _config_file contains the name of the
|
|
||||||
/// of the config file. The config must be in JSON format and
|
|
||||||
/// include a field "libpath" indicating the full path
|
|
||||||
/// name of the shared library implementing this interface.
|
|
||||||
fn on_load(&mut self, _config_file: &str) -> Result<()> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The callback called right before a plugin is unloaded by the system
|
|
||||||
/// Used for doing cleanup before unload.
|
|
||||||
fn on_unload(&mut self) {}
|
|
||||||
|
|
||||||
/// Called when an account is updated at a slot.
|
|
||||||
/// When `is_startup` is true, it indicates the account is loaded from
|
|
||||||
/// snapshots when the validator starts up. When `is_startup` is false,
|
|
||||||
/// the account is updated during transaction processing.
|
|
||||||
fn update_account(
|
|
||||||
&mut self,
|
|
||||||
account: ReplicaAccountInfoVersions,
|
|
||||||
slot: u64,
|
|
||||||
is_startup: bool,
|
|
||||||
) -> Result<()>;
|
|
||||||
|
|
||||||
/// Called when all accounts are notified of during startup.
|
|
||||||
fn notify_end_of_startup(&mut self) -> Result<()>;
|
|
||||||
|
|
||||||
/// Called when a slot status is updated
|
|
||||||
fn update_slot_status(
|
|
||||||
&mut self,
|
|
||||||
slot: u64,
|
|
||||||
parent: Option<u64>,
|
|
||||||
status: SlotStatus,
|
|
||||||
) -> Result<()>;
|
|
||||||
}
|
|
@@ -1 +0,0 @@
|
|||||||
pub mod accountsdb_plugin_interface;
|
|
@@ -1,30 +0,0 @@
|
|||||||
[package]
|
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
|
||||||
edition = "2018"
|
|
||||||
name = "solana-accountsdb-plugin-manager"
|
|
||||||
description = "The Solana AccountsDb plugin manager."
|
|
||||||
version = "1.8.13"
|
|
||||||
repository = "https://github.com/solana-labs/solana"
|
|
||||||
license = "Apache-2.0"
|
|
||||||
homepage = "https://solana.com/"
|
|
||||||
documentation = "https://docs.rs/solana-validator"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
bs58 = "0.4.0"
|
|
||||||
crossbeam-channel = "0.4"
|
|
||||||
libloading = "0.7.0"
|
|
||||||
log = "0.4.11"
|
|
||||||
serde = "1.0.130"
|
|
||||||
serde_derive = "1.0.103"
|
|
||||||
serde_json = "1.0.67"
|
|
||||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.13" }
|
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
|
||||||
solana-metrics = { path = "../metrics", version = "=1.8.13" }
|
|
||||||
solana-rpc = { path = "../rpc", version = "=1.8.13" }
|
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
|
||||||
thiserror = "1.0.21"
|
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
|
@@ -1,227 +0,0 @@
|
|||||||
/// Module responsible for notifying plugins of account updates
|
|
||||||
use {
|
|
||||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
|
||||||
log::*,
|
|
||||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
|
||||||
ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
|
|
||||||
},
|
|
||||||
solana_measure::measure::Measure,
|
|
||||||
solana_metrics::*,
|
|
||||||
solana_runtime::{
|
|
||||||
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
|
|
||||||
append_vec::{StoredAccountMeta, StoredMeta},
|
|
||||||
},
|
|
||||||
solana_sdk::{
|
|
||||||
account::{AccountSharedData, ReadableAccount},
|
|
||||||
clock::Slot,
|
|
||||||
},
|
|
||||||
std::sync::{Arc, RwLock},
|
|
||||||
};
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub(crate) struct AccountsUpdateNotifierImpl {
|
|
||||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
|
|
||||||
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
|
|
||||||
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
|
|
||||||
self.notify_plugins_of_account_update(account_info, slot, false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
|
|
||||||
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
|
|
||||||
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
|
|
||||||
|
|
||||||
let account = self.accountinfo_from_stored_account_meta(account);
|
|
||||||
measure_copy.stop();
|
|
||||||
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-copy-stored-account-info-us",
|
|
||||||
measure_copy.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(account_info) = account {
|
|
||||||
self.notify_plugins_of_account_update(account_info, slot, true);
|
|
||||||
}
|
|
||||||
measure_all.stop();
|
|
||||||
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-notify-account-restore-all-us",
|
|
||||||
measure_all.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_end_of_restore_from_snapshot(&self) {
|
|
||||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
|
||||||
if plugin_manager.plugins.is_empty() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for plugin in plugin_manager.plugins.iter_mut() {
|
|
||||||
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
|
|
||||||
match plugin.notify_end_of_startup() {
|
|
||||||
Err(err) => {
|
|
||||||
error!(
|
|
||||||
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
|
|
||||||
err,
|
|
||||||
plugin.name()
|
|
||||||
)
|
|
||||||
}
|
|
||||||
Ok(_) => {
|
|
||||||
trace!(
|
|
||||||
"Successfully notified the end of restore from snapshot to plugin {}",
|
|
||||||
plugin.name()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
measure.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-end-of-restore-from-snapshot",
|
|
||||||
measure.as_us() as usize
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
|
|
||||||
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
|
|
||||||
self.notify_slot_status(slot, parent, SlotStatus::Processed);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
|
|
||||||
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsUpdateNotifierImpl {
|
|
||||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
|
||||||
AccountsUpdateNotifierImpl { plugin_manager }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn accountinfo_from_shared_account_data<'a>(
|
|
||||||
&self,
|
|
||||||
meta: &'a StoredMeta,
|
|
||||||
account: &'a AccountSharedData,
|
|
||||||
) -> Option<ReplicaAccountInfo<'a>> {
|
|
||||||
Some(ReplicaAccountInfo {
|
|
||||||
pubkey: meta.pubkey.as_ref(),
|
|
||||||
lamports: account.lamports(),
|
|
||||||
owner: account.owner().as_ref(),
|
|
||||||
executable: account.executable(),
|
|
||||||
rent_epoch: account.rent_epoch(),
|
|
||||||
data: account.data(),
|
|
||||||
write_version: meta.write_version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn accountinfo_from_stored_account_meta<'a>(
|
|
||||||
&self,
|
|
||||||
stored_account_meta: &'a StoredAccountMeta,
|
|
||||||
) -> Option<ReplicaAccountInfo<'a>> {
|
|
||||||
Some(ReplicaAccountInfo {
|
|
||||||
pubkey: stored_account_meta.meta.pubkey.as_ref(),
|
|
||||||
lamports: stored_account_meta.account_meta.lamports,
|
|
||||||
owner: stored_account_meta.account_meta.owner.as_ref(),
|
|
||||||
executable: stored_account_meta.account_meta.executable,
|
|
||||||
rent_epoch: stored_account_meta.account_meta.rent_epoch,
|
|
||||||
data: stored_account_meta.data,
|
|
||||||
write_version: stored_account_meta.meta.write_version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_plugins_of_account_update(
|
|
||||||
&self,
|
|
||||||
account: ReplicaAccountInfo,
|
|
||||||
slot: Slot,
|
|
||||||
is_startup: bool,
|
|
||||||
) {
|
|
||||||
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
|
|
||||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
|
||||||
|
|
||||||
if plugin_manager.plugins.is_empty() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
for plugin in plugin_manager.plugins.iter_mut() {
|
|
||||||
let mut measure = Measure::start("accountsdb-plugin-update-account");
|
|
||||||
match plugin.update_account(
|
|
||||||
ReplicaAccountInfoVersions::V0_0_1(&account),
|
|
||||||
slot,
|
|
||||||
is_startup,
|
|
||||||
) {
|
|
||||||
Err(err) => {
|
|
||||||
error!(
|
|
||||||
"Failed to update account {} at slot {}, error: {} to plugin {}",
|
|
||||||
bs58::encode(account.pubkey).into_string(),
|
|
||||||
slot,
|
|
||||||
err,
|
|
||||||
plugin.name()
|
|
||||||
)
|
|
||||||
}
|
|
||||||
Ok(_) => {
|
|
||||||
trace!(
|
|
||||||
"Successfully updated account {} at slot {} to plugin {}",
|
|
||||||
bs58::encode(account.pubkey).into_string(),
|
|
||||||
slot,
|
|
||||||
plugin.name()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
measure.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-update-account-us",
|
|
||||||
measure.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
}
|
|
||||||
measure2.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-notify_plugins_of_account_update-us",
|
|
||||||
measure2.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
|
|
||||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
|
||||||
if plugin_manager.plugins.is_empty() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for plugin in plugin_manager.plugins.iter_mut() {
|
|
||||||
let mut measure = Measure::start("accountsdb-plugin-update-slot");
|
|
||||||
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
|
|
||||||
Err(err) => {
|
|
||||||
error!(
|
|
||||||
"Failed to update slot status at slot {}, error: {} to plugin {}",
|
|
||||||
slot,
|
|
||||||
err,
|
|
||||||
plugin.name()
|
|
||||||
)
|
|
||||||
}
|
|
||||||
Ok(_) => {
|
|
||||||
trace!(
|
|
||||||
"Successfully updated slot status at slot {} to plugin {}",
|
|
||||||
slot,
|
|
||||||
plugin.name()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
measure.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-update-slot-us",
|
|
||||||
measure.as_us() as usize,
|
|
||||||
1000,
|
|
||||||
1000
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,55 +0,0 @@
|
|||||||
/// Managing the AccountsDb plugins
|
|
||||||
use {
|
|
||||||
libloading::{Library, Symbol},
|
|
||||||
log::*,
|
|
||||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
|
|
||||||
std::error::Error,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
pub struct AccountsDbPluginManager {
|
|
||||||
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
|
|
||||||
libs: Vec<Library>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsDbPluginManager {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
AccountsDbPluginManager {
|
|
||||||
plugins: Vec::default(),
|
|
||||||
libs: Vec::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # Safety
|
|
||||||
///
|
|
||||||
/// This function loads the dynamically linked library specified in the path. The library
|
|
||||||
/// must do necessary initializations.
|
|
||||||
pub unsafe fn load_plugin(
|
|
||||||
&mut self,
|
|
||||||
libpath: &str,
|
|
||||||
config_file: &str,
|
|
||||||
) -> Result<(), Box<dyn Error>> {
|
|
||||||
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
|
|
||||||
let lib = Library::new(libpath)?;
|
|
||||||
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
|
|
||||||
let plugin_raw = constructor();
|
|
||||||
let mut plugin = Box::from_raw(plugin_raw);
|
|
||||||
plugin.on_load(config_file)?;
|
|
||||||
self.plugins.push(plugin);
|
|
||||||
self.libs.push(lib);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Unload all plugins and loaded plugin libraries, making sure to fire
|
|
||||||
/// their `on_plugin_unload()` methods so they can do any necessary cleanup.
|
|
||||||
pub fn unload(&mut self) {
|
|
||||||
for mut plugin in self.plugins.drain(..) {
|
|
||||||
info!("Unloading plugin for {:?}", plugin.name());
|
|
||||||
plugin.on_unload();
|
|
||||||
}
|
|
||||||
|
|
||||||
for lib in self.libs.drain(..) {
|
|
||||||
drop(lib);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,157 +0,0 @@
|
|||||||
use {
|
|
||||||
crate::{
|
|
||||||
accounts_update_notifier::AccountsUpdateNotifierImpl,
|
|
||||||
accountsdb_plugin_manager::AccountsDbPluginManager,
|
|
||||||
slot_status_observer::SlotStatusObserver,
|
|
||||||
},
|
|
||||||
crossbeam_channel::Receiver,
|
|
||||||
log::*,
|
|
||||||
serde_json,
|
|
||||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
|
||||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
|
||||||
std::{
|
|
||||||
fs::File,
|
|
||||||
io::Read,
|
|
||||||
path::{Path, PathBuf},
|
|
||||||
sync::{Arc, RwLock},
|
|
||||||
thread,
|
|
||||||
},
|
|
||||||
thiserror::Error,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
|
||||||
pub enum AccountsdbPluginServiceError {
|
|
||||||
#[error("Cannot open the the plugin config file")]
|
|
||||||
CannotOpenConfigFile(String),
|
|
||||||
|
|
||||||
#[error("Cannot read the the plugin config file")]
|
|
||||||
CannotReadConfigFile(String),
|
|
||||||
|
|
||||||
#[error("The config file is not in a valid Json format")]
|
|
||||||
InvalidConfigFileFormat(String),
|
|
||||||
|
|
||||||
#[error("Plugin library path is not specified in the config file")]
|
|
||||||
LibPathNotSet,
|
|
||||||
|
|
||||||
#[error("Invalid plugin path")]
|
|
||||||
InvalidPluginPath,
|
|
||||||
|
|
||||||
#[error("Cannot load plugin shared library")]
|
|
||||||
PluginLoadError(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The service managing the AccountsDb plugin workflow.
|
|
||||||
pub struct AccountsDbPluginService {
|
|
||||||
slot_status_observer: SlotStatusObserver,
|
|
||||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
|
||||||
accounts_update_notifier: AccountsUpdateNotifier,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsDbPluginService {
|
|
||||||
/// Creates and returns the AccountsDbPluginService.
|
|
||||||
/// # Arguments
|
|
||||||
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notification
|
|
||||||
/// * `accountsdb_plugin_config_file` - The config file path for the plugin. The
|
|
||||||
/// config file controls the plugin responsible
|
|
||||||
/// for transporting the data to external data stores. It is defined in JSON format.
|
|
||||||
/// The `libpath` field should be pointed to the full path of the dynamic shared library
|
|
||||||
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
|
|
||||||
/// trait. And the shared library shall export a `C` function `_create_plugin` which
|
|
||||||
/// shall create the implementation of `AccountsDbPlugin` and returns to the caller.
|
|
||||||
/// The rest of the JSON fields' definition is up to to the concrete plugin implementation
|
|
||||||
/// It is usually used to configure the connection information for the external data store.
|
|
||||||
|
|
||||||
pub fn new(
|
|
||||||
confirmed_bank_receiver: Receiver<BankNotification>,
|
|
||||||
accountsdb_plugin_config_files: &[PathBuf],
|
|
||||||
) -> Result<Self, AccountsdbPluginServiceError> {
|
|
||||||
info!(
|
|
||||||
"Starting AccountsDbPluginService from config files: {:?}",
|
|
||||||
accountsdb_plugin_config_files
|
|
||||||
);
|
|
||||||
let mut plugin_manager = AccountsDbPluginManager::new();
|
|
||||||
|
|
||||||
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
|
|
||||||
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
|
|
||||||
let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
|
|
||||||
plugin_manager.clone(),
|
|
||||||
)));
|
|
||||||
let slot_status_observer =
|
|
||||||
SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());
|
|
||||||
|
|
||||||
info!("Started AccountsDbPluginService");
|
|
||||||
Ok(AccountsDbPluginService {
|
|
||||||
slot_status_observer,
|
|
||||||
plugin_manager,
|
|
||||||
accounts_update_notifier,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_plugin(
|
|
||||||
plugin_manager: &mut AccountsDbPluginManager,
|
|
||||||
accountsdb_plugin_config_file: &Path,
|
|
||||||
) -> Result<(), AccountsdbPluginServiceError> {
|
|
||||||
let mut file = match File::open(accountsdb_plugin_config_file) {
|
|
||||||
Ok(file) => file,
|
|
||||||
Err(err) => {
|
|
||||||
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
|
|
||||||
"Failed to open the plugin config file {:?}, error: {:?}",
|
|
||||||
accountsdb_plugin_config_file, err
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut contents = String::new();
|
|
||||||
if let Err(err) = file.read_to_string(&mut contents) {
|
|
||||||
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
|
|
||||||
"Failed to read the plugin config file {:?}, error: {:?}",
|
|
||||||
accountsdb_plugin_config_file, err
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let result: serde_json::Value = match serde_json::from_str(&contents) {
|
|
||||||
Ok(value) => value,
|
|
||||||
Err(err) => {
|
|
||||||
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
|
|
||||||
format!(
|
|
||||||
"The config file {:?} is not in a valid Json format, error: {:?}",
|
|
||||||
accountsdb_plugin_config_file, err
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let libpath = result["libpath"]
|
|
||||||
.as_str()
|
|
||||||
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
|
|
||||||
let config_file = accountsdb_plugin_config_file
|
|
||||||
.as_os_str()
|
|
||||||
.to_str()
|
|
||||||
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
|
|
||||||
|
|
||||||
unsafe {
|
|
||||||
let result = plugin_manager.load_plugin(libpath, config_file);
|
|
||||||
if let Err(err) = result {
|
|
||||||
let msg = format!(
|
|
||||||
"Failed to load the plugin library: {:?}, error: {:?}",
|
|
||||||
libpath, err
|
|
||||||
);
|
|
||||||
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
|
|
||||||
self.accounts_update_notifier.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn join(mut self) -> thread::Result<()> {
|
|
||||||
self.slot_status_observer.join()?;
|
|
||||||
self.plugin_manager.write().unwrap().unload();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,4 +0,0 @@
|
|||||||
pub mod accounts_update_notifier;
|
|
||||||
pub mod accountsdb_plugin_manager;
|
|
||||||
pub mod accountsdb_plugin_service;
|
|
||||||
pub mod slot_status_observer;
|
|
@@ -1,80 +0,0 @@
|
|||||||
use {
|
|
||||||
crossbeam_channel::Receiver,
|
|
||||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
|
||||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
|
||||||
std::{
|
|
||||||
sync::{
|
|
||||||
atomic::{AtomicBool, Ordering},
|
|
||||||
Arc,
|
|
||||||
},
|
|
||||||
thread::{self, Builder, JoinHandle},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub(crate) struct SlotStatusObserver {
|
|
||||||
bank_notification_receiver_service: Option<JoinHandle<()>>,
|
|
||||||
exit_updated_slot_server: Arc<AtomicBool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SlotStatusObserver {
|
|
||||||
pub fn new(
|
|
||||||
bank_notification_receiver: Receiver<BankNotification>,
|
|
||||||
accounts_update_notifier: AccountsUpdateNotifier,
|
|
||||||
) -> Self {
|
|
||||||
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
|
|
||||||
|
|
||||||
Self {
|
|
||||||
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
|
|
||||||
bank_notification_receiver,
|
|
||||||
exit_updated_slot_server.clone(),
|
|
||||||
accounts_update_notifier,
|
|
||||||
)),
|
|
||||||
exit_updated_slot_server,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn join(&mut self) -> thread::Result<()> {
|
|
||||||
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
|
|
||||||
self.bank_notification_receiver_service
|
|
||||||
.take()
|
|
||||||
.map(JoinHandle::join)
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_bank_notification_receiver(
|
|
||||||
bank_notification_receiver: Receiver<BankNotification>,
|
|
||||||
exit: Arc<AtomicBool>,
|
|
||||||
accounts_update_notifier: AccountsUpdateNotifier,
|
|
||||||
) -> JoinHandle<()> {
|
|
||||||
Builder::new()
|
|
||||||
.name("bank_notification_receiver".to_string())
|
|
||||||
.spawn(move || {
|
|
||||||
while !exit.load(Ordering::Relaxed) {
|
|
||||||
if let Ok(slot) = bank_notification_receiver.recv() {
|
|
||||||
match slot {
|
|
||||||
BankNotification::OptimisticallyConfirmed(slot) => {
|
|
||||||
accounts_update_notifier
|
|
||||||
.read()
|
|
||||||
.unwrap()
|
|
||||||
.notify_slot_confirmed(slot, None);
|
|
||||||
}
|
|
||||||
BankNotification::Frozen(bank) => {
|
|
||||||
accounts_update_notifier
|
|
||||||
.read()
|
|
||||||
.unwrap()
|
|
||||||
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
|
|
||||||
}
|
|
||||||
BankNotification::Root(bank) => {
|
|
||||||
accounts_update_notifier
|
|
||||||
.read()
|
|
||||||
.unwrap()
|
|
||||||
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,33 +0,0 @@
|
|||||||
[package]
|
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
|
||||||
edition = "2018"
|
|
||||||
name = "solana-accountsdb-plugin-postgres"
|
|
||||||
description = "The Solana AccountsDb plugin for PostgreSQL database."
|
|
||||||
version = "1.8.13"
|
|
||||||
repository = "https://github.com/solana-labs/solana"
|
|
||||||
license = "Apache-2.0"
|
|
||||||
homepage = "https://solana.com/"
|
|
||||||
documentation = "https://docs.rs/solana-validator"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
crate-type = ["cdylib", "rlib"]
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
bs58 = "0.4.0"
|
|
||||||
chrono = { version = "0.4.11", features = ["serde"] }
|
|
||||||
crossbeam-channel = "0.5"
|
|
||||||
log = "0.4.14"
|
|
||||||
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
|
|
||||||
serde = "1.0.130"
|
|
||||||
serde_derive = "1.0.103"
|
|
||||||
serde_json = "1.0.67"
|
|
||||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.13" }
|
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
|
||||||
solana-metrics = { path = "../metrics", version = "=1.8.13" }
|
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
|
||||||
thiserror = "1.0.21"
|
|
||||||
tokio-postgres = "0.7.3"
|
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
|
@@ -1,5 +0,0 @@
|
|||||||
This is an example implementing the AccountsDb plugin for PostgreSQL database.
|
|
||||||
Please see the `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
|
|
||||||
|
|
||||||
To create the schema objects for the database, please use `scripts/create_schema.sql`.
|
|
||||||
`scripts/drop_schema.sql` can be used to tear down the schema objects.
|
|
@@ -1,54 +0,0 @@
|
|||||||
/**
|
|
||||||
* This plugin implementation for PostgreSQL requires the following tables
|
|
||||||
*/
|
|
||||||
-- The table storing accounts
|
|
||||||
|
|
||||||
|
|
||||||
CREATE TABLE account (
|
|
||||||
pubkey BYTEA PRIMARY KEY,
|
|
||||||
owner BYTEA,
|
|
||||||
lamports BIGINT NOT NULL,
|
|
||||||
slot BIGINT NOT NULL,
|
|
||||||
executable BOOL NOT NULL,
|
|
||||||
rent_epoch BIGINT NOT NULL,
|
|
||||||
data BYTEA,
|
|
||||||
write_version BIGINT NOT NULL,
|
|
||||||
updated_on TIMESTAMP NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
-- The table storing slot information
|
|
||||||
CREATE TABLE slot (
|
|
||||||
slot BIGINT PRIMARY KEY,
|
|
||||||
parent BIGINT,
|
|
||||||
status varchar(16) NOT NULL,
|
|
||||||
updated_on TIMESTAMP NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The following is for keeping historical data for accounts and is not required for plugin to work.
|
|
||||||
*/
|
|
||||||
-- The table storing historical data for accounts
|
|
||||||
CREATE TABLE account_audit (
|
|
||||||
pubkey BYTEA,
|
|
||||||
owner BYTEA,
|
|
||||||
lamports BIGINT NOT NULL,
|
|
||||||
slot BIGINT NOT NULL,
|
|
||||||
executable BOOL NOT NULL,
|
|
||||||
rent_epoch BIGINT NOT NULL,
|
|
||||||
data BYTEA,
|
|
||||||
write_version BIGINT NOT NULL,
|
|
||||||
updated_on TIMESTAMP NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
|
|
||||||
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
|
|
||||||
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
|
|
||||||
RETURN NEW;
|
|
||||||
END;
|
|
||||||
|
|
||||||
$audit_account_update$ LANGUAGE plpgsql;
|
|
||||||
|
|
||||||
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
|
|
||||||
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
|
|
@@ -1,9 +0,0 @@
|
|||||||
/**
|
|
||||||
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
|
|
||||||
*/
|
|
||||||
|
|
||||||
DROP TRIGGER account_update_trigger ON account;
|
|
||||||
DROP FUNCTION audit_account_update;
|
|
||||||
DROP TABLE account_audit;
|
|
||||||
DROP TABLE account;
|
|
||||||
DROP TABLE slot;
|
|
@@ -1,802 +0,0 @@
|
|||||||
# This a reference configuration file for the PostgreSQL database version 14.
|
|
||||||
|
|
||||||
# -----------------------------
|
|
||||||
# PostgreSQL configuration file
|
|
||||||
# -----------------------------
|
|
||||||
#
|
|
||||||
# This file consists of lines of the form:
|
|
||||||
#
|
|
||||||
# name = value
|
|
||||||
#
|
|
||||||
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
|
|
||||||
# "#" anywhere on a line. The complete list of parameter names and allowed
|
|
||||||
# values can be found in the PostgreSQL documentation.
|
|
||||||
#
|
|
||||||
# The commented-out settings shown in this file represent the default values.
|
|
||||||
# Re-commenting a setting is NOT sufficient to revert it to the default value;
|
|
||||||
# you need to reload the server.
|
|
||||||
#
|
|
||||||
# This file is read on server startup and when the server receives a SIGHUP
|
|
||||||
# signal. If you edit the file on a running system, you have to SIGHUP the
|
|
||||||
# server for the changes to take effect, run "pg_ctl reload", or execute
|
|
||||||
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
|
|
||||||
# require a server shutdown and restart to take effect.
|
|
||||||
#
|
|
||||||
# Any parameter can also be given as a command-line option to the server, e.g.,
|
|
||||||
# "postgres -c log_connections=on". Some parameters can be changed at run time
|
|
||||||
# with the "SET" SQL command.
|
|
||||||
#
|
|
||||||
# Memory units: B = bytes Time units: us = microseconds
|
|
||||||
# kB = kilobytes ms = milliseconds
|
|
||||||
# MB = megabytes s = seconds
|
|
||||||
# GB = gigabytes min = minutes
|
|
||||||
# TB = terabytes h = hours
|
|
||||||
# d = days
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# FILE LOCATIONS
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# The default values of these variables are driven from the -D command-line
|
|
||||||
# option or PGDATA environment variable, represented here as ConfigDir.
|
|
||||||
|
|
||||||
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
|
|
||||||
# (change requires restart)
|
|
||||||
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
# If external_pid_file is not explicitly set, no extra PID file is written.
|
|
||||||
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# CONNECTIONS AND AUTHENTICATION
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Connection Settings -
|
|
||||||
|
|
||||||
#listen_addresses = 'localhost' # what IP address(es) to listen on;
|
|
||||||
# comma-separated list of addresses;
|
|
||||||
# defaults to 'localhost'; use '*' for all
|
|
||||||
# (change requires restart)
|
|
||||||
listen_addresses = '*'
|
|
||||||
port = 5433 # (change requires restart)
|
|
||||||
max_connections = 200 # (change requires restart)
|
|
||||||
#superuser_reserved_connections = 3 # (change requires restart)
|
|
||||||
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
|
|
||||||
# (change requires restart)
|
|
||||||
#unix_socket_group = '' # (change requires restart)
|
|
||||||
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
|
|
||||||
# (change requires restart)
|
|
||||||
#bonjour = off # advertise server via Bonjour
|
|
||||||
# (change requires restart)
|
|
||||||
#bonjour_name = '' # defaults to the computer name
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
# - TCP settings -
|
|
||||||
# see "man tcp" for details
|
|
||||||
|
|
||||||
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
|
|
||||||
# 0 selects the system default
|
|
||||||
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
|
|
||||||
# 0 selects the system default
|
|
||||||
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
|
|
||||||
# 0 selects the system default
|
|
||||||
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
|
|
||||||
# 0 selects the system default
|
|
||||||
|
|
||||||
#client_connection_check_interval = 0 # time between checks for client
|
|
||||||
# disconnection while running queries;
|
|
||||||
# 0 for never
|
|
||||||
|
|
||||||
# - Authentication -
|
|
||||||
|
|
||||||
#authentication_timeout = 1min # 1s-600s
|
|
||||||
#password_encryption = scram-sha-256 # scram-sha-256 or md5
|
|
||||||
#db_user_namespace = off
|
|
||||||
|
|
||||||
# GSSAPI using Kerberos
|
|
||||||
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
|
|
||||||
#krb_caseins_users = off
|
|
||||||
|
|
||||||
# - SSL -
|
|
||||||
|
|
||||||
ssl = on
|
|
||||||
#ssl_ca_file = ''
|
|
||||||
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
|
|
||||||
#ssl_crl_file = ''
|
|
||||||
#ssl_crl_dir = ''
|
|
||||||
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
|
|
||||||
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
|
|
||||||
#ssl_prefer_server_ciphers = on
|
|
||||||
#ssl_ecdh_curve = 'prime256v1'
|
|
||||||
#ssl_min_protocol_version = 'TLSv1.2'
|
|
||||||
#ssl_max_protocol_version = ''
|
|
||||||
#ssl_dh_params_file = ''
|
|
||||||
#ssl_passphrase_command = ''
|
|
||||||
#ssl_passphrase_command_supports_reload = off
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# RESOURCE USAGE (except WAL)
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Memory -
|
|
||||||
|
|
||||||
shared_buffers = 1GB # min 128kB
|
|
||||||
# (change requires restart)
|
|
||||||
#huge_pages = try # on, off, or try
|
|
||||||
# (change requires restart)
|
|
||||||
#huge_page_size = 0 # zero for system default
|
|
||||||
# (change requires restart)
|
|
||||||
#temp_buffers = 8MB # min 800kB
|
|
||||||
#max_prepared_transactions = 0 # zero disables the feature
|
|
||||||
# (change requires restart)
|
|
||||||
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
|
|
||||||
# you actively intend to use prepared transactions.
|
|
||||||
#work_mem = 4MB # min 64kB
|
|
||||||
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
|
|
||||||
#maintenance_work_mem = 64MB # min 1MB
|
|
||||||
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
|
|
||||||
#logical_decoding_work_mem = 64MB # min 64kB
|
|
||||||
#max_stack_depth = 2MB # min 100kB
|
|
||||||
#shared_memory_type = mmap # the default is the first option
|
|
||||||
# supported by the operating system:
|
|
||||||
# mmap
|
|
||||||
# sysv
|
|
||||||
# windows
|
|
||||||
# (change requires restart)
|
|
||||||
dynamic_shared_memory_type = posix # the default is the first option
|
|
||||||
# supported by the operating system:
|
|
||||||
# posix
|
|
||||||
# sysv
|
|
||||||
# windows
|
|
||||||
# mmap
|
|
||||||
# (change requires restart)
|
|
||||||
#min_dynamic_shared_memory = 0MB # (change requires restart)
|
|
||||||
|
|
||||||
# - Disk -
|
|
||||||
|
|
||||||
#temp_file_limit = -1 # limits per-process temp file space
|
|
||||||
# in kilobytes, or -1 for no limit
|
|
||||||
|
|
||||||
# - Kernel Resources -
|
|
||||||
|
|
||||||
#max_files_per_process = 1000 # min 64
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
# - Cost-Based Vacuum Delay -
|
|
||||||
|
|
||||||
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
|
|
||||||
#vacuum_cost_page_hit = 1 # 0-10000 credits
|
|
||||||
#vacuum_cost_page_miss = 2 # 0-10000 credits
|
|
||||||
#vacuum_cost_page_dirty = 20 # 0-10000 credits
|
|
||||||
#vacuum_cost_limit = 200 # 1-10000 credits
|
|
||||||
|
|
||||||
# - Background Writer -
|
|
||||||
|
|
||||||
#bgwriter_delay = 200ms # 10-10000ms between rounds
|
|
||||||
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
|
|
||||||
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
|
|
||||||
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
|
|
||||||
|
|
||||||
# - Asynchronous Behavior -
|
|
||||||
|
|
||||||
#backend_flush_after = 0 # measured in pages, 0 disables
|
|
||||||
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
|
|
||||||
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
|
|
||||||
#max_worker_processes = 8 # (change requires restart)
|
|
||||||
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
|
|
||||||
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
|
|
||||||
#max_parallel_workers = 8 # maximum number of max_worker_processes that
|
|
||||||
# can be used in parallel operations
|
|
||||||
#parallel_leader_participation = on
|
|
||||||
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# WRITE-AHEAD LOG
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Settings -
|
|
||||||
|
|
||||||
wal_level = minimal # minimal, replica, or logical
|
|
||||||
# (change requires restart)
|
|
||||||
fsync = off # flush data to disk for crash safety
|
|
||||||
# (turning this off can cause
|
|
||||||
# unrecoverable data corruption)
|
|
||||||
synchronous_commit = off # synchronization level;
|
|
||||||
# off, local, remote_write, remote_apply, or on
|
|
||||||
#wal_sync_method = fsync # the default is the first option
|
|
||||||
# supported by the operating system:
|
|
||||||
# open_datasync
|
|
||||||
# fdatasync (default on Linux and FreeBSD)
|
|
||||||
# fsync
|
|
||||||
# fsync_writethrough
|
|
||||||
# open_sync
|
|
||||||
full_page_writes = off # recover from partial page writes
|
|
||||||
#wal_log_hints = off # also do full page writes of non-critical updates
|
|
||||||
# (change requires restart)
|
|
||||||
#wal_compression = off # enable compression of full-page writes
|
|
||||||
#wal_init_zero = on # zero-fill new WAL files
|
|
||||||
#wal_recycle = on # recycle WAL files
|
|
||||||
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
|
|
||||||
# (change requires restart)
|
|
||||||
#wal_writer_delay = 200ms # 1-10000 milliseconds
|
|
||||||
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
|
|
||||||
#wal_skip_threshold = 2MB
|
|
||||||
|
|
||||||
#commit_delay = 0 # range 0-100000, in microseconds
|
|
||||||
#commit_siblings = 5 # range 1-1000
|
|
||||||
|
|
||||||
# - Checkpoints -
|
|
||||||
|
|
||||||
#checkpoint_timeout = 5min # range 30s-1d
|
|
||||||
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
|
|
||||||
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
|
|
||||||
#checkpoint_warning = 30s # 0 disables
|
|
||||||
max_wal_size = 1GB
|
|
||||||
min_wal_size = 80MB
|
|
||||||
|
|
||||||
# - Archiving -
|
|
||||||
|
|
||||||
#archive_mode = off # enables archiving; off, on, or always
|
|
||||||
# (change requires restart)
|
|
||||||
#archive_command = '' # command to use to archive a logfile segment
|
|
||||||
# placeholders: %p = path of file to archive
|
|
||||||
# %f = file name only
|
|
||||||
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
|
|
||||||
#archive_timeout = 0 # force a logfile segment switch after this
|
|
||||||
# number of seconds; 0 disables
|
|
||||||
|
|
||||||
# - Archive Recovery -
|
|
||||||
|
|
||||||
# These are only used in recovery mode.
|
|
||||||
|
|
||||||
#restore_command = '' # command to use to restore an archived logfile segment
|
|
||||||
# placeholders: %p = path of file to restore
|
|
||||||
# %f = file name only
|
|
||||||
# e.g. 'cp /mnt/server/archivedir/%f %p'
|
|
||||||
#archive_cleanup_command = '' # command to execute at every restartpoint
|
|
||||||
#recovery_end_command = '' # command to execute at completion of recovery
|
|
||||||
|
|
||||||
# - Recovery Target -
|
|
||||||
|
|
||||||
# Set these only when performing a targeted recovery.
|
|
||||||
|
|
||||||
#recovery_target = '' # 'immediate' to end recovery as soon as a
|
|
||||||
# consistent state is reached
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_name = '' # the named restore point to which recovery will proceed
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_time = '' # the time stamp up to which recovery will proceed
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_inclusive = on # Specifies whether to stop:
|
|
||||||
# just after the specified recovery target (on)
|
|
||||||
# just before the recovery target (off)
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# REPLICATION
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Sending Servers -
|
|
||||||
|
|
||||||
# Set these on the primary and on any standby that will send replication data.
|
|
||||||
|
|
||||||
max_wal_senders = 0 # max number of walsender processes
|
|
||||||
# (change requires restart)
|
|
||||||
#max_replication_slots = 10 # max number of replication slots
|
|
||||||
# (change requires restart)
|
|
||||||
#wal_keep_size = 0 # in megabytes; 0 disables
|
|
||||||
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
|
|
||||||
#wal_sender_timeout = 60s # in milliseconds; 0 disables
|
|
||||||
#track_commit_timestamp = off # collect timestamp of transaction commit
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
# - Primary Server -
|
|
||||||
|
|
||||||
# These settings are ignored on a standby server.
|
|
||||||
|
|
||||||
#synchronous_standby_names = '' # standby servers that provide sync rep
|
|
||||||
# method to choose sync standbys, number of sync standbys,
|
|
||||||
# and comma-separated list of application_name
|
|
||||||
# from standby(s); '*' = all
|
|
||||||
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
|
|
||||||
|
|
||||||
# - Standby Servers -
|
|
||||||
|
|
||||||
# These settings are ignored on a primary server.
|
|
||||||
|
|
||||||
#primary_conninfo = '' # connection string to sending server
|
|
||||||
#primary_slot_name = '' # replication slot on sending server
|
|
||||||
#promote_trigger_file = '' # file name whose presence ends recovery
|
|
||||||
#hot_standby = on # "off" disallows queries during recovery
|
|
||||||
# (change requires restart)
|
|
||||||
#max_standby_archive_delay = 30s # max delay before canceling queries
|
|
||||||
# when reading WAL from archive;
|
|
||||||
# -1 allows indefinite delay
|
|
||||||
#max_standby_streaming_delay = 30s # max delay before canceling queries
|
|
||||||
# when reading streaming WAL;
|
|
||||||
# -1 allows indefinite delay
|
|
||||||
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
|
|
||||||
# is not set
|
|
||||||
#wal_receiver_status_interval = 10s # send replies at least this often
|
|
||||||
# 0 disables
|
|
||||||
#hot_standby_feedback = off # send info from standby to prevent
|
|
||||||
# query conflicts
|
|
||||||
#wal_receiver_timeout = 60s # time that receiver waits for
|
|
||||||
# communication from primary
|
|
||||||
# in milliseconds; 0 disables
|
|
||||||
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
|
|
||||||
# retrieve WAL after a failed attempt
|
|
||||||
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
|
|
||||||
|
|
||||||
# - Subscribers -
|
|
||||||
|
|
||||||
# These settings are ignored on a publisher.
|
|
||||||
|
|
||||||
#max_logical_replication_workers = 4 # taken from max_worker_processes
|
|
||||||
# (change requires restart)
|
|
||||||
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# QUERY TUNING
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Planner Method Configuration -
|
|
||||||
|
|
||||||
#enable_async_append = on
|
|
||||||
#enable_bitmapscan = on
|
|
||||||
#enable_gathermerge = on
|
|
||||||
#enable_hashagg = on
|
|
||||||
#enable_hashjoin = on
|
|
||||||
#enable_incremental_sort = on
|
|
||||||
#enable_indexscan = on
|
|
||||||
#enable_indexonlyscan = on
|
|
||||||
#enable_material = on
|
|
||||||
#enable_memoize = on
|
|
||||||
#enable_mergejoin = on
|
|
||||||
#enable_nestloop = on
|
|
||||||
#enable_parallel_append = on
|
|
||||||
#enable_parallel_hash = on
|
|
||||||
#enable_partition_pruning = on
|
|
||||||
#enable_partitionwise_join = off
|
|
||||||
#enable_partitionwise_aggregate = off
|
|
||||||
#enable_seqscan = on
|
|
||||||
#enable_sort = on
|
|
||||||
#enable_tidscan = on
|
|
||||||
|
|
||||||
# - Planner Cost Constants -
|
|
||||||
|
|
||||||
#seq_page_cost = 1.0 # measured on an arbitrary scale
|
|
||||||
#random_page_cost = 4.0 # same scale as above
|
|
||||||
#cpu_tuple_cost = 0.01 # same scale as above
|
|
||||||
#cpu_index_tuple_cost = 0.005 # same scale as above
|
|
||||||
#cpu_operator_cost = 0.0025 # same scale as above
|
|
||||||
#parallel_setup_cost = 1000.0 # same scale as above
|
|
||||||
#parallel_tuple_cost = 0.1 # same scale as above
|
|
||||||
#min_parallel_table_scan_size = 8MB
|
|
||||||
#min_parallel_index_scan_size = 512kB
|
|
||||||
#effective_cache_size = 4GB
|
|
||||||
|
|
||||||
#jit_above_cost = 100000 # perform JIT compilation if available
|
|
||||||
# and query more expensive than this;
|
|
||||||
# -1 disables
|
|
||||||
#jit_inline_above_cost = 500000 # inline small functions if query is
|
|
||||||
# more expensive than this; -1 disables
|
|
||||||
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
|
|
||||||
# query is more expensive than this;
|
|
||||||
# -1 disables
|
|
||||||
|
|
||||||
# - Genetic Query Optimizer -
|
|
||||||
|
|
||||||
#geqo = on
|
|
||||||
#geqo_threshold = 12
|
|
||||||
#geqo_effort = 5 # range 1-10
|
|
||||||
#geqo_pool_size = 0 # selects default based on effort
|
|
||||||
#geqo_generations = 0 # selects default based on effort
|
|
||||||
#geqo_selection_bias = 2.0 # range 1.5-2.0
|
|
||||||
#geqo_seed = 0.0 # range 0.0-1.0
|
|
||||||
|
|
||||||
# - Other Planner Options -
|
|
||||||
|
|
||||||
#default_statistics_target = 100 # range 1-10000
|
|
||||||
#constraint_exclusion = partition # on, off, or partition
|
|
||||||
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
|
|
||||||
#from_collapse_limit = 8
|
|
||||||
#jit = on # allow JIT compilation
|
|
||||||
#join_collapse_limit = 8 # 1 disables collapsing of explicit
|
|
||||||
# JOIN clauses
|
|
||||||
#plan_cache_mode = auto # auto, force_generic_plan or
|
|
||||||
# force_custom_plan
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# REPORTING AND LOGGING
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Where to Log -
|
|
||||||
|
|
||||||
#log_destination = 'stderr' # Valid values are combinations of
|
|
||||||
# stderr, csvlog, syslog, and eventlog,
|
|
||||||
# depending on platform. csvlog
|
|
||||||
# requires logging_collector to be on.
|
|
||||||
|
|
||||||
# This is used when logging to stderr:
|
|
||||||
#logging_collector = off # Enable capturing of stderr and csvlog
|
|
||||||
# into log files. Required to be on for
|
|
||||||
# csvlogs.
|
|
||||||
# (change requires restart)
|
|
||||||
|
|
||||||
# These are only used if logging_collector is on:
|
|
||||||
#log_directory = 'log' # directory where log files are written,
|
|
||||||
# can be absolute or relative to PGDATA
|
|
||||||
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
|
|
||||||
# can include strftime() escapes
|
|
||||||
#log_file_mode = 0600 # creation mode for log files,
|
|
||||||
# begin with 0 to use octal notation
|
|
||||||
#log_rotation_age = 1d # Automatic rotation of logfiles will
|
|
||||||
# happen after that time. 0 disables.
|
|
||||||
#log_rotation_size = 10MB # Automatic rotation of logfiles will
|
|
||||||
# happen after that much log output.
|
|
||||||
# 0 disables.
|
|
||||||
#log_truncate_on_rotation = off # If on, an existing log file with the
|
|
||||||
# same name as the new log file will be
|
|
||||||
# truncated rather than appended to.
|
|
||||||
# But such truncation only occurs on
|
|
||||||
# time-driven rotation, not on restarts
|
|
||||||
# or size-driven rotation. Default is
|
|
||||||
# off, meaning append to existing files
|
|
||||||
# in all cases.
|
|
||||||
|
|
||||||
# These are relevant when logging to syslog:
|
|
||||||
#syslog_facility = 'LOCAL0'
|
|
||||||
#syslog_ident = 'postgres'
|
|
||||||
#syslog_sequence_numbers = on
|
|
||||||
#syslog_split_messages = on
|
|
||||||
|
|
||||||
# This is only relevant when logging to eventlog (Windows):
|
|
||||||
# (change requires restart)
|
|
||||||
#event_source = 'PostgreSQL'
|
|
||||||
|
|
||||||
# - When to Log -
|
|
||||||
|
|
||||||
#log_min_messages = warning # values in order of decreasing detail:
|
|
||||||
# debug5
|
|
||||||
# debug4
|
|
||||||
# debug3
|
|
||||||
# debug2
|
|
||||||
# debug1
|
|
||||||
# info
|
|
||||||
# notice
|
|
||||||
# warning
|
|
||||||
# error
|
|
||||||
# log
|
|
||||||
# fatal
|
|
||||||
# panic
|
|
||||||
|
|
||||||
#log_min_error_statement = error # values in order of decreasing detail:
|
|
||||||
# debug5
|
|
||||||
# debug4
|
|
||||||
# debug3
|
|
||||||
# debug2
|
|
||||||
# debug1
|
|
||||||
# info
|
|
||||||
# notice
|
|
||||||
# warning
|
|
||||||
# error
|
|
||||||
# log
|
|
||||||
# fatal
|
|
||||||
# panic (effectively off)
|
|
||||||
|
|
||||||
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
|
|
||||||
# and their durations, > 0 logs only
|
|
||||||
# statements running at least this number
|
|
||||||
# of milliseconds
|
|
||||||
|
|
||||||
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
|
|
||||||
# and their durations, > 0 logs only a sample of
|
|
||||||
# statements running at least this number
|
|
||||||
# of milliseconds;
|
|
||||||
# sample fraction is determined by log_statement_sample_rate
|
|
||||||
|
|
||||||
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
|
|
||||||
# log_min_duration_sample to be logged;
|
|
||||||
# 1.0 logs all such statements, 0.0 never logs
|
|
||||||
|
|
||||||
|
|
||||||
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
|
|
||||||
# are logged regardless of their duration; 1.0 logs all
|
|
||||||
# statements from all transactions, 0.0 never logs
|
|
||||||
|
|
||||||
# - What to Log -
|
|
||||||
|
|
||||||
#debug_print_parse = off
|
|
||||||
#debug_print_rewritten = off
|
|
||||||
#debug_print_plan = off
|
|
||||||
#debug_pretty_print = on
|
|
||||||
#log_autovacuum_min_duration = -1 # log autovacuum activity;
|
|
||||||
# -1 disables, 0 logs all actions and
|
|
||||||
# their durations, > 0 logs only
|
|
||||||
# actions running at least this number
|
|
||||||
# of milliseconds.
|
|
||||||
#log_checkpoints = off
|
|
||||||
#log_connections = off
|
|
||||||
#log_disconnections = off
|
|
||||||
#log_duration = off
|
|
||||||
#log_error_verbosity = default # terse, default, or verbose messages
|
|
||||||
#log_hostname = off
|
|
||||||
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
|
|
||||||
# %a = application name
|
|
||||||
# %u = user name
|
|
||||||
# %d = database name
|
|
||||||
# %r = remote host and port
|
|
||||||
# %h = remote host
|
|
||||||
# %b = backend type
|
|
||||||
# %p = process ID
|
|
||||||
# %P = process ID of parallel group leader
|
|
||||||
# %t = timestamp without milliseconds
|
|
||||||
# %m = timestamp with milliseconds
|
|
||||||
# %n = timestamp with milliseconds (as a Unix epoch)
|
|
||||||
# %Q = query ID (0 if none or not computed)
|
|
||||||
# %i = command tag
|
|
||||||
# %e = SQL state
|
|
||||||
# %c = session ID
|
|
||||||
# %l = session line number
|
|
||||||
# %s = session start timestamp
|
|
||||||
# %v = virtual transaction ID
|
|
||||||
# %x = transaction ID (0 if none)
|
|
||||||
# %q = stop here in non-session
|
|
||||||
# processes
|
|
||||||
# %% = '%'
|
|
||||||
# e.g. '<%u%%%d> '
|
|
||||||
#log_lock_waits = off # log lock waits >= deadlock_timeout
|
|
||||||
#log_recovery_conflict_waits = off # log standby recovery conflict waits
|
|
||||||
# >= deadlock_timeout
|
|
||||||
#log_parameter_max_length = -1 # when logging statements, limit logged
|
|
||||||
# bind-parameter values to N bytes;
|
|
||||||
# -1 means print in full, 0 disables
|
|
||||||
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
|
|
||||||
# bind-parameter values to N bytes;
|
|
||||||
# -1 means print in full, 0 disables
|
|
||||||
#log_statement = 'none' # none, ddl, mod, all
|
|
||||||
#log_replication_commands = off
|
|
||||||
#log_temp_files = -1 # log temporary files equal or larger
|
|
||||||
# than the specified size in kilobytes;
|
|
||||||
# -1 disables, 0 logs all temp files
|
|
||||||
log_timezone = 'Etc/UTC'
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# PROCESS TITLE
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
cluster_name = '14/main' # added to process titles if nonempty
|
|
||||||
# (change requires restart)
|
|
||||||
#update_process_title = on
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# STATISTICS
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Query and Index Statistics Collector -
|
|
||||||
|
|
||||||
#track_activities = on
|
|
||||||
#track_activity_query_size = 1024 # (change requires restart)
|
|
||||||
#track_counts = on
|
|
||||||
#track_io_timing = off
|
|
||||||
#track_wal_io_timing = off
|
|
||||||
#track_functions = none # none, pl, all
|
|
||||||
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
|
|
||||||
|
|
||||||
|
|
||||||
# - Monitoring -
|
|
||||||
|
|
||||||
#compute_query_id = auto
|
|
||||||
#log_statement_stats = off
|
|
||||||
#log_parser_stats = off
|
|
||||||
#log_planner_stats = off
|
|
||||||
#log_executor_stats = off
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# AUTOVACUUM
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
#autovacuum = on # Enable autovacuum subprocess? 'on'
|
|
||||||
# requires track_counts to also be on.
|
|
||||||
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
|
|
||||||
# (change requires restart)
|
|
||||||
#autovacuum_naptime = 1min # time between autovacuum runs
|
|
||||||
#autovacuum_vacuum_threshold = 50 # min number of row updates before
|
|
||||||
# vacuum
|
|
||||||
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
|
|
||||||
# before vacuum; -1 disables insert
|
|
||||||
# vacuums
|
|
||||||
#autovacuum_analyze_threshold = 50 # min number of row updates before
|
|
||||||
# analyze
|
|
||||||
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
|
|
||||||
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
|
|
||||||
# size before insert vacuum
|
|
||||||
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
|
|
||||||
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
|
|
||||||
# (change requires restart)
|
|
||||||
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
|
|
||||||
# before forced vacuum
|
|
||||||
# (change requires restart)
|
|
||||||
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
|
|
||||||
# autovacuum, in milliseconds;
|
|
||||||
# -1 means use vacuum_cost_delay
|
|
||||||
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
|
|
||||||
# autovacuum, -1 means use
|
|
||||||
# vacuum_cost_limit
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# CLIENT CONNECTION DEFAULTS
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Statement Behavior -
|
|
||||||
|
|
||||||
#client_min_messages = notice # values in order of decreasing detail:
|
|
||||||
# debug5
|
|
||||||
# debug4
|
|
||||||
# debug3
|
|
||||||
# debug2
|
|
||||||
# debug1
|
|
||||||
# log
|
|
||||||
# notice
|
|
||||||
# warning
|
|
||||||
# error
|
|
||||||
#search_path = '"$user", public' # schema names
|
|
||||||
#row_security = on
|
|
||||||
#default_table_access_method = 'heap'
|
|
||||||
#default_tablespace = '' # a tablespace name, '' uses the default
|
|
||||||
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
|
|
||||||
#temp_tablespaces = '' # a list of tablespace names, '' uses
|
|
||||||
# only default tablespace
|
|
||||||
#check_function_bodies = on
|
|
||||||
#default_transaction_isolation = 'read committed'
|
|
||||||
#default_transaction_read_only = off
|
|
||||||
#default_transaction_deferrable = off
|
|
||||||
#session_replication_role = 'origin'
|
|
||||||
#statement_timeout = 0 # in milliseconds, 0 is disabled
|
|
||||||
#lock_timeout = 0 # in milliseconds, 0 is disabled
|
|
||||||
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
|
|
||||||
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
|
|
||||||
#vacuum_freeze_table_age = 150000000
|
|
||||||
#vacuum_freeze_min_age = 50000000
|
|
||||||
#vacuum_failsafe_age = 1600000000
|
|
||||||
#vacuum_multixact_freeze_table_age = 150000000
|
|
||||||
#vacuum_multixact_freeze_min_age = 5000000
|
|
||||||
#vacuum_multixact_failsafe_age = 1600000000
|
|
||||||
#bytea_output = 'hex' # hex, escape
|
|
||||||
#xmlbinary = 'base64'
|
|
||||||
#xmloption = 'content'
|
|
||||||
#gin_pending_list_limit = 4MB
|
|
||||||
|
|
||||||
# - Locale and Formatting -
|
|
||||||
|
|
||||||
datestyle = 'iso, mdy'
|
|
||||||
#intervalstyle = 'postgres'
|
|
||||||
timezone = 'Etc/UTC'
|
|
||||||
#timezone_abbreviations = 'Default' # Select the set of available time zone
|
|
||||||
# abbreviations. Currently, there are
|
|
||||||
# Default
|
|
||||||
# Australia (historical usage)
|
|
||||||
# India
|
|
||||||
# You can create your own file in
|
|
||||||
# share/timezonesets/.
|
|
||||||
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
|
|
||||||
# selects precise output mode
|
|
||||||
#client_encoding = sql_ascii # actually, defaults to database
|
|
||||||
# encoding
|
|
||||||
|
|
||||||
# These settings are initialized by initdb, but they can be changed.
|
|
||||||
lc_messages = 'C.UTF-8' # locale for system error message
|
|
||||||
# strings
|
|
||||||
lc_monetary = 'C.UTF-8' # locale for monetary formatting
|
|
||||||
lc_numeric = 'C.UTF-8' # locale for number formatting
|
|
||||||
lc_time = 'C.UTF-8' # locale for time formatting
|
|
||||||
|
|
||||||
# default configuration for text search
|
|
||||||
default_text_search_config = 'pg_catalog.english'
|
|
||||||
|
|
||||||
# - Shared Library Preloading -
|
|
||||||
|
|
||||||
#local_preload_libraries = ''
|
|
||||||
#session_preload_libraries = ''
|
|
||||||
#shared_preload_libraries = '' # (change requires restart)
|
|
||||||
#jit_provider = 'llvmjit' # JIT library to use
|
|
||||||
|
|
||||||
# - Other Defaults -
|
|
||||||
|
|
||||||
#dynamic_library_path = '$libdir'
|
|
||||||
#extension_destdir = '' # prepend path when loading extensions
|
|
||||||
# and shared objects (added by Debian)
|
|
||||||
#gin_fuzzy_search_limit = 0
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# LOCK MANAGEMENT
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
#deadlock_timeout = 1s
|
|
||||||
#max_locks_per_transaction = 64 # min 10
|
|
||||||
# (change requires restart)
|
|
||||||
#max_pred_locks_per_transaction = 64 # min 10
|
|
||||||
# (change requires restart)
|
|
||||||
#max_pred_locks_per_relation = -2 # negative values mean
|
|
||||||
# (max_pred_locks_per_transaction
|
|
||||||
# / -max_pred_locks_per_relation) - 1
|
|
||||||
#max_pred_locks_per_page = 2 # min 0
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# VERSION AND PLATFORM COMPATIBILITY
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# - Previous PostgreSQL Versions -
|
|
||||||
|
|
||||||
#array_nulls = on
|
|
||||||
#backslash_quote = safe_encoding # on, off, or safe_encoding
|
|
||||||
#escape_string_warning = on
|
|
||||||
#lo_compat_privileges = off
|
|
||||||
#quote_all_identifiers = off
|
|
||||||
#standard_conforming_strings = on
|
|
||||||
#synchronize_seqscans = on
|
|
||||||
|
|
||||||
# - Other Platforms and Clients -
|
|
||||||
|
|
||||||
#transform_null_equals = off
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# ERROR HANDLING
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
#exit_on_error = off # terminate session on any error?
|
|
||||||
#restart_after_crash = on # reinitialize after backend crash?
|
|
||||||
#data_sync_retry = off # retry or panic on failure to fsync
|
|
||||||
# data?
|
|
||||||
# (change requires restart)
|
|
||||||
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# CONFIG FILE INCLUDES
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# These options allow settings to be loaded from files other than the
|
|
||||||
# default postgresql.conf. Note that these are directives, not variable
|
|
||||||
# assignments, so they can usefully be given more than once.
|
|
||||||
|
|
||||||
include_dir = 'conf.d' # include files ending in '.conf' from
|
|
||||||
# a directory, e.g., 'conf.d'
|
|
||||||
#include_if_exists = '...' # include file only if it exists
|
|
||||||
#include = '...' # include file
|
|
||||||
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
# CUSTOMIZED OPTIONS
|
|
||||||
#------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# Add settings for extensions here
|
|
@@ -1,69 +0,0 @@
|
|||||||
use {log::*, std::collections::HashSet};
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub(crate) struct AccountsSelector {
|
|
||||||
pub accounts: HashSet<Vec<u8>>,
|
|
||||||
pub owners: HashSet<Vec<u8>>,
|
|
||||||
pub select_all_accounts: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsSelector {
|
|
||||||
pub fn default() -> Self {
|
|
||||||
AccountsSelector {
|
|
||||||
accounts: HashSet::default(),
|
|
||||||
owners: HashSet::default(),
|
|
||||||
select_all_accounts: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new(accounts: &[String], owners: &[String]) -> Self {
|
|
||||||
info!(
|
|
||||||
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
|
|
||||||
accounts, owners
|
|
||||||
);
|
|
||||||
|
|
||||||
let select_all_accounts = accounts.iter().any(|key| key == "*");
|
|
||||||
if select_all_accounts {
|
|
||||||
return AccountsSelector {
|
|
||||||
accounts: HashSet::default(),
|
|
||||||
owners: HashSet::default(),
|
|
||||||
select_all_accounts,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
let accounts = accounts
|
|
||||||
.iter()
|
|
||||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
|
||||||
.collect();
|
|
||||||
let owners = owners
|
|
||||||
.iter()
|
|
||||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
|
||||||
.collect();
|
|
||||||
AccountsSelector {
|
|
||||||
accounts,
|
|
||||||
owners,
|
|
||||||
select_all_accounts,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
|
|
||||||
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub(crate) mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_create_accounts_selector() {
|
|
||||||
AccountsSelector::new(
|
|
||||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
|
||||||
&[],
|
|
||||||
);
|
|
||||||
|
|
||||||
AccountsSelector::new(
|
|
||||||
&[],
|
|
||||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,345 +0,0 @@
|
|||||||
use solana_measure::measure::Measure;
|
|
||||||
/// Main entry for the PostgreSQL plugin
|
|
||||||
use {
|
|
||||||
crate::{
|
|
||||||
accounts_selector::AccountsSelector,
|
|
||||||
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
|
|
||||||
},
|
|
||||||
bs58,
|
|
||||||
log::*,
|
|
||||||
serde_derive::{Deserialize, Serialize},
|
|
||||||
serde_json,
|
|
||||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
|
||||||
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
|
|
||||||
},
|
|
||||||
solana_metrics::*,
|
|
||||||
std::{fs::File, io::Read},
|
|
||||||
thiserror::Error,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct AccountsDbPluginPostgres {
|
|
||||||
client: Option<ParallelPostgresClient>,
|
|
||||||
accounts_selector: Option<AccountsSelector>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::fmt::Debug for AccountsDbPluginPostgres {
|
|
||||||
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
|
||||||
pub struct AccountsDbPluginPostgresConfig {
|
|
||||||
pub host: Option<String>,
|
|
||||||
pub user: Option<String>,
|
|
||||||
pub port: Option<u16>,
|
|
||||||
pub connection_str: Option<String>,
|
|
||||||
pub threads: Option<usize>,
|
|
||||||
pub batch_size: Option<usize>,
|
|
||||||
pub panic_on_db_errors: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
|
||||||
pub enum AccountsDbPluginPostgresError {
|
|
||||||
#[error("Error connecting to the backend data store. Error message: ({msg})")]
|
|
||||||
DataStoreConnectionError { msg: String },
|
|
||||||
|
|
||||||
#[error("Error preparing data store schema. Error message: ({msg})")]
|
|
||||||
DataSchemaError { msg: String },
|
|
||||||
|
|
||||||
#[error("Error preparing data store schema. Error message: ({msg})")]
|
|
||||||
ConfigurationError { msg: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsDbPlugin for AccountsDbPluginPostgres {
|
|
||||||
fn name(&self) -> &'static str {
|
|
||||||
"AccountsDbPluginPostgres"
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Do initialization for the PostgreSQL plugin.
|
|
||||||
///
|
|
||||||
/// # Format of the config file:
|
|
||||||
/// * The `accounts_selector` section allows the user to controls accounts selections.
|
|
||||||
/// "accounts_selector" : {
|
|
||||||
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
|
|
||||||
/// }
|
|
||||||
/// or:
|
|
||||||
/// "accounts_selector" = {
|
|
||||||
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
|
|
||||||
/// }
|
|
||||||
/// Accounts either satisyfing the accounts condition or owners condition will be selected.
|
|
||||||
/// When only owners is specified,
|
|
||||||
/// all accounts belonging to the owners will be streamed.
|
|
||||||
/// The accounts field support wildcard to select all accounts:
|
|
||||||
/// "accounts_selector" : {
|
|
||||||
/// "accounts" : \["*"\],
|
|
||||||
/// }
|
|
||||||
/// * "host", optional, specifies the PostgreSQL server.
|
|
||||||
/// * "user", optional, specifies the PostgreSQL user.
|
|
||||||
/// * "port", optional, specifies the PostgreSQL server's port.
|
|
||||||
/// * "connection_str", optional, the custom PostgreSQL connection string.
|
|
||||||
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
|
|
||||||
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
|
|
||||||
/// `host` and `user` must be given.
|
|
||||||
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
|
|
||||||
/// maintains a PostgreSQL connection to the server. The default is '10'.
|
|
||||||
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
|
|
||||||
/// from restoring a snapshot. The default is '10'.
|
|
||||||
/// * "panic_on_db_errors", optional, contols if to panic when there are errors replicating data to the
|
|
||||||
/// PostgreSQL database. The default is 'false'.
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// {
|
|
||||||
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
|
|
||||||
/// "host": "host_foo",
|
|
||||||
/// "user": "solana",
|
|
||||||
/// "threads": 10,
|
|
||||||
/// "accounts_selector" : {
|
|
||||||
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
|
|
||||||
fn on_load(&mut self, config_file: &str) -> Result<()> {
|
|
||||||
solana_logger::setup_with_default("info");
|
|
||||||
info!(
|
|
||||||
"Loading plugin {:?} from config_file {:?}",
|
|
||||||
self.name(),
|
|
||||||
config_file
|
|
||||||
);
|
|
||||||
let mut file = File::open(config_file)?;
|
|
||||||
let mut contents = String::new();
|
|
||||||
file.read_to_string(&mut contents)?;
|
|
||||||
|
|
||||||
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
|
|
||||||
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
|
|
||||||
|
|
||||||
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
|
|
||||||
serde_json::from_str(&contents);
|
|
||||||
match result {
|
|
||||||
Err(err) => {
|
|
||||||
return Err(AccountsDbPluginError::ConfigFileReadError {
|
|
||||||
msg: format!(
|
|
||||||
"The config file is not in the JSON format expected: {:?}",
|
|
||||||
err
|
|
||||||
),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Ok(config) => {
|
|
||||||
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
|
|
||||||
self.client = Some(client);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn on_unload(&mut self) {
|
|
||||||
info!("Unloading plugin: {:?}", self.name());
|
|
||||||
|
|
||||||
match &mut self.client {
|
|
||||||
None => {}
|
|
||||||
Some(client) => {
|
|
||||||
client.join().unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_account(
|
|
||||||
&mut self,
|
|
||||||
account: ReplicaAccountInfoVersions,
|
|
||||||
slot: u64,
|
|
||||||
is_startup: bool,
|
|
||||||
) -> Result<()> {
|
|
||||||
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
|
|
||||||
match account {
|
|
||||||
ReplicaAccountInfoVersions::V0_0_1(account) => {
|
|
||||||
let mut measure_select =
|
|
||||||
Measure::start("accountsdb-plugin-postgres-update-account-select");
|
|
||||||
if let Some(accounts_selector) = &self.accounts_selector {
|
|
||||||
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
measure_select.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-postgres-update-account-select-us",
|
|
||||||
measure_select.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
|
|
||||||
bs58::encode(account.pubkey).into_string(),
|
|
||||||
bs58::encode(account.owner).into_string(),
|
|
||||||
slot,
|
|
||||||
self.accounts_selector.as_ref().unwrap()
|
|
||||||
);
|
|
||||||
|
|
||||||
match &mut self.client {
|
|
||||||
None => {
|
|
||||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
|
||||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
|
||||||
msg: "There is no connection to the PostgreSQL database."
|
|
||||||
.to_string(),
|
|
||||||
},
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
Some(client) => {
|
|
||||||
let mut measure_update =
|
|
||||||
Measure::start("accountsdb-plugin-postgres-update-account-client");
|
|
||||||
let result = { client.update_account(account, slot, is_startup) };
|
|
||||||
measure_update.stop();
|
|
||||||
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-postgres-update-account-client-us",
|
|
||||||
measure_update.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Err(err) = result {
|
|
||||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
|
||||||
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
measure_all.stop();
|
|
||||||
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-postgres-update-account-main-us",
|
|
||||||
measure_all.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_slot_status(
|
|
||||||
&mut self,
|
|
||||||
slot: u64,
|
|
||||||
parent: Option<u64>,
|
|
||||||
status: SlotStatus,
|
|
||||||
) -> Result<()> {
|
|
||||||
info!("Updating slot {:?} at with status {:?}", slot, status);
|
|
||||||
|
|
||||||
match &mut self.client {
|
|
||||||
None => {
|
|
||||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
|
||||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
|
||||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
|
||||||
},
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
Some(client) => {
|
|
||||||
let result = client.update_slot_status(slot, parent, status);
|
|
||||||
|
|
||||||
if let Err(err) = result {
|
|
||||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
|
||||||
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn notify_end_of_startup(&mut self) -> Result<()> {
|
|
||||||
info!("Notifying the end of startup for accounts notifications");
|
|
||||||
match &mut self.client {
|
|
||||||
None => {
|
|
||||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
|
||||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
|
||||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
|
||||||
},
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
Some(client) => {
|
|
||||||
let result = client.notify_end_of_startup();
|
|
||||||
|
|
||||||
if let Err(err) = result {
|
|
||||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
|
||||||
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountsDbPluginPostgres {
|
|
||||||
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
|
|
||||||
let accounts_selector = &config["accounts_selector"];
|
|
||||||
|
|
||||||
if accounts_selector.is_null() {
|
|
||||||
AccountsSelector::default()
|
|
||||||
} else {
|
|
||||||
let accounts = &accounts_selector["accounts"];
|
|
||||||
let accounts: Vec<String> = if accounts.is_array() {
|
|
||||||
accounts
|
|
||||||
.as_array()
|
|
||||||
.unwrap()
|
|
||||||
.iter()
|
|
||||||
.map(|val| val.as_str().unwrap().to_string())
|
|
||||||
.collect()
|
|
||||||
} else {
|
|
||||||
Vec::default()
|
|
||||||
};
|
|
||||||
let owners = &accounts_selector["owners"];
|
|
||||||
let owners: Vec<String> = if owners.is_array() {
|
|
||||||
owners
|
|
||||||
.as_array()
|
|
||||||
.unwrap()
|
|
||||||
.iter()
|
|
||||||
.map(|val| val.as_str().unwrap().to_string())
|
|
||||||
.collect()
|
|
||||||
} else {
|
|
||||||
Vec::default()
|
|
||||||
};
|
|
||||||
AccountsSelector::new(&accounts, &owners)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new() -> Self {
|
|
||||||
AccountsDbPluginPostgres {
|
|
||||||
client: None,
|
|
||||||
accounts_selector: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[no_mangle]
|
|
||||||
#[allow(improper_ctypes_definitions)]
|
|
||||||
/// # Safety
|
|
||||||
///
|
|
||||||
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
|
|
||||||
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
|
|
||||||
let plugin = AccountsDbPluginPostgres::new();
|
|
||||||
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
|
|
||||||
Box::into_raw(plugin)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub(crate) mod tests {
|
|
||||||
use {super::*, serde_json};
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_accounts_selector_from_config() {
|
|
||||||
let config = "{\"accounts_selector\" : { \
|
|
||||||
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
|
|
||||||
}}";
|
|
||||||
|
|
||||||
let config: serde_json::Value = serde_json::from_str(config).unwrap();
|
|
||||||
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,3 +0,0 @@
|
|||||||
pub mod accounts_selector;
|
|
||||||
pub mod accountsdb_plugin_postgres;
|
|
||||||
pub mod postgres_client;
|
|
@@ -1,879 +0,0 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
|
||||||
|
|
||||||
/// A concurrent implementation for writing accounts into the PostgreSQL in parallel.
|
|
||||||
use {
|
|
||||||
crate::accountsdb_plugin_postgres::{
|
|
||||||
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
|
|
||||||
},
|
|
||||||
chrono::Utc,
|
|
||||||
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
|
|
||||||
log::*,
|
|
||||||
postgres::{Client, NoTls, Statement},
|
|
||||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
|
||||||
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
|
|
||||||
},
|
|
||||||
solana_measure::measure::Measure,
|
|
||||||
solana_metrics::*,
|
|
||||||
solana_sdk::timing::AtomicInterval,
|
|
||||||
std::{
|
|
||||||
sync::{
|
|
||||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
|
||||||
Arc, Mutex,
|
|
||||||
},
|
|
||||||
thread::{self, sleep, Builder, JoinHandle},
|
|
||||||
time::Duration,
|
|
||||||
},
|
|
||||||
tokio_postgres::types::ToSql,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// The maximum asynchronous requests allowed in the channel to avoid excessive
|
|
||||||
/// memory usage. The downside -- calls after this threshold is reached can get blocked.
|
|
||||||
const MAX_ASYNC_REQUESTS: usize = 40960;
|
|
||||||
const DEFAULT_POSTGRES_PORT: u16 = 5432;
|
|
||||||
const DEFAULT_THREADS_COUNT: usize = 100;
|
|
||||||
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
|
|
||||||
const ACCOUNT_COLUMN_COUNT: usize = 9;
|
|
||||||
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
|
|
||||||
|
|
||||||
struct PostgresSqlClientWrapper {
|
|
||||||
client: Client,
|
|
||||||
update_account_stmt: Statement,
|
|
||||||
bulk_account_insert_stmt: Statement,
|
|
||||||
update_slot_with_parent_stmt: Statement,
|
|
||||||
update_slot_without_parent_stmt: Statement,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct SimplePostgresClient {
|
|
||||||
batch_size: usize,
|
|
||||||
pending_account_updates: Vec<DbAccountInfo>,
|
|
||||||
client: Mutex<PostgresSqlClientWrapper>,
|
|
||||||
}
|
|
||||||
|
|
||||||
struct PostgresClientWorker {
|
|
||||||
client: SimplePostgresClient,
|
|
||||||
/// Indicating if accounts notification during startup is done.
|
|
||||||
is_startup_done: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Eq for DbAccountInfo {}
|
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Debug)]
|
|
||||||
pub struct DbAccountInfo {
|
|
||||||
pub pubkey: Vec<u8>,
|
|
||||||
pub lamports: i64,
|
|
||||||
pub owner: Vec<u8>,
|
|
||||||
pub executable: bool,
|
|
||||||
pub rent_epoch: i64,
|
|
||||||
pub data: Vec<u8>,
|
|
||||||
pub slot: i64,
|
|
||||||
pub write_version: i64,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn abort() -> ! {
|
|
||||||
#[cfg(not(test))]
|
|
||||||
{
|
|
||||||
// standard error is usually redirected to a log file, cry for help on standard output as
|
|
||||||
// well
|
|
||||||
eprintln!("Validator process aborted. The validator log may contain further details");
|
|
||||||
std::process::exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
panic!("process::exit(1) is intercepted for friendly test failure...");
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DbAccountInfo {
|
|
||||||
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
|
|
||||||
let data = account.data().to_vec();
|
|
||||||
Self {
|
|
||||||
pubkey: account.pubkey().to_vec(),
|
|
||||||
lamports: account.lamports() as i64,
|
|
||||||
owner: account.owner().to_vec(),
|
|
||||||
executable: account.executable(),
|
|
||||||
rent_epoch: account.rent_epoch() as i64,
|
|
||||||
data,
|
|
||||||
slot: slot as i64,
|
|
||||||
write_version: account.write_version(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait ReadableAccountInfo: Sized {
|
|
||||||
fn pubkey(&self) -> &[u8];
|
|
||||||
fn owner(&self) -> &[u8];
|
|
||||||
fn lamports(&self) -> i64;
|
|
||||||
fn executable(&self) -> bool;
|
|
||||||
fn rent_epoch(&self) -> i64;
|
|
||||||
fn data(&self) -> &[u8];
|
|
||||||
fn write_version(&self) -> i64;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReadableAccountInfo for DbAccountInfo {
|
|
||||||
fn pubkey(&self) -> &[u8] {
|
|
||||||
&self.pubkey
|
|
||||||
}
|
|
||||||
|
|
||||||
fn owner(&self) -> &[u8] {
|
|
||||||
&self.owner
|
|
||||||
}
|
|
||||||
|
|
||||||
fn lamports(&self) -> i64 {
|
|
||||||
self.lamports
|
|
||||||
}
|
|
||||||
|
|
||||||
fn executable(&self) -> bool {
|
|
||||||
self.executable
|
|
||||||
}
|
|
||||||
|
|
||||||
fn rent_epoch(&self) -> i64 {
|
|
||||||
self.rent_epoch
|
|
||||||
}
|
|
||||||
|
|
||||||
fn data(&self) -> &[u8] {
|
|
||||||
&self.data
|
|
||||||
}
|
|
||||||
|
|
||||||
fn write_version(&self) -> i64 {
|
|
||||||
self.write_version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adapter so a replica-provided account notification can be consumed through
/// the same `ReadableAccountInfo` interface as `DbAccountInfo`.
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
    fn pubkey(&self) -> &[u8] {
        self.pubkey
    }

    fn owner(&self) -> &[u8] {
        self.owner
    }

    fn lamports(&self) -> i64 {
        // `as` cast converts the replica's unsigned value to the schema's i64.
        // NOTE(review): values above i64::MAX would wrap — assumed not to
        // occur in practice; confirm upstream invariants.
        self.lamports as i64
    }

    fn executable(&self) -> bool {
        self.executable
    }

    fn rent_epoch(&self) -> i64 {
        // Same wrapping caveat as lamports().
        self.rent_epoch as i64
    }

    fn data(&self) -> &[u8] {
        self.data
    }

    fn write_version(&self) -> i64 {
        // Same wrapping caveat as lamports().
        self.write_version as i64
    }
}
|
|
||||||
|
|
||||||
/// Common interface for the plugin's PostgreSQL writers
/// (e.g. `SimplePostgresClient`).
pub trait PostgresClient {
    /// Wait for any background work owned by the client to finish.
    /// The default is a no-op, suitable for fully synchronous clients.
    fn join(&mut self) -> thread::Result<()> {
        Ok(())
    }

    /// Persist a single account update. `is_startup` marks the initial
    /// snapshot phase, which implementations may batch differently.
    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError>;

    /// Persist a slot status change; `parent` is the parent slot when known.
    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError>;

    /// Called once the startup snapshot has been fully delivered; gives the
    /// implementation a chance to flush buffered writes.
    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
}
|
|
||||||
|
|
||||||
impl SimplePostgresClient {
    /// Open a connection to the PostgreSQL server described by `config`.
    ///
    /// An explicit `connection_str` takes precedence; otherwise `host`,
    /// `user` and `port` (default `DEFAULT_POSTGRES_PORT`) are combined.
    ///
    /// Errors: `ConfigurationError` when neither a connection string nor
    /// host+user are supplied; `DataStoreConnectionError` when the connect
    /// attempt itself fails.
    fn connect_to_db(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Client, AccountsDbPluginError> {
        let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);

        let connection_str = if let Some(connection_str) = &config.connection_str {
            connection_str.clone()
        } else {
            if config.host.is_none() || config.user.is_none() {
                let msg = format!(
                    "\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
                    config.connection_str, config.host, config.user
                );
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::ConfigurationError { msg },
                )));
            }
            // host/user are proven Some above, so unwrap is safe here.
            format!(
                "host={} user={} port={}",
                config.host.as_ref().unwrap(),
                config.user.as_ref().unwrap(),
                port
            )
        };

        // NOTE(review): connects with NoTls — assumes a trusted network path
        // to the database; confirm for remote deployments.
        match Client::connect(&connection_str, NoTls) {
            Err(err) => {
                let msg = format!(
                    "Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
                    err, connection_str
                );
                error!("{}", msg);
                Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
                )))
            }
            Ok(client) => Ok(client),
        }
    }

    /// Prepare the multi-row account upsert used during startup batching.
    ///
    /// One `($n, …, $n+8)` placeholder group of `ACCOUNT_COLUMN_COUNT`
    /// columns is emitted per batch entry, so the prepared statement takes
    /// `batch_size * ACCOUNT_COLUMN_COUNT` parameters. The ON CONFLICT clause
    /// only overwrites a row when the incoming (slot, write_version) pair is
    /// strictly newer than the stored one.
    fn build_bulk_account_insert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
        for j in 0..batch_size {
            // Placeholder indices are 1-based and advance one full row of
            // columns per batch entry.
            let row = j * ACCOUNT_COLUMN_COUNT;
            let val_str = format!(
                "(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
                row + 1,
                row + 2,
                row + 3,
                row + 4,
                row + 5,
                row + 6,
                row + 7,
                row + 8,
                row + 9,
            );

            if j == 0 {
                stmt = format!("{} {}", &stmt, val_str);
            } else {
                stmt = format!("{}, {}", &stmt, val_str);
            }
        }

        let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
        data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
        acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        stmt = format!("{} {}", stmt, handle_conflict);

        info!("{}", stmt);
        let bulk_stmt = client.prepare(&stmt);

        match bulk_stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }

    /// Prepare the single-row account upsert used outside of startup.
    /// Same conflict-resolution rule as the bulk statement: a row is only
    /// replaced by a strictly newer (slot, write_version) pair.
    fn build_single_account_upsert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
        ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
        data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
        acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }

    /// Prepare the slot upsert used when the parent slot is known
    /// (4 parameters: slot, parent, status, updated_on).
    fn build_slot_upsert_statement_with_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
        VALUES ($1, $2, $3, $4) \
        ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    /// Prepare the slot upsert used when no parent slot is supplied
    /// (3 parameters: slot, status, updated_on). The conflict clause does
    /// not touch `parent`, so an existing parent value is preserved.
    fn build_slot_upsert_statement_without_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, status, updated_on) \
        VALUES ($1, $2, $3) \
        ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    /// Internal function for updating or inserting a single account.
    ///
    /// Executes the prepared single-row upsert with the account's fields plus
    /// a fresh `updated_on` timestamp. Logs and returns
    /// `AccountsUpdateError` on failure.
    fn upsert_account_internal(
        account: &DbAccountInfo,
        statement: &Statement,
        client: &mut Client,
    ) -> Result<(), AccountsDbPluginError> {
        // lamports()/rent_epoch() already return i64; the casts here are
        // no-ops kept for explicitness.
        let lamports = account.lamports() as i64;
        let rent_epoch = account.rent_epoch() as i64;
        let updated_on = Utc::now().naive_utc();
        // Parameter order must match the $1..$9 placeholders of
        // build_single_account_upsert_statement.
        let result = client.query(
            statement,
            &[
                &account.pubkey(),
                &account.slot,
                &account.owner(),
                &lamports,
                &account.executable(),
                &rent_epoch,
                &account.data(),
                &account.write_version(),
                &updated_on,
            ],
        );

        if let Err(err) = result {
            let msg = format!(
                "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                err
            );
            error!("{}", msg);
            return Err(AccountsDbPluginError::AccountsUpdateError { msg });
        }

        Ok(())
    }

    /// Update or insert a single account (non-startup path).
    fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
        // get_mut on the Mutex needs no lock: &mut self proves exclusivity.
        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;
        Self::upsert_account_internal(account, statement, client)
    }

    /// Insert accounts in batch to reduce network overhead.
    ///
    /// Buffers the account; once exactly `batch_size` updates are pending,
    /// flattens them into one parameter list (column order matching the bulk
    /// statement's placeholders) and issues the bulk upsert. The buffer is
    /// cleared before the result is inspected, so a failed batch is dropped,
    /// not retried.
    fn insert_accounts_in_batch(
        &mut self,
        account: DbAccountInfo,
    ) -> Result<(), AccountsDbPluginError> {
        self.pending_account_updates.push(account);

        if self.pending_account_updates.len() == self.batch_size {
            let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");

            let mut values: Vec<&(dyn ToSql + Sync)> =
                Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
            // One shared timestamp for the whole batch.
            let updated_on = Utc::now().naive_utc();
            for j in 0..self.batch_size {
                let account = &self.pending_account_updates[j];

                values.push(&account.pubkey);
                values.push(&account.slot);
                values.push(&account.owner);
                values.push(&account.lamports);
                values.push(&account.executable);
                values.push(&account.rent_epoch);
                values.push(&account.data);
                values.push(&account.write_version);
                values.push(&updated_on);
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-prepare-values-us",
                measure.as_us() as usize,
                10000,
                10000
            );

            let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
            let client = self.client.get_mut().unwrap();
            let result = client
                .client
                .query(&client.bulk_account_insert_stmt, &values);

            self.pending_account_updates.clear();
            if let Err(err) = result {
                let msg = format!(
                    "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{}", msg);
                return Err(AccountsDbPluginError::AccountsUpdateError { msg });
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-us",
                measure.as_us() as usize,
                10000,
                10000
            );
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-count",
                self.batch_size,
                10000,
                10000
            );
        }
        Ok(())
    }

    /// Flush any left over accounts in batch which are not processed in the
    /// last batch. Each remaining account is written individually via the
    /// single-row upsert statement.
    fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
        if self.pending_account_updates.is_empty() {
            return Ok(());
        }

        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;

        for account in self.pending_account_updates.drain(..) {
            Self::upsert_account_internal(&account, statement, client)?;
        }

        Ok(())
    }

    /// Connect to the database and prepare all four statements (bulk insert,
    /// single upsert, slot upsert with/without parent), returning the
    /// assembled client. Any connection or preparation failure is propagated.
    pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        info!("Creating SimplePostgresClient...");
        let mut client = Self::connect_to_db(config)?;
        let bulk_account_insert_stmt =
            Self::build_bulk_account_insert_statement(&mut client, config)?;
        let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;

        let update_slot_with_parent_stmt =
            Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
        let update_slot_without_parent_stmt =
            Self::build_slot_upsert_statement_without_parent(&mut client, config)?;

        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        info!("Created SimplePostgresClient.");
        Ok(Self {
            batch_size,
            pending_account_updates: Vec::with_capacity(batch_size),
            client: Mutex::new(PostgresSqlClientWrapper {
                client,
                update_account_stmt,
                bulk_account_insert_stmt,
                update_slot_with_parent_stmt,
                update_slot_without_parent_stmt,
            }),
        })
    }
}
|
|
||||||
|
|
||||||
impl PostgresClient for SimplePostgresClient {
    /// Route an account update either straight to the database (steady state)
    /// or into the in-memory batch (startup snapshot phase).
    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError> {
        trace!(
            "Updating account {} with owner {} at slot {}",
            bs58::encode(account.pubkey()).into_string(),
            bs58::encode(account.owner()).into_string(),
            account.slot,
        );
        if !is_startup {
            return self.upsert_account(&account);
        }
        self.insert_accounts_in_batch(account)
    }

    /// Upsert a slot row, choosing the prepared statement that matches
    /// whether a parent slot is supplied.
    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError> {
        info!("Updating slot {:?} at with status {:?}", slot, status);

        let slot = slot as i64; // postgres only supports i64
        let parent = parent.map(|parent| parent as i64);
        let updated_on = Utc::now().naive_utc();
        let status_str = status.as_str();
        let client = self.client.get_mut().unwrap();

        let result = match parent {
            Some(parent) => client.client.execute(
                &client.update_slot_with_parent_stmt,
                &[&slot, &parent, &status_str, &updated_on],
            ),
            None => client.client.execute(
                &client.update_slot_without_parent_stmt,
                &[&slot, &status_str, &updated_on],
            ),
        };

        match result {
            Err(err) => {
                let msg = format!(
                    "Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{:?}", msg);
                return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
            }
            Ok(rows) => {
                // The slot upsert must always touch exactly one row; anything
                // else indicates a statement/schema mismatch, so panic.
                assert_eq!(1, rows, "Expected one rows to be updated a time");
            }
        }

        Ok(())
    }

    /// End of the startup snapshot: write out whatever is still buffered.
    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
        self.flush_buffered_writes()
    }
}
|
|
||||||
|
|
||||||
/// Work-queue message: persist one account update.
struct UpdateAccountRequest {
    // The account row to write.
    account: DbAccountInfo,
    // True while the startup snapshot is streaming; lets the worker use the
    // batched insert path.
    is_startup: bool,
}
|
|
||||||
|
|
||||||
/// Work-queue message: persist one slot status change.
struct UpdateSlotRequest {
    // Slot being updated.
    slot: u64,
    // Parent slot, when known.
    parent: Option<u64>,
    // New status to record for the slot.
    slot_status: SlotStatus,
}
|
|
||||||
|
|
||||||
/// Unit of work sent from `ParallelPostgresClient` to the worker threads.
enum DbWorkItem {
    UpdateAccount(UpdateAccountRequest),
    UpdateSlot(UpdateSlotRequest),
}
|
|
||||||
|
|
||||||
impl PostgresClientWorker {
    /// Create a worker with its own dedicated `SimplePostgresClient`
    /// connection; startup is initially not done.
    fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        let result = SimplePostgresClient::new(&config);
        match result {
            Ok(client) => Ok(PostgresClientWorker {
                client,
                is_startup_done: false,
            }),
            Err(err) => {
                error!("Error in creating SimplePostgresClient: {}", err);
                Err(err)
            }
        }
    }

    /// Worker main loop: drain `receiver` until `exit_worker` is set.
    ///
    /// Receives with a 500ms timeout so the loop can also notice the
    /// `is_startup_done` flag while idle; on that first idle observation it
    /// flushes startup buffers via `notify_end_of_startup` and bumps
    /// `startup_done_count` exactly once (guarded by `self.is_startup_done`).
    /// With `panic_on_db_errors` set, any database failure aborts the
    /// process; otherwise errors are logged and the loop continues (or, for
    /// a non-timeout receive error, breaks).
    fn do_work(
        &mut self,
        receiver: Receiver<DbWorkItem>,
        exit_worker: Arc<AtomicBool>,
        is_startup_done: Arc<AtomicBool>,
        startup_done_count: Arc<AtomicUsize>,
        panic_on_db_errors: bool,
    ) -> Result<(), AccountsDbPluginError> {
        while !exit_worker.load(Ordering::Relaxed) {
            let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
            let work = receiver.recv_timeout(Duration::from_millis(500));
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-worker-recv-us",
                measure.as_us() as usize,
                100000,
                100000
            );
            match work {
                Ok(work) => match work {
                    DbWorkItem::UpdateAccount(request) => {
                        if let Err(err) = self
                            .client
                            .update_account(request.account, request.is_startup)
                        {
                            error!("Failed to update account: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                    DbWorkItem::UpdateSlot(request) => {
                        if let Err(err) = self.client.update_slot_status(
                            request.slot,
                            request.parent,
                            request.slot_status,
                        ) {
                            error!("Failed to update slot: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                },
                Err(err) => match err {
                    RecvTimeoutError::Timeout => {
                        // Idle: perform the one-time end-of-startup flush if
                        // the coordinator has signalled it.
                        if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
                            if let Err(err) = self.client.notify_end_of_startup() {
                                error!("Error in notifying end of startup: ({})", err);
                                if panic_on_db_errors {
                                    abort();
                                }
                            }
                            self.is_startup_done = true;
                            startup_done_count.fetch_add(1, Ordering::Relaxed);
                        }

                        continue;
                    }
                    _ => {
                        // Disconnected sender (or other receive failure):
                        // nothing more will arrive, so stop the worker.
                        error!("Error in receiving the item {:?}", err);
                        if panic_on_db_errors {
                            abort();
                        }
                        break;
                    }
                },
            }
        }
        Ok(())
    }
}
|
|
||||||
/// Fan-out client: pushes `DbWorkItem`s onto a bounded channel consumed by a
/// pool of `PostgresClientWorker` threads, each with its own DB connection.
pub struct ParallelPostgresClient {
    // Join handles for the worker threads.
    workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
    // Set to true to ask all workers to stop (checked each loop iteration).
    exit_worker: Arc<AtomicBool>,
    // Signals workers that the startup snapshot has been fully enqueued.
    is_startup_done: Arc<AtomicBool>,
    // Count of workers that have finished their end-of-startup flush.
    startup_done_count: Arc<AtomicUsize>,
    // Count of workers that connected successfully; compared against
    // startup_done_count in notify_end_of_startup.
    initialized_worker_count: Arc<AtomicUsize>,
    // Producer side of the bounded work queue.
    sender: Sender<DbWorkItem>,
    // Rate-limits the periodic queue-length datapoint.
    last_report: AtomicInterval,
}
|
|
||||||
|
|
||||||
impl ParallelPostgresClient {
|
|
||||||
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
|
||||||
info!("Creating ParallelPostgresClient...");
|
|
||||||
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
|
|
||||||
let exit_worker = Arc::new(AtomicBool::new(false));
|
|
||||||
let mut workers = Vec::default();
|
|
||||||
let is_startup_done = Arc::new(AtomicBool::new(false));
|
|
||||||
let startup_done_count = Arc::new(AtomicUsize::new(0));
|
|
||||||
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
|
|
||||||
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
|
|
||||||
for i in 0..worker_count {
|
|
||||||
let cloned_receiver = receiver.clone();
|
|
||||||
let exit_clone = exit_worker.clone();
|
|
||||||
let is_startup_done_clone = is_startup_done.clone();
|
|
||||||
let startup_done_count_clone = startup_done_count.clone();
|
|
||||||
let initialized_worker_count_clone = initialized_worker_count.clone();
|
|
||||||
let config = config.clone();
|
|
||||||
let worker = Builder::new()
|
|
||||||
.name(format!("worker-{}", i))
|
|
||||||
.spawn(move || -> Result<(), AccountsDbPluginError> {
|
|
||||||
let panic_on_db_errors = *config
|
|
||||||
.panic_on_db_errors
|
|
||||||
.as_ref()
|
|
||||||
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
|
|
||||||
let result = PostgresClientWorker::new(config);
|
|
||||||
|
|
||||||
match result {
|
|
||||||
Ok(mut worker) => {
|
|
||||||
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
|
|
||||||
worker.do_work(
|
|
||||||
cloned_receiver,
|
|
||||||
exit_clone,
|
|
||||||
is_startup_done_clone,
|
|
||||||
startup_done_count_clone,
|
|
||||||
panic_on_db_errors,
|
|
||||||
)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
error!("Error when making connection to database: ({})", err);
|
|
||||||
if panic_on_db_errors {
|
|
||||||
abort();
|
|
||||||
}
|
|
||||||
Err(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
workers.push(worker);
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Created ParallelPostgresClient.");
|
|
||||||
Ok(Self {
|
|
||||||
last_report: AtomicInterval::default(),
|
|
||||||
workers,
|
|
||||||
exit_worker,
|
|
||||||
is_startup_done,
|
|
||||||
startup_done_count,
|
|
||||||
initialized_worker_count,
|
|
||||||
sender,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn join(&mut self) -> thread::Result<()> {
|
|
||||||
self.exit_worker.store(true, Ordering::Relaxed);
|
|
||||||
while !self.workers.is_empty() {
|
|
||||||
let worker = self.workers.pop();
|
|
||||||
if worker.is_none() {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
let worker = worker.unwrap();
|
|
||||||
let result = worker.join().unwrap();
|
|
||||||
if result.is_err() {
|
|
||||||
error!("The worker thread has failed: {:?}", result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn update_account(
|
|
||||||
&mut self,
|
|
||||||
account: &ReplicaAccountInfo,
|
|
||||||
slot: u64,
|
|
||||||
is_startup: bool,
|
|
||||||
) -> Result<(), AccountsDbPluginError> {
|
|
||||||
if self.last_report.should_update(30000) {
|
|
||||||
datapoint_debug!(
|
|
||||||
"postgres-plugin-stats",
|
|
||||||
("message-queue-length", self.sender.len() as i64, i64),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
|
|
||||||
let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
|
|
||||||
account: DbAccountInfo::new(account, slot),
|
|
||||||
is_startup,
|
|
||||||
});
|
|
||||||
|
|
||||||
measure.stop();
|
|
||||||
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-posgres-create-work-item-us",
|
|
||||||
measure.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
|
|
||||||
|
|
||||||
if let Err(err) = self.sender.send(wrk_item) {
|
|
||||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
|
||||||
msg: format!(
|
|
||||||
"Failed to update the account {:?}, error: {:?}",
|
|
||||||
bs58::encode(account.pubkey()).into_string(),
|
|
||||||
err
|
|
||||||
),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
measure.stop();
|
|
||||||
inc_new_counter_debug!(
|
|
||||||
"accountsdb-plugin-posgres-send-msg-us",
|
|
||||||
measure.as_us() as usize,
|
|
||||||
100000,
|
|
||||||
100000
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn update_slot_status(
|
|
||||||
&mut self,
|
|
||||||
slot: u64,
|
|
||||||
parent: Option<u64>,
|
|
||||||
status: SlotStatus,
|
|
||||||
) -> Result<(), AccountsDbPluginError> {
|
|
||||||
if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
|
|
||||||
slot,
|
|
||||||
parent,
|
|
||||||
slot_status: status,
|
|
||||||
})) {
|
|
||||||
return Err(AccountsDbPluginError::SlotStatusUpdateError {
|
|
||||||
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
|
|
||||||
info!("Notifying the end of startup");
|
|
||||||
// Ensure all items in the queue has been received by the workers
|
|
||||||
while !self.sender.is_empty() {
|
|
||||||
sleep(Duration::from_millis(100));
|
|
||||||
}
|
|
||||||
self.is_startup_done.store(true, Ordering::Relaxed);
|
|
||||||
|
|
||||||
// Wait for all worker threads to be done with flushing
|
|
||||||
while self.startup_done_count.load(Ordering::Relaxed)
|
|
||||||
!= self.initialized_worker_count.load(Ordering::Relaxed)
|
|
||||||
{
|
|
||||||
info!(
|
|
||||||
"Startup done count: {}, good worker thread count: {}",
|
|
||||||
self.startup_done_count.load(Ordering::Relaxed),
|
|
||||||
self.initialized_worker_count.load(Ordering::Relaxed)
|
|
||||||
);
|
|
||||||
sleep(Duration::from_millis(100));
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Done with notifying the end of startup");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Stateless factory for the two PostgreSQL client flavors.
pub struct PostgresClientBuilder {}
|
|
||||||
|
|
||||||
impl PostgresClientBuilder {
    /// Build the multi-threaded client.
    /// NOTE(review): "pararallel" is a typo in this public name; it is kept
    /// because external callers depend on it.
    pub fn build_pararallel_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
        ParallelPostgresClient::new(config)
    }

    /// Build the single-connection, synchronous client.
    pub fn build_simple_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<SimplePostgresClient, AccountsDbPluginError> {
        SimplePostgresClient::new(config)
    }
}
|
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-banking-bench"
|
name = "solana-banking-bench"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -14,18 +14,16 @@ crossbeam-channel = "0.4"
|
|||||||
log = "0.4.11"
|
log = "0.4.11"
|
||||||
rand = "0.7.0"
|
rand = "0.7.0"
|
||||||
rayon = "1.5.0"
|
rayon = "1.5.0"
|
||||||
solana-core = { path = "../core", version = "=1.8.13" }
|
solana-core = { path = "../core", version = "=1.6.4" }
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-gossip = { path = "../gossip", version = "=1.8.13" }
|
solana-streamer = { path = "../streamer", version = "=1.6.4" }
|
||||||
solana-ledger = { path = "../ledger", version = "=1.8.13" }
|
solana-perf = { path = "../perf", version = "=1.6.4" }
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-ledger = { path = "../ledger", version = "=1.6.4" }
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-perf = { path = "../perf", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-poh = { path = "../poh", version = "=1.8.13" }
|
solana-measure = { path = "../measure", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-streamer = { path = "../streamer", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
@@ -1,37 +1,38 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
use {
|
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||||
clap::{crate_description, crate_name, value_t, App, Arg},
|
use crossbeam_channel::unbounded;
|
||||||
crossbeam_channel::unbounded,
|
use log::*;
|
||||||
log::*,
|
use rand::{thread_rng, Rng};
|
||||||
rand::{thread_rng, Rng},
|
use rayon::prelude::*;
|
||||||
rayon::prelude::*,
|
use solana_core::{
|
||||||
solana_core::banking_stage::BankingStage,
|
banking_stage::{create_test_recorder, BankingStage},
|
||||||
solana_gossip::cluster_info::{ClusterInfo, Node},
|
cluster_info::ClusterInfo,
|
||||||
solana_ledger::{
|
cluster_info::Node,
|
||||||
blockstore::Blockstore,
|
poh_recorder::PohRecorder,
|
||||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
poh_recorder::WorkingBankEntry,
|
||||||
get_tmp_ledger_path,
|
};
|
||||||
},
|
use solana_ledger::{
|
||||||
solana_measure::measure::Measure,
|
blockstore::Blockstore,
|
||||||
solana_perf::packet::to_packet_batches,
|
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||||
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
|
get_tmp_ledger_path,
|
||||||
solana_runtime::{
|
};
|
||||||
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
use solana_measure::measure::Measure;
|
||||||
cost_model::CostModel,
|
use solana_perf::packet::to_packets_chunked;
|
||||||
},
|
use solana_runtime::{
|
||||||
solana_sdk::{
|
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
||||||
hash::Hash,
|
};
|
||||||
signature::{Keypair, Signature},
|
use solana_sdk::{
|
||||||
system_transaction,
|
hash::Hash,
|
||||||
timing::{duration_as_us, timestamp},
|
signature::Keypair,
|
||||||
transaction::Transaction,
|
signature::Signature,
|
||||||
},
|
system_transaction,
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
timing::{duration_as_us, timestamp},
|
||||||
std::{
|
transaction::Transaction,
|
||||||
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
|
};
|
||||||
thread::sleep,
|
use std::{
|
||||||
time::{Duration, Instant},
|
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
|
||||||
},
|
thread::sleep,
|
||||||
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
fn check_txs(
|
fn check_txs(
|
||||||
@@ -77,7 +78,7 @@ fn make_accounts_txs(
|
|||||||
.into_par_iter()
|
.into_par_iter()
|
||||||
.map(|_| {
|
.map(|_| {
|
||||||
let mut new = dummy.clone();
|
let mut new = dummy.clone();
|
||||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||||
if !same_payer {
|
if !same_payer {
|
||||||
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
|
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
|
||||||
}
|
}
|
||||||
@@ -168,7 +169,6 @@ fn main() {
|
|||||||
|
|
||||||
let (verified_sender, verified_receiver) = unbounded();
|
let (verified_sender, verified_receiver) = unbounded();
|
||||||
let (vote_sender, vote_receiver) = unbounded();
|
let (vote_sender, vote_receiver) = unbounded();
|
||||||
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
|
|
||||||
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
|
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
|
||||||
let bank0 = Bank::new(&genesis_config);
|
let bank0 = Bank::new(&genesis_config);
|
||||||
let mut bank_forks = BankForks::new(bank0);
|
let mut bank_forks = BankForks::new(bank0);
|
||||||
@@ -189,7 +189,7 @@ fn main() {
|
|||||||
genesis_config.hash(),
|
genesis_config.hash(),
|
||||||
);
|
);
|
||||||
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
||||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||||
fund.signatures = vec![Signature::new(&sig[0..64])];
|
fund.signatures = vec![Signature::new(&sig[0..64])];
|
||||||
let x = bank.process_transaction(&fund);
|
let x = bank.process_transaction(&fund);
|
||||||
x.unwrap();
|
x.unwrap();
|
||||||
@@ -199,7 +199,7 @@ fn main() {
|
|||||||
if !skip_sanity {
|
if !skip_sanity {
|
||||||
//sanity check, make sure all the transactions can execute sequentially
|
//sanity check, make sure all the transactions can execute sequentially
|
||||||
transactions.iter().for_each(|tx| {
|
transactions.iter().for_each(|tx| {
|
||||||
let res = bank.process_transaction(tx);
|
let res = bank.process_transaction(&tx);
|
||||||
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
|
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
|
||||||
});
|
});
|
||||||
bank.clear_signatures();
|
bank.clear_signatures();
|
||||||
@@ -211,7 +211,7 @@ fn main() {
|
|||||||
bank.clear_signatures();
|
bank.clear_signatures();
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
|
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
|
||||||
let ledger_path = get_tmp_ledger_path!();
|
let ledger_path = get_tmp_ledger_path!();
|
||||||
{
|
{
|
||||||
let blockstore = Arc::new(
|
let blockstore = Arc::new(
|
||||||
@@ -219,21 +219,15 @@ fn main() {
|
|||||||
);
|
);
|
||||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||||
create_test_recorder(&bank, &blockstore, None);
|
create_test_recorder(&bank, &blockstore, None);
|
||||||
let cluster_info = ClusterInfo::new(
|
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||||
Node::new_localhost().info,
|
|
||||||
Arc::new(Keypair::new()),
|
|
||||||
SocketAddrSpace::Unspecified,
|
|
||||||
);
|
|
||||||
let cluster_info = Arc::new(cluster_info);
|
let cluster_info = Arc::new(cluster_info);
|
||||||
let banking_stage = BankingStage::new(
|
let banking_stage = BankingStage::new(
|
||||||
&cluster_info,
|
&cluster_info,
|
||||||
&poh_recorder,
|
&poh_recorder,
|
||||||
verified_receiver,
|
verified_receiver,
|
||||||
tpu_vote_receiver,
|
|
||||||
vote_receiver,
|
vote_receiver,
|
||||||
None,
|
None,
|
||||||
replay_vote_sender,
|
replay_vote_sender,
|
||||||
Arc::new(RwLock::new(CostModel::default())),
|
|
||||||
);
|
);
|
||||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||||
|
|
||||||
@@ -361,10 +355,10 @@ fn main() {
|
|||||||
if bank.slot() > 0 && bank.slot() % 16 == 0 {
|
if bank.slot() > 0 && bank.slot() % 16 == 0 {
|
||||||
for tx in transactions.iter_mut() {
|
for tx in transactions.iter_mut() {
|
||||||
tx.message.recent_blockhash = bank.last_blockhash();
|
tx.message.recent_blockhash = bank.last_blockhash();
|
||||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||||
tx.signatures[0] = Signature::new(&sig[0..64]);
|
tx.signatures[0] = Signature::new(&sig[0..64]);
|
||||||
}
|
}
|
||||||
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
|
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||||
}
|
}
|
||||||
|
|
||||||
start += chunk_len;
|
start += chunk_len;
|
||||||
@@ -386,7 +380,6 @@ fn main() {
|
|||||||
);
|
);
|
||||||
|
|
||||||
drop(verified_sender);
|
drop(verified_sender);
|
||||||
drop(tpu_vote_sender);
|
|
||||||
drop(vote_sender);
|
drop(vote_sender);
|
||||||
exit.store(true, Ordering::Relaxed);
|
exit.store(true, Ordering::Relaxed);
|
||||||
banking_stage.join().unwrap();
|
banking_stage.join().unwrap();
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "solana-banks-client"
|
name = "solana-banks-client"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
description = "Solana banks client"
|
description = "Solana banks client"
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
@@ -11,20 +11,20 @@ edition = "2018"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bincode = "1.3.1"
|
bincode = "1.3.1"
|
||||||
borsh = "0.9.0"
|
borsh = "0.8.1"
|
||||||
borsh-derive = "0.9.0"
|
borsh-derive = "0.8.1"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
mio = "0.7.6"
|
mio = "0.7.6"
|
||||||
solana-banks-interface = { path = "../banks-interface", version = "=1.8.13" }
|
solana-banks-interface = { path = "../banks-interface", version = "=1.6.4" }
|
||||||
solana-program = { path = "../sdk/program", version = "=1.8.13" }
|
solana-program = { path = "../sdk/program", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
tarpc = { version = "0.24.1", features = ["full"] }
|
tarpc = { version = "0.24.1", features = ["full"] }
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1.1", features = ["full"] }
|
||||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-banks-server = { path = "../banks-server", version = "=1.8.13" }
|
solana-banks-server = { path = "../banks-server", version = "=1.6.4" }
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
crate-type = ["lib"]
|
crate-type = ["lib"]
|
||||||
|
@@ -5,38 +5,31 @@
|
|||||||
//! but they are undocumented, may change over time, and are generally more
|
//! but they are undocumented, may change over time, and are generally more
|
||||||
//! cumbersome to use.
|
//! cumbersome to use.
|
||||||
|
|
||||||
|
use borsh::BorshDeserialize;
|
||||||
|
use futures::{future::join_all, Future, FutureExt};
|
||||||
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||||
use {
|
use solana_banks_interface::{BanksRequest, BanksResponse};
|
||||||
borsh::BorshDeserialize,
|
use solana_program::{
|
||||||
futures::{future::join_all, Future, FutureExt},
|
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||||
solana_banks_interface::{BanksRequest, BanksResponse},
|
rent::Rent, sysvar,
|
||||||
solana_program::{
|
|
||||||
clock::{Clock, Slot},
|
|
||||||
fee_calculator::FeeCalculator,
|
|
||||||
hash::Hash,
|
|
||||||
program_pack::Pack,
|
|
||||||
pubkey::Pubkey,
|
|
||||||
rent::Rent,
|
|
||||||
sysvar::{self, Sysvar},
|
|
||||||
},
|
|
||||||
solana_sdk::{
|
|
||||||
account::{from_account, Account},
|
|
||||||
commitment_config::CommitmentLevel,
|
|
||||||
signature::Signature,
|
|
||||||
transaction::{self, Transaction},
|
|
||||||
transport,
|
|
||||||
},
|
|
||||||
std::io::{self, Error, ErrorKind},
|
|
||||||
tarpc::{
|
|
||||||
client::{self, channel::RequestDispatch, NewClient},
|
|
||||||
context::{self, Context},
|
|
||||||
rpc::{ClientMessage, Response},
|
|
||||||
serde_transport::tcp,
|
|
||||||
Transport,
|
|
||||||
},
|
|
||||||
tokio::{net::ToSocketAddrs, time::Duration},
|
|
||||||
tokio_serde::formats::Bincode,
|
|
||||||
};
|
};
|
||||||
|
use solana_sdk::{
|
||||||
|
account::{from_account, Account},
|
||||||
|
commitment_config::CommitmentLevel,
|
||||||
|
signature::Signature,
|
||||||
|
transaction::{self, Transaction},
|
||||||
|
transport,
|
||||||
|
};
|
||||||
|
use std::io::{self, Error, ErrorKind};
|
||||||
|
use tarpc::{
|
||||||
|
client::{self, channel::RequestDispatch, NewClient},
|
||||||
|
context::{self, Context},
|
||||||
|
rpc::{ClientMessage, Response},
|
||||||
|
serde_transport::tcp,
|
||||||
|
Transport,
|
||||||
|
};
|
||||||
|
use tokio::{net::ToSocketAddrs, time::Duration};
|
||||||
|
use tokio_serde::formats::Bincode;
|
||||||
|
|
||||||
// This exists only for backward compatibility
|
// This exists only for backward compatibility
|
||||||
pub trait BanksClientExt {}
|
pub trait BanksClientExt {}
|
||||||
@@ -70,7 +63,7 @@ impl BanksClient {
|
|||||||
&mut self,
|
&mut self,
|
||||||
ctx: Context,
|
ctx: Context,
|
||||||
commitment: CommitmentLevel,
|
commitment: CommitmentLevel,
|
||||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
|
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||||
self.inner
|
self.inner
|
||||||
.get_fees_with_commitment_and_context(ctx, commitment)
|
.get_fees_with_commitment_and_context(ctx, commitment)
|
||||||
}
|
}
|
||||||
@@ -92,14 +85,6 @@ impl BanksClient {
|
|||||||
self.inner.get_slot_with_context(ctx, commitment)
|
self.inner.get_slot_with_context(ctx, commitment)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_block_height_with_context(
|
|
||||||
&mut self,
|
|
||||||
ctx: Context,
|
|
||||||
commitment: CommitmentLevel,
|
|
||||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
|
||||||
self.inner.get_block_height_with_context(ctx, commitment)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn process_transaction_with_commitment_and_context(
|
pub fn process_transaction_with_commitment_and_context(
|
||||||
&mut self,
|
&mut self,
|
||||||
ctx: Context,
|
ctx: Context,
|
||||||
@@ -130,39 +115,24 @@ impl BanksClient {
|
|||||||
self.send_transaction_with_context(context::current(), transaction)
|
self.send_transaction_with_context(context::current(), transaction)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the cluster clock
|
|
||||||
pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
|
|
||||||
self.get_account(sysvar::clock::id()).map(|result| {
|
|
||||||
let clock_sysvar = result?
|
|
||||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
|
|
||||||
from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
|
|
||||||
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
|
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
|
||||||
/// will use the transaction's blockhash to look up these same fee parameters and
|
/// will use the transaction's blockhash to look up these same fee parameters and
|
||||||
/// use them to calculate the transaction fee.
|
/// use them to calculate the transaction fee.
|
||||||
pub fn get_fees(
|
pub fn get_fees(
|
||||||
&mut self,
|
&mut self,
|
||||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
|
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||||
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
|
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the cluster Sysvar
|
|
||||||
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
|
|
||||||
self.get_account(T::id()).map(|result| {
|
|
||||||
let sysvar = result?
|
|
||||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
|
|
||||||
from_account::<T, _>(&sysvar)
|
|
||||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the cluster rent
|
/// Return the cluster rent
|
||||||
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
|
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
|
||||||
self.get_sysvar::<Rent>()
|
self.get_account(sysvar::rent::id()).map(|result| {
|
||||||
|
let rent_sysvar = result?
|
||||||
|
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
|
||||||
|
from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
|
||||||
|
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
|
||||||
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return a recent, rooted blockhash from the server. The cluster will only accept
|
/// Return a recent, rooted blockhash from the server. The cluster will only accept
|
||||||
@@ -222,18 +192,12 @@ impl BanksClient {
|
|||||||
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
|
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the most recent rooted slot. All transactions at or below this slot
|
/// Return the most recent rooted slot height. All transactions at or below this height
|
||||||
/// are said to be finalized. The cluster will not fork to a higher slot.
|
/// are said to be finalized. The cluster will not fork to a higher slot height.
|
||||||
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
|
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||||
self.get_slot_with_context(context::current(), CommitmentLevel::default())
|
self.get_slot_with_context(context::current(), CommitmentLevel::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the most recent rooted block height. All transactions at or below this height
|
|
||||||
/// are said to be finalized. The cluster will not fork to a higher block height.
|
|
||||||
pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
|
|
||||||
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the account at the given address at the slot corresponding to the given
|
/// Return the account at the given address at the slot corresponding to the given
|
||||||
/// commitment level. If the account is not found, None is returned.
|
/// commitment level. If the account is not found, None is returned.
|
||||||
pub fn get_account_with_commitment(
|
pub fn get_account_with_commitment(
|
||||||
@@ -349,18 +313,16 @@ pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClie
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_banks_server::banks_server::start_local_server;
|
||||||
solana_banks_server::banks_server::start_local_server,
|
use solana_runtime::{
|
||||||
solana_runtime::{
|
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
|
||||||
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
|
genesis_utils::create_genesis_config,
|
||||||
genesis_utils::create_genesis_config,
|
|
||||||
},
|
|
||||||
solana_sdk::{message::Message, signature::Signer, system_instruction},
|
|
||||||
std::sync::{Arc, RwLock},
|
|
||||||
tarpc::transport,
|
|
||||||
tokio::{runtime::Runtime, time::sleep},
|
|
||||||
};
|
};
|
||||||
|
use solana_sdk::{message::Message, signature::Signer, system_instruction};
|
||||||
|
use std::sync::{Arc, RwLock};
|
||||||
|
use tarpc::transport;
|
||||||
|
use tokio::{runtime::Runtime, time::sleep};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_banks_client_new() {
|
fn test_banks_client_new() {
|
||||||
@@ -388,9 +350,7 @@ mod tests {
|
|||||||
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
||||||
|
|
||||||
Runtime::new()?.block_on(async {
|
Runtime::new()?.block_on(async {
|
||||||
let client_transport =
|
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
|
||||||
.await;
|
|
||||||
let mut banks_client = start_client(client_transport).await?;
|
let mut banks_client = start_client(client_transport).await?;
|
||||||
|
|
||||||
let recent_blockhash = banks_client.get_recent_blockhash().await?;
|
let recent_blockhash = banks_client.get_recent_blockhash().await?;
|
||||||
@@ -417,15 +377,13 @@ mod tests {
|
|||||||
|
|
||||||
let mint_pubkey = &genesis.mint_keypair.pubkey();
|
let mint_pubkey = &genesis.mint_keypair.pubkey();
|
||||||
let bob_pubkey = solana_sdk::pubkey::new_rand();
|
let bob_pubkey = solana_sdk::pubkey::new_rand();
|
||||||
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
|
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
|
||||||
let message = Message::new(&[instruction], Some(mint_pubkey));
|
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
||||||
|
|
||||||
Runtime::new()?.block_on(async {
|
Runtime::new()?.block_on(async {
|
||||||
let client_transport =
|
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
|
||||||
.await;
|
|
||||||
let mut banks_client = start_client(client_transport).await?;
|
let mut banks_client = start_client(client_transport).await?;
|
||||||
let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
|
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
|
||||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||||
let signature = transaction.signatures[0];
|
let signature = transaction.signatures[0];
|
||||||
banks_client.send_transaction(transaction).await?;
|
banks_client.send_transaction(transaction).await?;
|
||||||
@@ -433,8 +391,8 @@ mod tests {
|
|||||||
let mut status = banks_client.get_transaction_status(signature).await?;
|
let mut status = banks_client.get_transaction_status(signature).await?;
|
||||||
|
|
||||||
while status.is_none() {
|
while status.is_none() {
|
||||||
let root_block_height = banks_client.get_root_block_height().await?;
|
let root_slot = banks_client.get_root_slot().await?;
|
||||||
if root_block_height > last_valid_block_height {
|
if root_slot > last_valid_slot {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
sleep(Duration::from_millis(100)).await;
|
sleep(Duration::from_millis(100)).await;
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "solana-banks-interface"
|
name = "solana-banks-interface"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
description = "Solana banks RPC interface"
|
description = "Solana banks RPC interface"
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
@@ -12,11 +12,11 @@ edition = "2018"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
mio = "0.7.6"
|
mio = "0.7.6"
|
||||||
serde = { version = "1.0.122", features = ["derive"] }
|
serde = { version = "1.0.122", features = ["derive"] }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
tarpc = { version = "0.24.1", features = ["full"] }
|
tarpc = { version = "0.24.1", features = ["full"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1.1", features = ["full"] }
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
crate-type = ["lib"]
|
crate-type = ["lib"]
|
||||||
|
@@ -1,15 +1,13 @@
|
|||||||
use {
|
use serde::{Deserialize, Serialize};
|
||||||
serde::{Deserialize, Serialize},
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
account::Account,
|
||||||
account::Account,
|
clock::Slot,
|
||||||
clock::Slot,
|
commitment_config::CommitmentLevel,
|
||||||
commitment_config::CommitmentLevel,
|
fee_calculator::FeeCalculator,
|
||||||
fee_calculator::FeeCalculator,
|
hash::Hash,
|
||||||
hash::Hash,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
signature::Signature,
|
||||||
signature::Signature,
|
transaction::{self, Transaction, TransactionError},
|
||||||
transaction::{self, Transaction, TransactionError},
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||||
@@ -36,7 +34,6 @@ pub trait Banks {
|
|||||||
async fn get_transaction_status_with_context(signature: Signature)
|
async fn get_transaction_status_with_context(signature: Signature)
|
||||||
-> Option<TransactionStatus>;
|
-> Option<TransactionStatus>;
|
||||||
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
|
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
|
||||||
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
|
|
||||||
async fn process_transaction_with_commitment_and_context(
|
async fn process_transaction_with_commitment_and_context(
|
||||||
transaction: Transaction,
|
transaction: Transaction,
|
||||||
commitment: CommitmentLevel,
|
commitment: CommitmentLevel,
|
||||||
@@ -49,10 +46,8 @@ pub trait Banks {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use tarpc::{client, transport};
|
||||||
tarpc::{client, transport},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_banks_client_new() {
|
fn test_banks_client_new() {
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "solana-banks-server"
|
name = "solana-banks-server"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
description = "Solana banks server"
|
description = "Solana banks server"
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
@@ -14,12 +14,12 @@ bincode = "1.3.1"
|
|||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
log = "0.4.11"
|
log = "0.4.11"
|
||||||
mio = "0.7.6"
|
mio = "0.7.6"
|
||||||
solana-banks-interface = { path = "../banks-interface", version = "=1.8.13" }
|
solana-banks-interface = { path = "../banks-interface", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-metrics = { path = "../metrics", version = "=1.8.13" }
|
solana-metrics = { path = "../metrics", version = "=1.6.4" }
|
||||||
tarpc = { version = "0.24.1", features = ["full"] }
|
tarpc = { version = "0.24.1", features = ["full"] }
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1.1", features = ["full"] }
|
||||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||||
tokio-stream = "0.1"
|
tokio-stream = "0.1"
|
||||||
|
|
||||||
|
@@ -1,52 +1,48 @@
|
|||||||
use {
|
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
|
||||||
crate::send_transaction_service::{SendTransactionService, TransactionInfo},
|
use bincode::{deserialize, serialize};
|
||||||
bincode::{deserialize, serialize},
|
use futures::{
|
||||||
futures::{
|
future,
|
||||||
future,
|
prelude::stream::{self, StreamExt},
|
||||||
prelude::stream::{self, StreamExt},
|
|
||||||
},
|
|
||||||
solana_banks_interface::{
|
|
||||||
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
|
|
||||||
},
|
|
||||||
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
|
|
||||||
solana_sdk::{
|
|
||||||
account::Account,
|
|
||||||
clock::Slot,
|
|
||||||
commitment_config::CommitmentLevel,
|
|
||||||
feature_set::FeatureSet,
|
|
||||||
fee_calculator::FeeCalculator,
|
|
||||||
hash::Hash,
|
|
||||||
pubkey::Pubkey,
|
|
||||||
signature::Signature,
|
|
||||||
transaction::{self, Transaction},
|
|
||||||
},
|
|
||||||
std::{
|
|
||||||
io,
|
|
||||||
net::{Ipv4Addr, SocketAddr},
|
|
||||||
sync::{
|
|
||||||
mpsc::{channel, Receiver, Sender},
|
|
||||||
Arc, RwLock,
|
|
||||||
},
|
|
||||||
thread::Builder,
|
|
||||||
time::Duration,
|
|
||||||
},
|
|
||||||
tarpc::{
|
|
||||||
context::Context,
|
|
||||||
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
|
|
||||||
serde_transport::tcp,
|
|
||||||
server::{self, Channel, Handler},
|
|
||||||
transport,
|
|
||||||
},
|
|
||||||
tokio::time::sleep,
|
|
||||||
tokio_serde::formats::Bincode,
|
|
||||||
};
|
};
|
||||||
|
use solana_banks_interface::{
|
||||||
|
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
|
||||||
|
};
|
||||||
|
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
|
||||||
|
use solana_sdk::{
|
||||||
|
account::Account,
|
||||||
|
clock::Slot,
|
||||||
|
commitment_config::CommitmentLevel,
|
||||||
|
fee_calculator::FeeCalculator,
|
||||||
|
hash::Hash,
|
||||||
|
pubkey::Pubkey,
|
||||||
|
signature::Signature,
|
||||||
|
transaction::{self, Transaction},
|
||||||
|
};
|
||||||
|
use std::{
|
||||||
|
io,
|
||||||
|
net::{Ipv4Addr, SocketAddr},
|
||||||
|
sync::{
|
||||||
|
mpsc::{channel, Receiver, Sender},
|
||||||
|
Arc, RwLock,
|
||||||
|
},
|
||||||
|
thread::Builder,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
use tarpc::{
|
||||||
|
context::Context,
|
||||||
|
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
|
||||||
|
serde_transport::tcp,
|
||||||
|
server::{self, Channel, Handler},
|
||||||
|
transport,
|
||||||
|
};
|
||||||
|
use tokio::time::sleep;
|
||||||
|
use tokio_serde::formats::Bincode;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct BanksServer {
|
struct BanksServer {
|
||||||
bank_forks: Arc<RwLock<BankForks>>,
|
bank_forks: Arc<RwLock<BankForks>>,
|
||||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||||
transaction_sender: Sender<TransactionInfo>,
|
transaction_sender: Sender<TransactionInfo>,
|
||||||
poll_signature_status_sleep_duration: Duration,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BanksServer {
|
impl BanksServer {
|
||||||
@@ -58,13 +54,11 @@ impl BanksServer {
|
|||||||
bank_forks: Arc<RwLock<BankForks>>,
|
bank_forks: Arc<RwLock<BankForks>>,
|
||||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||||
transaction_sender: Sender<TransactionInfo>,
|
transaction_sender: Sender<TransactionInfo>,
|
||||||
poll_signature_status_sleep_duration: Duration,
|
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
bank_forks,
|
bank_forks,
|
||||||
block_commitment_cache,
|
block_commitment_cache,
|
||||||
transaction_sender,
|
transaction_sender,
|
||||||
poll_signature_status_sleep_duration,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,7 +81,6 @@ impl BanksServer {
|
|||||||
fn new_loopback(
|
fn new_loopback(
|
||||||
bank_forks: Arc<RwLock<BankForks>>,
|
bank_forks: Arc<RwLock<BankForks>>,
|
||||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||||
poll_signature_status_sleep_duration: Duration,
|
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let (transaction_sender, transaction_receiver) = channel();
|
let (transaction_sender, transaction_receiver) = channel();
|
||||||
let bank = bank_forks.read().unwrap().working_bank();
|
let bank = bank_forks.read().unwrap().working_bank();
|
||||||
@@ -102,12 +95,7 @@ impl BanksServer {
|
|||||||
.name("solana-bank-forks-client".to_string())
|
.name("solana-bank-forks-client".to_string())
|
||||||
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
|
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
Self::new(
|
Self::new(bank_forks, block_commitment_cache, transaction_sender)
|
||||||
bank_forks,
|
|
||||||
block_commitment_cache,
|
|
||||||
transaction_sender,
|
|
||||||
poll_signature_status_sleep_duration,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn slot(&self, commitment: CommitmentLevel) -> Slot {
|
fn slot(&self, commitment: CommitmentLevel) -> Slot {
|
||||||
@@ -125,16 +113,16 @@ impl BanksServer {
|
|||||||
self,
|
self,
|
||||||
signature: &Signature,
|
signature: &Signature,
|
||||||
blockhash: &Hash,
|
blockhash: &Hash,
|
||||||
last_valid_block_height: u64,
|
last_valid_slot: Slot,
|
||||||
commitment: CommitmentLevel,
|
commitment: CommitmentLevel,
|
||||||
) -> Option<transaction::Result<()>> {
|
) -> Option<transaction::Result<()>> {
|
||||||
let mut status = self
|
let mut status = self
|
||||||
.bank(commitment)
|
.bank(commitment)
|
||||||
.get_signature_status_with_blockhash(signature, blockhash);
|
.get_signature_status_with_blockhash(signature, blockhash);
|
||||||
while status.is_none() {
|
while status.is_none() {
|
||||||
sleep(self.poll_signature_status_sleep_duration).await;
|
sleep(Duration::from_millis(200)).await;
|
||||||
let bank = self.bank(commitment);
|
let bank = self.bank(commitment);
|
||||||
if bank.block_height() > last_valid_block_height {
|
if bank.slot() > last_valid_slot {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
status = bank.get_signature_status_with_blockhash(signature, blockhash);
|
status = bank.get_signature_status_with_blockhash(signature, blockhash);
|
||||||
@@ -143,13 +131,10 @@ impl BanksServer {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn verify_transaction(
|
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
|
||||||
transaction: &Transaction,
|
|
||||||
feature_set: &Arc<FeatureSet>,
|
|
||||||
) -> transaction::Result<()> {
|
|
||||||
if let Err(err) = transaction.verify() {
|
if let Err(err) = transaction.verify() {
|
||||||
Err(err)
|
Err(err)
|
||||||
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
|
} else if let Err(err) = transaction.verify_precompiles() {
|
||||||
Err(err)
|
Err(err)
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -160,19 +145,16 @@ fn verify_transaction(
|
|||||||
impl Banks for BanksServer {
|
impl Banks for BanksServer {
|
||||||
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
|
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
|
||||||
let blockhash = &transaction.message.recent_blockhash;
|
let blockhash = &transaction.message.recent_blockhash;
|
||||||
let last_valid_block_height = self
|
let last_valid_slot = self
|
||||||
.bank_forks
|
.bank_forks
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.root_bank()
|
.root_bank()
|
||||||
.get_blockhash_last_valid_block_height(blockhash)
|
.get_blockhash_last_valid_slot(&blockhash)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||||
let info = TransactionInfo::new(
|
let info =
|
||||||
signature,
|
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
|
||||||
serialize(&transaction).unwrap(),
|
|
||||||
last_valid_block_height,
|
|
||||||
);
|
|
||||||
self.transaction_sender.send(info).unwrap();
|
self.transaction_sender.send(info).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -180,13 +162,11 @@ impl Banks for BanksServer {
|
|||||||
self,
|
self,
|
||||||
_: Context,
|
_: Context,
|
||||||
commitment: CommitmentLevel,
|
commitment: CommitmentLevel,
|
||||||
) -> (FeeCalculator, Hash, u64) {
|
) -> (FeeCalculator, Hash, Slot) {
|
||||||
let bank = self.bank(commitment);
|
let bank = self.bank(commitment);
|
||||||
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
|
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
|
||||||
let last_valid_block_height = bank
|
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
|
||||||
.get_blockhash_last_valid_block_height(&blockhash)
|
(fee_calculator, blockhash, last_valid_slot)
|
||||||
.unwrap();
|
|
||||||
(fee_calculator, blockhash, last_valid_block_height)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_transaction_status_with_context(
|
async fn get_transaction_status_with_context(
|
||||||
@@ -229,33 +209,29 @@ impl Banks for BanksServer {
|
|||||||
self.slot(commitment)
|
self.slot(commitment)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
|
|
||||||
self.bank(commitment).block_height()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn process_transaction_with_commitment_and_context(
|
async fn process_transaction_with_commitment_and_context(
|
||||||
self,
|
self,
|
||||||
_: Context,
|
_: Context,
|
||||||
transaction: Transaction,
|
transaction: Transaction,
|
||||||
commitment: CommitmentLevel,
|
commitment: CommitmentLevel,
|
||||||
) -> Option<transaction::Result<()>> {
|
) -> Option<transaction::Result<()>> {
|
||||||
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
|
if let Err(err) = verify_transaction(&transaction) {
|
||||||
return Some(Err(err));
|
return Some(Err(err));
|
||||||
}
|
}
|
||||||
|
|
||||||
let blockhash = &transaction.message.recent_blockhash;
|
let blockhash = &transaction.message.recent_blockhash;
|
||||||
let last_valid_block_height = self
|
let last_valid_slot = self
|
||||||
.bank(commitment)
|
.bank_forks
|
||||||
.get_blockhash_last_valid_block_height(blockhash)
|
.read()
|
||||||
|
.unwrap()
|
||||||
|
.root_bank()
|
||||||
|
.get_blockhash_last_valid_slot(blockhash)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||||
let info = TransactionInfo::new(
|
let info =
|
||||||
signature,
|
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
|
||||||
serialize(&transaction).unwrap(),
|
|
||||||
last_valid_block_height,
|
|
||||||
);
|
|
||||||
self.transaction_sender.send(info).unwrap();
|
self.transaction_sender.send(info).unwrap();
|
||||||
self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
|
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,13 +249,8 @@ impl Banks for BanksServer {
|
|||||||
pub async fn start_local_server(
|
pub async fn start_local_server(
|
||||||
bank_forks: Arc<RwLock<BankForks>>,
|
bank_forks: Arc<RwLock<BankForks>>,
|
||||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||||
poll_signature_status_sleep_duration: Duration,
|
|
||||||
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
|
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
|
||||||
let banks_server = BanksServer::new_loopback(
|
let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
|
||||||
bank_forks,
|
|
||||||
block_commitment_cache,
|
|
||||||
poll_signature_status_sleep_duration,
|
|
||||||
);
|
|
||||||
let (client_transport, server_transport) = transport::channel::unbounded();
|
let (client_transport, server_transport) = transport::channel::unbounded();
|
||||||
let server = server::new(server::Config::default())
|
let server = server::new(server::Config::default())
|
||||||
.incoming(stream::once(future::ready(server_transport)))
|
.incoming(stream::once(future::ready(server_transport)))
|
||||||
@@ -314,12 +285,8 @@ pub async fn start_tcp_server(
|
|||||||
|
|
||||||
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
|
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
|
||||||
|
|
||||||
let server = BanksServer::new(
|
let server =
|
||||||
bank_forks.clone(),
|
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
|
||||||
block_commitment_cache.clone(),
|
|
||||||
sender,
|
|
||||||
Duration::from_millis(200),
|
|
||||||
);
|
|
||||||
chan.respond_with(server.serve()).execute()
|
chan.respond_with(server.serve()).execute()
|
||||||
})
|
})
|
||||||
// Max 10 channels.
|
// Max 10 channels.
|
||||||
|
@@ -1,23 +1,21 @@
|
|||||||
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
|
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
|
||||||
|
|
||||||
use {
|
use crate::banks_server::start_tcp_server;
|
||||||
crate::banks_server::start_tcp_server,
|
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
|
||||||
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
|
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
|
||||||
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
|
use std::{
|
||||||
std::{
|
net::SocketAddr,
|
||||||
net::SocketAddr,
|
sync::{
|
||||||
sync::{
|
atomic::{AtomicBool, Ordering},
|
||||||
atomic::{AtomicBool, Ordering},
|
Arc, RwLock,
|
||||||
Arc, RwLock,
|
|
||||||
},
|
|
||||||
thread::{self, Builder, JoinHandle},
|
|
||||||
},
|
},
|
||||||
tokio::{
|
thread::{self, Builder, JoinHandle},
|
||||||
runtime::Runtime,
|
|
||||||
time::{self, Duration},
|
|
||||||
},
|
|
||||||
tokio_stream::wrappers::IntervalStream,
|
|
||||||
};
|
};
|
||||||
|
use tokio::{
|
||||||
|
runtime::Runtime,
|
||||||
|
time::{self, Duration},
|
||||||
|
};
|
||||||
|
use tokio_stream::wrappers::IntervalStream;
|
||||||
|
|
||||||
pub struct RpcBanksService {
|
pub struct RpcBanksService {
|
||||||
thread_hdl: JoinHandle<()>,
|
thread_hdl: JoinHandle<()>,
|
||||||
@@ -103,7 +101,8 @@ impl RpcBanksService {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use {super::*, solana_runtime::bank::Bank};
|
use super::*;
|
||||||
|
use solana_runtime::bank::Bank;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_rpc_banks_server_exit() {
|
fn test_rpc_banks_server_exit() {
|
||||||
|
@@ -1,19 +1,17 @@
|
|||||||
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
|
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
|
||||||
use {
|
use log::*;
|
||||||
log::*,
|
use solana_metrics::{datapoint_warn, inc_new_counter_info};
|
||||||
solana_metrics::{datapoint_warn, inc_new_counter_info},
|
use solana_runtime::{bank::Bank, bank_forks::BankForks};
|
||||||
solana_runtime::{bank::Bank, bank_forks::BankForks},
|
use solana_sdk::{clock::Slot, signature::Signature};
|
||||||
solana_sdk::signature::Signature,
|
use std::{
|
||||||
std::{
|
collections::HashMap,
|
||||||
collections::HashMap,
|
net::{SocketAddr, UdpSocket},
|
||||||
net::{SocketAddr, UdpSocket},
|
sync::{
|
||||||
sync::{
|
mpsc::{Receiver, RecvTimeoutError},
|
||||||
mpsc::{Receiver, RecvTimeoutError},
|
Arc, RwLock,
|
||||||
Arc, RwLock,
|
|
||||||
},
|
|
||||||
thread::{self, Builder, JoinHandle},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
},
|
},
|
||||||
|
thread::{self, Builder, JoinHandle},
|
||||||
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Maximum size of the transaction queue
|
/// Maximum size of the transaction queue
|
||||||
@@ -26,19 +24,15 @@ pub struct SendTransactionService {
|
|||||||
pub struct TransactionInfo {
|
pub struct TransactionInfo {
|
||||||
pub signature: Signature,
|
pub signature: Signature,
|
||||||
pub wire_transaction: Vec<u8>,
|
pub wire_transaction: Vec<u8>,
|
||||||
pub last_valid_block_height: u64,
|
pub last_valid_slot: Slot,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TransactionInfo {
|
impl TransactionInfo {
|
||||||
pub fn new(
|
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
|
||||||
signature: Signature,
|
|
||||||
wire_transaction: Vec<u8>,
|
|
||||||
last_valid_block_height: u64,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
Self {
|
||||||
signature,
|
signature,
|
||||||
wire_transaction,
|
wire_transaction,
|
||||||
last_valid_block_height,
|
last_valid_slot,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -130,7 +124,7 @@ impl SendTransactionService {
|
|||||||
result.rooted += 1;
|
result.rooted += 1;
|
||||||
inc_new_counter_info!("send_transaction_service-rooted", 1);
|
inc_new_counter_info!("send_transaction_service-rooted", 1);
|
||||||
false
|
false
|
||||||
} else if transaction_info.last_valid_block_height < root_bank.block_height() {
|
} else if transaction_info.last_valid_slot < root_bank.slot() {
|
||||||
info!("Dropping expired transaction: {}", signature);
|
info!("Dropping expired transaction: {}", signature);
|
||||||
result.expired += 1;
|
result.expired += 1;
|
||||||
inc_new_counter_info!("send_transaction_service-expired", 1);
|
inc_new_counter_info!("send_transaction_service-expired", 1);
|
||||||
@@ -144,8 +138,8 @@ impl SendTransactionService {
|
|||||||
result.retried += 1;
|
result.retried += 1;
|
||||||
inc_new_counter_info!("send_transaction_service-retry", 1);
|
inc_new_counter_info!("send_transaction_service-retry", 1);
|
||||||
Self::send_transaction(
|
Self::send_transaction(
|
||||||
send_socket,
|
&send_socket,
|
||||||
tpu_address,
|
&tpu_address,
|
||||||
&transaction_info.wire_transaction,
|
&transaction_info.wire_transaction,
|
||||||
);
|
);
|
||||||
true
|
true
|
||||||
@@ -185,14 +179,12 @@ impl SendTransactionService {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
|
||||||
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
|
system_transaction,
|
||||||
system_transaction,
|
|
||||||
},
|
|
||||||
std::sync::mpsc::channel,
|
|
||||||
};
|
};
|
||||||
|
use std::sync::mpsc::channel;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn service_exit() {
|
fn service_exit() {
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-bench-exchange"
|
name = "solana-bench-exchange"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -18,23 +18,21 @@ rand = "0.7.0"
|
|||||||
rayon = "1.5.0"
|
rayon = "1.5.0"
|
||||||
serde_json = "1.0.56"
|
serde_json = "1.0.56"
|
||||||
serde_yaml = "0.8.13"
|
serde_yaml = "0.8.13"
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-core = { path = "../core", version = "=1.8.13" }
|
solana-core = { path = "../core", version = "=1.6.4" }
|
||||||
solana-genesis = { path = "../genesis", version = "=1.8.13" }
|
solana-genesis = { path = "../genesis", version = "=1.6.4" }
|
||||||
solana-client = { path = "../client", version = "=1.8.13" }
|
solana-client = { path = "../client", version = "=1.6.4" }
|
||||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.13" }
|
solana-faucet = { path = "../faucet", version = "=1.6.4" }
|
||||||
solana-faucet = { path = "../faucet", version = "=1.8.13" }
|
solana-exchange-program = { path = "../programs/exchange", version = "=1.6.4" }
|
||||||
solana-gossip = { path = "../gossip", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-metrics = { path = "../metrics", version = "=1.6.4" }
|
||||||
solana-metrics = { path = "../metrics", version = "=1.8.13" }
|
solana-net-utils = { path = "../net-utils", version = "=1.6.4" }
|
||||||
solana-net-utils = { path = "../net-utils", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
solana-streamer = { path = "../streamer", version = "=1.8.13" }
|
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.13" }
|
solana-local-cluster = { path = "../local-cluster", version = "=1.6.4" }
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
@@ -1,45 +1,43 @@
|
|||||||
#![allow(clippy::useless_attribute)]
|
#![allow(clippy::useless_attribute)]
|
||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
|
|
||||||
use {
|
use crate::order_book::*;
|
||||||
crate::order_book::*,
|
use itertools::izip;
|
||||||
itertools::izip,
|
use log::*;
|
||||||
log::*,
|
use rand::{thread_rng, Rng};
|
||||||
rand::{thread_rng, Rng},
|
use rayon::prelude::*;
|
||||||
rayon::prelude::*,
|
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||||
solana_client::perf_utils::{sample_txs, SampleStats},
|
use solana_core::gen_keys::GenKeys;
|
||||||
solana_core::gen_keys::GenKeys,
|
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
|
||||||
solana_exchange_program::{exchange_instruction, exchange_state::*, id},
|
use solana_faucet::faucet::request_airdrop_transaction;
|
||||||
solana_faucet::faucet::request_airdrop_transaction,
|
use solana_genesis::Base64Account;
|
||||||
solana_genesis::Base64Account,
|
use solana_metrics::datapoint_info;
|
||||||
solana_metrics::datapoint_info,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
client::{Client, SyncClient},
|
||||||
client::{Client, SyncClient},
|
commitment_config::CommitmentConfig,
|
||||||
commitment_config::CommitmentConfig,
|
message::Message,
|
||||||
message::Message,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
signature::{Keypair, Signer},
|
||||||
signature::{Keypair, Signer},
|
timing::{duration_as_ms, duration_as_s},
|
||||||
system_instruction, system_program,
|
transaction::Transaction,
|
||||||
timing::{duration_as_ms, duration_as_s},
|
{system_instruction, system_program},
|
||||||
transaction::Transaction,
|
};
|
||||||
},
|
use std::{
|
||||||
std::{
|
cmp,
|
||||||
cmp,
|
collections::{HashMap, VecDeque},
|
||||||
collections::{HashMap, VecDeque},
|
fs::File,
|
||||||
fs::File,
|
io::prelude::*,
|
||||||
io::prelude::*,
|
mem,
|
||||||
mem,
|
net::SocketAddr,
|
||||||
net::SocketAddr,
|
path::Path,
|
||||||
path::Path,
|
process::exit,
|
||||||
process::exit,
|
sync::{
|
||||||
sync::{
|
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
mpsc::{channel, Receiver, Sender},
|
||||||
mpsc::{channel, Receiver, Sender},
|
Arc, RwLock,
|
||||||
Arc, RwLock,
|
|
||||||
},
|
|
||||||
thread::{sleep, Builder},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
},
|
},
|
||||||
|
thread::{sleep, Builder},
|
||||||
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
|
// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
|
||||||
@@ -453,13 +451,13 @@ fn swapper<T>(
|
|||||||
let to_swap_txs: Vec<_> = to_swap
|
let to_swap_txs: Vec<_> = to_swap
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|(signer, swap, profit)| {
|
.map(|(signer, swap, profit)| {
|
||||||
let s: &Keypair = signer;
|
let s: &Keypair = &signer;
|
||||||
let owner = &signer.pubkey();
|
let owner = &signer.pubkey();
|
||||||
let instruction = exchange_instruction::swap_request(
|
let instruction = exchange_instruction::swap_request(
|
||||||
owner,
|
owner,
|
||||||
&swap.0.pubkey,
|
&swap.0.pubkey,
|
||||||
&swap.1.pubkey,
|
&swap.1.pubkey,
|
||||||
profit,
|
&profit,
|
||||||
);
|
);
|
||||||
let message = Message::new(&[instruction], Some(&s.pubkey()));
|
let message = Message::new(&[instruction], Some(&s.pubkey()));
|
||||||
Transaction::new(&[s], message, blockhash)
|
Transaction::new(&[s], message, blockhash)
|
||||||
@@ -602,7 +600,7 @@ fn trader<T>(
|
|||||||
src,
|
src,
|
||||||
),
|
),
|
||||||
];
|
];
|
||||||
let message = Message::new(&instructions, Some(owner_pubkey));
|
let message = Message::new(&instructions, Some(&owner_pubkey));
|
||||||
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
|
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
@@ -741,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
|
|||||||
let mut to_fund_txs: Vec<_> = chunk
|
let mut to_fund_txs: Vec<_> = chunk
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|(k, m)| {
|
.map(|(k, m)| {
|
||||||
let instructions = system_instruction::transfer_many(&k.pubkey(), m);
|
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
|
||||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||||
(k.clone(), Transaction::new_unsigned(message))
|
(k.clone(), Transaction::new_unsigned(message))
|
||||||
})
|
})
|
||||||
@@ -779,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
|
|||||||
let mut waits = 0;
|
let mut waits = 0;
|
||||||
loop {
|
loop {
|
||||||
sleep(Duration::from_millis(200));
|
sleep(Duration::from_millis(200));
|
||||||
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
|
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
|
||||||
if to_fund_txs.is_empty() {
|
if to_fund_txs.is_empty() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -838,7 +836,7 @@ pub fn create_token_accounts<T: Client>(
|
|||||||
);
|
);
|
||||||
let request_ix =
|
let request_ix =
|
||||||
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
|
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
|
||||||
let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
|
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
|
||||||
(
|
(
|
||||||
(from_keypair, new_keypair),
|
(from_keypair, new_keypair),
|
||||||
Transaction::new_unsigned(message),
|
Transaction::new_unsigned(message),
|
||||||
@@ -874,7 +872,7 @@ pub fn create_token_accounts<T: Client>(
|
|||||||
let mut waits = 0;
|
let mut waits = 0;
|
||||||
while !to_create_txs.is_empty() {
|
while !to_create_txs.is_empty() {
|
||||||
sleep(Duration::from_millis(200));
|
sleep(Duration::from_millis(200));
|
||||||
to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
|
to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
|
||||||
if to_create_txs.is_empty() {
|
if to_create_txs.is_empty() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -960,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot
|
|||||||
|
|
||||||
fn generate_keypairs(num: u64) -> Vec<Keypair> {
|
fn generate_keypairs(num: u64) -> Vec<Keypair> {
|
||||||
let mut seed = [0_u8; 32];
|
let mut seed = [0_u8; 32];
|
||||||
seed.copy_from_slice(Keypair::new().pubkey().as_ref());
|
seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
|
||||||
let mut rnd = GenKeys::new(seed);
|
let mut rnd = GenKeys::new(seed);
|
||||||
rnd.gen_n_keypairs(num)
|
rnd.gen_n_keypairs(num)
|
||||||
}
|
}
|
||||||
@@ -991,7 +989,7 @@ pub fn airdrop_lamports<T: Client>(
|
|||||||
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
let (blockhash, _fee_calculator, _last_valid_slot) = client
|
||||||
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
|
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
|
||||||
.expect("Failed to get blockhash");
|
.expect("Failed to get blockhash");
|
||||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
|
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
|
||||||
Ok(transaction) => {
|
Ok(transaction) => {
|
||||||
let signature = client.async_send_transaction(transaction).unwrap();
|
let signature = client.async_send_transaction(transaction).unwrap();
|
||||||
|
|
||||||
|
@@ -1,10 +1,10 @@
|
|||||||
use {
|
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
|
||||||
clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches},
|
use solana_core::gen_keys::GenKeys;
|
||||||
solana_core::gen_keys::GenKeys,
|
use solana_faucet::faucet::FAUCET_PORT;
|
||||||
solana_faucet::faucet::FAUCET_PORT,
|
use solana_sdk::signature::{read_keypair_file, Keypair};
|
||||||
solana_sdk::signature::{read_keypair_file, Keypair},
|
use std::net::SocketAddr;
|
||||||
std::{net::SocketAddr, process::exit, time::Duration},
|
use std::process::exit;
|
||||||
};
|
use std::time::Duration;
|
||||||
|
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
pub entrypoint_addr: SocketAddr,
|
pub entrypoint_addr: SocketAddr,
|
||||||
|
@@ -3,13 +3,10 @@ pub mod bench;
|
|||||||
mod cli;
|
mod cli;
|
||||||
pub mod order_book;
|
pub mod order_book;
|
||||||
|
|
||||||
use {
|
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
|
||||||
crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config},
|
use log::*;
|
||||||
log::*,
|
use solana_core::gossip_service::{discover_cluster, get_multi_client};
|
||||||
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
|
use solana_sdk::signature::Signer;
|
||||||
solana_sdk::signature::Signer,
|
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
|
||||||
};
|
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
@@ -58,12 +55,11 @@ fn main() {
|
|||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
info!("Connecting to the cluster");
|
info!("Connecting to the cluster");
|
||||||
let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
|
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
|
||||||
.unwrap_or_else(|_| {
|
panic!("Failed to discover nodes");
|
||||||
panic!("Failed to discover nodes");
|
});
|
||||||
});
|
|
||||||
|
|
||||||
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
|
let (client, num_clients) = get_multi_client(&nodes);
|
||||||
|
|
||||||
info!("{} nodes found", num_clients);
|
info!("{} nodes found", num_clients);
|
||||||
if num_clients < num_nodes {
|
if num_clients < num_nodes {
|
||||||
|
@@ -1,13 +1,11 @@
|
|||||||
use {
|
use itertools::EitherOrBoth::{Both, Left, Right};
|
||||||
itertools::{
|
use itertools::Itertools;
|
||||||
EitherOrBoth::{Both, Left, Right},
|
use log::*;
|
||||||
Itertools,
|
use solana_exchange_program::exchange_state::*;
|
||||||
},
|
use solana_sdk::pubkey::Pubkey;
|
||||||
log::*,
|
use std::cmp::Ordering;
|
||||||
solana_exchange_program::exchange_state::*,
|
use std::collections::BinaryHeap;
|
||||||
solana_sdk::pubkey::Pubkey,
|
use std::{error, fmt};
|
||||||
std::{cmp::Ordering, collections::BinaryHeap, error, fmt},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||||
pub struct ToOrder {
|
pub struct ToOrder {
|
||||||
|
@@ -1,24 +1,23 @@
|
|||||||
use {
|
use log::*;
|
||||||
log::*,
|
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
|
||||||
solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config},
|
use solana_core::{
|
||||||
solana_core::validator::ValidatorConfig,
|
gossip_service::{discover_cluster, get_multi_client},
|
||||||
solana_exchange_program::{
|
validator::ValidatorConfig,
|
||||||
exchange_processor::process_instruction, id, solana_exchange_program,
|
|
||||||
},
|
|
||||||
solana_faucet::faucet::run_local_faucet_with_port,
|
|
||||||
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
|
|
||||||
solana_local_cluster::{
|
|
||||||
local_cluster::{ClusterConfig, LocalCluster},
|
|
||||||
validator_configs::make_identical_validator_configs,
|
|
||||||
},
|
|
||||||
solana_runtime::{bank::Bank, bank_client::BankClient},
|
|
||||||
solana_sdk::{
|
|
||||||
genesis_config::create_genesis_config,
|
|
||||||
signature::{Keypair, Signer},
|
|
||||||
},
|
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
|
||||||
std::{process::exit, sync::mpsc::channel, time::Duration},
|
|
||||||
};
|
};
|
||||||
|
use solana_exchange_program::{
|
||||||
|
exchange_processor::process_instruction, id, solana_exchange_program,
|
||||||
|
};
|
||||||
|
use solana_faucet::faucet::run_local_faucet_with_port;
|
||||||
|
use solana_local_cluster::{
|
||||||
|
local_cluster::{ClusterConfig, LocalCluster},
|
||||||
|
validator_configs::make_identical_validator_configs,
|
||||||
|
};
|
||||||
|
use solana_runtime::{bank::Bank, bank_client::BankClient};
|
||||||
|
use solana_sdk::{
|
||||||
|
genesis_config::create_genesis_config,
|
||||||
|
signature::{Keypair, Signer},
|
||||||
|
};
|
||||||
|
use std::{process::exit, sync::mpsc::channel, time::Duration};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore]
|
#[ignore]
|
||||||
@@ -46,19 +45,13 @@ fn test_exchange_local_cluster() {
|
|||||||
} = config;
|
} = config;
|
||||||
let accounts_in_groups = batch_size * account_groups;
|
let accounts_in_groups = batch_size * account_groups;
|
||||||
|
|
||||||
let cluster = LocalCluster::new(
|
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||||
&mut ClusterConfig {
|
node_stakes: vec![100_000; NUM_NODES],
|
||||||
node_stakes: vec![100_000; NUM_NODES],
|
cluster_lamports: 100_000_000_000_000,
|
||||||
cluster_lamports: 100_000_000_000_000,
|
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
|
||||||
validator_configs: make_identical_validator_configs(
|
native_instruction_processors: [solana_exchange_program!()].to_vec(),
|
||||||
&ValidatorConfig::default(),
|
..ClusterConfig::default()
|
||||||
NUM_NODES,
|
});
|
||||||
),
|
|
||||||
native_instruction_processors: [solana_exchange_program!()].to_vec(),
|
|
||||||
..ClusterConfig::default()
|
|
||||||
},
|
|
||||||
SocketAddrSpace::Unspecified,
|
|
||||||
);
|
|
||||||
|
|
||||||
let faucet_keypair = Keypair::new();
|
let faucet_keypair = Keypair::new();
|
||||||
cluster.transfer(
|
cluster.transfer(
|
||||||
@@ -75,17 +68,13 @@ fn test_exchange_local_cluster() {
|
|||||||
.expect("faucet_addr");
|
.expect("faucet_addr");
|
||||||
|
|
||||||
info!("Connecting to the cluster");
|
info!("Connecting to the cluster");
|
||||||
let nodes = discover_cluster(
|
let nodes =
|
||||||
&cluster.entry_point_info.gossip,
|
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
|
||||||
NUM_NODES,
|
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
|
||||||
SocketAddrSpace::Unspecified,
|
exit(1);
|
||||||
)
|
});
|
||||||
.unwrap_or_else(|err| {
|
|
||||||
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
|
|
||||||
exit(1);
|
|
||||||
});
|
|
||||||
|
|
||||||
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
|
let (client, num_clients) = get_multi_client(&nodes);
|
||||||
|
|
||||||
info!("clients: {}", num_clients);
|
info!("clients: {}", num_clients);
|
||||||
assert!(num_clients >= NUM_NODES);
|
assert!(num_clients >= NUM_NODES);
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-bench-streamer"
|
name = "solana-bench-streamer"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -10,11 +10,11 @@ publish = false
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
clap = "2.33.1"
|
clap = "2.33.1"
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-streamer = { path = "../streamer", version = "=1.8.13" }
|
solana-streamer = { path = "../streamer", version = "=1.6.4" }
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-net-utils = { path = "../net-utils", version = "=1.8.13" }
|
solana-net-utils = { path = "../net-utils", version = "=1.6.4" }
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
@@ -1,38 +1,32 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
use {
|
use clap::{crate_description, crate_name, App, Arg};
|
||||||
clap::{crate_description, crate_name, App, Arg},
|
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
|
||||||
solana_streamer::{
|
use solana_streamer::streamer::{receiver, PacketReceiver};
|
||||||
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
|
use std::cmp::max;
|
||||||
streamer::{receiver, PacketBatchReceiver},
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||||
},
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||||
std::{
|
use std::sync::mpsc::channel;
|
||||||
cmp::max,
|
use std::sync::Arc;
|
||||||
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
|
use std::thread::sleep;
|
||||||
sync::{
|
use std::thread::{spawn, JoinHandle, Result};
|
||||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
use std::time::Duration;
|
||||||
mpsc::channel,
|
use std::time::SystemTime;
|
||||||
Arc,
|
|
||||||
},
|
|
||||||
thread::{sleep, spawn, JoinHandle, Result},
|
|
||||||
time::{Duration, SystemTime},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
||||||
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
let mut packet_batch = PacketBatch::default();
|
let mut msgs = Packets::default();
|
||||||
packet_batch.packets.resize(10, Packet::default());
|
msgs.packets.resize(10, Packet::default());
|
||||||
for w in packet_batch.packets.iter_mut() {
|
for w in msgs.packets.iter_mut() {
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(addr);
|
w.meta.set_addr(&addr);
|
||||||
}
|
}
|
||||||
let packet_batch = Arc::new(packet_batch);
|
let msgs = Arc::new(msgs);
|
||||||
spawn(move || loop {
|
spawn(move || loop {
|
||||||
if exit.load(Ordering::Relaxed) {
|
if exit.load(Ordering::Relaxed) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let mut num = 0;
|
let mut num = 0;
|
||||||
for p in &packet_batch.packets {
|
for p in &msgs.packets {
|
||||||
let a = p.meta.addr();
|
let a = p.meta.addr();
|
||||||
assert!(p.meta.size <= PACKET_DATA_SIZE);
|
assert!(p.meta.size <= PACKET_DATA_SIZE);
|
||||||
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
||||||
@@ -42,14 +36,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
|
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
|
||||||
spawn(move || loop {
|
spawn(move || loop {
|
||||||
if exit.load(Ordering::Relaxed) {
|
if exit.load(Ordering::Relaxed) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
if let Ok(packet_batch) = r.recv_timeout(timer) {
|
if let Ok(msgs) = r.recv_timeout(timer) {
|
||||||
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
|
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -81,7 +75,7 @@ fn main() -> Result<()> {
|
|||||||
|
|
||||||
let mut read_channels = Vec::new();
|
let mut read_channels = Vec::new();
|
||||||
let mut read_threads = Vec::new();
|
let mut read_threads = Vec::new();
|
||||||
let recycler = PacketBatchRecycler::default();
|
let recycler = PacketsRecycler::new_without_limit("bench-streamer-recycler-shrink-stats");
|
||||||
for _ in 0..num_sockets {
|
for _ in 0..num_sockets {
|
||||||
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
|
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
|
||||||
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
||||||
@@ -98,7 +92,6 @@ fn main() -> Result<()> {
|
|||||||
recycler.clone(),
|
recycler.clone(),
|
||||||
"bench-streamer-test",
|
"bench-streamer-test",
|
||||||
1,
|
1,
|
||||||
true,
|
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-bench-tps"
|
name = "solana-bench-tps"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -15,24 +15,22 @@ log = "0.4.11"
|
|||||||
rayon = "1.5.0"
|
rayon = "1.5.0"
|
||||||
serde_json = "1.0.56"
|
serde_json = "1.0.56"
|
||||||
serde_yaml = "0.8.13"
|
serde_yaml = "0.8.13"
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-core = { path = "../core", version = "=1.8.13" }
|
solana-core = { path = "../core", version = "=1.6.4" }
|
||||||
solana-genesis = { path = "../genesis", version = "=1.8.13" }
|
solana-genesis = { path = "../genesis", version = "=1.6.4" }
|
||||||
solana-client = { path = "../client", version = "=1.8.13" }
|
solana-client = { path = "../client", version = "=1.6.4" }
|
||||||
solana-faucet = { path = "../faucet", version = "=1.8.13" }
|
solana-faucet = { path = "../faucet", version = "=1.6.4" }
|
||||||
solana-gossip = { path = "../gossip", version = "=1.8.13" }
|
solana-logger = { path = "../logger", version = "=1.6.4" }
|
||||||
solana-logger = { path = "../logger", version = "=1.8.13" }
|
solana-metrics = { path = "../metrics", version = "=1.6.4" }
|
||||||
solana-metrics = { path = "../metrics", version = "=1.8.13" }
|
solana-measure = { path = "../measure", version = "=1.6.4" }
|
||||||
solana-measure = { path = "../measure", version = "=1.8.13" }
|
solana-net-utils = { path = "../net-utils", version = "=1.6.4" }
|
||||||
solana-net-utils = { path = "../net-utils", version = "=1.8.13" }
|
solana-runtime = { path = "../runtime", version = "=1.6.4" }
|
||||||
solana-runtime = { path = "../runtime", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-version = { path = "../version", version = "=1.6.4" }
|
||||||
solana-streamer = { path = "../streamer", version = "=1.8.13" }
|
|
||||||
solana-version = { path = "../version", version = "=1.8.13" }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
serial_test = "0.4.0"
|
serial_test = "0.4.0"
|
||||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.13" }
|
solana-local-cluster = { path = "../local-cluster", version = "=1.6.4" }
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
@@ -1,36 +1,34 @@
|
|||||||
use {
|
use crate::cli::Config;
|
||||||
crate::cli::Config,
|
use log::*;
|
||||||
log::*,
|
use rayon::prelude::*;
|
||||||
rayon::prelude::*,
|
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||||
solana_client::perf_utils::{sample_txs, SampleStats},
|
use solana_core::gen_keys::GenKeys;
|
||||||
solana_core::gen_keys::GenKeys,
|
use solana_faucet::faucet::request_airdrop_transaction;
|
||||||
solana_faucet::faucet::request_airdrop_transaction,
|
use solana_measure::measure::Measure;
|
||||||
solana_measure::measure::Measure,
|
use solana_metrics::{self, datapoint_info};
|
||||||
solana_metrics::{self, datapoint_info},
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
client::Client,
|
||||||
client::Client,
|
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
|
||||||
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
|
commitment_config::CommitmentConfig,
|
||||||
commitment_config::CommitmentConfig,
|
fee_calculator::FeeCalculator,
|
||||||
fee_calculator::FeeCalculator,
|
hash::Hash,
|
||||||
hash::Hash,
|
message::Message,
|
||||||
message::Message,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
signature::{Keypair, Signer},
|
||||||
signature::{Keypair, Signer},
|
system_instruction, system_transaction,
|
||||||
system_instruction, system_transaction,
|
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
|
||||||
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
|
transaction::Transaction,
|
||||||
transaction::Transaction,
|
};
|
||||||
},
|
use std::{
|
||||||
std::{
|
collections::{HashSet, VecDeque},
|
||||||
collections::{HashSet, VecDeque},
|
net::SocketAddr,
|
||||||
net::SocketAddr,
|
process::exit,
|
||||||
process::exit,
|
sync::{
|
||||||
sync::{
|
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
|
||||||
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
|
Arc, Mutex, RwLock,
|
||||||
Arc, Mutex, RwLock,
|
|
||||||
},
|
|
||||||
thread::{sleep, Builder, JoinHandle},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
},
|
},
|
||||||
|
thread::{sleep, Builder, JoinHandle},
|
||||||
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
// The point at which transactions become "too old", in seconds.
|
// The point at which transactions become "too old", in seconds.
|
||||||
@@ -546,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
|||||||
|
|
||||||
// re-sign retained to_fund_txes with updated blockhash
|
// re-sign retained to_fund_txes with updated blockhash
|
||||||
self.sign(blockhash);
|
self.sign(blockhash);
|
||||||
self.send(client);
|
self.send(&client);
|
||||||
|
|
||||||
// Sleep a few slots to allow transactions to process
|
// Sleep a few slots to allow transactions to process
|
||||||
sleep(Duration::from_secs(1));
|
sleep(Duration::from_secs(1));
|
||||||
|
|
||||||
self.verify(client, to_lamports);
|
self.verify(&client, to_lamports);
|
||||||
|
|
||||||
// retry anything that seems to have dropped through cracks
|
// retry anything that seems to have dropped through cracks
|
||||||
// again since these txs are all or nothing, they're fine to
|
// again since these txs are all or nothing, they're fine to
|
||||||
@@ -566,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
|||||||
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
|
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|(k, t)| {
|
.map(|(k, t)| {
|
||||||
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
|
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
|
||||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||||
(*k, Transaction::new_unsigned(message))
|
(*k, Transaction::new_unsigned(message))
|
||||||
})
|
})
|
||||||
@@ -619,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
|
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
|
||||||
verified_txs.fetch_add(1, Ordering::Relaxed);
|
verified_txs.fetch_add(1, Ordering::Relaxed);
|
||||||
Some(k.pubkey())
|
Some(k.pubkey())
|
||||||
} else {
|
} else {
|
||||||
@@ -735,7 +733,7 @@ pub fn airdrop_lamports<T: Client>(
|
|||||||
);
|
);
|
||||||
|
|
||||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
|
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||||
Ok(transaction) => {
|
Ok(transaction) => {
|
||||||
let mut tries = 0;
|
let mut tries = 0;
|
||||||
loop {
|
loop {
|
||||||
@@ -926,14 +924,12 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use solana_runtime::bank::Bank;
|
||||||
solana_runtime::{bank::Bank, bank_client::BankClient},
|
use solana_runtime::bank_client::BankClient;
|
||||||
solana_sdk::{
|
use solana_sdk::client::SyncClient;
|
||||||
client::SyncClient, fee_calculator::FeeRateGovernor,
|
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||||
genesis_config::create_genesis_config,
|
use solana_sdk::genesis_config::create_genesis_config;
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_bench_tps_bank_client() {
|
fn test_bench_tps_bank_client() {
|
||||||
|
@@ -1,13 +1,11 @@
|
|||||||
use {
|
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
|
||||||
clap::{crate_description, crate_name, App, Arg, ArgMatches},
|
use solana_faucet::faucet::FAUCET_PORT;
|
||||||
solana_faucet::faucet::FAUCET_PORT,
|
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||||
solana_sdk::{
|
use solana_sdk::{
|
||||||
fee_calculator::FeeRateGovernor,
|
pubkey::Pubkey,
|
||||||
pubkey::Pubkey,
|
signature::{read_keypair_file, Keypair},
|
||||||
signature::{read_keypair_file, Keypair},
|
|
||||||
},
|
|
||||||
std::{net::SocketAddr, process::exit, time::Duration},
|
|
||||||
};
|
};
|
||||||
|
use std::{net::SocketAddr, process::exit, time::Duration};
|
||||||
|
|
||||||
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
|
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
|
||||||
|
|
||||||
|
@@ -1,20 +1,13 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
use {
|
use log::*;
|
||||||
log::*,
|
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
|
||||||
solana_bench_tps::{
|
use solana_bench_tps::cli;
|
||||||
bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
|
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
|
||||||
cli,
|
use solana_genesis::Base64Account;
|
||||||
},
|
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||||
solana_genesis::Base64Account,
|
use solana_sdk::signature::{Keypair, Signer};
|
||||||
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
|
use solana_sdk::system_program;
|
||||||
solana_sdk::{
|
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
|
||||||
fee_calculator::FeeRateGovernor,
|
|
||||||
signature::{Keypair, Signer},
|
|
||||||
system_program,
|
|
||||||
},
|
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
|
||||||
std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Number of signatures for all transactions in ~1 week at ~100K TPS
|
/// Number of signatures for all transactions in ~1 week at ~100K TPS
|
||||||
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
||||||
@@ -46,7 +39,7 @@ fn main() {
|
|||||||
let keypair_count = *tx_count * keypair_multiplier;
|
let keypair_count = *tx_count * keypair_multiplier;
|
||||||
if *write_to_client_file {
|
if *write_to_client_file {
|
||||||
info!("Generating {} keypairs", keypair_count);
|
info!("Generating {} keypairs", keypair_count);
|
||||||
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
|
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
|
||||||
let num_accounts = keypairs.len() as u64;
|
let num_accounts = keypairs.len() as u64;
|
||||||
let max_fee =
|
let max_fee =
|
||||||
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
|
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
|
||||||
@@ -75,14 +68,13 @@ fn main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!("Connecting to the cluster");
|
info!("Connecting to the cluster");
|
||||||
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
|
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
|
||||||
.unwrap_or_else(|err| {
|
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
exit(1);
|
||||||
exit(1);
|
});
|
||||||
});
|
|
||||||
|
|
||||||
let client = if *multi_client {
|
let client = if *multi_client {
|
||||||
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
|
let (client, num_clients) = get_multi_client(&nodes);
|
||||||
if nodes.len() < num_clients {
|
if nodes.len() < num_clients {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"Error: Insufficient nodes discovered. Expecting {} or more",
|
"Error: Insufficient nodes discovered. Expecting {} or more",
|
||||||
@@ -96,7 +88,7 @@ fn main() {
|
|||||||
let mut target_client = None;
|
let mut target_client = None;
|
||||||
for node in nodes {
|
for node in nodes {
|
||||||
if node.id == *target_node {
|
if node.id == *target_node {
|
||||||
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
|
target_client = Some(Arc::new(get_client(&[node])));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -105,7 +97,7 @@ fn main() {
|
|||||||
exit(1);
|
exit(1);
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
|
Arc::new(get_client(&nodes))
|
||||||
};
|
};
|
||||||
|
|
||||||
let keypairs = if *read_from_client_file {
|
let keypairs = if *read_from_client_file {
|
||||||
@@ -143,7 +135,7 @@ fn main() {
|
|||||||
generate_and_fund_keypairs(
|
generate_and_fund_keypairs(
|
||||||
client.clone(),
|
client.clone(),
|
||||||
Some(*faucet_addr),
|
Some(*faucet_addr),
|
||||||
id,
|
&id,
|
||||||
keypair_count,
|
keypair_count,
|
||||||
*num_lamports_per_account,
|
*num_lamports_per_account,
|
||||||
)
|
)
|
||||||
|
@@ -1,24 +1,20 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::integer_arithmetic)]
|
||||||
use {
|
use serial_test::serial;
|
||||||
serial_test::serial,
|
use solana_bench_tps::{
|
||||||
solana_bench_tps::{
|
bench::{do_bench_tps, generate_and_fund_keypairs},
|
||||||
bench::{do_bench_tps, generate_and_fund_keypairs},
|
cli::Config,
|
||||||
cli::Config,
|
};
|
||||||
},
|
use solana_client::thin_client::create_client;
|
||||||
solana_client::thin_client::create_client,
|
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig};
|
||||||
solana_core::validator::ValidatorConfig,
|
use solana_faucet::faucet::run_local_faucet_with_port;
|
||||||
solana_faucet::faucet::run_local_faucet_with_port,
|
use solana_local_cluster::{
|
||||||
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
|
local_cluster::{ClusterConfig, LocalCluster},
|
||||||
solana_local_cluster::{
|
validator_configs::make_identical_validator_configs,
|
||||||
local_cluster::{ClusterConfig, LocalCluster},
|
};
|
||||||
validator_configs::make_identical_validator_configs,
|
use solana_sdk::signature::{Keypair, Signer};
|
||||||
},
|
use std::{
|
||||||
solana_sdk::signature::{Keypair, Signer},
|
sync::{mpsc::channel, Arc},
|
||||||
solana_streamer::socket::SocketAddrSpace,
|
time::Duration,
|
||||||
std::{
|
|
||||||
sync::{mpsc::channel, Arc},
|
|
||||||
time::Duration,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
fn test_bench_tps_local_cluster(config: Config) {
|
fn test_bench_tps_local_cluster(config: Config) {
|
||||||
@@ -26,19 +22,13 @@ fn test_bench_tps_local_cluster(config: Config) {
|
|||||||
|
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
const NUM_NODES: usize = 1;
|
const NUM_NODES: usize = 1;
|
||||||
let cluster = LocalCluster::new(
|
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||||
&mut ClusterConfig {
|
node_stakes: vec![999_990; NUM_NODES],
|
||||||
node_stakes: vec![999_990; NUM_NODES],
|
cluster_lamports: 200_000_000,
|
||||||
cluster_lamports: 200_000_000,
|
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
|
||||||
validator_configs: make_identical_validator_configs(
|
native_instruction_processors,
|
||||||
&ValidatorConfig::default(),
|
..ClusterConfig::default()
|
||||||
NUM_NODES,
|
});
|
||||||
),
|
|
||||||
native_instruction_processors,
|
|
||||||
..ClusterConfig::default()
|
|
||||||
},
|
|
||||||
SocketAddrSpace::Unspecified,
|
|
||||||
);
|
|
||||||
|
|
||||||
let faucet_keypair = Keypair::new();
|
let faucet_keypair = Keypair::new();
|
||||||
cluster.transfer(
|
cluster.transfer(
|
||||||
|
9
cargo
9
cargo
@@ -3,22 +3,25 @@
|
|||||||
# shellcheck source=ci/rust-version.sh
|
# shellcheck source=ci/rust-version.sh
|
||||||
here=$(dirname "$0")
|
here=$(dirname "$0")
|
||||||
|
|
||||||
|
source "${here}"/ci/rust-version.sh all
|
||||||
|
|
||||||
toolchain=
|
toolchain=
|
||||||
case "$1" in
|
case "$1" in
|
||||||
stable)
|
stable)
|
||||||
source "${here}"/ci/rust-version.sh stable
|
|
||||||
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
||||||
toolchain="$rust_stable"
|
toolchain="$rust_stable"
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
nightly)
|
nightly)
|
||||||
source "${here}"/ci/rust-version.sh nightly
|
|
||||||
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
|
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
|
||||||
toolchain="$rust_nightly"
|
toolchain="$rust_nightly"
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
+*)
|
||||||
|
toolchain="${1#+}"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
*)
|
*)
|
||||||
source "${here}"/ci/rust-version.sh stable
|
|
||||||
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
||||||
toolchain="$rust_stable"
|
toolchain="$rust_stable"
|
||||||
;;
|
;;
|
||||||
|
@@ -137,7 +137,7 @@ all_test_steps() {
|
|||||||
^ci/test-coverage.sh \
|
^ci/test-coverage.sh \
|
||||||
^scripts/coverage.sh \
|
^scripts/coverage.sh \
|
||||||
; then
|
; then
|
||||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
|
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
|
||||||
wait_step
|
wait_step
|
||||||
else
|
else
|
||||||
annotate --style info --context test-coverage \
|
annotate --style info --context test-coverage \
|
||||||
@@ -148,33 +148,6 @@ all_test_steps() {
|
|||||||
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
||||||
wait_step
|
wait_step
|
||||||
|
|
||||||
# BPF test suite
|
|
||||||
if affects \
|
|
||||||
.rs$ \
|
|
||||||
Cargo.lock$ \
|
|
||||||
Cargo.toml$ \
|
|
||||||
^ci/rust-version.sh \
|
|
||||||
^ci/test-stable-bpf.sh \
|
|
||||||
^ci/test-stable.sh \
|
|
||||||
^ci/test-local-cluster.sh \
|
|
||||||
^core/build.rs \
|
|
||||||
^fetch-perf-libs.sh \
|
|
||||||
^programs/ \
|
|
||||||
^sdk/ \
|
|
||||||
; then
|
|
||||||
cat >> "$output_file" <<"EOF"
|
|
||||||
- command: "ci/test-stable-bpf.sh"
|
|
||||||
name: "stable-bpf"
|
|
||||||
timeout_in_minutes: 20
|
|
||||||
artifact_paths: "bpf-dumps.tar.bz2"
|
|
||||||
agents:
|
|
||||||
- "queue=default"
|
|
||||||
EOF
|
|
||||||
else
|
|
||||||
annotate --style info \
|
|
||||||
"Stable-BPF skipped as no relevant files were modified"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Perf test suite
|
# Perf test suite
|
||||||
if affects \
|
if affects \
|
||||||
.rs$ \
|
.rs$ \
|
||||||
@@ -192,7 +165,7 @@ EOF
|
|||||||
cat >> "$output_file" <<"EOF"
|
cat >> "$output_file" <<"EOF"
|
||||||
- command: "ci/test-stable-perf.sh"
|
- command: "ci/test-stable-perf.sh"
|
||||||
name: "stable-perf"
|
name: "stable-perf"
|
||||||
timeout_in_minutes: 20
|
timeout_in_minutes: 40
|
||||||
artifact_paths: "log-*.txt"
|
artifact_paths: "log-*.txt"
|
||||||
agents:
|
agents:
|
||||||
- "queue=cuda"
|
- "queue=cuda"
|
||||||
@@ -243,15 +216,7 @@ EOF
|
|||||||
|
|
||||||
command_step "local-cluster" \
|
command_step "local-cluster" \
|
||||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||||
40
|
45
|
||||||
|
|
||||||
command_step "local-cluster-flakey" \
|
|
||||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
|
|
||||||
10
|
|
||||||
|
|
||||||
command_step "local-cluster-slow" \
|
|
||||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
|
|
||||||
30
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pull_or_push_steps() {
|
pull_or_push_steps() {
|
||||||
|
@@ -3,24 +3,16 @@
|
|||||||
# Pull requests to not run these steps.
|
# Pull requests to not run these steps.
|
||||||
steps:
|
steps:
|
||||||
- command: "ci/publish-tarball.sh"
|
- command: "ci/publish-tarball.sh"
|
||||||
agents:
|
|
||||||
- "queue=release-build"
|
|
||||||
timeout_in_minutes: 60
|
timeout_in_minutes: 60
|
||||||
name: "publish tarball"
|
name: "publish tarball"
|
||||||
|
- command: "ci/publish-bpf-sdk.sh"
|
||||||
|
timeout_in_minutes: 5
|
||||||
|
name: "publish bpf sdk"
|
||||||
- wait
|
- wait
|
||||||
- command: "sdk/docker-solana/build.sh"
|
- command: "sdk/docker-solana/build.sh"
|
||||||
agents:
|
|
||||||
- "queue=release-build"
|
|
||||||
timeout_in_minutes: 60
|
timeout_in_minutes: 60
|
||||||
name: "publish docker"
|
name: "publish docker"
|
||||||
- command: "ci/publish-crate.sh"
|
- command: "ci/publish-crate.sh"
|
||||||
agents:
|
|
||||||
- "queue=release-build"
|
|
||||||
timeout_in_minutes: 240
|
timeout_in_minutes: 240
|
||||||
name: "publish crate"
|
name: "publish crate"
|
||||||
branches: "!master"
|
branches: "!master"
|
||||||
- command: "ci/publish-tarball.sh"
|
|
||||||
agents:
|
|
||||||
- "queue=release-build-aarch64-apple-darwin"
|
|
||||||
timeout_in_minutes: 60
|
|
||||||
name: "publish tarball (aarch64-apple-darwin)"
|
|
||||||
|
@@ -7,6 +7,8 @@ src_root="$(readlink -f "${here}/..")"
|
|||||||
|
|
||||||
cd "${src_root}"
|
cd "${src_root}"
|
||||||
|
|
||||||
|
source ci/rust-version.sh stable
|
||||||
|
|
||||||
cargo_audit_ignores=(
|
cargo_audit_ignores=(
|
||||||
# failure is officially deprecated/unmaintained
|
# failure is officially deprecated/unmaintained
|
||||||
#
|
#
|
||||||
@@ -28,29 +30,16 @@ cargo_audit_ignores=(
|
|||||||
# Blocked on multiple crates updating `time` to >= 0.2.23
|
# Blocked on multiple crates updating `time` to >= 0.2.23
|
||||||
--ignore RUSTSEC-2020-0071
|
--ignore RUSTSEC-2020-0071
|
||||||
|
|
||||||
|
# difference is unmaintained
|
||||||
|
#
|
||||||
|
# Blocked on predicates v1.0.6 removing its dependency on `difference`
|
||||||
|
--ignore RUSTSEC-2020-0095
|
||||||
|
|
||||||
# generic-array: arr! macro erases lifetimes
|
# generic-array: arr! macro erases lifetimes
|
||||||
#
|
#
|
||||||
# Blocked on libsecp256k1 releasing with upgraded dependencies
|
# Blocked on libsecp256k1 releasing with upgraded dependencies
|
||||||
# https://github.com/paritytech/libsecp256k1/issues/66
|
# https://github.com/paritytech/libsecp256k1/issues/66
|
||||||
--ignore RUSTSEC-2020-0146
|
--ignore RUSTSEC-2020-0146
|
||||||
|
|
||||||
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
|
|
||||||
#
|
|
||||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
|
||||||
# https://github.com/paritytech/jsonrpc/issues/605
|
|
||||||
--ignore RUSTSEC-2021-0078
|
|
||||||
|
|
||||||
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
|
|
||||||
#
|
|
||||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
|
||||||
# https://github.com/paritytech/jsonrpc/issues/605
|
|
||||||
--ignore RUSTSEC-2021-0079
|
|
||||||
|
|
||||||
# chrono: Potential segfault in `localtime_r` invocations
|
|
||||||
#
|
|
||||||
# Blocked due to no safe upgrade
|
|
||||||
# https://github.com/chronotope/chrono/issues/499
|
|
||||||
--ignore RUSTSEC-2020-0159
|
|
||||||
|
|
||||||
)
|
)
|
||||||
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"
|
scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
FROM solanalabs/rust:1.52.1
|
FROM solanalabs/rust:1.50.0
|
||||||
ARG date
|
ARG date
|
||||||
|
|
||||||
RUN set -x \
|
RUN set -x \
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
# Note: when the rust version is changed also modify
|
# Note: when the rust version is changed also modify
|
||||||
# ci/rust-version.sh to pick up the new image tag
|
# ci/rust-version.sh to pick up the new image tag
|
||||||
FROM rust:1.52.1
|
FROM rust:1.50.0
|
||||||
|
|
||||||
# Add Google Protocol Buffers for Libra's metrics library.
|
# Add Google Protocol Buffers for Libra's metrics library.
|
||||||
ENV PROTOC_VERSION 3.8.0
|
ENV PROTOC_VERSION 3.8.0
|
||||||
|
21
ci/env.sh
21
ci/env.sh
@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
|
|||||||
elif [[ -n $BUILDKITE ]]; then
|
elif [[ -n $BUILDKITE ]]; then
|
||||||
export CI_BRANCH=$BUILDKITE_BRANCH
|
export CI_BRANCH=$BUILDKITE_BRANCH
|
||||||
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
||||||
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
|
|
||||||
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
|
|
||||||
fi
|
|
||||||
export CI_COMMIT=$BUILDKITE_COMMIT
|
export CI_COMMIT=$BUILDKITE_COMMIT
|
||||||
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
||||||
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
||||||
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
|
|||||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||||
export CI_PULL_REQUEST=
|
export CI_PULL_REQUEST=
|
||||||
fi
|
fi
|
||||||
|
export CI_OS_NAME=linux
|
||||||
case "$(uname -s)" in
|
|
||||||
Linux)
|
|
||||||
export CI_OS_NAME=linux
|
|
||||||
;;
|
|
||||||
Darwin)
|
|
||||||
export CI_OS_NAME=osx
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
||||||
# The solana-secondary pipeline should use the slug of the pipeline that
|
# The solana-secondary pipeline should use the slug of the pipeline that
|
||||||
# triggered it
|
# triggered it
|
||||||
@@ -88,13 +74,10 @@ else
|
|||||||
export CI_BUILD_ID=
|
export CI_BUILD_ID=
|
||||||
export CI_COMMIT=
|
export CI_COMMIT=
|
||||||
export CI_JOB_ID=
|
export CI_JOB_ID=
|
||||||
|
export CI_OS_NAME=
|
||||||
export CI_PULL_REQUEST=
|
export CI_PULL_REQUEST=
|
||||||
export CI_REPO_SLUG=
|
export CI_REPO_SLUG=
|
||||||
export CI_TAG=
|
export CI_TAG=
|
||||||
# Don't override ci/run-local.sh
|
|
||||||
if [[ -z $CI_LOCAL_RUN ]]; then
|
|
||||||
export CI_OS_NAME=
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
|
@@ -70,7 +70,7 @@ done
|
|||||||
|
|
||||||
source ci/upload-ci-artifact.sh
|
source ci/upload-ci-artifact.sh
|
||||||
source scripts/configure-metrics.sh
|
source scripts/configure-metrics.sh
|
||||||
source multinode-demo/common.sh --prebuild
|
source multinode-demo/common.sh
|
||||||
|
|
||||||
nodes=(
|
nodes=(
|
||||||
"multinode-demo/bootstrap-validator.sh \
|
"multinode-demo/bootstrap-validator.sh \
|
||||||
@@ -127,7 +127,7 @@ startNode() {
|
|||||||
waitForNodeToInit() {
|
waitForNodeToInit() {
|
||||||
declare initCompleteFile=$1
|
declare initCompleteFile=$1
|
||||||
while [[ ! -r $initCompleteFile ]]; do
|
while [[ ! -r $initCompleteFile ]]; do
|
||||||
if [[ $SECONDS -ge 300 ]]; then
|
if [[ $SECONDS -ge 240 ]]; then
|
||||||
echo "^^^ +++"
|
echo "^^^ +++"
|
||||||
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
||||||
exit 1
|
exit 1
|
||||||
|
@@ -12,14 +12,10 @@ import json
|
|||||||
import subprocess
|
import subprocess
|
||||||
import sys;
|
import sys;
|
||||||
|
|
||||||
real_file = os.path.realpath(__file__)
|
|
||||||
ci_path = os.path.dirname(real_file)
|
|
||||||
src_root = os.path.dirname(ci_path)
|
|
||||||
|
|
||||||
def load_metadata():
|
def load_metadata():
|
||||||
cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
|
|
||||||
return json.loads(subprocess.Popen(
|
return json.loads(subprocess.Popen(
|
||||||
cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
|
'cargo metadata --no-deps --format-version=1',
|
||||||
|
shell=True, stdout=subprocess.PIPE).communicate()[0])
|
||||||
|
|
||||||
def get_packages():
|
def get_packages():
|
||||||
metadata = load_metadata()
|
metadata = load_metadata()
|
||||||
|
27
ci/publish-bpf-sdk.sh
Executable file
27
ci/publish-bpf-sdk.sh
Executable file
@@ -0,0 +1,27 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/.."
|
||||||
|
eval "$(ci/channel-info.sh)"
|
||||||
|
|
||||||
|
if [[ -n "$CI_TAG" ]]; then
|
||||||
|
CHANNEL_OR_TAG=$CI_TAG
|
||||||
|
else
|
||||||
|
CHANNEL_OR_TAG=$CHANNEL
|
||||||
|
fi
|
||||||
|
|
||||||
|
(
|
||||||
|
set -x
|
||||||
|
sdk/bpf/scripts/package.sh
|
||||||
|
[[ -f bpf-sdk.tar.bz2 ]]
|
||||||
|
)
|
||||||
|
|
||||||
|
source ci/upload-ci-artifact.sh
|
||||||
|
echo --- AWS S3 Store
|
||||||
|
if [[ -z $CHANNEL_OR_TAG ]]; then
|
||||||
|
echo Skipped
|
||||||
|
else
|
||||||
|
upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit 0
|
@@ -39,11 +39,7 @@ fi
|
|||||||
|
|
||||||
case "$CI_OS_NAME" in
|
case "$CI_OS_NAME" in
|
||||||
osx)
|
osx)
|
||||||
_cputype="$(uname -m)"
|
TARGET=x86_64-apple-darwin
|
||||||
if [[ $_cputype = arm64 ]]; then
|
|
||||||
_cputype=aarch64
|
|
||||||
fi
|
|
||||||
TARGET=${_cputype}-apple-darwin
|
|
||||||
;;
|
;;
|
||||||
linux)
|
linux)
|
||||||
TARGET=x86_64-unknown-linux-gnu
|
TARGET=x86_64-unknown-linux-gnu
|
||||||
@@ -87,7 +83,7 @@ echo --- Creating release tarball
|
|||||||
export CHANNEL
|
export CHANNEL
|
||||||
|
|
||||||
source ci/rust-version.sh stable
|
source ci/rust-version.sh stable
|
||||||
scripts/cargo-install-all.sh stable "${RELEASE_BASENAME}"
|
scripts/cargo-install-all.sh +"$rust_stable" "${RELEASE_BASENAME}"
|
||||||
|
|
||||||
tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}"
|
tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}"
|
||||||
bzip2 "${TARBALL_BASENAME}"-$TARGET.tar
|
bzip2 "${TARBALL_BASENAME}"-$TARGET.tar
|
||||||
|
@@ -1,57 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
cd "$(dirname "$0")/.."
|
|
||||||
|
|
||||||
export CI_LOCAL_RUN=true
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
case $(uname -o) in
|
|
||||||
*/Linux)
|
|
||||||
export CI_OS_NAME=linux
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "local CI runs are only supported on Linux" 1>&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
steps=()
|
|
||||||
steps+=(test-sanity)
|
|
||||||
steps+=(shellcheck)
|
|
||||||
steps+=(test-checks)
|
|
||||||
steps+=(test-coverage)
|
|
||||||
steps+=(test-stable)
|
|
||||||
steps+=(test-stable-bpf)
|
|
||||||
steps+=(test-stable-perf)
|
|
||||||
steps+=(test-downstream-builds)
|
|
||||||
steps+=(test-bench)
|
|
||||||
steps+=(test-local-cluster)
|
|
||||||
steps+=(test-local-cluster-flakey)
|
|
||||||
steps+=(test-local-cluster-slow)
|
|
||||||
|
|
||||||
step_index=0
|
|
||||||
if [[ -n "$1" ]]; then
|
|
||||||
start_step="$1"
|
|
||||||
while [[ $step_index -lt ${#steps[@]} ]]; do
|
|
||||||
step="${steps[$step_index]}"
|
|
||||||
if [[ "$step" = "$start_step" ]]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
step_index=$((step_index + 1))
|
|
||||||
done
|
|
||||||
if [[ $step_index -eq ${#steps[@]} ]]; then
|
|
||||||
echo "unexpected start step: \"$start_step\"" 1>&2
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "** starting at step: \"$start_step\" **"
|
|
||||||
echo
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
while [[ $step_index -lt ${#steps[@]} ]]; do
|
|
||||||
step="${steps[$step_index]}"
|
|
||||||
cmd="ci/${step}.sh"
|
|
||||||
$cmd
|
|
||||||
step_index=$((step_index + 1))
|
|
||||||
done
|
|
@@ -7,7 +7,7 @@ source multinode-demo/common.sh
|
|||||||
|
|
||||||
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
|
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
|
||||||
|
|
||||||
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
|
timeout 120 ./run.sh &
|
||||||
pid=$!
|
pid=$!
|
||||||
|
|
||||||
attempts=20
|
attempts=20
|
||||||
@@ -16,17 +16,14 @@ while [[ ! -f config/run/init-completed ]]; do
|
|||||||
if ((--attempts == 0)); then
|
if ((--attempts == 0)); then
|
||||||
echo "Error: validator failed to boot"
|
echo "Error: validator failed to boot"
|
||||||
exit 1
|
exit 1
|
||||||
else
|
|
||||||
echo "Checking init"
|
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
snapshot_slot=1
|
snapshot_slot=1
|
||||||
|
|
||||||
# wait a bit longer than snapshot_slot
|
# wait a bit longer than snapshot_slot
|
||||||
while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do
|
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
echo "Checking slot"
|
|
||||||
done
|
done
|
||||||
|
|
||||||
$solana_validator --ledger config/ledger exit --force || true
|
$solana_validator --ledger config/ledger exit --force || true
|
||||||
|
@@ -18,13 +18,13 @@
|
|||||||
if [[ -n $RUST_STABLE_VERSION ]]; then
|
if [[ -n $RUST_STABLE_VERSION ]]; then
|
||||||
stable_version="$RUST_STABLE_VERSION"
|
stable_version="$RUST_STABLE_VERSION"
|
||||||
else
|
else
|
||||||
stable_version=1.52.1
|
stable_version=1.50.0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
|
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
|
||||||
nightly_version="$RUST_NIGHTLY_VERSION"
|
nightly_version="$RUST_NIGHTLY_VERSION"
|
||||||
else
|
else
|
||||||
nightly_version=2021-05-18
|
nightly_version=2021-02-18
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
|
|||||||
TimeoutStartSec=10
|
TimeoutStartSec=10
|
||||||
TimeoutStopSec=0
|
TimeoutStopSec=0
|
||||||
KillMode=process
|
KillMode=process
|
||||||
LimitNOFILE=1000000
|
LimitNOFILE=700000
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
|
|||||||
ensure_env || exit 1
|
ensure_env || exit 1
|
||||||
|
|
||||||
# Allow more files to be opened by a user
|
# Allow more files to be opened by a user
|
||||||
echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf
|
echo "* - nofile 700000" > /etc/security/limits.d/90-solana-nofiles.conf
|
||||||
|
|
||||||
|
@@ -27,7 +27,7 @@ BENCH_ARTIFACT=current_bench_results.log
|
|||||||
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
||||||
export PATH="$PWD/target/debug":$PATH
|
export PATH="$PWD/target/debug":$PATH
|
||||||
|
|
||||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
# Clear the C dependency files, if dependeny moves these files are not regenerated
|
||||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||||
|
|
||||||
@@ -45,14 +45,6 @@ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
|
|||||||
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
|
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
|
||||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||||
|
|
||||||
# Run gossip benches
|
|
||||||
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
|
|
||||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
|
||||||
|
|
||||||
# Run poh benches
|
|
||||||
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
|
|
||||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
|
||||||
|
|
||||||
# Run core benches
|
# Run core benches
|
||||||
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||||
|
@@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check
|
|||||||
|
|
||||||
# Disallow uncommitted Cargo.lock changes
|
# Disallow uncommitted Cargo.lock changes
|
||||||
(
|
(
|
||||||
_ scripts/cargo-for-all-lock-files.sh tree >/dev/null
|
_ scripts/cargo-for-all-lock-files.sh tree
|
||||||
set +e
|
set +e
|
||||||
if ! _ git diff --exit-code; then
|
if ! _ git diff --exit-code; then
|
||||||
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
|
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
|
||||||
@@ -35,10 +35,8 @@ echo --- build environment
|
|||||||
"$cargo" stable clippy --version --verbose
|
"$cargo" stable clippy --version --verbose
|
||||||
"$cargo" nightly clippy --version --verbose
|
"$cargo" nightly clippy --version --verbose
|
||||||
|
|
||||||
# audit is done only with "$cargo stable"
|
# audit is done only with stable
|
||||||
"$cargo" stable audit --version
|
"$cargo" stable audit --version
|
||||||
|
|
||||||
grcov --version
|
|
||||||
)
|
)
|
||||||
|
|
||||||
export RUST_BACKTRACE=1
|
export RUST_BACKTRACE=1
|
||||||
@@ -47,7 +45,7 @@ export RUSTFLAGS="-D warnings -A incomplete_features"
|
|||||||
# Only force up-to-date lock files on edge
|
# Only force up-to-date lock files on edge
|
||||||
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
||||||
# Exclude --benches as it's not available in rust stable yet
|
# Exclude --benches as it's not available in rust stable yet
|
||||||
if _ scripts/cargo-for-all-lock-files.sh stable check --locked --tests --bins --examples; then
|
if _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" check --locked --tests --bins --examples; then
|
||||||
true
|
true
|
||||||
else
|
else
|
||||||
check_status=$?
|
check_status=$?
|
||||||
@@ -58,7 +56,7 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Ensure nightly and --benches
|
# Ensure nightly and --benches
|
||||||
_ scripts/cargo-for-all-lock-files.sh nightly check --locked --all-targets
|
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
|
||||||
else
|
else
|
||||||
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
|
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
|
||||||
fi
|
fi
|
||||||
@@ -67,8 +65,7 @@ _ ci/order-crates-for-publishing.py
|
|||||||
|
|
||||||
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
|
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
|
||||||
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
|
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
|
||||||
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
|
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
|
||||||
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
|
|
||||||
|
|
||||||
_ "$cargo" stable fmt --all -- --check
|
_ "$cargo" stable fmt --all -- --check
|
||||||
|
|
||||||
@@ -82,6 +79,7 @@ _ ci/do-audit.sh
|
|||||||
cd "$project"
|
cd "$project"
|
||||||
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
|
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
|
||||||
_ "$cargo" stable fmt -- --check
|
_ "$cargo" stable fmt -- --check
|
||||||
|
_ "$cargo" nightly test
|
||||||
)
|
)
|
||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
@@ -1,9 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
cd "$(dirname "$0")/.."
|
|
||||||
|
|
||||||
export CI_LOCAL_RUN=true
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
scripts/build-downstream-projects.sh
|
|
@@ -1 +0,0 @@
|
|||||||
test-stable.sh
|
|
@@ -1 +0,0 @@
|
|||||||
test-stable.sh
|
|
@@ -1 +0,0 @@
|
|||||||
test-stable.sh
|
|
@@ -21,6 +21,10 @@ export RUST_BACKTRACE=1
|
|||||||
export RUSTFLAGS="-D warnings"
|
export RUSTFLAGS="-D warnings"
|
||||||
source scripts/ulimit-n.sh
|
source scripts/ulimit-n.sh
|
||||||
|
|
||||||
|
# Clear the C dependency files, if dependency moves these files are not regenerated
|
||||||
|
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||||
|
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||||
|
|
||||||
# Limit compiler jobs to reduce memory usage
|
# Limit compiler jobs to reduce memory usage
|
||||||
# on machines with 2gb/thread of memory
|
# on machines with 2gb/thread of memory
|
||||||
NPROC=$(nproc)
|
NPROC=$(nproc)
|
||||||
@@ -31,58 +35,25 @@ case $testName in
|
|||||||
test-stable)
|
test-stable)
|
||||||
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
||||||
;;
|
;;
|
||||||
test-stable-bpf)
|
test-stable-perf)
|
||||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
|
||||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
|
||||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
|
||||||
|
|
||||||
# rustfilt required for dumping BPF assembly listings
|
|
||||||
"$cargo" install rustfilt
|
|
||||||
|
|
||||||
# solana-keygen required when building C programs
|
# solana-keygen required when building C programs
|
||||||
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
||||||
export PATH="$PWD/target/debug":$PATH
|
export PATH="$PWD/target/debug":$PATH
|
||||||
cargo_build_bpf="$(realpath ./cargo-build-bpf)"
|
|
||||||
|
|
||||||
# BPF solana-sdk legacy compile test
|
# BPF solana-sdk legacy compile test
|
||||||
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml
|
./cargo-build-bpf --manifest-path sdk/Cargo.toml
|
||||||
|
|
||||||
# BPF Program unit tests
|
# BPF program tests
|
||||||
"$cargo" test --manifest-path programs/bpf/Cargo.toml
|
|
||||||
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
|
|
||||||
|
|
||||||
# BPF program system tests
|
|
||||||
_ make -C programs/bpf/c tests
|
_ make -C programs/bpf/c tests
|
||||||
_ "$cargo" stable test \
|
_ "$cargo" stable test \
|
||||||
--manifest-path programs/bpf/Cargo.toml \
|
--manifest-path programs/bpf/Cargo.toml \
|
||||||
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
|
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
|
||||||
|
|
||||||
# Dump BPF program assembly listings
|
|
||||||
for bpf_test in programs/bpf/rust/*; do
|
|
||||||
if pushd "$bpf_test"; then
|
|
||||||
"$cargo_build_bpf" --dump
|
|
||||||
popd
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# BPF program instruction count assertion
|
|
||||||
bpf_target_path=programs/bpf/target
|
|
||||||
_ "$cargo" stable test \
|
|
||||||
--manifest-path programs/bpf/Cargo.toml \
|
|
||||||
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
|
|
||||||
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt
|
|
||||||
|
|
||||||
bpf_dump_archive="bpf-dumps.tar.bz2"
|
|
||||||
rm -f "$bpf_dump_archive"
|
|
||||||
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
test-stable-perf)
|
|
||||||
if [[ $(uname) = Linux ]]; then
|
if [[ $(uname) = Linux ]]; then
|
||||||
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
|
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
|
||||||
# lengthy and unexpected delay the first time CUDA is involved when the driver
|
# lengthy and unexpected delay the first time CUDA is involved when the driver
|
||||||
# is not yet loaded.
|
# is not yet loaded.
|
||||||
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true
|
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
|
||||||
|
|
||||||
rm -rf target/perf-libs
|
rm -rf target/perf-libs
|
||||||
./fetch-perf-libs.sh
|
./fetch-perf-libs.sh
|
||||||
@@ -100,17 +71,7 @@ test-stable-perf)
|
|||||||
;;
|
;;
|
||||||
test-local-cluster)
|
test-local-cluster)
|
||||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
test-local-cluster-flakey)
|
|
||||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
|
||||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
test-local-cluster-slow)
|
|
||||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
|
||||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
|
|
||||||
exit 0
|
exit 0
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
|
@@ -19,24 +19,13 @@ upload-ci-artifact() {
|
|||||||
upload-s3-artifact() {
|
upload-s3-artifact() {
|
||||||
echo "--- artifact: $1 to $2"
|
echo "--- artifact: $1 to $2"
|
||||||
(
|
(
|
||||||
args=(
|
|
||||||
--rm
|
|
||||||
--env AWS_ACCESS_KEY_ID
|
|
||||||
--env AWS_SECRET_ACCESS_KEY
|
|
||||||
--volume "$PWD:/solana"
|
|
||||||
|
|
||||||
)
|
|
||||||
if [[ $(uname -m) = arm64 ]]; then
|
|
||||||
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
|
|
||||||
args+=(
|
|
||||||
--platform linux/amd64
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
args+=(
|
|
||||||
eremite/aws-cli:2018.12.18
|
|
||||||
/usr/bin/s3cmd --acl-public put "$1" "$2"
|
|
||||||
)
|
|
||||||
set -x
|
set -x
|
||||||
docker run "${args[@]}"
|
docker run \
|
||||||
|
--rm \
|
||||||
|
--env AWS_ACCESS_KEY_ID \
|
||||||
|
--env AWS_SECRET_ACCESS_KEY \
|
||||||
|
--volume "$PWD:/solana" \
|
||||||
|
eremite/aws-cli:2018.12.18 \
|
||||||
|
/usr/bin/s3cmd --acl-public put "$1" "$2"
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "solana-clap-utils"
|
name = "solana-clap-utils"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
description = "Solana utilities for the clap"
|
description = "Solana utilities for the clap"
|
||||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
@@ -12,18 +12,13 @@ edition = "2018"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
clap = "2.33.0"
|
clap = "2.33.0"
|
||||||
rpassword = "4.0"
|
rpassword = "4.0"
|
||||||
solana-perf = { path = "../perf", version = "=1.8.13" }
|
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.4" }
|
||||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
|
||||||
thiserror = "1.0.21"
|
thiserror = "1.0.21"
|
||||||
tiny-bip39 = "0.8.1"
|
tiny-bip39 = "0.8.0"
|
||||||
uriparse = "0.6.3"
|
|
||||||
url = "2.1.0"
|
url = "2.1.0"
|
||||||
chrono = "0.4"
|
chrono = "0.4"
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tempfile = "3.1.0"
|
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "solana_clap_utils"
|
name = "solana_clap_utils"
|
||||||
|
|
||||||
|
@@ -1,7 +1,5 @@
|
|||||||
use {
|
use crate::{input_validators, ArgConstant};
|
||||||
crate::{input_validators, ArgConstant},
|
use clap::Arg;
|
||||||
clap::Arg,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant {
|
pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant {
|
||||||
name: "fee_payer",
|
name: "fee_payer",
|
||||||
|
@@ -1,24 +1,19 @@
|
|||||||
use {
|
use crate::keypair::{
|
||||||
crate::keypair::{
|
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
|
||||||
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
|
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||||
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
|
||||||
},
|
|
||||||
chrono::DateTime,
|
|
||||||
clap::ArgMatches,
|
|
||||||
solana_remote_wallet::remote_wallet::RemoteWalletManager,
|
|
||||||
solana_sdk::{
|
|
||||||
clock::UnixTimestamp,
|
|
||||||
commitment_config::CommitmentConfig,
|
|
||||||
genesis_config::ClusterType,
|
|
||||||
native_token::sol_to_lamports,
|
|
||||||
pubkey::Pubkey,
|
|
||||||
signature::{read_keypair_file, Keypair, Signature, Signer},
|
|
||||||
},
|
|
||||||
std::{str::FromStr, sync::Arc},
|
|
||||||
};
|
};
|
||||||
|
use chrono::DateTime;
|
||||||
// Sentinel value used to indicate to write to screen instead of file
|
use clap::ArgMatches;
|
||||||
pub const STDOUT_OUTFILE_TOKEN: &str = "-";
|
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||||
|
use solana_sdk::{
|
||||||
|
clock::UnixTimestamp,
|
||||||
|
commitment_config::CommitmentConfig,
|
||||||
|
genesis_config::ClusterType,
|
||||||
|
native_token::sol_to_lamports,
|
||||||
|
pubkey::Pubkey,
|
||||||
|
signature::{read_keypair_file, Keypair, Signature, Signer},
|
||||||
|
};
|
||||||
|
use std::{str::FromStr, sync::Arc};
|
||||||
|
|
||||||
// Return parsed values from matches at `name`
|
// Return parsed values from matches at `name`
|
||||||
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
|
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
|
||||||
@@ -60,7 +55,7 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
|
|||||||
if let Some(value) = matches.value_of(name) {
|
if let Some(value) = matches.value_of(name) {
|
||||||
if value == ASK_KEYWORD {
|
if value == ASK_KEYWORD {
|
||||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||||
keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
|
keypair_from_seed_phrase(name, skip_validation, true).ok()
|
||||||
} else {
|
} else {
|
||||||
read_keypair_file(value).ok()
|
read_keypair_file(value).ok()
|
||||||
}
|
}
|
||||||
@@ -75,7 +70,7 @@ pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>>
|
|||||||
.filter_map(|value| {
|
.filter_map(|value| {
|
||||||
if value == ASK_KEYWORD {
|
if value == ASK_KEYWORD {
|
||||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||||
keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
|
keypair_from_seed_phrase(name, skip_validation, true).ok()
|
||||||
} else {
|
} else {
|
||||||
read_keypair_file(value).ok()
|
read_keypair_file(value).ok()
|
||||||
}
|
}
|
||||||
@@ -196,12 +191,10 @@ pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentC
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use {
|
use super::*;
|
||||||
super::*,
|
use clap::{App, Arg};
|
||||||
clap::{App, Arg},
|
use solana_sdk::signature::write_keypair_file;
|
||||||
solana_sdk::signature::write_keypair_file,
|
use std::fs;
|
||||||
std::fs,
|
|
||||||
};
|
|
||||||
|
|
||||||
fn app<'ab, 'v>() -> App<'ab, 'v> {
|
fn app<'ab, 'v>() -> App<'ab, 'v> {
|
||||||
App::new("test")
|
App::new("test")
|
||||||
|
@@ -1,14 +1,13 @@
|
|||||||
use {
|
use crate::keypair::{parse_keypair_path, KeypairUrl, ASK_KEYWORD};
|
||||||
crate::keypair::{parse_signer_source, SignerSourceKind, ASK_KEYWORD},
|
use chrono::DateTime;
|
||||||
chrono::DateTime,
|
use solana_sdk::{
|
||||||
solana_sdk::{
|
clock::{Epoch, Slot},
|
||||||
clock::{Epoch, Slot},
|
hash::Hash,
|
||||||
hash::Hash,
|
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
signature::{read_keypair_file, Signature},
|
||||||
signature::{read_keypair_file, Signature},
|
|
||||||
},
|
|
||||||
std::{fmt::Display, str::FromStr},
|
|
||||||
};
|
};
|
||||||
|
use std::fmt::Display;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
|
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
|
||||||
where
|
where
|
||||||
@@ -33,29 +32,6 @@ where
|
|||||||
is_parsable_generic::<T, String>(string)
|
is_parsable_generic::<T, String>(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return an error if string cannot be parsed as numeric type T, and value not within specified
|
|
||||||
// range
|
|
||||||
pub fn is_within_range<T>(string: String, range_min: T, range_max: T) -> Result<(), String>
|
|
||||||
where
|
|
||||||
T: FromStr + Copy + std::fmt::Debug + PartialOrd + std::ops::Add<Output = T> + From<usize>,
|
|
||||||
T::Err: Display,
|
|
||||||
{
|
|
||||||
match string.parse::<T>() {
|
|
||||||
Ok(input) => {
|
|
||||||
let range = range_min..range_max + 1.into();
|
|
||||||
if !range.contains(&input) {
|
|
||||||
Err(format!(
|
|
||||||
"input '{:?}' out of range ({:?}..{:?}]",
|
|
||||||
input, range_min, range_max
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(err) => Err(format!("error parsing '{}': {}", string, err)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return an error if a pubkey cannot be parsed.
|
// Return an error if a pubkey cannot be parsed.
|
||||||
pub fn is_pubkey<T>(string: T) -> Result<(), String>
|
pub fn is_pubkey<T>(string: T) -> Result<(), String>
|
||||||
where
|
where
|
||||||
@@ -95,26 +71,6 @@ where
|
|||||||
.map_err(|err| format!("{}", err))
|
.map_err(|err| format!("{}", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return an error if a `SignerSourceKind::Prompt` cannot be parsed
|
|
||||||
pub fn is_prompt_signer_source<T>(string: T) -> Result<(), String>
|
|
||||||
where
|
|
||||||
T: AsRef<str> + Display,
|
|
||||||
{
|
|
||||||
if string.as_ref() == ASK_KEYWORD {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
match parse_signer_source(string.as_ref())
|
|
||||||
.map_err(|err| format!("{}", err))?
|
|
||||||
.kind
|
|
||||||
{
|
|
||||||
SignerSourceKind::Prompt => Ok(()),
|
|
||||||
_ => Err(format!(
|
|
||||||
"Unable to parse input as `prompt:` URI scheme or `ASK` keyword: {}",
|
|
||||||
string
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return an error if string cannot be parsed as pubkey string or keypair file location
|
// Return an error if string cannot be parsed as pubkey string or keypair file location
|
||||||
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
|
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
|
||||||
where
|
where
|
||||||
@@ -129,11 +85,8 @@ pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
|
|||||||
where
|
where
|
||||||
T: AsRef<str> + Display,
|
T: AsRef<str> + Display,
|
||||||
{
|
{
|
||||||
match parse_signer_source(string.as_ref())
|
match parse_keypair_path(string.as_ref()) {
|
||||||
.map_err(|err| format!("{}", err))?
|
KeypairUrl::Filepath(path) => is_keypair(path),
|
||||||
.kind
|
|
||||||
{
|
|
||||||
SignerSourceKind::Filepath(path) => is_keypair(path),
|
|
||||||
_ => Ok(()),
|
_ => Ok(()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -214,8 +167,8 @@ where
|
|||||||
pub fn normalize_to_url_if_moniker<T: AsRef<str>>(url_or_moniker: T) -> String {
|
pub fn normalize_to_url_if_moniker<T: AsRef<str>>(url_or_moniker: T) -> String {
|
||||||
match url_or_moniker.as_ref() {
|
match url_or_moniker.as_ref() {
|
||||||
"m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
|
"m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
|
||||||
"t" | "testnet" => "https://api.testnet.solana.com",
|
"t" | "testnet" => "https://testnet.solana.com",
|
||||||
"d" | "devnet" => "https://api.devnet.solana.com",
|
"d" | "devnet" => "https://devnet.solana.com",
|
||||||
"l" | "localhost" => "http://localhost:8899",
|
"l" | "localhost" => "http://localhost:8899",
|
||||||
url => url,
|
url => url,
|
||||||
}
|
}
|
||||||
@@ -353,27 +306,6 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_niceness_adjustment_valid<T>(value: T) -> Result<(), String>
|
|
||||||
where
|
|
||||||
T: AsRef<str> + Display,
|
|
||||||
{
|
|
||||||
let adjustment = value.as_ref().parse::<i8>().map_err(|err| {
|
|
||||||
format!(
|
|
||||||
"error parsing niceness adjustment value '{}': {}",
|
|
||||||
value, err
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
if solana_perf::thread::is_renice_allowed(adjustment) {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(String::from(
|
|
||||||
"niceness adjustment supported only on Linux; negative adjustment \
|
|
||||||
(priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
|
|
||||||
for details)",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -390,11 +322,4 @@ mod tests {
|
|||||||
assert!(is_derivation("a/b").is_err());
|
assert!(is_derivation("a/b").is_err());
|
||||||
assert!(is_derivation("0/4294967296").is_err());
|
assert!(is_derivation("0/4294967296").is_err());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_is_niceness_adjustment_valid() {
|
|
||||||
assert_eq!(is_niceness_adjustment_valid("0"), Ok(()));
|
|
||||||
assert!(is_niceness_adjustment_valid("128").is_err());
|
|
||||||
assert!(is_niceness_adjustment_valid("-129").is_err());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
Load Diff
@@ -27,6 +27,5 @@ pub mod fee_payer;
|
|||||||
pub mod input_parsers;
|
pub mod input_parsers;
|
||||||
pub mod input_validators;
|
pub mod input_validators;
|
||||||
pub mod keypair;
|
pub mod keypair;
|
||||||
pub mod memo;
|
|
||||||
pub mod nonce;
|
pub mod nonce;
|
||||||
pub mod offline;
|
pub mod offline;
|
||||||
|
@@ -1,15 +0,0 @@
|
|||||||
use {crate::ArgConstant, clap::Arg};
|
|
||||||
|
|
||||||
pub const MEMO_ARG: ArgConstant<'static> = ArgConstant {
|
|
||||||
name: "memo",
|
|
||||||
long: "--with-memo",
|
|
||||||
help: "Specify a memo string to include in the transaction.",
|
|
||||||
};
|
|
||||||
|
|
||||||
pub fn memo_arg<'a, 'b>() -> Arg<'a, 'b> {
|
|
||||||
Arg::with_name(MEMO_ARG.name)
|
|
||||||
.long(MEMO_ARG.long)
|
|
||||||
.takes_value(true)
|
|
||||||
.value_name("MEMO")
|
|
||||||
.help(MEMO_ARG.help)
|
|
||||||
}
|
|
@@ -1,7 +1,5 @@
|
|||||||
use {
|
use crate::{input_validators::*, offline::BLOCKHASH_ARG, ArgConstant};
|
||||||
crate::{input_validators::*, offline::BLOCKHASH_ARG, ArgConstant},
|
use clap::{App, Arg};
|
||||||
clap::{App, Arg},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
|
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
|
||||||
name: "nonce",
|
name: "nonce",
|
||||||
|
@@ -1,7 +1,5 @@
|
|||||||
use {
|
use crate::{input_validators::*, ArgConstant};
|
||||||
crate::{input_validators::*, ArgConstant},
|
use clap::{App, Arg};
|
||||||
clap::{App, Arg},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
|
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
|
||||||
name: "blockhash",
|
name: "blockhash",
|
||||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-cli-config"
|
name = "solana-cli-config"
|
||||||
description = "Blockchain, Rebuilt for Scale"
|
description = "Blockchain, Rebuilt for Scale"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
|
@@ -1,9 +1,7 @@
|
|||||||
// Wallet settings that can be configured for long-term use
|
// Wallet settings that can be configured for long-term use
|
||||||
use {
|
use serde_derive::{Deserialize, Serialize};
|
||||||
serde_derive::{Deserialize, Serialize},
|
use std::{collections::HashMap, io, path::Path};
|
||||||
std::{collections::HashMap, io, path::Path},
|
use url::Url;
|
||||||
url::Url,
|
|
||||||
};
|
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
pub static ref CONFIG_FILE: Option<String> = {
|
pub static ref CONFIG_FILE: Option<String> = {
|
||||||
@@ -109,24 +107,24 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn compute_websocket_url() {
|
fn compute_websocket_url() {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
Config::compute_websocket_url("http://api.devnet.solana.com"),
|
Config::compute_websocket_url(&"http://devnet.solana.com"),
|
||||||
"ws://api.devnet.solana.com/".to_string()
|
"ws://devnet.solana.com/".to_string()
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
Config::compute_websocket_url("https://api.devnet.solana.com"),
|
Config::compute_websocket_url(&"https://devnet.solana.com"),
|
||||||
"wss://api.devnet.solana.com/".to_string()
|
"wss://devnet.solana.com/".to_string()
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
Config::compute_websocket_url("http://example.com:8899"),
|
Config::compute_websocket_url(&"http://example.com:8899"),
|
||||||
"ws://example.com:8900/".to_string()
|
"ws://example.com:8900/".to_string()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
Config::compute_websocket_url("https://example.com:1234"),
|
Config::compute_websocket_url(&"https://example.com:1234"),
|
||||||
"wss://example.com:1235/".to_string()
|
"wss://example.com:1235/".to_string()
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(Config::compute_websocket_url("garbage"), String::new());
|
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -3,6 +3,7 @@ extern crate lazy_static;
|
|||||||
|
|
||||||
mod config;
|
mod config;
|
||||||
pub use config::{Config, CONFIG_FILE};
|
pub use config::{Config, CONFIG_FILE};
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
fs::{create_dir_all, File},
|
fs::{create_dir_all, File},
|
||||||
io::{self, Write},
|
io::{self, Write},
|
||||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-cli-output"
|
name = "solana-cli-output"
|
||||||
description = "Blockchain, Rebuilt for Scale"
|
description = "Blockchain, Rebuilt for Scale"
|
||||||
version = "1.8.13"
|
version = "1.6.4"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@@ -12,21 +12,20 @@ documentation = "https://docs.rs/solana-cli-output"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
base64 = "0.13.0"
|
base64 = "0.13.0"
|
||||||
chrono = { version = "0.4.11", features = ["serde"] }
|
chrono = { version = "0.4.11", features = ["serde"] }
|
||||||
clap = "2.33.0"
|
console = "0.11.3"
|
||||||
console = "0.14.1"
|
|
||||||
humantime = "2.0.1"
|
humantime = "2.0.1"
|
||||||
Inflector = "0.11.4"
|
Inflector = "0.11.4"
|
||||||
indicatif = "0.15.0"
|
indicatif = "0.15.0"
|
||||||
serde = "1.0.122"
|
serde = "1.0.122"
|
||||||
serde_derive = "1.0.103"
|
serde_derive = "1.0.103"
|
||||||
serde_json = "1.0.56"
|
serde_json = "1.0.56"
|
||||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.13" }
|
solana-account-decoder = { path = "../account-decoder", version = "=1.6.4" }
|
||||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.13" }
|
solana-clap-utils = { path = "../clap-utils", version = "=1.6.4" }
|
||||||
solana-client = { path = "../client", version = "=1.8.13" }
|
solana-client = { path = "../client", version = "=1.6.4" }
|
||||||
solana-sdk = { path = "../sdk", version = "=1.8.13" }
|
solana-sdk = { path = "../sdk", version = "=1.6.4" }
|
||||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.13" }
|
solana-stake-program = { path = "../programs/stake", version = "=1.6.4" }
|
||||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.13" }
|
solana-transaction-status = { path = "../transaction-status", version = "=1.6.4" }
|
||||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
solana-vote-program = { path = "../programs/vote", version = "=1.6.4" }
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
targets = ["x86_64-unknown-linux-gnu"]
|
targets = ["x86_64-unknown-linux-gnu"]
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user