Compare commits

33 commits (author and date columns were not captured):

899f57962a
3176b00e57
08b9da8397
2bc21ecba2
5b2a65fab3
f5d56eabf3
af45efb62c
f528cda832
eeef9f4e59
32124b59e9
aa9772f9c0
5f183bd773
2238e5001b
79fa7ef55c
07df827411
a259ff0e72
d7d3e767e7
6e8aa9af17
0236de7bc8
899bd1572a
97ec4cd44e
5500970a7e
caea04d8d5
b1a90c3580
5bd4e38345
fddba08571
87963764fa
b691a159dd
5af1d48be8
3b3ec3313f
be00246fb5
1d80ba9edf
4bcf976ecd
.buildkite/env/secrets.ejson (vendored, 2 changed lines)

@@ -2,6 +2,6 @@
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "_comment": "These credentials are encrypted and pose no risk",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:KToenD1Sr3w82lHGxz1n+j3hwNlLk/1pYrjZHlvY6kE=:hN1Q25omtJ+4yYVn+qzIsPLKT3O6J9XN:DMLNLXi/pkWgvwF6gNIcNF222sgsRR9LnwLZYj0P0wGj7q6w8YQnd1Rskj+sRroI/z5pQg==]"
+    "CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
   }
 }
(CI shell script; file name not preserved in capture)

@@ -12,8 +12,7 @@ export PS4="++"
 # Restore target/ from the previous CI build on this machine
 #
 eval "$(ci/channel-info.sh)"
-eval "$(ci/sbf-tools-info.sh)"
-export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
+export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
 (
   set -x
   MAX_CACHE_SIZE=18 # gigabytes
@@ -37,7 +36,4 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
-  # `std:
-  # "found possibly newer version of crate `std` which `xyz` depends on
-  rm -rf target/bpfel-unknown-unknown
   if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then
     rm -rf target/release
   fi
 )
.mergify.yml (23 changed lines)

@@ -65,11 +65,20 @@ pull_request_rules:
       backport:
         ignore_conflicts: true
         branches:
-          - v1.9
-
-commands_restrictions:
-  # The author of copied PRs is the Mergify user.
-  # Restrict `copy` access to Core Contributors
-  copy:
+          - v1.5
+  - name: v1.6 backport
+    conditions:
+      - author=@core-contributors
+      - label=v1.6
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.6
+  - name: v1.7 backport
+    conditions:
+      - label=v1.7
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.7
.travis.yml (15 changed lines)

@@ -29,7 +29,6 @@ jobs:
       if: type IN (api, cron) OR tag IS present
       name: "macOS release artifacts"
       os: osx
-      osx_image: xcode12
       language: rust
       rust:
         - stable
@@ -37,12 +36,8 @@ jobs:
        - source ci/rust-version.sh
        - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
        - readlink -f .
-        - brew install gnu-tar
-        - PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
-        - tar --version
       script:
        - source ci/env.sh
-        - rustup set profile default
        - ci/publish-tarball.sh
       deploy:
        - provider: s3
@@ -65,12 +60,6 @@ jobs:
     - <<: *release-artifacts
       name: "Windows release artifacts"
       os: windows
-      install:
-        - choco install openssl
-        - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
-        - source ci/rust-version.sh
-        - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
-        - readlink -f .
     # Linux release artifacts are still built by ci/buildkite-secondary.yml
     #- <<: *release-artifacts
     #  name: "Linux release artifacts"
@@ -84,7 +73,7 @@ jobs:

       language: node_js
       node_js:
-        - "lts/*"
+        - "node"

       cache:
         directories:
@@ -127,7 +116,7 @@ jobs:
       if: type IN (push, pull_request) OR tag IS present
       language: node_js
       node_js:
-        - "lts/*"
+        - "node"

       services:
        - docker
Cargo.lock (generated, 3197 changed lines)

File diff suppressed because it is too large.
Cargo.toml (27 changed lines)

@@ -1,8 +1,5 @@
 [workspace]
 members = [
-    "accountsdb-plugin-interface",
-    "accountsdb-plugin-manager",
-    "accountsdb-plugin-postgres",
     "accounts-cluster-bench",
     "bench-exchange",
     "bench-streamer",
@@ -12,7 +9,6 @@ members = [
     "banks-client",
     "banks-interface",
     "banks-server",
-    "bloom",
     "clap-utils",
     "cli-config",
     "cli-output",
@@ -25,7 +21,6 @@ members = [
     "perf",
     "validator",
     "genesis",
     "genesis-utils",
-    "gossip",
     "install",
     "keygen",
@@ -36,6 +31,7 @@ members = [
     "log-analyzer",
     "merkle-root-bench",
     "merkle-tree",
+    "stake-o-matic",
     "storage-bigtable",
     "storage-proto",
     "streamer",
@@ -43,19 +39,21 @@ members = [
     "metrics",
     "net-shaper",
     "notifier",
-    "poh",
     "poh-bench",
     "program-test",
-    "programs/secp256k1",
     "programs/bpf_loader",
-    "programs/compute-budget",
+    "programs/budget",
     "programs/config",
+    "programs/exchange",
-    "programs/ed25519",
+    "programs/secp256k1",
+    "programs/failure",
     "programs/noop",
+    "programs/ownable",
     "programs/stake",
+    "programs/vest",
     "programs/vote",
     "remote-wallet",
-    "rpc",
+    "ramp-tps",
     "runtime",
     "runtime/store-tool",
     "sdk",
@@ -63,6 +61,7 @@ members = [
     "sdk/cargo-test-bpf",
     "scripts",
     "stake-accounts",
+    "stake-monitor",
     "sys-tuner",
     "tokens",
     "transaction-status",
@@ -78,11 +77,3 @@ members = [
 exclude = [
     "programs/bpf",
 ]
-
-# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
-# dependency is supported on Apple M1. v2 of the feature resolver is needed to
-# specify arch-specific features.
-resolver = "2"
-
-[profile.dev]
-split-debuginfo = "unpacked"
README.md (15 changed lines)

@@ -1,6 +1,6 @@
 <p align="center">
   <a href="https://solana.com">
-    <img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
+    <img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
   </a>
 </p>

@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt
 ```

-Please make sure you are always using the latest stable rust version by running:
+Please sure you are always using the latest stable rust version by running:

 ```bash
 $ rustup update
@@ -32,12 +32,6 @@ $ sudo apt-get update
 $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
 ```

-On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
-
-```bash
-$ softwareupdate --install-rosetta
-```
-
 ## **2. Download the source code.**

 ```bash
@@ -51,6 +45,11 @@ $ cd solana
 $ cargo build
 ```

+## **4. Run a minimal local cluster.**
+```bash
+$ ./run.sh
+```
+
 # Testing

 **Run the test suite:**
SECURITY.md (28 changed lines)

@@ -42,36 +42,14 @@ RPC DoS/Crashes:
 $5,000 USD in locked SOL tokens (locked for 12 months)
 * RPC attacks

-Out of Scope:
-The following components are out of scope for the bounty program
-* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com
-* Explorer: `/explorer` in the monorepo as well as https://explorer.solana.com
-* Any encrypted credentials, auth tokens, etc. checked into the repo
-* Bugs in dependencies. Please take them upstream!
-* Attacks that require social engineering
-
 Eligibility:
-* The participant submitting the bug report shall follow the process outlined within this document
+* The participant submitting the bug bounty shall follow the process outlined within this document
 * Valid exploits can be eligible even if they are not successfully executed on the cluster
 * Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
 * Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.

 Payment of Bug Bounties:
+* Payments for eligible bug reports are distributed monthly.
-* Bounties for all bug reports submitted in a given month are paid out in the middle of the
-  following month.
-* The SOL/USD conversion rate used for payments is the market price at the end of
-  the last day of the month for the month in which the bug was submitted.
-* The reference for this price is the Closing Price given by Coingecko.com on
-  that date given here:
-  https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
-* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
-  payouts is the Close price on 2021-03-31 of $19.49. This applies to all bugs
-  submitted in March 2021, to be paid in mid-April 2021.
-* Bug bounties are paid out in
-  [stake accounts](https://solana.com/staking) with a
-  [lockup](https://docs.solana.com/staking/stake-accounts#lockups)
-  expiring 12 months from the last day of the month in which the bug was submitted.
-Notes:
-* All locked tokens can be staked during the lockup period

 <a name="process"></a>
 ## Incident Response Process
account-decoder/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.8.17"
+version = "1.6.1"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,11 @@ lazy_static = "1.4.0"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-config-program = { path = "../programs/config", version = "=1.8.17" }
-solana-sdk = { path = "../sdk", version = "=1.8.17" }
-solana-vote-program = { path = "../programs/vote", version = "=1.8.17" }
-spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
+solana-config-program = { path = "../programs/config", version = "=1.6.1" }
+solana-sdk = { path = "../sdk", version = "=1.6.1" }
+solana-stake-program = { path = "../programs/stake", version = "=1.6.1" }
+solana-vote-program = { path = "../programs/vote", version = "=1.6.1" }
+spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
 thiserror = "1.0"
 zstd = "0.5.1"
account-decoder/src/lib.rs

@@ -17,10 +17,8 @@ pub mod validator_info;
 use {
     crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
     solana_sdk::{
-        account::{ReadableAccount, WritableAccount},
-        clock::Epoch,
-        fee_calculator::FeeCalculator,
-        pubkey::Pubkey,
+        account::ReadableAccount, account::WritableAccount, clock::Epoch,
+        fee_calculator::FeeCalculator, pubkey::Pubkey,
     },
     std::{
         io::{Read, Write},
@@ -30,7 +28,6 @@ use {
 pub type StringAmount = String;
 pub type StringDecimals = String;
-pub const MAX_BASE58_BYTES: usize = 128;

 /// A duplicate representation of an Account for pretty JSON serialization
 #[derive(Serialize, Deserialize, Clone, Debug)]
@@ -51,7 +48,7 @@ pub enum UiAccountData {
     Binary(String, UiAccountEncoding),
 }

-#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub enum UiAccountEncoding {
     Binary, // Legacy. Retained for RPC backwards compatibility
@@ -63,53 +60,41 @@ pub enum UiAccountEncoding {
 }

 impl UiAccount {
-    fn encode_bs58<T: ReadableAccount>(
-        account: &T,
-        data_slice_config: Option<UiDataSliceConfig>,
-    ) -> String {
-        if account.data().len() <= MAX_BASE58_BYTES {
-            bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
-        } else {
-            "error: data too large for bs58 encoding".to_string()
-        }
-    }
-
     pub fn encode<T: ReadableAccount>(
         pubkey: &Pubkey,
-        account: &T,
+        account: T,
         encoding: UiAccountEncoding,
         additional_data: Option<AccountAdditionalData>,
         data_slice_config: Option<UiDataSliceConfig>,
     ) -> Self {
         let data = match encoding {
-            UiAccountEncoding::Binary => {
-                let data = Self::encode_bs58(account, data_slice_config);
-                UiAccountData::LegacyBinary(data)
-            }
-            UiAccountEncoding::Base58 => {
-                let data = Self::encode_bs58(account, data_slice_config);
-                UiAccountData::Binary(data, encoding)
-            }
+            UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
+                bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
+            ),
+            UiAccountEncoding::Base58 => UiAccountData::Binary(
+                bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
+                encoding,
+            ),
             UiAccountEncoding::Base64 => UiAccountData::Binary(
-                base64::encode(slice_data(account.data(), data_slice_config)),
+                base64::encode(slice_data(&account.data(), data_slice_config)),
                 encoding,
             ),
             UiAccountEncoding::Base64Zstd => {
                 let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
                 match encoder
-                    .write_all(slice_data(account.data(), data_slice_config))
+                    .write_all(slice_data(&account.data(), data_slice_config))
                     .and_then(|()| encoder.finish())
                 {
                     Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
                     Err(_) => UiAccountData::Binary(
-                        base64::encode(slice_data(account.data(), data_slice_config)),
+                        base64::encode(slice_data(&account.data(), data_slice_config)),
                         UiAccountEncoding::Base64,
                     ),
                 }
             }
             UiAccountEncoding::JsonParsed => {
                 if let Ok(parsed_data) =
-                    parse_account_data(pubkey, account.owner(), account.data(), additional_data)
+                    parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
                 {
                     UiAccountData::Json(parsed_data)
                 } else {
@@ -181,7 +166,7 @@ impl Default for UiFeeCalculator {
     }
 }

-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct UiDataSliceConfig {
     pub offset: usize,
@@ -204,10 +189,8 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8
 #[cfg(test)]
 mod test {
-    use {
-        super::*,
-        solana_sdk::account::{Account, AccountSharedData},
-    };
+    use super::*;
+    use solana_sdk::account::{Account, AccountSharedData};

     #[test]
     fn test_slice_data() {
@@ -241,7 +224,7 @@ mod test {
     fn test_base64_zstd() {
         let encoded_account = UiAccount::encode(
             &Pubkey::default(),
-            &AccountSharedData::from(Account {
+            AccountSharedData::from(Account {
                 data: vec![0; 1024],
                 ..Account::default()
             }),
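The signature change in `UiAccount::encode` is easy to miss in the noise above: the 1.8 side borrows the account, the 1.6 side consumes it by value. A minimal caller-side sketch, assuming `solana-account-decoder` 1.8.x and `solana-sdk` as dependencies — not code from this diff:

```rust
// Sketch only: assumes solana-account-decoder 1.8.x and solana-sdk as deps.
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_sdk::{
    account::{Account, AccountSharedData},
    pubkey::Pubkey,
};

fn main() {
    let pubkey = Pubkey::default();
    let account = AccountSharedData::from(Account {
        data: vec![0; 64],
        ..Account::default()
    });

    // 1.8.x: encode<T: ReadableAccount> takes &T, so `account` stays usable.
    let ui = UiAccount::encode(&pubkey, &account, UiAccountEncoding::Base64, None, None);
    println!("{:?}", ui.data);

    // 1.6.x would instead move the account into the call:
    // let ui = UiAccount::encode(&pubkey, account, UiAccountEncoding::Base64, None, None);
}
```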
account-decoder/src/parse_account_data.rs

@@ -1,27 +1,25 @@
-use {
-    crate::{
-        parse_bpf_loader::parse_bpf_upgradeable_loader,
-        parse_config::parse_config,
-        parse_nonce::parse_nonce,
-        parse_stake::parse_stake,
-        parse_sysvar::parse_sysvar,
-        parse_token::{parse_token, spl_token_id},
-        parse_vote::parse_vote,
-    },
-    inflector::Inflector,
-    serde_json::Value,
-    solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
-    std::collections::HashMap,
-    thiserror::Error,
-};
+use crate::{
+    parse_bpf_loader::parse_bpf_upgradeable_loader,
+    parse_config::parse_config,
+    parse_nonce::parse_nonce,
+    parse_stake::parse_stake,
+    parse_sysvar::parse_sysvar,
+    parse_token::{parse_token, spl_token_id_v2_0},
+    parse_vote::parse_vote,
+};
+use inflector::Inflector;
+use serde_json::Value;
+use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
+use std::collections::HashMap;
+use thiserror::Error;

 lazy_static! {
     static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
     static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
-    static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
+    static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
     static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
     static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
-    static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
+    static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
     static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
     pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
         let mut m = HashMap::new();
@@ -114,14 +112,12 @@ pub fn parse_account_data(

 #[cfg(test)]
 mod test {
-    use {
-        super::*,
-        solana_sdk::nonce::{
-            state::{Data, Versions},
-            State,
-        },
-        solana_vote_program::vote_state::{VoteState, VoteStateVersions},
-    };
+    use super::*;
+    use solana_sdk::nonce::{
+        state::{Data, Versions},
+        State,
+    };
+    use solana_vote_program::vote_state::{VoteState, VoteStateVersions};

     #[test]
     fn test_parse_account_data() {
account-decoder/src/parse_bpf_loader.rs

@@ -1,11 +1,9 @@
-use {
-    crate::{
-        parse_account_data::{ParsableAccount, ParseAccountError},
-        UiAccountData, UiAccountEncoding,
-    },
-    bincode::{deserialize, serialized_size},
-    solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
-};
+use crate::{
+    parse_account_data::{ParsableAccount, ParseAccountError},
+    UiAccountData, UiAccountEncoding,
+};
+use bincode::{deserialize, serialized_size};
+use solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey};

 pub fn parse_bpf_upgradeable_loader(
     data: &[u8],
@@ -92,7 +90,9 @@ pub struct UiProgramData {

 #[cfg(test)]
 mod test {
-    use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
+    use super::*;
+    use bincode::serialize;
+    use solana_sdk::pubkey::Pubkey;

     #[test]
     fn test_parse_bpf_upgradeable_loader_accounts() {
account-decoder/src/parse_config.rs

@@ -1,19 +1,15 @@
-use {
-    crate::{
-        parse_account_data::{ParsableAccount, ParseAccountError},
-        validator_info,
-    },
-    bincode::deserialize,
-    serde_json::Value,
-    solana_config_program::{get_config_data, ConfigKeys},
-    solana_sdk::{
-        pubkey::Pubkey,
-        stake::config::{self as stake_config, Config as StakeConfig},
-    },
-};
+use crate::{
+    parse_account_data::{ParsableAccount, ParseAccountError},
+    validator_info,
+};
+use bincode::deserialize;
+use serde_json::Value;
+use solana_config_program::{get_config_data, ConfigKeys};
+use solana_sdk::pubkey::Pubkey;
+use solana_stake_program::config::Config as StakeConfig;

 pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
-    let parsed_account = if pubkey == &stake_config::id() {
+    let parsed_account = if pubkey == &solana_stake_program::config::id() {
         get_config_data(data)
             .ok()
             .and_then(|data| deserialize::<StakeConfig>(data).ok())
@@ -41,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
 where
     T: serde::de::DeserializeOwned,
 {
-    let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
+    let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
     let keys = keys
         .iter()
         .map(|key| UiConfigKey {
@@ -91,10 +87,11 @@ pub struct UiConfig<T> {

 #[cfg(test)]
 mod test {
-    use {
-        super::*, crate::validator_info::ValidatorInfo, serde_json::json,
-        solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
-    };
+    use super::*;
+    use crate::validator_info::ValidatorInfo;
+    use serde_json::json;
+    use solana_config_program::create_config_account;
+    use solana_sdk::account::ReadableAccount;

     #[test]
     fn test_parse_config() {
@@ -104,7 +101,11 @@ mod test {
         };
         let stake_config_account = create_config_account(vec![], &stake_config, 10);
         assert_eq!(
-            parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
+            parse_config(
+                &stake_config_account.data(),
+                &solana_stake_program::config::id()
+            )
+            .unwrap(),
             ConfigAccountType::StakeConfig(UiStakeConfig {
                 warmup_cooldown_rate: 0.25,
                 slash_penalty: 50,
@@ -124,7 +125,7 @@ mod test {
             10,
         );
         assert_eq!(
-            parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
+            parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
             ConfigAccountType::ValidatorInfo(UiConfig {
                 keys: vec![
                     UiConfigKey {
account-decoder/src/parse_nonce.rs

@@ -1,9 +1,7 @@
-use {
-    crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
-    solana_sdk::{
-        instruction::InstructionError,
-        nonce::{state::Versions, State},
-    },
-};
+use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
+use solana_sdk::{
+    instruction::InstructionError,
+    nonce::{state::Versions, State},
+};

 pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
@@ -11,13 +9,7 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
         .map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
     let nonce_state = nonce_state.convert_to_current();
     match nonce_state {
-        // This prevents parsing an allocated System-owned account with empty data of any non-zero
-        // length as `uninitialized` nonce. An empty account of the wrong length can never be
-        // initialized as a nonce account, and an empty account of the correct length may not be an
-        // uninitialized nonce account, since it can be assigned to another program.
-        State::Uninitialized => Err(ParseAccountError::from(
-            InstructionError::InvalidAccountData,
-        )),
+        State::Uninitialized => Ok(UiNonceState::Uninitialized),
         State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
             authority: data.authority.to_string(),
             blockhash: data.blockhash.to_string(),
@@ -44,16 +36,14 @@ pub struct UiNonceData {

 #[cfg(test)]
 mod test {
-    use {
-        super::*,
-        solana_sdk::{
-            hash::Hash,
-            nonce::{
-                state::{Data, Versions},
-                State,
-            },
-            pubkey::Pubkey,
-        },
-    };
+    use super::*;
+    use solana_sdk::{
+        hash::Hash,
+        nonce::{
+            state::{Data, Versions},
+            State,
+        },
+        pubkey::Pubkey,
+    };

     #[test]
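The removed comment block explains the intent of this hunk; here is a self-contained sketch of the behavioral difference, with the crate's types stubbed out (nothing below is from the repo):

```rust
// Stubs standing in for solana_sdk::nonce::State and the UI result type.
#[derive(Debug, PartialEq)]
enum State { Uninitialized, Initialized }

#[derive(Debug, PartialEq)]
enum UiNonceState { Uninitialized, Initialized }

// 1.8 behavior: an allocated, empty System account must not be reported as a
// nonce, so Uninitialized is treated as invalid account data.
fn parse_nonce_v1_8(state: State) -> Result<UiNonceState, &'static str> {
    match state {
        State::Uninitialized => Err("InvalidAccountData"),
        State::Initialized => Ok(UiNonceState::Initialized),
    }
}

// 1.6 behavior: Uninitialized is surfaced as its own successful variant.
fn parse_nonce_v1_6(state: State) -> Result<UiNonceState, &'static str> {
    match state {
        State::Uninitialized => Ok(UiNonceState::Uninitialized),
        State::Initialized => Ok(UiNonceState::Initialized),
    }
}

fn main() {
    assert!(parse_nonce_v1_8(State::Uninitialized).is_err());
    assert_eq!(
        parse_nonce_v1_6(State::Uninitialized),
        Ok(UiNonceState::Uninitialized)
    );
}
```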
account-decoder/src/parse_stake.rs

@@ -1,14 +1,10 @@
-use {
-    crate::{
-        parse_account_data::{ParsableAccount, ParseAccountError},
-        StringAmount,
-    },
-    bincode::deserialize,
-    solana_sdk::{
-        clock::{Epoch, UnixTimestamp},
-        stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
-    },
-};
+use crate::{
+    parse_account_data::{ParsableAccount, ParseAccountError},
+    StringAmount,
+};
+use bincode::deserialize;
+use solana_sdk::clock::{Epoch, UnixTimestamp};
+use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};

 pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
     let stake_state: StakeState = deserialize(data)
@@ -136,7 +132,8 @@ impl From<Delegation> for UiDelegation {

 #[cfg(test)]
 mod test {
-    use {super::*, bincode::serialize};
+    use super::*;
+    use bincode::serialize;

     #[test]
     fn test_parse_stake() {
account-decoder/src/parse_sysvar.rs

@@ -1,20 +1,18 @@
-use {
-    crate::{
-        parse_account_data::{ParsableAccount, ParseAccountError},
-        StringAmount, UiFeeCalculator,
-    },
-    bincode::deserialize,
-    bv::BitVec,
-    solana_sdk::{
-        clock::{Clock, Epoch, Slot, UnixTimestamp},
-        epoch_schedule::EpochSchedule,
-        pubkey::Pubkey,
-        rent::Rent,
-        slot_hashes::SlotHashes,
-        slot_history::{self, SlotHistory},
-        stake_history::{StakeHistory, StakeHistoryEntry},
-        sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
-    },
-};
+use crate::{
+    parse_account_data::{ParsableAccount, ParseAccountError},
+    StringAmount, UiFeeCalculator,
+};
+use bincode::deserialize;
+use bv::BitVec;
+use solana_sdk::{
+    clock::{Clock, Epoch, Slot, UnixTimestamp},
+    epoch_schedule::EpochSchedule,
+    pubkey::Pubkey,
+    rent::Rent,
+    slot_hashes::SlotHashes,
+    slot_history::{self, SlotHistory},
+    stake_history::{StakeHistory, StakeHistoryEntry},
+    sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
+};

 pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
@@ -214,17 +212,15 @@ pub struct UiStakeHistoryEntry {

 #[cfg(test)]
 mod test {
-    use {
-        super::*,
-        solana_sdk::{
-            account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
-            sysvar::recent_blockhashes::IterItem,
-        },
-    };
+    use super::*;
+    use solana_sdk::{
+        account::create_account, fee_calculator::FeeCalculator, hash::Hash,
+        sysvar::recent_blockhashes::IterItem,
+    };

     #[test]
     fn test_parse_sysvars() {
-        let clock_sysvar = create_account_for_test(&Clock::default());
+        let clock_sysvar = create_account(&Clock::default(), 1);
         assert_eq!(
             parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
             SysvarAccountType::Clock(UiClock::default()),
@@ -237,13 +233,13 @@ mod test {
             first_normal_epoch: 1,
             first_normal_slot: 12,
         };
-        let epoch_schedule_sysvar = create_account_for_test(&epoch_schedule);
+        let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
         assert_eq!(
             parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
             SysvarAccountType::EpochSchedule(epoch_schedule),
         );

-        let fees_sysvar = create_account_for_test(&Fees::default());
+        let fees_sysvar = create_account(&Fees::default(), 1);
         assert_eq!(
             parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
             SysvarAccountType::Fees(UiFees::default()),
@@ -256,7 +252,7 @@ mod test {
         let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
             .into_iter()
             .collect();
-        let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
+        let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
         assert_eq!(
             parse_sysvar(
                 &recent_blockhashes_sysvar.data,
@@ -274,13 +270,13 @@ mod test {
             exemption_threshold: 2.0,
             burn_percent: 5,
         };
-        let rent_sysvar = create_account_for_test(&rent);
+        let rent_sysvar = create_account(&rent, 1);
         assert_eq!(
             parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
             SysvarAccountType::Rent(rent.into()),
         );

-        let rewards_sysvar = create_account_for_test(&Rewards::default());
+        let rewards_sysvar = create_account(&Rewards::default(), 1);
         assert_eq!(
             parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
             SysvarAccountType::Rewards(UiRewards::default()),
@@ -288,7 +284,7 @@ mod test {

         let mut slot_hashes = SlotHashes::default();
         slot_hashes.add(1, hash);
-        let slot_hashes_sysvar = create_account_for_test(&slot_hashes);
+        let slot_hashes_sysvar = create_account(&slot_hashes, 1);
         assert_eq!(
             parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
             SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
@@ -299,7 +295,7 @@ mod test {

         let mut slot_history = SlotHistory::default();
         slot_history.add(42);
-        let slot_history_sysvar = create_account_for_test(&slot_history);
+        let slot_history_sysvar = create_account(&slot_history, 1);
         assert_eq!(
             parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
             SysvarAccountType::SlotHistory(UiSlotHistory {
@@ -315,7 +311,7 @@ mod test {
             deactivating: 3,
         };
         stake_history.add(1, stake_history_entry.clone());
-        let stake_history_sysvar = create_account_for_test(&stake_history);
+        let stake_history_sysvar = create_account(&stake_history, 1);
         assert_eq!(
             parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
             SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
account-decoder/src/parse_token.rs

@@ -1,38 +1,36 @@
-use {
-    crate::{
-        parse_account_data::{ParsableAccount, ParseAccountError},
-        StringAmount, StringDecimals,
-    },
-    solana_sdk::pubkey::Pubkey,
-    spl_token::{
-        solana_program::{
-            program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
-        },
-        state::{Account, AccountState, Mint, Multisig},
-    },
-    std::str::FromStr,
-};
+use crate::{
+    parse_account_data::{ParsableAccount, ParseAccountError},
+    StringAmount, StringDecimals,
+};
+use solana_sdk::pubkey::Pubkey;
+use spl_token_v2_0::{
+    solana_program::{
+        program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
+    },
+    state::{Account, AccountState, Mint, Multisig},
+};
+use std::str::FromStr;

-// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
+// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
 // solana_sdk::pubkey::Pubkey
-pub fn spl_token_id() -> Pubkey {
-    Pubkey::new_from_array(spl_token::id().to_bytes())
+pub fn spl_token_id_v2_0() -> Pubkey {
+    Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
 }

-// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
+// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
 // solana_sdk::pubkey::Pubkey
-pub fn spl_token_native_mint() -> Pubkey {
-    Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
+pub fn spl_token_v2_0_native_mint() -> Pubkey {
+    Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
 }

 // A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
-pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
-    SplTokenPubkey::new_from_array(pubkey.to_bytes())
+pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
+    SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
 }

 // A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
-pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
-    Pubkey::new_from_array(pubkey.to_bytes())
+pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
+    Pubkey::from_str(&pubkey.to_string()).unwrap()
 }

 pub fn parse_token(
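Both conversion styles appear verbatim above. This standalone sketch contrasts them using a stubbed 32-byte pubkey type (the hypothetical `OtherPubkey` stands in for spl-token's pubkey; it is not the real type):

```rust
// Stub standing in for spl_token's pubkey type; both wrappers hold 32 bytes.
#[derive(Debug, PartialEq)]
struct OtherPubkey([u8; 32]);

impl OtherPubkey {
    fn new_from_array(bytes: [u8; 32]) -> Self {
        Self(bytes)
    }
    fn to_bytes(&self) -> [u8; 32] {
        self.0
    }
}

fn main() {
    let original = OtherPubkey::new_from_array([7u8; 32]);
    // 1.8 style: copy the raw bytes across -- infallible and allocation-free.
    let converted = OtherPubkey::new_from_array(original.to_bytes());
    assert_eq!(original, converted);
    // 1.6 style instead round-trips through the base58 string form:
    //     Pubkey::from_str(&other.to_string()).unwrap()
    // which allocates a String and carries an unwrap on the re-parse.
}
```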
account-decoder/src/parse_vote.rs

@@ -1,11 +1,9 @@
-use {
-    crate::{parse_account_data::ParseAccountError, StringAmount},
-    solana_sdk::{
-        clock::{Epoch, Slot},
-        pubkey::Pubkey,
-    },
-    solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
-};
+use crate::{parse_account_data::ParseAccountError, StringAmount};
+use solana_sdk::{
+    clock::{Epoch, Slot},
+    pubkey::Pubkey,
+};
+use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

 pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
     let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
@@ -123,7 +121,8 @@ struct UiEpochCredits {

 #[cfg(test)]
 mod test {
-    use {super::*, solana_vote_program::vote_state::VoteStateVersions};
+    use super::*;
+    use solana_vote_program::vote_state::VoteStateVersions;

     #[test]
     fn test_parse_vote() {
accounts-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-accounts-bench"
-version = "1.8.17"
+version = "1.6.1"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
 [dependencies]
 log = "0.4.11"
 rayon = "1.5.0"
-solana-logger = { path = "../logger", version = "=1.8.17" }
-solana-runtime = { path = "../runtime", version = "=1.8.17" }
-solana-measure = { path = "../measure", version = "=1.8.17" }
-solana-sdk = { path = "../sdk", version = "=1.8.17" }
-solana-version = { path = "../version", version = "=1.8.17" }
+solana-logger = { path = "../logger", version = "=1.6.1" }
+solana-runtime = { path = "../runtime", version = "=1.6.1" }
+solana-measure = { path = "../measure", version = "=1.6.1" }
+solana-sdk = { path = "../sdk", version = "=1.6.1" }
+solana-version = { path = "../version", version = "=1.6.1" }
 rand = "0.7.0"
 clap = "2.33.1"
 crossbeam-channel = "0.4"
accounts-bench/src/main.rs

@@ -1,19 +1,15 @@
 #![allow(clippy::integer_arithmetic)]
 #[macro_use]
 extern crate log;
-use {
-    clap::{crate_description, crate_name, value_t, App, Arg},
-    rayon::prelude::*,
-    solana_measure::measure::Measure,
-    solana_runtime::{
-        accounts::{create_test_accounts, update_accounts_bench, Accounts},
-        accounts_db::AccountShrinkThreshold,
-        accounts_index::AccountSecondaryIndexes,
-        ancestors::Ancestors,
-    },
-    solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
-    std::{env, fs, path::PathBuf},
-};
+use clap::{crate_description, crate_name, value_t, App, Arg};
+use rayon::prelude::*;
+use solana_measure::measure::Measure;
+use solana_runtime::{
+    accounts::{create_test_accounts, update_accounts_bench, Accounts},
+    accounts_index::Ancestors,
+};
+use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
+use std::{collections::HashSet, env, fs, path::PathBuf};

 fn main() {
     solana_logger::setup();
@@ -62,14 +58,8 @@ fn main() {
     if fs::remove_dir_all(path.clone()).is_err() {
         println!("Warning: Couldn't remove {:?}", path);
     }
-    let accounts = Accounts::new_with_config(
-        vec![path],
-        &ClusterType::Testnet,
-        AccountSecondaryIndexes::default(),
-        false,
-        AccountShrinkThreshold::default(),
-        None,
-    );
+    let accounts =
+        Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false);
     println!("Creating {} accounts", num_accounts);
     let mut create_time = Measure::start("create accounts");
     let pubkeys: Vec<_> = (0..num_slots)
@@ -93,19 +83,17 @@ fn main() {
         num_slots,
         create_time
     );
-    let mut ancestors = Vec::with_capacity(num_slots);
-    ancestors.push(0);
+    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
     for i in 1..num_slots {
-        ancestors.push(i as u64);
+        ancestors.insert(i as u64, i - 1);
         accounts.add_root(i as u64);
     }
-    let ancestors = Ancestors::from(ancestors);
     let mut elapsed = vec![0; iterations];
     let mut elapsed_store = vec![0; iterations];
     for x in 0..iterations {
         if clean {
             let mut time = Measure::start("clean");
-            accounts.accounts_db.clean_accounts(None, false);
+            accounts.accounts_db.clean_accounts(None);
             time.stop();
             println!("{}", time);
             for slot in 0..num_slots {
@@ -124,8 +112,6 @@ fn main() {
         solana_sdk::clock::Slot::default(),
         &ancestors,
         None,
-        false,
-        None,
     );
     time_store.stop();
     if results != results_store {
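The `Ancestors` rework is the interesting hunk in this file: on the 1.6 side `Ancestors` is a plain slot map built incrementally, while the 1.8 side collects slot numbers into a `Vec` and converts once at the end. A runnable sketch with `Ancestors` simplified to the 1.6-style map (an assumption; not the real runtime type):

```rust
// Sketch only: Ancestors modeled as the 1.6-era map of slot -> index.
use std::collections::HashMap;

type Slot = u64;
type Ancestors = HashMap<Slot, usize>;

fn main() {
    let num_slots: usize = 4;

    // 1.6-style construction, matching the '+' lines above.
    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
    for i in 1..num_slots {
        ancestors.insert(i as u64, i - 1);
    }
    assert_eq!(ancestors.len(), num_slots);

    // The 1.8 side gathers plain slot numbers instead and converts once:
    //     let ancestors = Ancestors::from(vec![0u64, 1, 2, 3]);
}
```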
accounts-cluster-bench/.gitignore (vendored, 1 changed line)

@@ -1 +0,0 @@
-/farf/
accounts-cluster-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-accounts-cluster-bench"
-version = "1.8.17"
+version = "1.6.1"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,24 +13,22 @@ clap = "2.33.1"
 log = "0.4.11"
 rand = "0.7.0"
 rayon = "1.4.1"
-solana-account-decoder = { path = "../account-decoder", version = "=1.8.17" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
-solana-client = { path = "../client", version = "=1.8.17" }
-solana-core = { path = "../core", version = "=1.8.17" }
-solana-faucet = { path = "../faucet", version = "=1.8.17" }
-solana-gossip = { path = "../gossip", version = "=1.8.17" }
-solana-logger = { path = "../logger", version = "=1.8.17" }
-solana-measure = { path = "../measure", version = "=1.8.17" }
-solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
-solana-runtime = { path = "../runtime", version = "=1.8.17" }
-solana-sdk = { path = "../sdk", version = "=1.8.17" }
-solana-streamer = { path = "../streamer", version = "=1.8.17" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.8.17" }
-solana-version = { path = "../version", version = "=1.8.17" }
-spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
+solana-account-decoder = { path = "../account-decoder", version = "=1.6.1" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.6.1" }
+solana-client = { path = "../client", version = "=1.6.1" }
+solana-core = { path = "../core", version = "=1.6.1" }
+solana-measure = { path = "../measure", version = "=1.6.1" }
+solana-logger = { path = "../logger", version = "=1.6.1" }
+solana-net-utils = { path = "../net-utils", version = "=1.6.1" }
+solana-faucet = { path = "../faucet", version = "=1.6.1" }
+solana-runtime = { path = "../runtime", version = "=1.6.1" }
+solana-sdk = { path = "../sdk", version = "=1.6.1" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.6.1" }
+solana-version = { path = "../version", version = "=1.6.1" }
+spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.6.1" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,43 +1,37 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
|
||||
log::*,
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::prelude::*,
|
||||
solana_account_decoder::parse_token::spl_token_pubkey,
|
||||
solana_clap_utils::input_parsers::pubkey_of,
|
||||
solana_client::rpc_client::RpcClient,
|
||||
solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
|
||||
solana_gossip::gossip_service::discover,
|
||||
solana_measure::measure::Measure,
|
||||
solana_runtime::inline_spl_token,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
rpc_port::DEFAULT_RPC_PORT,
|
||||
signature::{read_keypair_file, Keypair, Signature, Signer},
|
||||
system_instruction, system_program,
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
solana_transaction_status::parse_token::spl_token_instruction,
|
||||
std::{
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
},
|
||||
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg};
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
|
||||
use solana_clap_utils::input_parsers::pubkey_of;
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::gossip_service::discover;
|
||||
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::inline_spl_token_v2_0;
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
rpc_port::DEFAULT_RPC_PORT,
|
||||
signature::{read_keypair_file, Keypair, Signature, Signer},
|
||||
system_instruction, system_program,
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
|
||||
use spl_token_v2_0::solana_program::pubkey::Pubkey as SplPubkey;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
// Create and close messages both require 2 signatures; if transaction construction changes, update
|
||||
// this magic number
|
||||
const NUM_SIGNATURES: u64 = 2;
|
||||
|
||||
pub fn airdrop_lamports(
|
||||
client: &RpcClient,
|
||||
@@ -58,7 +52,7 @@ pub fn airdrop_lamports(
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
Ok(transaction) => {
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
@@ -192,13 +186,14 @@ impl TransactionExecutor {
|
||||
let mut start = Measure::start("sig_status");
|
||||
let statuses: Vec<_> = sigs_w
|
||||
.chunks(200)
|
||||
.flat_map(|sig_chunk| {
|
||||
.map(|sig_chunk| {
|
||||
let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
|
||||
client
|
||||
.get_signature_statuses(&only_sigs)
|
||||
.expect("status fail")
|
||||
.value
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
let mut num_cleared = 0;
|
||||
let start_len = sigs_w.len();
|
||||
@@ -256,117 +251,62 @@ impl TransactionExecutor {
|
||||
}
|
||||
}
|
||||
|
||||
struct SeedTracker {
|
||||
max_created: Arc<AtomicU64>,
|
||||
max_closed: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
fn make_create_message(
|
||||
fn make_message(
|
||||
keypair: &Keypair,
|
||||
base_keypair: &Keypair,
|
||||
max_created_seed: Arc<AtomicU64>,
|
||||
num_instructions: usize,
|
||||
balance: u64,
|
||||
maybe_space: Option<u64>,
|
||||
mint: Option<Pubkey>,
|
||||
) -> Message {
|
||||
) -> (Message, Vec<Keypair>) {
|
||||
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
|
||||
|
||||
let instructions: Vec<_> = (0..num_instructions)
|
||||
let (instructions, new_keypairs): (Vec<_>, Vec<_>) = (0..num_instructions)
|
||||
.into_iter()
|
||||
.map(|_| {
|
||||
let new_keypair = Keypair::new();
|
||||
|
||||
let program_id = if mint.is_some() {
|
||||
inline_spl_token::id()
|
||||
inline_spl_token_v2_0::id()
|
||||
} else {
|
||||
system_program::id()
|
||||
};
|
||||
let seed = max_created_seed.fetch_add(1, Ordering::Relaxed).to_string();
|
||||
let to_pubkey =
|
||||
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
||||
let mut instructions = vec![system_instruction::create_account_with_seed(
|
||||
let mut instructions = vec![system_instruction::create_account(
|
||||
&keypair.pubkey(),
|
||||
&to_pubkey,
|
||||
&base_keypair.pubkey(),
|
||||
&seed,
|
||||
&new_keypair.pubkey(),
|
||||
balance,
|
||||
space,
|
||||
&program_id,
|
||||
)];
|
||||
if let Some(mint_address) = mint {
|
||||
instructions.push(spl_token_instruction(
|
||||
spl_token::instruction::initialize_account(
|
||||
&spl_token::id(),
|
||||
&spl_token_pubkey(&to_pubkey),
|
||||
&spl_token_pubkey(&mint_address),
|
||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
||||
instructions.push(spl_token_v2_0_instruction(
|
||||
spl_token_v2_0::instruction::initialize_account(
|
||||
&spl_token_v2_0::id(),
|
||||
&spl_token_v2_0_pubkey(&new_keypair.pubkey()),
|
||||
&spl_token_v2_0_pubkey(&mint_address),
|
||||
&SplPubkey::new_unique(),
|
||||
)
|
||||
.unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
instructions
|
||||
(instructions, new_keypair)
|
||||
})
|
||||
.collect();
|
||||
.unzip();
|
||||
let instructions: Vec<_> = instructions.into_iter().flatten().collect();
|
||||
|
||||
Message::new(&instructions, Some(&keypair.pubkey()))
|
||||
(
|
||||
Message::new(&instructions, Some(&keypair.pubkey())),
|
||||
new_keypairs,
|
||||
)
|
||||
}
|
||||
|
||||
fn make_close_message(
|
||||
keypair: &Keypair,
|
||||
base_keypair: &Keypair,
|
||||
max_closed_seed: Arc<AtomicU64>,
|
||||
num_instructions: usize,
|
||||
balance: u64,
|
||||
spl_token: bool,
|
||||
) -> Message {
|
||||
let instructions: Vec<_> = (0..num_instructions)
|
||||
.into_iter()
|
||||
.map(|_| {
|
||||
let program_id = if spl_token {
|
||||
inline_spl_token::id()
|
||||
} else {
|
||||
system_program::id()
|
||||
};
|
||||
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
|
||||
let address =
|
||||
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
||||
if spl_token {
|
||||
spl_token_instruction(
|
||||
spl_token::instruction::close_account(
|
||||
&spl_token::id(),
|
||||
&spl_token_pubkey(&address),
|
||||
&spl_token_pubkey(&keypair.pubkey()),
|
||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
||||
&[],
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
system_instruction::transfer_with_seed(
|
||||
&address,
|
||||
&base_keypair.pubkey(),
|
||||
seed,
|
||||
&program_id,
|
||||
&keypair.pubkey(),
|
||||
balance,
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Message::new(&instructions, Some(&keypair.pubkey()))
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn run_accounts_bench(
|
||||
entrypoint_addr: SocketAddr,
|
||||
faucet_addr: SocketAddr,
|
||||
payer_keypairs: &[&Keypair],
|
||||
keypair: &Keypair,
|
||||
iterations: usize,
|
||||
maybe_space: Option<u64>,
|
||||
batch_size: usize,
|
||||
close_nth_batch: u64,
|
||||
maybe_lamports: Option<u64>,
|
||||
num_instructions: usize,
|
||||
mint: Option<Pubkey>,
|
||||
@@ -375,19 +315,15 @@ fn run_accounts_bench(
|
||||
let client =
|
||||
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
|
||||
|
||||
info!("Targeting {}", entrypoint_addr);
|
||||
info!("Targetting {}", entrypoint_addr);
|
||||
|
||||
let mut last_blockhash = Instant::now();
|
||||
let mut last_log = Instant::now();
|
||||
let mut count = 0;
|
||||
let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash");
|
||||
let mut tx_sent_count = 0;
|
||||
let mut total_accounts_created = 0;
|
||||
let mut total_accounts_closed = 0;
|
||||
let mut balances: Vec<_> = payer_keypairs
|
||||
.iter()
|
||||
.map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0))
|
||||
.collect();
|
||||
let mut total_account_count = 0;
|
||||
let mut balance = client.get_balance(&keypair.pubkey()).unwrap_or(0);
|
||||
let mut last_balance = Instant::now();
|
||||
|
||||
let default_max_lamports = 1000;
|
||||
@@ -398,13 +334,7 @@ fn run_accounts_bench(
|
||||
.expect("min balance")
|
||||
});
|
||||
|
||||
let base_keypair = Keypair::new();
|
||||
let seed_tracker = SeedTracker {
|
||||
max_created: Arc::new(AtomicU64::default()),
|
||||
max_closed: Arc::new(AtomicU64::default()),
|
||||
};
|
||||
|
||||
info!("Starting balance(s): {:?}", balances);
|
||||
info!("Starting balance: {}", balance);
|
||||
|
||||
let executor = TransactionExecutor::new(entrypoint_addr);
|
||||
|
||||
@@ -414,101 +344,53 @@ fn run_accounts_bench(
|
||||
last_blockhash = Instant::now();
|
||||
}
|
||||
|
||||
let fee = recent_blockhash
|
||||
.1
|
||||
.lamports_per_signature
|
||||
.saturating_mul(NUM_SIGNATURES);
|
||||
let (message, _keypairs) =
|
||||
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
|
||||
let fee = recent_blockhash.1.calculate_fee(&message);
|
||||
let lamports = min_balance + fee;
|
||||
|
||||
for (i, balance) in balances.iter_mut().enumerate() {
|
||||
if *balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
||||
if let Ok(b) = client.get_balance(&payer_keypairs[i].pubkey()) {
|
||||
*balance = b;
|
||||
}
|
||||
last_balance = Instant::now();
|
||||
if *balance < lamports * 2 {
|
||||
info!(
|
||||
"Balance {} is less than needed: {}, doing aidrop...",
|
||||
balance, lamports
|
||||
);
|
||||
if !airdrop_lamports(
|
||||
&client,
|
||||
&faucet_addr,
|
||||
payer_keypairs[i],
|
||||
lamports * 100_000,
|
||||
) {
|
||||
warn!("failed airdrop, exiting");
|
||||
return;
|
||||
}
|
||||
if balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
||||
if let Ok(b) = client.get_balance(&keypair.pubkey()) {
|
||||
balance = b;
|
||||
}
|
||||
last_balance = Instant::now();
|
||||
if balance < lamports {
|
||||
info!(
|
||||
"Balance {} is less than needed: {}, doing aidrop...",
|
||||
balance, lamports
|
||||
);
|
||||
if !airdrop_lamports(&client, &faucet_addr, keypair, lamports * 100_000) {
|
||||
warn!("failed airdrop, exiting");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create accounts
|
||||
let sigs_len = executor.num_outstanding();
|
||||
if sigs_len < batch_size {
|
||||
let num_to_create = batch_size - sigs_len;
|
||||
if num_to_create >= payer_keypairs.len() {
|
||||
info!("creating {} new", num_to_create);
|
||||
let chunk_size = num_to_create / payer_keypairs.len();
|
||||
if chunk_size > 0 {
|
||||
for (i, keypair) in payer_keypairs.iter().enumerate() {
|
||||
let txs: Vec<_> = (0..chunk_size)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let message = make_create_message(
|
||||
keypair,
|
||||
&base_keypair,
|
||||
seed_tracker.max_created.clone(),
|
||||
num_instructions,
|
||||
min_balance,
|
||||
maybe_space,
|
||||
mint,
|
||||
);
|
||||
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
||||
Transaction::new(&signers, message, recent_blockhash.0)
|
||||
})
|
||||
.collect();
|
||||
balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
|
||||
info!("txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_accounts_created += num_instructions * new_ids.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if close_nth_batch > 0 {
|
||||
let num_batches_to_close =
|
||||
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
|
||||
let expected_closed = num_batches_to_close * batch_size as u64;
|
||||
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
|
||||
// Close every account we've created with seed between max_closed_seed..expected_closed
|
||||
if max_closed_seed < expected_closed {
|
||||
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let message = make_close_message(
|
||||
payer_keypairs[0],
|
||||
&base_keypair,
|
||||
seed_tracker.max_closed.clone(),
|
||||
1,
|
||||
min_balance,
|
||||
mint.is_some(),
|
||||
);
|
||||
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
|
||||
Transaction::new(&signers, message, recent_blockhash.0)
|
||||
})
|
||||
info!("creating {} new", num_to_create);
|
||||
let (txs, _new_keypairs): (Vec<_>, Vec<_>) = (0..num_to_create)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let (message, new_keypairs) =
|
||||
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
|
||||
let signers: Vec<&Keypair> = new_keypairs
|
||||
.iter()
|
||||
.chain(std::iter::once(keypair))
|
||||
.collect();
|
||||
balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
|
||||
info!("close txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("close ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_accounts_closed += new_ids.len() as u64;
|
||||
}
|
||||
}
|
||||
(
|
||||
Transaction::new(&signers, message, recent_blockhash.0),
|
||||
new_keypairs,
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
balance = balance.saturating_sub(lamports * txs.len() as u64);
|
||||
info!("txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_account_count += num_instructions * new_ids.len();
|
||||
} else {
|
||||
let _ = executor.drain_cleared();
|
||||
}
|
||||
@@ -516,8 +398,8 @@ fn run_accounts_bench(
|
||||
count += 1;
|
||||
if last_log.elapsed().as_millis() > 3000 {
|
||||
info!(
|
||||
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
|
||||
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
|
||||
"total_accounts: {} tx_sent_count: {} loop_count: {} balance: {}",
|
||||
total_account_count, tx_sent_count, count, balance
|
||||
);
|
||||
last_log = Instant::now();
|
||||
}
|
||||
@@ -568,33 +450,19 @@ fn main() {
Arg::with_name("identity")
.long("identity")
.takes_value(true)
.multiple(true)
.value_name("FILE")
.help("keypair file"),
)
.arg(
Arg::with_name("batch_size")
.long("batch-size")
.long("batch_size")
.takes_value(true)
.value_name("BYTES")
.help("Number of transactions to send per batch"),
)
.arg(
Arg::with_name("close_nth_batch")
.long("close-frequency")
.takes_value(true)
.value_name("BYTES")
.help(
"Every `n` batches, create a batch of close transactions for
the earliest remaining batch of accounts created.
Note: Should be > 1 to avoid situations where the close \
transactions will be submitted before the corresponding \
create transactions have been confirmed",
),
.help("Size of accounts to create"),
)
.arg(
Arg::with_name("num_instructions")
.long("num-instructions")
.long("num_instructions")
.takes_value(true)
.value_name("NUM")
.help("Number of accounts to create on each transaction"),
@@ -640,7 +508,6 @@ fn main() {
let space = value_t!(matches, "space", u64).ok();
let lamports = value_t!(matches, "lamports", u64).ok();
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
@@ -650,30 +517,20 @@ fn main() {

let mint = pubkey_of(&matches, "mint");

let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String)
.iter()
.map(|keypair_string| {
read_keypair_file(keypair_string)
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string))
})
.collect();
let mut payer_keypair_refs: Vec<&Keypair> = vec![];
for keypair in payer_keypairs.iter() {
payer_keypair_refs.push(keypair);
}
let keypair =
read_keypair_file(&value_t_or_exit!(matches, "identity", String)).expect("bad keypair");

let rpc_addr = if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let (gossip_nodes, _validators) = discover(
None, // keypair
None,
Some(&entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
SocketAddrSpace::Unspecified,
None,
Some(60),
None,
Some(&entrypoint_addr),
None,
0,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
@@ -690,11 +547,10 @@ fn main() {
run_accounts_bench(
rpc_addr,
faucet_addr,
&payer_keypair_refs,
&keypair,
iterations,
space,
batch_size,
close_nth_batch,
lamports,
num_instructions,
mint,
@@ -703,20 +559,18 @@ fn main() {

#[cfg(test)]
pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::poh_config::PohConfig,
use super::*;
use solana_core::validator::ValidatorConfig;
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
};
use solana_sdk::poh_config::PohConfig;

#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default_for_test();
let validator_config = ValidatorConfig::default();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
@@ -727,22 +581,20 @@ pub mod test {
};

let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster = LocalCluster::new(&mut config);
let iterations = 10;
let maybe_space = None;
let batch_size = 100;
let close_nth_batch = 100;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
run_accounts_bench(
cluster.entry_point_info.rpc,
faucet_addr,
&[&cluster.funding_keypair],
&cluster.funding_keypair,
iterations,
maybe_space,
batch_size,
close_nth_batch,
maybe_lamports,
num_instructions,
None,
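For orientation, an invocation exercising the flags registered above might look like the following; the binary name and values are illustrative, and the hyphen-vs-underscore spelling of each flag depends on which side of this diff is built:

accounts-cluster-bench --identity payer.json --batch_size 4 --close-frequency 2 --num_instructions 2 --iterations 10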
@@ -1,17 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"

[dependencies]
log = "0.4.11"
thiserror = "1.0.29"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,20 +0,0 @@
<p align="center">
  <a href="https://solana.com">
    <img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
  </a>
</p>

# Solana AccountsDb Plugin Interface

This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see `accountsdb_plugin_interface.rs` for the interface definition.

The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.

The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the account data into an
external PostgreSQL database.

More information about Solana is available in the [Solana documentation](https://docs.solana.com/).

Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
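As a rough sketch of what this README describes (hedged: the exact trait appears in `accountsdb_plugin_interface.rs` below, and the `MyPlugin` name is illustrative), a plugin crate implements the trait and exports `_create_plugin()` along these lines:

use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaAccountInfoVersions, Result, SlotStatus,
};

#[derive(Debug)]
struct MyPlugin; // illustrative plugin type

impl AccountsDbPlugin for MyPlugin {
    fn name(&self) -> &'static str {
        "my-plugin"
    }

    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        _slot: u64,
        _is_startup: bool,
    ) -> Result<()> {
        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        Ok(())
    }

    fn update_slot_status(
        &mut self,
        _slot: u64,
        _parent: Option<u64>,
        _status: SlotStatus,
    ) -> Result<()> {
        Ok(())
    }
}

// The validator looks up this symbol in the cdylib; Box::into_raw hands
// ownership of the plugin to the loader, which reclaims it via Box::from_raw.
#[no_mangle]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    Box::into_raw(Box::new(MyPlugin))
}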
@@ -1,143 +0,0 @@
/// The interface for AccountsDb plugins. A plugin must implement
/// the AccountsDbPlugin trait to work with the runtime.
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
    std::{any::Any, error, io},
    thiserror::Error,
};

impl Eq for ReplicaAccountInfo<'_> {}

#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
    /// The Pubkey for the account
    pub pubkey: &'a [u8],

    /// The lamports for the account
    pub lamports: u64,

    /// The Pubkey of the owner program account
    pub owner: &'a [u8],

    /// This account's data contains a loaded program (and is now read-only)
    pub executable: bool,

    /// The epoch at which this account will next owe rent
    pub rent_epoch: u64,

    /// The data held in this account.
    pub data: &'a [u8],

    /// A global monotonically increasing atomic number, which can be used
    /// to tell the order of the account update. For example, when an
    /// account is updated in the same slot multiple times, the update
    /// with higher write_version should supersede the one with lower
    /// write_version.
    pub write_version: u64,
}

/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
/// there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
    V0_0_1(&'a ReplicaAccountInfo<'a>),
}

/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
    /// Error opening the configuration file; for example, when the file
    /// is not found or when the validator process has no permission to read it.
    #[error("Error opening config file. Error detail: ({0}).")]
    ConfigFileOpenError(#[from] io::Error),

    /// Error in reading the content of the config file or the content
    /// is not in the expected format.
    #[error("Error reading config file. Error message: ({msg})")]
    ConfigFileReadError { msg: String },

    /// Error when updating the account.
    #[error("Error updating account. Error message: ({msg})")]
    AccountsUpdateError { msg: String },

    /// Error when updating the slot status
    #[error("Error updating slot status. Error message: ({msg})")]
    SlotStatusUpdateError { msg: String },

    /// Any custom error defined by the plugin.
    #[error("Plugin-defined custom error. Error message: ({0})")]
    Custom(Box<dyn error::Error + Send + Sync>),
}

/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
    /// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
    /// not derived from a confirmed or finalized block, but if multiple forks are present, is from
    /// the fork the validator believes is most likely to finalize.
    Processed,

    /// The highest slot having reached max vote lockout.
    Rooted,

    /// The highest slot that has been voted on by a supermajority of the cluster, i.e. is confirmed.
    Confirmed,
}

impl SlotStatus {
    pub fn as_str(&self) -> &'static str {
        match self {
            SlotStatus::Confirmed => "confirmed",
            SlotStatus::Processed => "processed",
            SlotStatus::Rooted => "rooted",
        }
    }
}

pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;

/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
    fn name(&self) -> &'static str;

    /// The callback called when a plugin is loaded by the system,
    /// used for doing whatever initialization is required by the plugin.
    /// The _config_file contains the name of the config file.
    /// The config must be in JSON format and
    /// include a field "libpath" indicating the full path
    /// name of the shared library implementing this interface.
    fn on_load(&mut self, _config_file: &str) -> Result<()> {
        Ok(())
    }

    /// The callback called right before a plugin is unloaded by the system.
    /// Used for doing cleanup before unload.
    fn on_unload(&mut self) {}

    /// Called when an account is updated at a slot.
    /// When `is_startup` is true, it indicates the account is loaded from
    /// snapshots when the validator starts up. When `is_startup` is false,
    /// the account is updated during transaction processing.
    fn update_account(
        &mut self,
        account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()>;

    /// Called when all accounts have been notified during startup.
    fn notify_end_of_startup(&mut self) -> Result<()>;

    /// Called when a slot status is updated
    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<()>;
}
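`ReplicaAccountInfoVersions` exists purely to version the payload, so consuming code is expected to match on it. A hedged sketch (the `handle_update` helper is illustrative; `log` and `bs58` are assumed available, as they are in the sibling crates):

use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    ReplicaAccountInfoVersions, Result,
};

// A future V0_0_2 variant would force every consumer through this match.
fn handle_update(account: ReplicaAccountInfoVersions, slot: u64) -> Result<()> {
    match account {
        ReplicaAccountInfoVersions::V0_0_1(info) => {
            log::trace!(
                "slot {} account {} write_version {}",
                slot,
                bs58::encode(info.pubkey).into_string(),
                info.write_version
            );
        }
    }
    Ok(())
}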
@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;
@@ -1,30 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.4"
libloading = "0.7.0"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-rpc = { path = "../rpc", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,227 +0,0 @@
/// Module responsible for notifying plugins of account updates
use {
    crate::accountsdb_plugin_manager::AccountsDbPluginManager,
    log::*,
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
    },
    solana_measure::measure::Measure,
    solana_metrics::*,
    solana_runtime::{
        accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
        append_vec::{StoredAccountMeta, StoredMeta},
    },
    solana_sdk::{
        account::{AccountSharedData, ReadableAccount},
        clock::Slot,
    },
    std::sync::{Arc, RwLock},
};
#[derive(Debug)]
pub(crate) struct AccountsUpdateNotifierImpl {
    plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}

impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
    fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
        if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
            self.notify_plugins_of_account_update(account_info, slot, false);
        }
    }

    fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
        let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
        let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");

        let account = self.accountinfo_from_stored_account_meta(account);
        measure_copy.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-copy-stored-account-info-us",
            measure_copy.as_us() as usize,
            100000,
            100000
        );

        if let Some(account_info) = account {
            self.notify_plugins_of_account_update(account_info, slot, true);
        }
        measure_all.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-notify-account-restore-all-us",
            measure_all.as_us() as usize,
            100000,
            100000
        );
    }

    fn notify_end_of_restore_from_snapshot(&self) {
        let mut plugin_manager = self.plugin_manager.write().unwrap();
        if plugin_manager.plugins.is_empty() {
            return;
        }

        for plugin in plugin_manager.plugins.iter_mut() {
            let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
            match plugin.notify_end_of_startup() {
                Err(err) => {
                    error!(
                        "Failed to notify the end of restore from snapshot, error: {} to plugin {}",
                        err,
                        plugin.name()
                    )
                }
                Ok(_) => {
                    trace!(
                        "Successfully notified the end of restore from snapshot to plugin {}",
                        plugin.name()
                    );
                }
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-end-of-restore-from-snapshot",
                measure.as_us() as usize
            );
        }
    }

    fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
        self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
    }

    fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
        self.notify_slot_status(slot, parent, SlotStatus::Processed);
    }

    fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
        self.notify_slot_status(slot, parent, SlotStatus::Rooted);
    }
}

impl AccountsUpdateNotifierImpl {
    pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
        AccountsUpdateNotifierImpl { plugin_manager }
    }

    fn accountinfo_from_shared_account_data<'a>(
        &self,
        meta: &'a StoredMeta,
        account: &'a AccountSharedData,
    ) -> Option<ReplicaAccountInfo<'a>> {
        Some(ReplicaAccountInfo {
            pubkey: meta.pubkey.as_ref(),
            lamports: account.lamports(),
            owner: account.owner().as_ref(),
            executable: account.executable(),
            rent_epoch: account.rent_epoch(),
            data: account.data(),
            write_version: meta.write_version,
        })
    }

    fn accountinfo_from_stored_account_meta<'a>(
        &self,
        stored_account_meta: &'a StoredAccountMeta,
    ) -> Option<ReplicaAccountInfo<'a>> {
        Some(ReplicaAccountInfo {
            pubkey: stored_account_meta.meta.pubkey.as_ref(),
            lamports: stored_account_meta.account_meta.lamports,
            owner: stored_account_meta.account_meta.owner.as_ref(),
            executable: stored_account_meta.account_meta.executable,
            rent_epoch: stored_account_meta.account_meta.rent_epoch,
            data: stored_account_meta.data,
            write_version: stored_account_meta.meta.write_version,
        })
    }

    fn notify_plugins_of_account_update(
        &self,
        account: ReplicaAccountInfo,
        slot: Slot,
        is_startup: bool,
    ) {
        let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
        let mut plugin_manager = self.plugin_manager.write().unwrap();

        if plugin_manager.plugins.is_empty() {
            return;
        }
        for plugin in plugin_manager.plugins.iter_mut() {
            let mut measure = Measure::start("accountsdb-plugin-update-account");
            match plugin.update_account(
                ReplicaAccountInfoVersions::V0_0_1(&account),
                slot,
                is_startup,
            ) {
                Err(err) => {
                    error!(
                        "Failed to update account {} at slot {}, error: {} to plugin {}",
                        bs58::encode(account.pubkey).into_string(),
                        slot,
                        err,
                        plugin.name()
                    )
                }
                Ok(_) => {
                    trace!(
                        "Successfully updated account {} at slot {} to plugin {}",
                        bs58::encode(account.pubkey).into_string(),
                        slot,
                        plugin.name()
                    );
                }
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-update-account-us",
                measure.as_us() as usize,
                100000,
                100000
            );
        }
        measure2.stop();
        inc_new_counter_debug!(
            "accountsdb-plugin-notify_plugins_of_account_update-us",
            measure2.as_us() as usize,
            100000,
            100000
        );
    }

    pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
        let mut plugin_manager = self.plugin_manager.write().unwrap();
        if plugin_manager.plugins.is_empty() {
            return;
        }

        for plugin in plugin_manager.plugins.iter_mut() {
            let mut measure = Measure::start("accountsdb-plugin-update-slot");
            match plugin.update_slot_status(slot, parent, slot_status.clone()) {
                Err(err) => {
                    error!(
                        "Failed to update slot status at slot {}, error: {} to plugin {}",
                        slot,
                        err,
                        plugin.name()
                    )
                }
                Ok(_) => {
                    trace!(
                        "Successfully updated slot status at slot {} to plugin {}",
                        slot,
                        plugin.name()
                    );
                }
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-update-slot-us",
                measure.as_us() as usize,
                1000,
                1000
            );
        }
    }
}
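The `write_version` this notifier copies into every `ReplicaAccountInfo` is what lets a downstream consumer order updates. A hedged sketch of last-write-wins bookkeeping on the receiving side (the in-memory cache is illustrative, not part of this diff):

use std::collections::HashMap;

// Keep only the freshest update per pubkey: a higher write_version
// supersedes a lower one, per the interface documentation.
fn apply_update(
    cache: &mut HashMap<Vec<u8>, (u64, u64)>, // pubkey -> (write_version, lamports)
    pubkey: &[u8],
    write_version: u64,
    lamports: u64,
) {
    match cache.get(pubkey) {
        Some((seen, _)) if *seen >= write_version => {} // stale update; drop it
        _ => {
            cache.insert(pubkey.to_vec(), (write_version, lamports));
        }
    }
}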
@@ -1,55 +0,0 @@
/// Managing the AccountsDb plugins
use {
    libloading::{Library, Symbol},
    log::*,
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
    std::error::Error,
};

#[derive(Default, Debug)]
pub struct AccountsDbPluginManager {
    pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
    libs: Vec<Library>,
}

impl AccountsDbPluginManager {
    pub fn new() -> Self {
        AccountsDbPluginManager {
            plugins: Vec::default(),
            libs: Vec::default(),
        }
    }

    /// # Safety
    ///
    /// This function loads the dynamically linked library specified in the path. The library
    /// must do necessary initializations.
    pub unsafe fn load_plugin(
        &mut self,
        libpath: &str,
        config_file: &str,
    ) -> Result<(), Box<dyn Error>> {
        type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
        let lib = Library::new(libpath)?;
        let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
        let plugin_raw = constructor();
        let mut plugin = Box::from_raw(plugin_raw);
        plugin.on_load(config_file)?;
        self.plugins.push(plugin);
        self.libs.push(lib);
        Ok(())
    }

    /// Unload all plugins and loaded plugin libraries, making sure to fire
    /// their `on_plugin_unload()` methods so they can do any necessary cleanup.
    pub fn unload(&mut self) {
        for mut plugin in self.plugins.drain(..) {
            info!("Unloading plugin for {:?}", plugin.name());
            plugin.on_unload();
        }

        for lib in self.libs.drain(..) {
            drop(lib);
        }
    }
}
@@ -1,157 +0,0 @@
use {
    crate::{
        accounts_update_notifier::AccountsUpdateNotifierImpl,
        accountsdb_plugin_manager::AccountsDbPluginManager,
        slot_status_observer::SlotStatusObserver,
    },
    crossbeam_channel::Receiver,
    log::*,
    serde_json,
    solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
    solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
    std::{
        fs::File,
        io::Read,
        path::{Path, PathBuf},
        sync::{Arc, RwLock},
        thread,
    },
    thiserror::Error,
};

#[derive(Error, Debug)]
pub enum AccountsdbPluginServiceError {
    #[error("Cannot open the plugin config file")]
    CannotOpenConfigFile(String),

    #[error("Cannot read the plugin config file")]
    CannotReadConfigFile(String),

    #[error("The config file is not in a valid JSON format")]
    InvalidConfigFileFormat(String),

    #[error("Plugin library path is not specified in the config file")]
    LibPathNotSet,

    #[error("Invalid plugin path")]
    InvalidPluginPath,

    #[error("Cannot load plugin shared library")]
    PluginLoadError(String),
}

/// The service managing the AccountsDb plugin workflow.
pub struct AccountsDbPluginService {
    slot_status_observer: SlotStatusObserver,
    plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
    accounts_update_notifier: AccountsUpdateNotifier,
}

impl AccountsDbPluginService {
    /// Creates and returns the AccountsDbPluginService.
    /// # Arguments
    /// * `confirmed_bank_receiver` - The receiver for confirmed bank notifications
    /// * `accountsdb_plugin_config_files` - The config file paths for the plugins. Each
    ///   config file controls the plugin responsible for transporting the data to external
    ///   data stores. It is defined in JSON format. The `libpath` field should point to
    ///   the full path of the dynamic shared library (.so file) to be loaded. The shared
    ///   library must implement the `AccountsDbPlugin` trait and export a `C` function
    ///   `_create_plugin` that creates the implementation of `AccountsDbPlugin` and
    ///   returns it to the caller. The rest of the JSON fields are up to the concrete
    ///   plugin implementation; they are usually used to configure the connection
    ///   information for the external data store.
    pub fn new(
        confirmed_bank_receiver: Receiver<BankNotification>,
        accountsdb_plugin_config_files: &[PathBuf],
    ) -> Result<Self, AccountsdbPluginServiceError> {
        info!(
            "Starting AccountsDbPluginService from config files: {:?}",
            accountsdb_plugin_config_files
        );
        let mut plugin_manager = AccountsDbPluginManager::new();

        for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
            Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
        }

        let plugin_manager = Arc::new(RwLock::new(plugin_manager));
        let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
            plugin_manager.clone(),
        )));
        let slot_status_observer =
            SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());

        info!("Started AccountsDbPluginService");
        Ok(AccountsDbPluginService {
            slot_status_observer,
            plugin_manager,
            accounts_update_notifier,
        })
    }

    fn load_plugin(
        plugin_manager: &mut AccountsDbPluginManager,
        accountsdb_plugin_config_file: &Path,
    ) -> Result<(), AccountsdbPluginServiceError> {
        let mut file = match File::open(accountsdb_plugin_config_file) {
            Ok(file) => file,
            Err(err) => {
                return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
                    "Failed to open the plugin config file {:?}, error: {:?}",
                    accountsdb_plugin_config_file, err
                )));
            }
        };

        let mut contents = String::new();
        if let Err(err) = file.read_to_string(&mut contents) {
            return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
                "Failed to read the plugin config file {:?}, error: {:?}",
                accountsdb_plugin_config_file, err
            )));
        }

        let result: serde_json::Value = match serde_json::from_str(&contents) {
            Ok(value) => value,
            Err(err) => {
                return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
                    format!(
                        "The config file {:?} is not in a valid JSON format, error: {:?}",
                        accountsdb_plugin_config_file, err
                    ),
                ));
            }
        };

        let libpath = result["libpath"]
            .as_str()
            .ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
        let config_file = accountsdb_plugin_config_file
            .as_os_str()
            .to_str()
            .ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;

        unsafe {
            let result = plugin_manager.load_plugin(libpath, config_file);
            if let Err(err) = result {
                let msg = format!(
                    "Failed to load the plugin library: {:?}, error: {:?}",
                    libpath, err
                );
                return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
            }
        }
        Ok(())
    }

    pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
        self.accounts_update_notifier.clone()
    }

    pub fn join(mut self) -> thread::Result<()> {
        self.slot_status_observer.join()?;
        self.plugin_manager.write().unwrap().unload();
        Ok(())
    }
}
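Per the doc comment on `new` above, each config file is JSON and only `libpath` is read by this service. A minimal hedged example (the path and the extra connection fields are illustrative; extra fields are left to the concrete plugin):

{
    "libpath": "/usr/local/lib/libsolana_accountsdb_plugin_postgres.so",
    "host": "localhost",
    "user": "solana"
}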
@@ -1,4 +0,0 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod slot_status_observer;
@@ -1,80 +0,0 @@
use {
    crossbeam_channel::Receiver,
    solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
    solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
    std::{
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc,
        },
        thread::{self, Builder, JoinHandle},
    },
};

#[derive(Debug)]
pub(crate) struct SlotStatusObserver {
    bank_notification_receiver_service: Option<JoinHandle<()>>,
    exit_updated_slot_server: Arc<AtomicBool>,
}

impl SlotStatusObserver {
    pub fn new(
        bank_notification_receiver: Receiver<BankNotification>,
        accounts_update_notifier: AccountsUpdateNotifier,
    ) -> Self {
        let exit_updated_slot_server = Arc::new(AtomicBool::new(false));

        Self {
            bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
                bank_notification_receiver,
                exit_updated_slot_server.clone(),
                accounts_update_notifier,
            )),
            exit_updated_slot_server,
        }
    }

    pub fn join(&mut self) -> thread::Result<()> {
        self.exit_updated_slot_server.store(true, Ordering::Relaxed);
        self.bank_notification_receiver_service
            .take()
            .map(JoinHandle::join)
            .unwrap()
    }

    fn run_bank_notification_receiver(
        bank_notification_receiver: Receiver<BankNotification>,
        exit: Arc<AtomicBool>,
        accounts_update_notifier: AccountsUpdateNotifier,
    ) -> JoinHandle<()> {
        Builder::new()
            .name("bank_notification_receiver".to_string())
            .spawn(move || {
                while !exit.load(Ordering::Relaxed) {
                    if let Ok(slot) = bank_notification_receiver.recv() {
                        match slot {
                            BankNotification::OptimisticallyConfirmed(slot) => {
                                accounts_update_notifier
                                    .read()
                                    .unwrap()
                                    .notify_slot_confirmed(slot, None);
                            }
                            BankNotification::Frozen(bank) => {
                                accounts_update_notifier
                                    .read()
                                    .unwrap()
                                    .notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
                            }
                            BankNotification::Root(bank) => {
                                accounts_update_notifier
                                    .read()
                                    .unwrap()
                                    .notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
                            }
                        }
                    }
                }
            })
            .unwrap()
    }
}
@@ -1,33 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[lib]
crate-type = ["cdylib", "rlib"]

[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
tokio-postgres = "0.7.3"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,5 +0,0 @@
This is an example implementation of the AccountsDb plugin for a PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.

To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.
@@ -1,54 +0,0 @@
/**
 * This plugin implementation for PostgreSQL requires the following tables
 */
-- The table storing accounts


CREATE TABLE account (
    pubkey BYTEA PRIMARY KEY,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

-- The table storing slot information
CREATE TABLE slot (
    slot BIGINT PRIMARY KEY,
    parent BIGINT,
    status varchar(16) NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

/**
 * The following is for keeping historical data for accounts and is not required for the plugin to work.
 */
-- The table storing historical data for accounts
CREATE TABLE account_audit (
    pubkey BYTEA,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
    INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
        VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
                OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
    RETURN NEW;
END;

$audit_account_update$ LANGUAGE plpgsql;

CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
    FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
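Given the schema above, reading back the latest stored state of an account is a plain query against the `account` table. A hedged sketch using the `postgres` crate pinned in the plugin's Cargo.toml (connection-string values are illustrative; port 5433 matches the reference postgresql.conf below):

use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost port=5433 user=solana", NoTls)?;
    let pubkey = vec![0u8; 32]; // illustrative 32-byte account pubkey
    for row in client.query(
        "SELECT lamports, slot, write_version FROM account WHERE pubkey = $1",
        &[&pubkey],
    )? {
        let (lamports, slot, write_version): (i64, i64, i64) =
            (row.get(0), row.get(1), row.get(2));
        println!("lamports={} slot={} write_version={}", lamports, slot, write_version);
    }
    Ok(())
}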
@@ -1,9 +0,0 @@
/**
 * Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
 */

DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
@@ -1,802 +0,0 @@
# This is a reference configuration file for the PostgreSQL database version 14.

# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)

hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)

# - TCP settings -
# see "man tcp" for details

#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default

#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never

# - Authentication -

#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off

# - SSL -

ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)

# - Disk -

#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit

# - Kernel Resources -

#max_files_per_process = 1000 # min 64
# (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables

# - Asynchronous Behavior -

#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)


#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB

#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB

# - Archiving -

#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Servers -

# Set these on the primary and on any standby that will send replication data.

max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)

# - Primary Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a primary server.

#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on

# - Planner Cost Constants -

#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB

#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan


#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic

#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)

#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds

#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate

#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs


#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'


#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on


#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------

# - Query and Index Statistics Collector -

#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'


# - Monitoring -

#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
|
||||
#autovacuum_naptime = 1min # time between autovacuum runs
|
||||
#autovacuum_vacuum_threshold = 50 # min number of row updates before
|
||||
# vacuum
|
||||
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
|
||||
# before vacuum; -1 disables insert
|
||||
# vacuums
|
||||
#autovacuum_analyze_threshold = 50 # min number of row updates before
|
||||
# analyze
|
||||
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
|
||||
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
|
||||
# size before insert vacuum
|
||||
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
|
||||
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
|
||||
# before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
|
||||
# autovacuum, in milliseconds;
|
||||
# -1 means use vacuum_cost_delay
|
||||
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
|
||||
# autovacuum, -1 means use
|
||||
# vacuum_cost_limit
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CLIENT CONNECTION DEFAULTS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Statement Behavior -
|
||||
|
||||
#client_min_messages = notice # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# log
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
#search_path = '"$user", public' # schema names
|
||||
#row_security = on
|
||||
#default_table_access_method = 'heap'
|
||||
#default_tablespace = '' # a tablespace name, '' uses the default
|
||||
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
|
||||
#temp_tablespaces = '' # a list of tablespace names, '' uses
|
||||
# only default tablespace
|
||||
#check_function_bodies = on
|
||||
#default_transaction_isolation = 'read committed'
|
||||
#default_transaction_read_only = off
|
||||
#default_transaction_deferrable = off
|
||||
#session_replication_role = 'origin'
|
||||
#statement_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#lock_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#vacuum_freeze_table_age = 150000000
|
||||
#vacuum_freeze_min_age = 50000000
|
||||
#vacuum_failsafe_age = 1600000000
|
||||
#vacuum_multixact_freeze_table_age = 150000000
|
||||
#vacuum_multixact_freeze_min_age = 5000000
|
||||
#vacuum_multixact_failsafe_age = 1600000000
|
||||
#bytea_output = 'hex' # hex, escape
|
||||
#xmlbinary = 'base64'
|
||||
#xmloption = 'content'
|
||||
#gin_pending_list_limit = 4MB
|
||||
|
||||
# - Locale and Formatting -
|
||||
|
||||
datestyle = 'iso, mdy'
|
||||
#intervalstyle = 'postgres'
|
||||
timezone = 'Etc/UTC'
|
||||
#timezone_abbreviations = 'Default' # Select the set of available time zone
|
||||
# abbreviations. Currently, there are
|
||||
# Default
|
||||
# Australia (historical usage)
|
||||
# India
|
||||
# You can create your own file in
|
||||
# share/timezonesets/.
|
||||
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
|
||||
# selects precise output mode
|
||||
#client_encoding = sql_ascii # actually, defaults to database
|
||||
# encoding
|
||||
|
||||
# These settings are initialized by initdb, but they can be changed.
|
||||
lc_messages = 'C.UTF-8' # locale for system error message
|
||||
# strings
|
||||
lc_monetary = 'C.UTF-8' # locale for monetary formatting
|
||||
lc_numeric = 'C.UTF-8' # locale for number formatting
|
||||
lc_time = 'C.UTF-8' # locale for time formatting
|
||||
|
||||
# default configuration for text search
|
||||
default_text_search_config = 'pg_catalog.english'
|
||||
|
||||
# - Shared Library Preloading -
|
||||
|
||||
#local_preload_libraries = ''
|
||||
#session_preload_libraries = ''
|
||||
#shared_preload_libraries = '' # (change requires restart)
|
||||
#jit_provider = 'llvmjit' # JIT library to use
|
||||
|
||||
# - Other Defaults -
|
||||
|
||||
#dynamic_library_path = '$libdir'
|
||||
#extension_destdir = '' # prepend path when loading extensions
|
||||
# and shared objects (added by Debian)
|
||||
#gin_fuzzy_search_limit = 0
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# LOCK MANAGEMENT
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#deadlock_timeout = 1s
|
||||
#max_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_relation = -2 # negative values mean
|
||||
# (max_pred_locks_per_transaction
|
||||
# / -max_pred_locks_per_relation) - 1
|
||||
#max_pred_locks_per_page = 2 # min 0
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# VERSION AND PLATFORM COMPATIBILITY
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Previous PostgreSQL Versions -
|
||||
|
||||
#array_nulls = on
|
||||
#backslash_quote = safe_encoding # on, off, or safe_encoding
|
||||
#escape_string_warning = on
|
||||
#lo_compat_privileges = off
|
||||
#quote_all_identifiers = off
|
||||
#standard_conforming_strings = on
|
||||
#synchronize_seqscans = on
|
||||
|
||||
# - Other Platforms and Clients -
|
||||
|
||||
#transform_null_equals = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# ERROR HANDLING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#exit_on_error = off # terminate session on any error?
|
||||
#restart_after_crash = on # reinitialize after backend crash?
|
||||
#data_sync_retry = off # retry or panic on failure to fsync
|
||||
# data?
|
||||
# (change requires restart)
|
||||
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CONFIG FILE INCLUDES
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# These options allow settings to be loaded from files other than the
|
||||
# default postgresql.conf. Note that these are directives, not variable
|
||||
# assignments, so they can usefully be given more than once.
|
||||
|
||||
include_dir = 'conf.d' # include files ending in '.conf' from
|
||||
# a directory, e.g., 'conf.d'
|
||||
#include_if_exists = '...' # include file only if it exists
|
||||
#include = '...' # include file
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CUSTOMIZED OPTIONS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# Add settings for extensions here
|
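The plugin code removed in the hunks below talks to a server configured as above through the `postgres` crate in `NoTls` mode. As a minimal connectivity sketch, not taken from this diff (the host, user, and port are placeholder values), a check against such a server could look like:

use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    // Same connection style SimplePostgresClient::connect_to_db uses below;
    // the parameters here are placeholders, not values from this repository.
    let mut client = Client::connect("host=localhost user=solana port=5432", NoTls)?;

    // Read back one of the server settings from the conf file above.
    let row = client.query_one("SHOW log_timezone", &[])?;
    let log_timezone: String = row.get(0);
    println!("log_timezone = {}", log_timezone);
    Ok(())
}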
@@ -1,69 +0,0 @@
use {log::*, std::collections::HashSet};

#[derive(Debug)]
pub(crate) struct AccountsSelector {
    pub accounts: HashSet<Vec<u8>>,
    pub owners: HashSet<Vec<u8>>,
    pub select_all_accounts: bool,
}

impl AccountsSelector {
    pub fn default() -> Self {
        AccountsSelector {
            accounts: HashSet::default(),
            owners: HashSet::default(),
            select_all_accounts: true,
        }
    }

    pub fn new(accounts: &[String], owners: &[String]) -> Self {
        info!(
            "Creating AccountsSelector from accounts: {:?}, owners: {:?}",
            accounts, owners
        );

        let select_all_accounts = accounts.iter().any(|key| key == "*");
        if select_all_accounts {
            return AccountsSelector {
                accounts: HashSet::default(),
                owners: HashSet::default(),
                select_all_accounts,
            };
        }
        let accounts = accounts
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        let owners = owners
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        AccountsSelector {
            accounts,
            owners,
            select_all_accounts,
        }
    }

    pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
        self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_create_accounts_selector() {
        AccountsSelector::new(
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
            &[],
        );

        AccountsSelector::new(
            &[],
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
        );
    }
}
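For context, a sketch of how the removed selector is exercised. `AccountsSelector` is `pub(crate)`, so this would live next to the tests above (inside `mod tests`, which already has `use super::*;`); the zeroed pubkey is a placeholder:

    // Hypothetical companion test: owner-based selection with raw byte keys,
    // the same shape of inputs update_account() passes in.
    #[test]
    fn test_owner_based_selection_sketch() {
        let owner = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin";
        let selector = AccountsSelector::new(&[], &[owner.to_string()]);

        // The validator hands the plugin raw byte slices, not base58 strings,
        // so decode the owner key the same way AccountsSelector::new does.
        let owner_bytes = bs58::decode(owner).into_vec().unwrap();
        let some_pubkey = [0u8; 32]; // placeholder account key

        // Selected: the owner matches, even though the pubkey is not listed.
        assert!(selector.is_account_selected(&some_pubkey, &owner_bytes));
        // Not selected: neither the pubkey nor the owner is in the selector.
        assert!(!selector.is_account_selected(&some_pubkey, &some_pubkey));
    }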
@@ -1,345 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
    crate::{
        accounts_selector::AccountsSelector,
        postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
    },
    bs58,
    log::*,
    serde_derive::{Deserialize, Serialize},
    serde_json,
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
    },
    solana_metrics::*,
    std::{fs::File, io::Read},
    thiserror::Error,
};

#[derive(Default)]
pub struct AccountsDbPluginPostgres {
    client: Option<ParallelPostgresClient>,
    accounts_selector: Option<AccountsSelector>,
}

impl std::fmt::Debug for AccountsDbPluginPostgres {
    fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
    pub host: Option<String>,
    pub user: Option<String>,
    pub port: Option<u16>,
    pub connection_str: Option<String>,
    pub threads: Option<usize>,
    pub batch_size: Option<usize>,
    pub panic_on_db_errors: Option<bool>,
}

#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
    #[error("Error connecting to the backend data store. Error message: ({msg})")]
    DataStoreConnectionError { msg: String },

    #[error("Error preparing data store schema. Error message: ({msg})")]
    DataSchemaError { msg: String },

    #[error("Error in configuration. Error message: ({msg})")]
    ConfigurationError { msg: String },
}

impl AccountsDbPlugin for AccountsDbPluginPostgres {
    fn name(&self) -> &'static str {
        "AccountsDbPluginPostgres"
    }

    /// Do initialization for the PostgreSQL plugin.
    ///
    /// # Format of the config file:
    /// * The `accounts_selector` section allows the user to control account selection.
    /// "accounts_selector" : {
    ///     "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    /// }
    /// or:
    /// "accounts_selector" = {
    ///     "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
    /// }
    /// Accounts satisfying either the accounts condition or the owners condition will be selected.
    /// When only owners is specified,
    /// all accounts belonging to the owners will be streamed.
    /// The accounts field supports a wildcard to select all accounts:
    /// "accounts_selector" : {
    ///     "accounts" : \["*"\],
    /// }
    /// * "host", optional, specifies the PostgreSQL server.
    /// * "user", optional, specifies the PostgreSQL user.
    /// * "port", optional, specifies the PostgreSQL server's port.
    /// * "connection_str", optional, the custom PostgreSQL connection string.
    /// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
    /// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
    /// `host` and `user` must be given.
    /// * "threads", optional, specifies the number of worker threads for the plugin. A thread
    /// maintains a PostgreSQL connection to the server. The default is '10'.
    /// * "batch_size", optional, specifies the batch size of bulk insert when the AccountsDb is created
    /// from restoring a snapshot. The default is '10'.
    /// * "panic_on_db_errors", optional, controls whether to panic when there are errors replicating data to the
    /// PostgreSQL database. The default is 'false'.
    /// # Examples
    ///
    /// {
    ///    "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
    ///    "host": "host_foo",
    ///    "user": "solana",
    ///    "threads": 10,
    ///    "accounts_selector" : {
    ///        "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
    ///    }
    /// }

    fn on_load(&mut self, config_file: &str) -> Result<()> {
        solana_logger::setup_with_default("info");
        info!(
            "Loading plugin {:?} from config_file {:?}",
            self.name(),
            config_file
        );
        let mut file = File::open(config_file)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;

        let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
        self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));

        let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
            serde_json::from_str(&contents);
        match result {
            Err(err) => {
                return Err(AccountsDbPluginError::ConfigFileReadError {
                    msg: format!(
                        "The config file is not in the JSON format expected: {:?}",
                        err
                    ),
                })
            }
            Ok(config) => {
                let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
                self.client = Some(client);
            }
        }

        Ok(())
    }

    fn on_unload(&mut self) {
        info!("Unloading plugin: {:?}", self.name());

        match &mut self.client {
            None => {}
            Some(client) => {
                client.join().unwrap();
            }
        }
    }

    fn update_account(
        &mut self,
        account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()> {
        let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
        match account {
            ReplicaAccountInfoVersions::V0_0_1(account) => {
                let mut measure_select =
                    Measure::start("accountsdb-plugin-postgres-update-account-select");
                if let Some(accounts_selector) = &self.accounts_selector {
                    if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
                        return Ok(());
                    }
                } else {
                    return Ok(());
                }
                measure_select.stop();
                inc_new_counter_debug!(
                    "accountsdb-plugin-postgres-update-account-select-us",
                    measure_select.as_us() as usize,
                    100000,
                    100000
                );

                debug!(
                    "Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
                    bs58::encode(account.pubkey).into_string(),
                    bs58::encode(account.owner).into_string(),
                    slot,
                    self.accounts_selector.as_ref().unwrap()
                );

                match &mut self.client {
                    None => {
                        return Err(AccountsDbPluginError::Custom(Box::new(
                            AccountsDbPluginPostgresError::DataStoreConnectionError {
                                msg: "There is no connection to the PostgreSQL database."
                                    .to_string(),
                            },
                        )));
                    }
                    Some(client) => {
                        let mut measure_update =
                            Measure::start("accountsdb-plugin-postgres-update-account-client");
                        let result = { client.update_account(account, slot, is_startup) };
                        measure_update.stop();

                        inc_new_counter_debug!(
                            "accountsdb-plugin-postgres-update-account-client-us",
                            measure_update.as_us() as usize,
                            100000,
                            100000
                        );

                        if let Err(err) = result {
                            return Err(AccountsDbPluginError::AccountsUpdateError {
                                msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
                            });
                        }
                    }
                }
            }
        }

        measure_all.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-postgres-update-account-main-us",
            measure_all.as_us() as usize,
            100000,
            100000
        );

        Ok(())
    }

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<()> {
        info!("Updating slot {:?} with status {:?}", slot, status);

        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.update_slot_status(slot, parent, status);

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
                    });
                }
            }
        }

        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        info!("Notifying the end of startup for accounts notifications");
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.notify_end_of_startup();

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
                    });
                }
            }
        }
        Ok(())
    }
}

impl AccountsDbPluginPostgres {
    fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
        let accounts_selector = &config["accounts_selector"];

        if accounts_selector.is_null() {
            AccountsSelector::default()
        } else {
            let accounts = &accounts_selector["accounts"];
            let accounts: Vec<String> = if accounts.is_array() {
                accounts
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            let owners = &accounts_selector["owners"];
            let owners: Vec<String> = if owners.is_array() {
                owners
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            AccountsSelector::new(&accounts, &owners)
        }
    }

    pub fn new() -> Self {
        AccountsDbPluginPostgres {
            client: None,
            accounts_selector: None,
        }
    }
}

#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin = AccountsDbPluginPostgres::new();
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
    Box::into_raw(plugin)
}

#[cfg(test)]
pub(crate) mod tests {
    use {super::*, serde_json};

    #[test]
    fn test_accounts_selector_from_config() {
        let config = "{\"accounts_selector\" : { \
           \"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
        }}";

        let config: serde_json::Value = serde_json::from_str(config).unwrap();
        AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
    }
}
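The doc comment on `on_load` above describes the config file format. A self-contained sketch of the deserialization it relies on, using a hypothetical mirror of `AccountsDbPluginPostgresConfig` (assumes the same `serde_derive` and `serde_json` crates the plugin depends on):

use serde_derive::Deserialize;

// Hypothetical mirror of the plugin's config struct, for illustration only.
#[derive(Debug, Deserialize)]
struct PluginConfig {
    host: Option<String>,
    user: Option<String>,
    port: Option<u16>,
    connection_str: Option<String>,
    threads: Option<usize>,
    batch_size: Option<usize>,
    panic_on_db_errors: Option<bool>,
}

fn main() {
    // serde ignores unknown keys by default, so "libpath" and
    // "accounts_selector" pass through harmlessly. That is why on_load can
    // parse the same file twice: once as a raw serde_json::Value to pull out
    // "accounts_selector", and once into the typed config struct.
    let contents = r#"{
        "libpath": "/path/to/libsolana_accountsdb_plugin_postgres.so",
        "host": "host_foo",
        "user": "solana",
        "threads": 10,
        "accounts_selector": { "owners": ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"] }
    }"#;

    let config: PluginConfig = serde_json::from_str(contents).unwrap();
    assert_eq!(config.threads, Some(10));
    assert!(config.connection_str.is_none());
}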
@@ -1,3 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
@@ -1,879 +0,0 @@
#![allow(clippy::integer_arithmetic)]

/// A concurrent implementation for writing accounts into PostgreSQL in parallel.
use {
    crate::accountsdb_plugin_postgres::{
        AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
    },
    chrono::Utc,
    crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
    log::*,
    postgres::{Client, NoTls, Statement},
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
    },
    solana_measure::measure::Measure,
    solana_metrics::*,
    solana_sdk::timing::AtomicInterval,
    std::{
        sync::{
            atomic::{AtomicBool, AtomicUsize, Ordering},
            Arc, Mutex,
        },
        thread::{self, sleep, Builder, JoinHandle},
        time::Duration,
    },
    tokio_postgres::types::ToSql,
};

/// The maximum asynchronous requests allowed in the channel to avoid excessive
/// memory usage. The downside -- calls after this threshold is reached can get blocked.
const MAX_ASYNC_REQUESTS: usize = 40960;
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;

struct PostgresSqlClientWrapper {
    client: Client,
    update_account_stmt: Statement,
    bulk_account_insert_stmt: Statement,
    update_slot_with_parent_stmt: Statement,
    update_slot_without_parent_stmt: Statement,
}

pub struct SimplePostgresClient {
    batch_size: usize,
    pending_account_updates: Vec<DbAccountInfo>,
    client: Mutex<PostgresSqlClientWrapper>,
}

struct PostgresClientWorker {
    client: SimplePostgresClient,
    /// Indicates whether accounts notification during startup is done.
    is_startup_done: bool,
}

impl Eq for DbAccountInfo {}

#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
    pub pubkey: Vec<u8>,
    pub lamports: i64,
    pub owner: Vec<u8>,
    pub executable: bool,
    pub rent_epoch: i64,
    pub data: Vec<u8>,
    pub slot: i64,
    pub write_version: i64,
}

pub(crate) fn abort() -> ! {
    #[cfg(not(test))]
    {
        // standard error is usually redirected to a log file, cry for help on standard output as
        // well
        eprintln!("Validator process aborted. The validator log may contain further details");
        std::process::exit(1);
    }

    #[cfg(test)]
    panic!("process::exit(1) is intercepted for friendly test failure...");
}

impl DbAccountInfo {
    fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
        let data = account.data().to_vec();
        Self {
            pubkey: account.pubkey().to_vec(),
            lamports: account.lamports() as i64,
            owner: account.owner().to_vec(),
            executable: account.executable(),
            rent_epoch: account.rent_epoch() as i64,
            data,
            slot: slot as i64,
            write_version: account.write_version(),
        }
    }
}

pub trait ReadableAccountInfo: Sized {
    fn pubkey(&self) -> &[u8];
    fn owner(&self) -> &[u8];
    fn lamports(&self) -> i64;
    fn executable(&self) -> bool;
    fn rent_epoch(&self) -> i64;
    fn data(&self) -> &[u8];
    fn write_version(&self) -> i64;
}

impl ReadableAccountInfo for DbAccountInfo {
    fn pubkey(&self) -> &[u8] {
        &self.pubkey
    }

    fn owner(&self) -> &[u8] {
        &self.owner
    }

    fn lamports(&self) -> i64 {
        self.lamports
    }

    fn executable(&self) -> bool {
        self.executable
    }

    fn rent_epoch(&self) -> i64 {
        self.rent_epoch
    }

    fn data(&self) -> &[u8] {
        &self.data
    }

    fn write_version(&self) -> i64 {
        self.write_version
    }
}

impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
    fn pubkey(&self) -> &[u8] {
        self.pubkey
    }

    fn owner(&self) -> &[u8] {
        self.owner
    }

    fn lamports(&self) -> i64 {
        self.lamports as i64
    }

    fn executable(&self) -> bool {
        self.executable
    }

    fn rent_epoch(&self) -> i64 {
        self.rent_epoch as i64
    }

    fn data(&self) -> &[u8] {
        self.data
    }

    fn write_version(&self) -> i64 {
        self.write_version as i64
    }
}

pub trait PostgresClient {
    fn join(&mut self) -> thread::Result<()> {
        Ok(())
    }

    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError>;

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError>;

    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
}

impl SimplePostgresClient {
    fn connect_to_db(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Client, AccountsDbPluginError> {
        let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);

        let connection_str = if let Some(connection_str) = &config.connection_str {
            connection_str.clone()
        } else {
            if config.host.is_none() || config.user.is_none() {
                let msg = format!(
                    "\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
                    config.connection_str, config.host, config.user
                );
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::ConfigurationError { msg },
                )));
            }
            format!(
                "host={} user={} port={}",
                config.host.as_ref().unwrap(),
                config.user.as_ref().unwrap(),
                port
            )
        };

        match Client::connect(&connection_str, NoTls) {
            Err(err) => {
                let msg = format!(
                    "Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
                    err, connection_str
                );
                error!("{}", msg);
                Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
                )))
            }
            Ok(client) => Ok(client),
        }
    }

    fn build_bulk_account_insert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
        for j in 0..batch_size {
            let row = j * ACCOUNT_COLUMN_COUNT;
            let val_str = format!(
                "(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
                row + 1,
                row + 2,
                row + 3,
                row + 4,
                row + 5,
                row + 6,
                row + 7,
                row + 8,
                row + 9,
            );

            if j == 0 {
                stmt = format!("{} {}", &stmt, val_str);
            } else {
                stmt = format!("{}, {}", &stmt, val_str);
            }
        }

        let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
            data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
            acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        stmt = format!("{} {}", stmt, handle_conflict);

        info!("{}", stmt);
        let bulk_stmt = client.prepare(&stmt);

        match bulk_stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }

    fn build_single_account_upsert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
            ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
            data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
            acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(update_account_stmt) => Ok(update_account_stmt),
        }
    }

    fn build_slot_upsert_statement_with_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
            VALUES ($1, $2, $3, $4) \
            ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    fn build_slot_upsert_statement_without_parent(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt = "INSERT INTO slot (slot, status, updated_on) \
            VALUES ($1, $2, $3) \
            ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    /// Internal function for updating or inserting a single account
    fn upsert_account_internal(
        account: &DbAccountInfo,
        statement: &Statement,
        client: &mut Client,
    ) -> Result<(), AccountsDbPluginError> {
        let lamports = account.lamports() as i64;
        let rent_epoch = account.rent_epoch() as i64;
        let updated_on = Utc::now().naive_utc();
        let result = client.query(
            statement,
            &[
                &account.pubkey(),
                &account.slot,
                &account.owner(),
                &lamports,
                &account.executable(),
                &rent_epoch,
                &account.data(),
                &account.write_version(),
                &updated_on,
            ],
        );

        if let Err(err) = result {
            let msg = format!(
                "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                err
            );
            error!("{}", msg);
            return Err(AccountsDbPluginError::AccountsUpdateError { msg });
        }

        Ok(())
    }

    /// Update or insert a single account
    fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;
        Self::upsert_account_internal(account, statement, client)
    }

    /// Insert accounts in batch to reduce network overhead
    fn insert_accounts_in_batch(
        &mut self,
        account: DbAccountInfo,
    ) -> Result<(), AccountsDbPluginError> {
        self.pending_account_updates.push(account);

        if self.pending_account_updates.len() == self.batch_size {
            let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");

            let mut values: Vec<&(dyn ToSql + Sync)> =
                Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
            let updated_on = Utc::now().naive_utc();
            for j in 0..self.batch_size {
                let account = &self.pending_account_updates[j];

                values.push(&account.pubkey);
                values.push(&account.slot);
                values.push(&account.owner);
                values.push(&account.lamports);
                values.push(&account.executable);
                values.push(&account.rent_epoch);
                values.push(&account.data);
                values.push(&account.write_version);
                values.push(&updated_on);
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-prepare-values-us",
                measure.as_us() as usize,
                10000,
                10000
            );

            let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
            let client = self.client.get_mut().unwrap();
            let result = client
                .client
                .query(&client.bulk_account_insert_stmt, &values);

            self.pending_account_updates.clear();
            if let Err(err) = result {
                let msg = format!(
                    "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{}", msg);
                return Err(AccountsDbPluginError::AccountsUpdateError { msg });
            }
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-us",
                measure.as_us() as usize,
                10000,
                10000
            );
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-update-account-count",
                self.batch_size,
                10000,
                10000
            );
        }
        Ok(())
    }

    /// Flush any leftover accounts that were not processed in the last batch
    fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
        if self.pending_account_updates.is_empty() {
            return Ok(());
        }

        let client = self.client.get_mut().unwrap();
        let statement = &client.update_account_stmt;
        let client = &mut client.client;

        for account in self.pending_account_updates.drain(..) {
            Self::upsert_account_internal(&account, statement, client)?;
        }

        Ok(())
    }

    pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        info!("Creating SimplePostgresClient...");
        let mut client = Self::connect_to_db(config)?;
        let bulk_account_insert_stmt =
            Self::build_bulk_account_insert_statement(&mut client, config)?;
        let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;

        let update_slot_with_parent_stmt =
            Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
        let update_slot_without_parent_stmt =
            Self::build_slot_upsert_statement_without_parent(&mut client, config)?;

        let batch_size = config
            .batch_size
            .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
        info!("Created SimplePostgresClient.");
        Ok(Self {
            batch_size,
            pending_account_updates: Vec::with_capacity(batch_size),
            client: Mutex::new(PostgresSqlClientWrapper {
                client,
                update_account_stmt,
                bulk_account_insert_stmt,
                update_slot_with_parent_stmt,
                update_slot_without_parent_stmt,
            }),
        })
    }
}

impl PostgresClient for SimplePostgresClient {
    fn update_account(
        &mut self,
        account: DbAccountInfo,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError> {
        trace!(
            "Updating account {} with owner {} at slot {}",
            bs58::encode(account.pubkey()).into_string(),
            bs58::encode(account.owner()).into_string(),
            account.slot,
        );
        if !is_startup {
            return self.upsert_account(&account);
        }
        self.insert_accounts_in_batch(account)
    }

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError> {
        info!("Updating slot {:?} with status {:?}", slot, status);

        let slot = slot as i64; // postgres only supports i64
        let parent = parent.map(|parent| parent as i64);
        let updated_on = Utc::now().naive_utc();
        let status_str = status.as_str();
        let client = self.client.get_mut().unwrap();

        let result = match parent {
            Some(parent) => client.client.execute(
                &client.update_slot_with_parent_stmt,
                &[&slot, &parent, &status_str, &updated_on],
            ),
            None => client.client.execute(
                &client.update_slot_without_parent_stmt,
                &[&slot, &status_str, &updated_on],
            ),
        };

        match result {
            Err(err) => {
                let msg = format!(
                    "Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
                    err
                );
                error!("{:?}", msg);
                return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
            }
            Ok(rows) => {
                assert_eq!(1, rows, "Expected one row to be updated at a time");
            }
        }

        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
        self.flush_buffered_writes()
    }
}

struct UpdateAccountRequest {
    account: DbAccountInfo,
    is_startup: bool,
}

struct UpdateSlotRequest {
    slot: u64,
    parent: Option<u64>,
    slot_status: SlotStatus,
}

enum DbWorkItem {
    UpdateAccount(UpdateAccountRequest),
    UpdateSlot(UpdateSlotRequest),
}

impl PostgresClientWorker {
    fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        let result = SimplePostgresClient::new(&config);
        match result {
            Ok(client) => Ok(PostgresClientWorker {
                client,
                is_startup_done: false,
            }),
            Err(err) => {
                error!("Error in creating SimplePostgresClient: {}", err);
                Err(err)
            }
        }
    }

    fn do_work(
        &mut self,
        receiver: Receiver<DbWorkItem>,
        exit_worker: Arc<AtomicBool>,
        is_startup_done: Arc<AtomicBool>,
        startup_done_count: Arc<AtomicUsize>,
        panic_on_db_errors: bool,
    ) -> Result<(), AccountsDbPluginError> {
        while !exit_worker.load(Ordering::Relaxed) {
            let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
            let work = receiver.recv_timeout(Duration::from_millis(500));
            measure.stop();
            inc_new_counter_debug!(
                "accountsdb-plugin-postgres-worker-recv-us",
                measure.as_us() as usize,
                100000,
                100000
            );
            match work {
                Ok(work) => match work {
                    DbWorkItem::UpdateAccount(request) => {
                        if let Err(err) = self
                            .client
                            .update_account(request.account, request.is_startup)
                        {
                            error!("Failed to update account: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                    DbWorkItem::UpdateSlot(request) => {
                        if let Err(err) = self.client.update_slot_status(
                            request.slot,
                            request.parent,
                            request.slot_status,
                        ) {
                            error!("Failed to update slot: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                        }
                    }
                },
                Err(err) => match err {
                    RecvTimeoutError::Timeout => {
                        if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
                            if let Err(err) = self.client.notify_end_of_startup() {
                                error!("Error in notifying end of startup: ({})", err);
                                if panic_on_db_errors {
                                    abort();
                                }
                            }
                            self.is_startup_done = true;
                            startup_done_count.fetch_add(1, Ordering::Relaxed);
                        }

                        continue;
                    }
                    _ => {
                        error!("Error in receiving the item {:?}", err);
                        if panic_on_db_errors {
                            abort();
                        }
                        break;
                    }
                },
            }
        }
        Ok(())
    }
}
pub struct ParallelPostgresClient {
    workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
    exit_worker: Arc<AtomicBool>,
    is_startup_done: Arc<AtomicBool>,
    startup_done_count: Arc<AtomicUsize>,
    initialized_worker_count: Arc<AtomicUsize>,
    sender: Sender<DbWorkItem>,
    last_report: AtomicInterval,
}

impl ParallelPostgresClient {
    pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
        info!("Creating ParallelPostgresClient...");
        let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
        let exit_worker = Arc::new(AtomicBool::new(false));
        let mut workers = Vec::default();
        let is_startup_done = Arc::new(AtomicBool::new(false));
        let startup_done_count = Arc::new(AtomicUsize::new(0));
        let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
        let initialized_worker_count = Arc::new(AtomicUsize::new(0));
        for i in 0..worker_count {
            let cloned_receiver = receiver.clone();
            let exit_clone = exit_worker.clone();
            let is_startup_done_clone = is_startup_done.clone();
            let startup_done_count_clone = startup_done_count.clone();
            let initialized_worker_count_clone = initialized_worker_count.clone();
            let config = config.clone();
            let worker = Builder::new()
                .name(format!("worker-{}", i))
                .spawn(move || -> Result<(), AccountsDbPluginError> {
                    let panic_on_db_errors = *config
                        .panic_on_db_errors
                        .as_ref()
                        .unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
                    let result = PostgresClientWorker::new(config);

                    match result {
                        Ok(mut worker) => {
                            initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
                            worker.do_work(
                                cloned_receiver,
                                exit_clone,
                                is_startup_done_clone,
                                startup_done_count_clone,
                                panic_on_db_errors,
                            )?;
                            Ok(())
                        }
                        Err(err) => {
                            error!("Error when making connection to database: ({})", err);
                            if panic_on_db_errors {
                                abort();
                            }
                            Err(err)
                        }
                    }
                })
                .unwrap();

            workers.push(worker);
        }

        info!("Created ParallelPostgresClient.");
        Ok(Self {
            last_report: AtomicInterval::default(),
            workers,
            exit_worker,
            is_startup_done,
            startup_done_count,
            initialized_worker_count,
            sender,
        })
    }

    pub fn join(&mut self) -> thread::Result<()> {
        self.exit_worker.store(true, Ordering::Relaxed);
        while !self.workers.is_empty() {
            let worker = self.workers.pop();
            if worker.is_none() {
                break;
            }
            let worker = worker.unwrap();
            let result = worker.join().unwrap();
            if result.is_err() {
                error!("The worker thread has failed: {:?}", result);
            }
        }

        Ok(())
    }

    pub fn update_account(
        &mut self,
        account: &ReplicaAccountInfo,
        slot: u64,
        is_startup: bool,
    ) -> Result<(), AccountsDbPluginError> {
        if self.last_report.should_update(30000) {
            datapoint_debug!(
                "postgres-plugin-stats",
                ("message-queue-length", self.sender.len() as i64, i64),
            );
        }
        let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
        let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
            account: DbAccountInfo::new(account, slot),
            is_startup,
        });

        measure.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-posgres-create-work-item-us",
            measure.as_us() as usize,
            100000,
            100000
        );

        let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");

        if let Err(err) = self.sender.send(wrk_item) {
            return Err(AccountsDbPluginError::AccountsUpdateError {
                msg: format!(
                    "Failed to update the account {:?}, error: {:?}",
                    bs58::encode(account.pubkey()).into_string(),
                    err
                ),
            });
        }

        measure.stop();
        inc_new_counter_debug!(
            "accountsdb-plugin-posgres-send-msg-us",
            measure.as_us() as usize,
            100000,
            100000
        );

        Ok(())
    }

    pub fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<(), AccountsDbPluginError> {
        if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
            slot,
            parent,
            slot_status: status,
        })) {
            return Err(AccountsDbPluginError::SlotStatusUpdateError {
                msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
            });
        }
        Ok(())
    }

    pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
        info!("Notifying the end of startup");
        // Ensure all items in the queue have been received by the workers
        while !self.sender.is_empty() {
            sleep(Duration::from_millis(100));
        }
        self.is_startup_done.store(true, Ordering::Relaxed);

        // Wait for all worker threads to be done with flushing
        while self.startup_done_count.load(Ordering::Relaxed)
            != self.initialized_worker_count.load(Ordering::Relaxed)
        {
            info!(
                "Startup done count: {}, good worker thread count: {}",
                self.startup_done_count.load(Ordering::Relaxed),
                self.initialized_worker_count.load(Ordering::Relaxed)
            );
            sleep(Duration::from_millis(100));
        }

        info!("Done with notifying the end of startup");
        Ok(())
    }
}

pub struct PostgresClientBuilder {}

impl PostgresClientBuilder {
    pub fn build_pararallel_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
        ParallelPostgresClient::new(config)
    }

    pub fn build_simple_postgres_client(
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<SimplePostgresClient, AccountsDbPluginError> {
        SimplePostgresClient::new(config)
    }
}
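For intuition about `build_bulk_account_insert_statement` above, a standalone sketch of its placeholder loop with a hypothetical batch size of 2 (the column constant is copied from the deleted file):

const ACCOUNT_COLUMN_COUNT: usize = 9;

fn main() {
    let batch_size = 2;
    let mut stmt = String::from(
        "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, \
         rent_epoch, data, write_version, updated_on) VALUES",
    );
    for j in 0..batch_size {
        // Each row consumes the next ACCOUNT_COLUMN_COUNT parameter slots.
        let row = j * ACCOUNT_COLUMN_COUNT;
        let placeholders: Vec<String> = (1..=ACCOUNT_COLUMN_COUNT)
            .map(|k| format!("${}", row + k))
            .collect();
        let sep = if j == 0 { " " } else { ", " };
        stmt = format!("{}{}({})", stmt, sep, placeholders.join(", "));
    }
    // Prints: INSERT INTO account AS acct (...) VALUES ($1, ..., $9), ($10, ..., $18)
    println!("{}", stmt);
}

The design point: the bulk statement is prepared once with exactly `batch_size * ACCOUNT_COLUMN_COUNT` parameters, so a partially filled batch cannot use it. That is why `flush_buffered_writes` falls back to the single-row upsert statement for whatever remains at end of startup.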
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.8.17"
version = "1.6.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,18 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-core = { path = "../core", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-ledger = { path = "../ledger", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-poh = { path = "../poh", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.6.1" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.1" }
solana-streamer = { path = "../streamer", version = "=1.6.1" }
solana-perf = { path = "../perf", version = "=1.6.1" }
solana-ledger = { path = "../ledger", version = "=1.6.1" }
solana-logger = { path = "../logger", version = "=1.6.1" }
solana-runtime = { path = "../runtime", version = "=1.6.1" }
solana-measure = { path = "../measure", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
solana-version = { path = "../version", version = "=1.6.1" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -1,37 +1,38 @@
#![allow(clippy::integer_arithmetic)]
use {
    clap::{crate_description, crate_name, value_t, App, Arg},
    crossbeam_channel::unbounded,
    log::*,
    rand::{thread_rng, Rng},
    rayon::prelude::*,
    solana_core::banking_stage::BankingStage,
    solana_gossip::cluster_info::{ClusterInfo, Node},
    solana_ledger::{
        blockstore::Blockstore,
        genesis_utils::{create_genesis_config, GenesisConfigInfo},
        get_tmp_ledger_path,
    },
    solana_measure::measure::Measure,
    solana_perf::packet::to_packet_batches,
    solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
    solana_runtime::{
        accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
        cost_model::CostModel,
    },
    solana_sdk::{
        hash::Hash,
        signature::{Keypair, Signature},
        system_transaction,
        timing::{duration_as_us, timestamp},
        transaction::Transaction,
    },
    solana_streamer::socket::SocketAddrSpace,
    std::{
        sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
        thread::sleep,
        time::{Duration, Instant},
    },
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::{
    banking_stage::{create_test_recorder, BankingStage},
    cluster_info::ClusterInfo,
    cluster_info::Node,
    poh_recorder::PohRecorder,
    poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
    blockstore::Blockstore,
    genesis_utils::{create_genesis_config, GenesisConfigInfo},
    get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::{
    accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_sdk::{
    hash::Hash,
    signature::Keypair,
    signature::Signature,
    system_transaction,
    timing::{duration_as_us, timestamp},
    transaction::Transaction,
};
use std::{
    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
    thread::sleep,
    time::{Duration, Instant},
};

fn check_txs(
@@ -77,7 +78,7 @@ fn make_accounts_txs(
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            if !same_payer {
                new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
            }
@@ -168,7 +169,6 @@ fn main() {

    let (verified_sender, verified_receiver) = unbounded();
    let (vote_sender, vote_receiver) = unbounded();
    let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
    let (replay_vote_sender, _replay_vote_receiver) = unbounded();
    let bank0 = Bank::new(&genesis_config);
    let mut bank_forks = BankForks::new(bank0);
@@ -189,7 +189,7 @@ fn main() {
        genesis_config.hash(),
    );
    // Ignore any pesky duplicate signature errors in the case we are using single-payer
    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
    fund.signatures = vec![Signature::new(&sig[0..64])];
    let x = bank.process_transaction(&fund);
    x.unwrap();
@@ -199,7 +199,7 @@ fn main() {
    if !skip_sanity {
        //sanity check, make sure all the transactions can execute sequentially
        transactions.iter().for_each(|tx| {
            let res = bank.process_transaction(tx);
            let res = bank.process_transaction(&tx);
            assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
        });
        bank.clear_signatures();
@@ -211,7 +211,7 @@ fn main() {
        bank.clear_signatures();
    }

    let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
    let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Arc::new(
@@ -219,21 +219,15 @@ fn main() {
        );
        let (exit, poh_recorder, poh_service, signal_receiver) =
            create_test_recorder(&bank, &blockstore, None);
        let cluster_info = ClusterInfo::new(
            Node::new_localhost().info,
            Arc::new(Keypair::new()),
            SocketAddrSpace::Unspecified,
        );
        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
        let cluster_info = Arc::new(cluster_info);
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            tpu_vote_receiver,
            vote_receiver,
            None,
            replay_vote_sender,
            Arc::new(RwLock::new(CostModel::default())),
        );
        poh_recorder.lock().unwrap().set_bank(&bank);

@@ -361,10 +355,10 @@ fn main() {
            if bank.slot() > 0 && bank.slot() % 16 == 0 {
                for tx in transactions.iter_mut() {
                    tx.message.recent_blockhash = bank.last_blockhash();
                    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
                    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
                    tx.signatures[0] = Signature::new(&sig[0..64]);
                }
                verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
                verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
            }

            start += chunk_len;
@@ -386,7 +380,6 @@ fn main() {
        );

        drop(verified_sender);
        drop(tpu_vote_sender);
        drop(vote_sender);
        exit.store(true, Ordering::Relaxed);
        banking_stage.join().unwrap();
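For context on the hunks above: the 1.6-side bench builds its packet batches with `to_packets_chunked`, which serializes each transaction into a packet and groups the packets into fixed-size `Packets` batches. A minimal sketch, assuming only the `to_packets_chunked` signature shown in this diff (`chunk_transactions` is a hypothetical helper name):

use solana_perf::packet::to_packets_chunked;
use solana_sdk::transaction::Transaction;

// Chunk a transaction list the way the bench does above; each element of
// `verified` is one `Packets` batch of at most `packets_per_chunk` packets.
fn chunk_transactions(transactions: &[Transaction], packets_per_chunk: usize) -> usize {
    let verified = to_packets_chunked(transactions, packets_per_chunk);
    verified.len()
}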
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.8.17"
version = "1.6.1"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,20 +11,20 @@ edition = "2018"

[dependencies]
bincode = "1.3.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
borsh = "0.8.1"
borsh-derive = "0.8.1"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-program = { path = "../sdk/program", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.1" }
solana-program = { path = "../sdk/program", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio = { version = "1.1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-banks-server = { path = "../banks-server", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.6.1" }
solana-banks-server = { path = "../banks-server", version = "=1.6.1" }

[lib]
crate-type = ["lib"]
@@ -5,38 +5,31 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.

use borsh::BorshDeserialize;
use futures::{future::join_all, Future, FutureExt};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
    borsh::BorshDeserialize,
    futures::{future::join_all, Future, FutureExt},
    solana_banks_interface::{BanksRequest, BanksResponse},
    solana_program::{
        clock::{Clock, Slot},
        fee_calculator::FeeCalculator,
        hash::Hash,
        program_pack::Pack,
        pubkey::Pubkey,
        rent::Rent,
        sysvar::{self, Sysvar},
    },
    solana_sdk::{
        account::{from_account, Account},
        commitment_config::CommitmentLevel,
        signature::Signature,
        transaction::{self, Transaction},
        transport,
    },
    std::io::{self, Error, ErrorKind},
    tarpc::{
        client::{self, channel::RequestDispatch, NewClient},
        context::{self, Context},
        rpc::{ClientMessage, Response},
        serde_transport::tcp,
        Transport,
    },
    tokio::{net::ToSocketAddrs, time::Duration},
    tokio_serde::formats::Bincode,
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_program::{
    clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
    rent::Rent, sysvar,
};
use solana_sdk::{
    account::{from_account, Account},
    commitment_config::CommitmentLevel,
    signature::Signature,
    transaction::{self, Transaction},
    transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
    client::{self, channel::RequestDispatch, NewClient},
    context::{self, Context},
    rpc::{ClientMessage, Response},
    serde_transport::tcp,
    Transport,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;

// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -70,7 +63,7 @@ impl BanksClient {
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
        self.inner
            .get_fees_with_commitment_and_context(ctx, commitment)
    }
@@ -92,14 +85,6 @@ impl BanksClient {
        self.inner.get_slot_with_context(ctx, commitment)
    }

    pub fn get_block_height_with_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.inner.get_block_height_with_context(ctx, commitment)
    }

    pub fn process_transaction_with_commitment_and_context(
        &mut self,
        ctx: Context,
@@ -130,39 +115,24 @@ impl BanksClient {
        self.send_transaction_with_context(context::current(), transaction)
    }

    /// Return the cluster clock
    pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
        self.get_account(sysvar::clock::id()).map(|result| {
            let clock_sysvar = result?
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
            from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
                io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
            })
        })
    }

    /// Return the fee parameters associated with a recent, rooted blockhash. The cluster
    /// will use the transaction's blockhash to look up these same fee parameters and
    /// use them to calculate the transaction fee.
    pub fn get_fees(
        &mut self,
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
        self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
    }

    /// Return the cluster Sysvar
    pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(T::id()).map(|result| {
            let sysvar = result?
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
            from_account::<T, _>(&sysvar)
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
        })
    }

    /// Return the cluster rent
    pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
        self.get_sysvar::<Rent>()
        self.get_account(sysvar::rent::id()).map(|result| {
            let rent_sysvar = result?
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
            from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
                io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
            })
        })
    }

    /// Return a recent, rooted blockhash from the server. The cluster will only accept
@@ -222,18 +192,12 @@ impl BanksClient {
        self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
    }

    /// Return the most recent rooted slot. All transactions at or below this slot
    /// are said to be finalized. The cluster will not fork to a higher slot.
    /// Return the most recent rooted slot height. All transactions at or below this height
    /// are said to be finalized. The cluster will not fork to a higher slot height.
    pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_slot_with_context(context::current(), CommitmentLevel::default())
    }

    /// Return the most recent rooted block height. All transactions at or below this height
    /// are said to be finalized. The cluster will not fork to a higher block height.
    pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_block_height_with_context(context::current(), CommitmentLevel::default())
    }

    /// Return the account at the given address at the slot corresponding to the given
    /// commitment level. If the account is not found, None is returned.
    pub fn get_account_with_commitment(
@@ -349,18 +313,16 @@ pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClie

#[cfg(test)]
mod tests {
    use {
        super::*,
        solana_banks_server::banks_server::start_local_server,
        solana_runtime::{
            bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
            genesis_utils::create_genesis_config,
        },
        solana_sdk::{message::Message, signature::Signer, system_instruction},
        std::sync::{Arc, RwLock},
        tarpc::transport,
        tokio::{runtime::Runtime, time::sleep},
    use super::*;
    use solana_banks_server::banks_server::start_local_server;
    use solana_runtime::{
        bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
        genesis_utils::create_genesis_config,
    };
    use solana_sdk::{message::Message, signature::Signer, system_instruction};
    use std::sync::{Arc, RwLock};
    use tarpc::transport;
    use tokio::{runtime::Runtime, time::sleep};

    #[test]
    fn test_banks_client_new() {
@@ -388,9 +350,7 @@ mod tests {
        let message = Message::new(&[instruction], Some(&mint_pubkey));

        Runtime::new()?.block_on(async {
            let client_transport =
                start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
                    .await;
            let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
            let mut banks_client = start_client(client_transport).await?;

            let recent_blockhash = banks_client.get_recent_blockhash().await?;
@@ -417,15 +377,13 @@ mod tests {

        let mint_pubkey = &genesis.mint_keypair.pubkey();
        let bob_pubkey = solana_sdk::pubkey::new_rand();
        let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
        let message = Message::new(&[instruction], Some(mint_pubkey));
        let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
        let message = Message::new(&[instruction], Some(&mint_pubkey));

        Runtime::new()?.block_on(async {
            let client_transport =
                start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
                    .await;
            let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
            let mut banks_client = start_client(client_transport).await?;
            let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
            let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
            let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
            let signature = transaction.signatures[0];
            banks_client.send_transaction(transaction).await?;
@@ -433,8 +391,8 @@ mod tests {
            let mut status = banks_client.get_transaction_status(signature).await?;

            while status.is_none() {
                let root_block_height = banks_client.get_root_block_height().await?;
                if root_block_height > last_valid_block_height {
                let root_slot = banks_client.get_root_slot().await?;
                if root_slot > last_valid_slot {
                    break;
                }
                sleep(Duration::from_millis(100)).await;
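The hunks above move the client's expiry tracking from block height back to slots. A minimal polling sketch, assuming only the 1.6-side `BanksClient` methods that appear in this diff (`get_transaction_status`, `get_root_slot`); `wait_for_status` is a hypothetical helper name:

use solana_banks_client::{BanksClient, TransactionStatus};
use solana_sdk::{clock::Slot, signature::Signature};
use std::io;
use tokio::time::{sleep, Duration};

// Poll until a status turns up, or until the transaction's blockhash can no
// longer be accepted because the rooted slot passed `last_valid_slot`.
async fn wait_for_status(
    client: &mut BanksClient,
    signature: Signature,
    last_valid_slot: Slot,
) -> io::Result<Option<TransactionStatus>> {
    loop {
        if let Some(status) = client.get_transaction_status(signature).await? {
            return Ok(Some(status));
        }
        if client.get_root_slot().await? > last_valid_slot {
            return Ok(None);
        }
        sleep(Duration::from_millis(100)).await;
    }
}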
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.8.17"
version = "1.6.1"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
tarpc = { version = "0.24.1", features = ["full"] }

[dev-dependencies]
tokio = { version = "1", features = ["full"] }
tokio = { version = "1.1", features = ["full"] }

[lib]
crate-type = ["lib"]
@@ -1,15 +1,13 @@
use {
    serde::{Deserialize, Serialize},
    solana_sdk::{
        account::Account,
        clock::Slot,
        commitment_config::CommitmentLevel,
        fee_calculator::FeeCalculator,
        hash::Hash,
        pubkey::Pubkey,
        signature::Signature,
        transaction::{self, Transaction, TransactionError},
    },
use serde::{Deserialize, Serialize};
use solana_sdk::{
    account::Account,
    clock::Slot,
    commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator,
    hash::Hash,
    pubkey::Pubkey,
    signature::Signature,
    transaction::{self, Transaction, TransactionError},
};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -36,7 +34,6 @@ pub trait Banks {
    async fn get_transaction_status_with_context(signature: Signature)
        -> Option<TransactionStatus>;
    async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
    async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
    async fn process_transaction_with_commitment_and_context(
        transaction: Transaction,
        commitment: CommitmentLevel,
@@ -49,10 +46,8 @@ pub trait Banks {

#[cfg(test)]
mod tests {
    use {
        super::*,
        tarpc::{client, transport},
    };
    use super::*;
    use tarpc::{client, transport};

    #[test]
    fn test_banks_client_new() {
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.8.17"
version = "1.6.1"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,12 +14,12 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.1" }
solana-runtime = { path = "../runtime", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
solana-metrics = { path = "../metrics", version = "=1.6.1" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio = { version = "1.1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"
@@ -1,52 +1,48 @@
use {
    crate::send_transaction_service::{SendTransactionService, TransactionInfo},
    bincode::{deserialize, serialize},
    futures::{
        future,
        prelude::stream::{self, StreamExt},
    },
    solana_banks_interface::{
        Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
    },
    solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
    solana_sdk::{
        account::Account,
        clock::Slot,
        commitment_config::CommitmentLevel,
        feature_set::FeatureSet,
        fee_calculator::FeeCalculator,
        hash::Hash,
        pubkey::Pubkey,
        signature::Signature,
        transaction::{self, Transaction},
    },
    std::{
        io,
        net::{Ipv4Addr, SocketAddr},
        sync::{
            mpsc::{channel, Receiver, Sender},
            Arc, RwLock,
        },
        thread::Builder,
        time::Duration,
    },
    tarpc::{
        context::Context,
        rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
        serde_transport::tcp,
        server::{self, Channel, Handler},
        transport,
    },
    tokio::time::sleep,
    tokio_serde::formats::Bincode,
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
    future,
    prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{
    Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
    account::Account,
    clock::Slot,
    commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator,
    hash::Hash,
    pubkey::Pubkey,
    signature::Signature,
    transaction::{self, Transaction},
};
use std::{
    io,
    net::{Ipv4Addr, SocketAddr},
    sync::{
        mpsc::{channel, Receiver, Sender},
        Arc, RwLock,
    },
    thread::Builder,
    time::Duration,
};
use tarpc::{
    context::Context,
    rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
    serde_transport::tcp,
    server::{self, Channel, Handler},
    transport,
};
use tokio::time::sleep;
use tokio_serde::formats::Bincode;

#[derive(Clone)]
struct BanksServer {
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    transaction_sender: Sender<TransactionInfo>,
    poll_signature_status_sleep_duration: Duration,
}

impl BanksServer {
@@ -58,13 +54,11 @@ impl BanksServer {
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        transaction_sender: Sender<TransactionInfo>,
        poll_signature_status_sleep_duration: Duration,
    ) -> Self {
        Self {
            bank_forks,
            block_commitment_cache,
            transaction_sender,
            poll_signature_status_sleep_duration,
        }
    }

@@ -87,7 +81,6 @@ impl BanksServer {
    fn new_loopback(
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        poll_signature_status_sleep_duration: Duration,
    ) -> Self {
        let (transaction_sender, transaction_receiver) = channel();
        let bank = bank_forks.read().unwrap().working_bank();
@@ -102,12 +95,7 @@ impl BanksServer {
            .name("solana-bank-forks-client".to_string())
            .spawn(move || Self::run(server_bank_forks, transaction_receiver))
            .unwrap();
        Self::new(
            bank_forks,
            block_commitment_cache,
            transaction_sender,
            poll_signature_status_sleep_duration,
        )
        Self::new(bank_forks, block_commitment_cache, transaction_sender)
    }

    fn slot(&self, commitment: CommitmentLevel) -> Slot {
@@ -125,16 +113,16 @@ impl BanksServer {
        self,
        signature: &Signature,
        blockhash: &Hash,
        last_valid_block_height: u64,
        last_valid_slot: Slot,
        commitment: CommitmentLevel,
    ) -> Option<transaction::Result<()>> {
        let mut status = self
            .bank(commitment)
            .get_signature_status_with_blockhash(signature, blockhash);
        while status.is_none() {
            sleep(self.poll_signature_status_sleep_duration).await;
            sleep(Duration::from_millis(200)).await;
            let bank = self.bank(commitment);
            if bank.block_height() > last_valid_block_height {
            if bank.slot() > last_valid_slot {
                break;
            }
            status = bank.get_signature_status_with_blockhash(signature, blockhash);
@@ -143,13 +131,10 @@ impl BanksServer {
    }
}

fn verify_transaction(
    transaction: &Transaction,
    feature_set: &Arc<FeatureSet>,
) -> transaction::Result<()> {
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
    if let Err(err) = transaction.verify() {
        Err(err)
    } else if let Err(err) = transaction.verify_precompiles(feature_set) {
    } else if let Err(err) = transaction.verify_precompiles() {
        Err(err)
    } else {
        Ok(())
@@ -160,19 +145,16 @@ fn verify_transaction(
impl Banks for BanksServer {
    async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
        let blockhash = &transaction.message.recent_blockhash;
        let last_valid_block_height = self
        let last_valid_slot = self
            .bank_forks
            .read()
            .unwrap()
            .root_bank()
            .get_blockhash_last_valid_block_height(blockhash)
            .get_blockhash_last_valid_slot(&blockhash)
            .unwrap();
        let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
        let info = TransactionInfo::new(
            signature,
            serialize(&transaction).unwrap(),
            last_valid_block_height,
        );
        let info =
            TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
        self.transaction_sender.send(info).unwrap();
    }

@@ -180,13 +162,11 @@ impl Banks for BanksServer {
        self,
        _: Context,
        commitment: CommitmentLevel,
    ) -> (FeeCalculator, Hash, u64) {
    ) -> (FeeCalculator, Hash, Slot) {
        let bank = self.bank(commitment);
        let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
        let last_valid_block_height = bank
            .get_blockhash_last_valid_block_height(&blockhash)
            .unwrap();
        (fee_calculator, blockhash, last_valid_block_height)
        let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
        (fee_calculator, blockhash, last_valid_slot)
    }

    async fn get_transaction_status_with_context(
@@ -229,33 +209,29 @@ impl Banks for BanksServer {
        self.slot(commitment)
    }

    async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
        self.bank(commitment).block_height()
    }

    async fn process_transaction_with_commitment_and_context(
        self,
        _: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> Option<transaction::Result<()>> {
        if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
        if let Err(err) = verify_transaction(&transaction) {
            return Some(Err(err));
        }

        let blockhash = &transaction.message.recent_blockhash;
        let last_valid_block_height = self
            .bank(commitment)
            .get_blockhash_last_valid_block_height(blockhash)
        let last_valid_slot = self
            .bank_forks
            .read()
            .unwrap()
            .root_bank()
            .get_blockhash_last_valid_slot(blockhash)
            .unwrap();
        let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
        let info = TransactionInfo::new(
            signature,
            serialize(&transaction).unwrap(),
            last_valid_block_height,
        );
        let info =
            TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
        self.transaction_sender.send(info).unwrap();
        self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
        self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
            .await
    }

@@ -273,13 +249,8 @@ impl Banks for BanksServer {
pub async fn start_local_server(
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    poll_signature_status_sleep_duration: Duration,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
    let banks_server = BanksServer::new_loopback(
        bank_forks,
        block_commitment_cache,
        poll_signature_status_sleep_duration,
    );
    let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
    let (client_transport, server_transport) = transport::channel::unbounded();
    let server = server::new(server::Config::default())
        .incoming(stream::once(future::ready(server_transport)))
@@ -314,12 +285,8 @@ pub async fn start_tcp_server(

        SendTransactionService::new(tpu_addr, &bank_forks, receiver);

        let server = BanksServer::new(
            bank_forks.clone(),
            block_commitment_cache.clone(),
            sender,
            Duration::from_millis(200),
        );
        let server =
            BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
        chan.respond_with(server.serve()).execute()
    })
    // Max 10 channels.
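On the server side, both send paths above derive a transaction's lifetime from its recent blockhash. A minimal sketch of that lookup, assuming only the 1.6-side `get_blockhash_last_valid_slot` call used in the hunks (`blockhash_expiry` is a hypothetical helper name):

use solana_runtime::bank_forks::BankForks;
use solana_sdk::{clock::Slot, hash::Hash};
use std::sync::{Arc, RwLock};

// Returns the last slot at which `blockhash` is still accepted, or None if
// the root bank no longer knows the blockhash at all.
fn blockhash_expiry(bank_forks: &Arc<RwLock<BankForks>>, blockhash: &Hash) -> Option<Slot> {
    bank_forks
        .read()
        .unwrap()
        .root_bank()
        .get_blockhash_last_valid_slot(blockhash)
}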
@@ -1,23 +1,21 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.

use {
    crate::banks_server::start_tcp_server,
    futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
    solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
    std::{
        net::SocketAddr,
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc, RwLock,
        },
        thread::{self, Builder, JoinHandle},
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
    net::SocketAddr,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, RwLock,
    },
    tokio::{
        runtime::Runtime,
        time::{self, Duration},
    },
    tokio_stream::wrappers::IntervalStream,
    thread::{self, Builder, JoinHandle},
};
use tokio::{
    runtime::Runtime,
    time::{self, Duration},
};
use tokio_stream::wrappers::IntervalStream;

pub struct RpcBanksService {
    thread_hdl: JoinHandle<()>,
@@ -103,7 +101,8 @@ impl RpcBanksService {

#[cfg(test)]
mod tests {
    use {super::*, solana_runtime::bank::Bank};
    use super::*;
    use solana_runtime::bank::Bank;

    #[test]
    fn test_rpc_banks_server_exit() {
@@ -1,19 +1,17 @@
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
use {
    log::*,
    solana_metrics::{datapoint_warn, inc_new_counter_info},
    solana_runtime::{bank::Bank, bank_forks::BankForks},
    solana_sdk::signature::Signature,
    std::{
        collections::HashMap,
        net::{SocketAddr, UdpSocket},
        sync::{
            mpsc::{Receiver, RecvTimeoutError},
            Arc, RwLock,
        },
        thread::{self, Builder, JoinHandle},
        time::{Duration, Instant},
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
    collections::HashMap,
    net::{SocketAddr, UdpSocket},
    sync::{
        mpsc::{Receiver, RecvTimeoutError},
        Arc, RwLock,
    },
    thread::{self, Builder, JoinHandle},
    time::{Duration, Instant},
};

/// Maximum size of the transaction queue
@@ -26,19 +24,15 @@ pub struct SendTransactionService {
pub struct TransactionInfo {
    pub signature: Signature,
    pub wire_transaction: Vec<u8>,
    pub last_valid_block_height: u64,
    pub last_valid_slot: Slot,
}

impl TransactionInfo {
    pub fn new(
        signature: Signature,
        wire_transaction: Vec<u8>,
        last_valid_block_height: u64,
    ) -> Self {
    pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
        Self {
            signature,
            wire_transaction,
            last_valid_block_height,
            last_valid_slot,
        }
    }
}
@@ -130,7 +124,7 @@ impl SendTransactionService {
            result.rooted += 1;
            inc_new_counter_info!("send_transaction_service-rooted", 1);
            false
        } else if transaction_info.last_valid_block_height < root_bank.block_height() {
        } else if transaction_info.last_valid_slot < root_bank.slot() {
            info!("Dropping expired transaction: {}", signature);
            result.expired += 1;
            inc_new_counter_info!("send_transaction_service-expired", 1);
@@ -144,8 +138,8 @@ impl SendTransactionService {
            result.retried += 1;
            inc_new_counter_info!("send_transaction_service-retry", 1);
            Self::send_transaction(
                send_socket,
                tpu_address,
                &send_socket,
                &tpu_address,
                &transaction_info.wire_transaction,
            );
            true
@@ -185,14 +179,12 @@ impl SendTransactionService {

#[cfg(test)]
mod test {
    use {
        super::*,
        solana_sdk::{
            genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
            system_transaction,
        },
        std::sync::mpsc::channel,
    use super::*;
    use solana_sdk::{
        genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
        system_transaction,
    };
    use std::sync::mpsc::channel;

    #[test]
    fn service_exit() {
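The retry loop above keeps a transaction alive only while its slot window is open. The drop rule from the `@@ -130,7 +124,7 @@` hunk, restated as a standalone predicate (a sketch, not part of the diff; `is_expired` is a hypothetical helper name):

use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;

// A queued transaction expires once the root bank moves past its window.
fn is_expired(last_valid_slot: Slot, root_bank: &Bank) -> bool {
    last_valid_slot < root_bank.slot()
}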
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.8.17"
version = "1.6.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,23 +18,21 @@ rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.1" }
solana-core = { path = "../core", version = "=1.6.1" }
solana-genesis = { path = "../genesis", version = "=1.6.1" }
solana-client = { path = "../client", version = "=1.6.1" }
solana-faucet = { path = "../faucet", version = "=1.6.1" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.6.1" }
solana-logger = { path = "../logger", version = "=1.6.1" }
solana-metrics = { path = "../metrics", version = "=1.6.1" }
solana-net-utils = { path = "../net-utils", version = "=1.6.1" }
solana-runtime = { path = "../runtime", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
solana-version = { path = "../version", version = "=1.6.1" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.1" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,45 +1,43 @@
#![allow(clippy::useless_attribute)]
#![allow(clippy::integer_arithmetic)]

use {
    crate::order_book::*,
    itertools::izip,
    log::*,
    rand::{thread_rng, Rng},
    rayon::prelude::*,
    solana_client::perf_utils::{sample_txs, SampleStats},
    solana_core::gen_keys::GenKeys,
    solana_exchange_program::{exchange_instruction, exchange_state::*, id},
    solana_faucet::faucet::request_airdrop_transaction,
    solana_genesis::Base64Account,
    solana_metrics::datapoint_info,
    solana_sdk::{
        client::{Client, SyncClient},
        commitment_config::CommitmentConfig,
        message::Message,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        system_instruction, system_program,
        timing::{duration_as_ms, duration_as_s},
        transaction::Transaction,
    },
    std::{
        cmp,
        collections::{HashMap, VecDeque},
        fs::File,
        io::prelude::*,
        mem,
        net::SocketAddr,
        path::Path,
        process::exit,
        sync::{
            atomic::{AtomicBool, AtomicUsize, Ordering},
            mpsc::{channel, Receiver, Sender},
            Arc, RwLock,
        },
        thread::{sleep, Builder},
        time::{Duration, Instant},
use crate::order_book::*;
use itertools::izip;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_genesis::Base64Account;
use solana_metrics::datapoint_info;
use solana_sdk::{
    client::{Client, SyncClient},
    commitment_config::CommitmentConfig,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    timing::{duration_as_ms, duration_as_s},
    transaction::Transaction,
    {system_instruction, system_program},
};
use std::{
    cmp,
    collections::{HashMap, VecDeque},
    fs::File,
    io::prelude::*,
    mem,
    net::SocketAddr,
    path::Path,
    process::exit,
    sync::{
        atomic::{AtomicBool, AtomicUsize, Ordering},
        mpsc::{channel, Receiver, Sender},
        Arc, RwLock,
    },
    thread::{sleep, Builder},
    time::{Duration, Instant},
};

// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
@@ -453,13 +451,13 @@ fn swapper<T>(
    let to_swap_txs: Vec<_> = to_swap
        .par_iter()
        .map(|(signer, swap, profit)| {
            let s: &Keypair = signer;
            let s: &Keypair = &signer;
            let owner = &signer.pubkey();
            let instruction = exchange_instruction::swap_request(
                owner,
                &swap.0.pubkey,
                &swap.1.pubkey,
                profit,
                &profit,
            );
            let message = Message::new(&[instruction], Some(&s.pubkey()));
            Transaction::new(&[s], message, blockhash)
@@ -602,7 +600,7 @@ fn trader<T>(
                src,
            ),
        ];
        let message = Message::new(&instructions, Some(owner_pubkey));
        let message = Message::new(&instructions, Some(&owner_pubkey));
        Transaction::new(&[owner.as_ref(), trade], message, blockhash)
    })
    .collect();
@@ -741,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
        let mut to_fund_txs: Vec<_> = chunk
            .par_iter()
            .map(|(k, m)| {
                let instructions = system_instruction::transfer_many(&k.pubkey(), m);
                let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (k.clone(), Transaction::new_unsigned(message))
            })
@@ -779,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
        let mut waits = 0;
        loop {
            sleep(Duration::from_millis(200));
            to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
            to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
            if to_fund_txs.is_empty() {
                break;
            }
@@ -838,7 +836,7 @@ pub fn create_token_accounts<T: Client>(
                );
                let request_ix =
                    exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
                let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
                let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
                (
                    (from_keypair, new_keypair),
                    Transaction::new_unsigned(message),
@@ -874,7 +872,7 @@ pub fn create_token_accounts<T: Client>(
        let mut waits = 0;
        while !to_create_txs.is_empty() {
            sleep(Duration::from_millis(200));
            to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
            to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
            if to_create_txs.is_empty() {
                break;
            }
@@ -960,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot

fn generate_keypairs(num: u64) -> Vec<Keypair> {
    let mut seed = [0_u8; 32];
    seed.copy_from_slice(Keypair::new().pubkey().as_ref());
    seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
    let mut rnd = GenKeys::new(seed);
    rnd.gen_n_keypairs(num)
}
@@ -991,7 +989,7 @@ pub fn airdrop_lamports<T: Client>(
        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::processed())
            .expect("Failed to get blockhash");
        match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
            Ok(transaction) => {
                let signature = client.async_send_transaction(transaction).unwrap();
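The `fund_keys` hunks above fan lamports out with `system_instruction::transfer_many`. A minimal sketch of building one such unsigned funding transaction, using only calls that appear in this diff (`build_funding_tx` itself is a hypothetical helper name):

use solana_sdk::{
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

// One fan-out transfer: a single payer funds many destinations at once.
fn build_funding_tx(payer: &Keypair, to_fund: &[(Pubkey, u64)]) -> Transaction {
    let instructions = system_instruction::transfer_many(&payer.pubkey(), to_fund);
    let message = Message::new(&instructions, Some(&payer.pubkey()));
    Transaction::new_unsigned(message)
}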
@@ -1,10 +1,10 @@
use {
    clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches},
    solana_core::gen_keys::GenKeys,
    solana_faucet::faucet::FAUCET_PORT,
    solana_sdk::signature::{read_keypair_file, Keypair},
    std::{net::SocketAddr, process::exit, time::Duration},
};
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;

pub struct Config {
    pub entrypoint_addr: SocketAddr,
@@ -3,13 +3,10 @@ pub mod bench;
mod cli;
pub mod order_book;

use {
    crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config},
    log::*,
    solana_gossip::gossip_service::{discover_cluster, get_multi_client},
    solana_sdk::signature::Signer,
    solana_streamer::socket::SocketAddrSpace,
};
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;

fn main() {
    solana_logger::setup();
@@ -58,12 +55,11 @@ fn main() {
        );
    } else {
        info!("Connecting to the cluster");
        let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
            .unwrap_or_else(|_| {
                panic!("Failed to discover nodes");
            });
        let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
            panic!("Failed to discover nodes");
        });

        let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
        let (client, num_clients) = get_multi_client(&nodes);

        info!("{} nodes found", num_clients);
        if num_clients < num_nodes {
@@ -1,13 +1,11 @@
use {
    itertools::{
        EitherOrBoth::{Both, Left, Right},
        Itertools,
    },
    log::*,
    solana_exchange_program::exchange_state::*,
    solana_sdk::pubkey::Pubkey,
    std::{cmp::Ordering, collections::BinaryHeap, error, fmt},
};
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {
@@ -1,24 +1,23 @@
use {
    log::*,
    solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config},
    solana_core::validator::ValidatorConfig,
    solana_exchange_program::{
        exchange_processor::process_instruction, id, solana_exchange_program,
    },
    solana_faucet::faucet::run_local_faucet_with_port,
    solana_gossip::gossip_service::{discover_cluster, get_multi_client},
    solana_local_cluster::{
        local_cluster::{ClusterConfig, LocalCluster},
        validator_configs::make_identical_validator_configs,
    },
    solana_runtime::{bank::Bank, bank_client::BankClient},
    solana_sdk::{
        genesis_config::create_genesis_config,
        signature::{Keypair, Signer},
    },
    solana_streamer::socket::SocketAddrSpace,
    std::{process::exit, sync::mpsc::channel, time::Duration},
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::{
    gossip_service::{discover_cluster, get_multi_client},
    validator::ValidatorConfig,
};
use solana_exchange_program::{
    exchange_processor::process_instruction, id, solana_exchange_program,
};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_local_cluster::{
    local_cluster::{ClusterConfig, LocalCluster},
    validator_configs::make_identical_validator_configs,
};
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::{
    genesis_config::create_genesis_config,
    signature::{Keypair, Signer},
};
use std::{process::exit, sync::mpsc::channel, time::Duration};

#[test]
#[ignore]
@@ -46,19 +45,13 @@ fn test_exchange_local_cluster() {
    } = config;
    let accounts_in_groups = batch_size * account_groups;

    let cluster = LocalCluster::new(
        &mut ClusterConfig {
            node_stakes: vec![100_000; NUM_NODES],
            cluster_lamports: 100_000_000_000_000,
            validator_configs: make_identical_validator_configs(
                &ValidatorConfig::default_for_test(),
                NUM_NODES,
            ),
            native_instruction_processors: [solana_exchange_program!()].to_vec(),
            ..ClusterConfig::default()
        },
        SocketAddrSpace::Unspecified,
    );
    let cluster = LocalCluster::new(&mut ClusterConfig {
        node_stakes: vec![100_000; NUM_NODES],
        cluster_lamports: 100_000_000_000_000,
        validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
        native_instruction_processors: [solana_exchange_program!()].to_vec(),
        ..ClusterConfig::default()
    });

    let faucet_keypair = Keypair::new();
    cluster.transfer(
@@ -75,17 +68,13 @@ fn test_exchange_local_cluster() {
        .expect("faucet_addr");

    info!("Connecting to the cluster");
    let nodes = discover_cluster(
        &cluster.entry_point_info.gossip,
        NUM_NODES,
        SocketAddrSpace::Unspecified,
    )
    .unwrap_or_else(|err| {
        error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
        exit(1);
    });
    let nodes =
        discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
            error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
            exit(1);
        });

    let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
    let (client, num_clients) = get_multi_client(&nodes);

    info!("clients: {}", num_clients);
    assert!(num_clients >= NUM_NODES);
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.8.17"
version = "1.6.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.1" }
solana-streamer = { path = "../streamer", version = "=1.6.1" }
solana-logger = { path = "../logger", version = "=1.6.1" }
solana-net-utils = { path = "../net-utils", version = "=1.6.1" }
solana-version = { path = "../version", version = "=1.6.1" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,38 +1,32 @@
#![allow(clippy::integer_arithmetic)]
use {
    clap::{crate_description, crate_name, App, Arg},
    solana_streamer::{
        packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
        streamer::{receiver, PacketBatchReceiver},
    },
    std::{
        cmp::max,
        net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
        sync::{
            atomic::{AtomicBool, AtomicUsize, Ordering},
            mpsc::channel,
            Arc,
        },
        thread::{sleep, spawn, JoinHandle, Result},
        time::{Duration, SystemTime},
    },
};
use clap::{crate_description, crate_name, App, Arg};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle, Result};
use std::time::Duration;
use std::time::SystemTime;

fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut packet_batch = PacketBatch::default();
    packet_batch.packets.resize(10, Packet::default());
    for w in packet_batch.packets.iter_mut() {
    let mut msgs = Packets::default();
    msgs.packets.resize(10, Packet::default());
    for w in msgs.packets.iter_mut() {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(addr);
        w.meta.set_addr(&addr);
    }
    let packet_batch = Arc::new(packet_batch);
    let msgs = Arc::new(msgs);
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let mut num = 0;
        for p in &packet_batch.packets {
        for p in &msgs.packets {
            let a = p.meta.addr();
            assert!(p.meta.size <= PACKET_DATA_SIZE);
            send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +36,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    })
}

fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let timer = Duration::new(1, 0);
        if let Ok(packet_batch) = r.recv_timeout(timer) {
            rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
        if let Ok(msgs) = r.recv_timeout(timer) {
            rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
        }
    })
}
@@ -81,7 +75,7 @@ fn main() -> Result<()> {

    let mut read_channels = Vec::new();
    let mut read_threads = Vec::new();
    let recycler = PacketBatchRecycler::default();
    let recycler = PacketsRecycler::new_without_limit("bench-streamer-recycler-shrink-stats");
    for _ in 0..num_sockets {
        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -98,7 +92,6 @@ fn main() -> Result<()> {
            recycler.clone(),
            "bench-streamer-test",
            1,
            true,
        ));
    }
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.8.17"
version = "1.6.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,24 +15,22 @@ log = "0.4.11"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.1" }
solana-core = { path = "../core", version = "=1.6.1" }
solana-genesis = { path = "../genesis", version = "=1.6.1" }
solana-client = { path = "../client", version = "=1.6.1" }
solana-faucet = { path = "../faucet", version = "=1.6.1" }
solana-logger = { path = "../logger", version = "=1.6.1" }
solana-metrics = { path = "../metrics", version = "=1.6.1" }
solana-measure = { path = "../measure", version = "=1.6.1" }
solana-net-utils = { path = "../net-utils", version = "=1.6.1" }
solana-runtime = { path = "../runtime", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
solana-version = { path = "../version", version = "=1.6.1" }

[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.1" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,36 +1,34 @@
use {
    crate::cli::Config,
    log::*,
    rayon::prelude::*,
    solana_client::perf_utils::{sample_txs, SampleStats},
    solana_core::gen_keys::GenKeys,
    solana_faucet::faucet::request_airdrop_transaction,
    solana_measure::measure::Measure,
    solana_metrics::{self, datapoint_info},
    solana_sdk::{
        client::Client,
        clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
        commitment_config::CommitmentConfig,
        fee_calculator::FeeCalculator,
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        system_instruction, system_transaction,
        timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
        transaction::Transaction,
    },
    std::{
        collections::{HashSet, VecDeque},
        net::SocketAddr,
        process::exit,
        sync::{
            atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
            Arc, Mutex, RwLock,
        },
        thread::{sleep, Builder, JoinHandle},
        time::{Duration, Instant},
use crate::cli::Config;
use log::*;
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
    client::Client,
    clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
    commitment_config::CommitmentConfig,
    fee_calculator::FeeCalculator,
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction, system_transaction,
    timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
    transaction::Transaction,
};
use std::{
    collections::{HashSet, VecDeque},
    net::SocketAddr,
    process::exit,
    sync::{
        atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
        Arc, Mutex, RwLock,
    },
    thread::{sleep, Builder, JoinHandle},
    time::{Duration, Instant},
};

// The point at which transactions become "too old", in seconds.
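The import churn in the hunk above is purely stylistic: the newer side formats every import into a single nested `use` tree (the form rustfmt produces with its `imports_granularity` option — attributing the tree to that option is an assumption, not stated in the diff), while the older side keeps one `use` statement per crate. A minimal standalone sketch of the two equivalent forms, using only `std` paths:

    // Newer-side style: one merged `use` tree.
    use {
        std::{collections::HashSet, time::Duration},
    };

    // Older-side equivalent, as separate statements:
    //   use std::collections::HashSet;
    //   use std::time::Duration;

    fn main() {
        let mut seen: HashSet<u64> = HashSet::new();
        seen.insert(42);
        let _timeout = Duration::from_secs(1);
    }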
@@ -546,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {

        // re-sign retained to_fund_txes with updated blockhash
        self.sign(blockhash);
        self.send(client);
        self.send(&client);

        // Sleep a few slots to allow transactions to process
        sleep(Duration::from_secs(1));

        self.verify(client, to_lamports);
        self.verify(&client, to_lamports);

        // retry anything that seems to have dropped through cracks
        //  again since these txs are all or nothing, they're fine to
@@ -566,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
        let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
            .par_iter()
            .map(|(k, t)| {
                let instructions = system_instruction::transfer_many(&k.pubkey(), t);
                let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
                let message = Message::new(&instructions, Some(&k.pubkey()));
                (*k, Transaction::new_unsigned(message))
            })
@@ -619,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
                    return None;
                }

                let verified = if verify_funding_transfer(&client, tx, to_lamports) {
                let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
                    verified_txs.fetch_add(1, Ordering::Relaxed);
                    Some(k.pubkey())
                } else {
@@ -735,7 +733,7 @@ pub fn airdrop_lamports<T: Client>(
    );

    let (blockhash, _fee_calculator) = get_recent_blockhash(client);
    match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
    match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
        Ok(transaction) => {
            let mut tries = 0;
            loop {
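The `&client` → `client` and `&tx` → `tx` edits in these hunks are mechanical: the newer side drops borrows of values that are already references — the pattern clippy reports as `needless_borrow` (inferring that lint from the shape of the change; the diff itself does not name it). A standalone sketch with a hypothetical `verify` helper:

    fn verify(label: &str, min_len: usize) -> bool {
        label.len() >= min_len
    }

    fn main() {
        let owned = String::from("rpc-client");
        let label: &str = &owned;
        // `label` is already a `&str`, so passing it directly is enough...
        assert!(verify(label, 3));
        // ...while `&label` creates a `&&str` that deref coercion folds right
        // back to `&str`. clippy's needless_borrow lint flags this form.
        assert!(verify(&label, 3));
    }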
@@ -926,14 +924,12 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(

#[cfg(test)]
mod tests {
    use {
        super::*,
        solana_runtime::{bank::Bank, bank_client::BankClient},
        solana_sdk::{
            client::SyncClient, fee_calculator::FeeRateGovernor,
            genesis_config::create_genesis_config,
        },
    };
    use super::*;
    use solana_runtime::bank::Bank;
    use solana_runtime::bank_client::BankClient;
    use solana_sdk::client::SyncClient;
    use solana_sdk::fee_calculator::FeeRateGovernor;
    use solana_sdk::genesis_config::create_genesis_config;

    #[test]
    fn test_bench_tps_bank_client() {
@@ -1,13 +1,11 @@
use {
    clap::{crate_description, crate_name, App, Arg, ArgMatches},
    solana_faucet::faucet::FAUCET_PORT,
    solana_sdk::{
        fee_calculator::FeeRateGovernor,
        pubkey::Pubkey,
        signature::{read_keypair_file, Keypair},
    },
    std::{net::SocketAddr, process::exit, time::Duration},
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::{
    pubkey::Pubkey,
    signature::{read_keypair_file, Keypair},
};
use std::{net::SocketAddr, process::exit, time::Duration};

const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
@@ -1,20 +1,13 @@
#![allow(clippy::integer_arithmetic)]
use {
    log::*,
    solana_bench_tps::{
        bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
        cli,
    },
    solana_genesis::Base64Account,
    solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
    solana_sdk::{
        fee_calculator::FeeRateGovernor,
        signature::{Keypair, Signer},
        system_program,
    },
    solana_streamer::socket::SocketAddrSpace,
    std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
};
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};

/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
@@ -46,7 +39,7 @@ fn main() {
    let keypair_count = *tx_count * keypair_multiplier;
    if *write_to_client_file {
        info!("Generating {} keypairs", keypair_count);
        let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
        let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
        let num_accounts = keypairs.len() as u64;
        let max_fee =
            FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@@ -75,14 +68,13 @@ fn main() {
    }

    info!("Connecting to the cluster");
    let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
        .unwrap_or_else(|err| {
            eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
            exit(1);
        });
    let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
        eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
        exit(1);
    });

    let client = if *multi_client {
        let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
        let (client, num_clients) = get_multi_client(&nodes);
        if nodes.len() < num_clients {
            eprintln!(
                "Error: Insufficient nodes discovered. Expecting {} or more",
@@ -96,7 +88,7 @@ fn main() {
        let mut target_client = None;
        for node in nodes {
            if node.id == *target_node {
                target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
                target_client = Some(Arc::new(get_client(&[node])));
                break;
            }
        }
@@ -105,7 +97,7 @@ fn main() {
            exit(1);
        })
    } else {
        Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
        Arc::new(get_client(&nodes))
    };

    let keypairs = if *read_from_client_file {
@@ -143,7 +135,7 @@ fn main() {
        generate_and_fund_keypairs(
            client.clone(),
            Some(*faucet_addr),
            id,
            &id,
            keypair_count,
            *num_lamports_per_account,
        )
@@ -1,24 +1,20 @@
#![allow(clippy::integer_arithmetic)]
use {
    serial_test::serial,
    solana_bench_tps::{
        bench::{do_bench_tps, generate_and_fund_keypairs},
        cli::Config,
    },
    solana_client::thin_client::create_client,
    solana_core::validator::ValidatorConfig,
    solana_faucet::faucet::run_local_faucet_with_port,
    solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
    solana_local_cluster::{
        local_cluster::{ClusterConfig, LocalCluster},
        validator_configs::make_identical_validator_configs,
    },
    solana_sdk::signature::{Keypair, Signer},
    solana_streamer::socket::SocketAddrSpace,
    std::{
        sync::{mpsc::channel, Arc},
        time::Duration,
    },
use serial_test::serial;
use solana_bench_tps::{
    bench::{do_bench_tps, generate_and_fund_keypairs},
    cli::Config,
};
use solana_client::thin_client::create_client;
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_local_cluster::{
    local_cluster::{ClusterConfig, LocalCluster},
    validator_configs::make_identical_validator_configs,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
    sync::{mpsc::channel, Arc},
    time::Duration,
};

fn test_bench_tps_local_cluster(config: Config) {
@@ -26,19 +22,13 @@ fn test_bench_tps_local_cluster(config: Config) {

    solana_logger::setup();
    const NUM_NODES: usize = 1;
    let cluster = LocalCluster::new(
        &mut ClusterConfig {
            node_stakes: vec![999_990; NUM_NODES],
            cluster_lamports: 200_000_000,
            validator_configs: make_identical_validator_configs(
                &ValidatorConfig::default_for_test(),
                NUM_NODES,
            ),
            native_instruction_processors,
            ..ClusterConfig::default()
        },
        SocketAddrSpace::Unspecified,
    );
    let cluster = LocalCluster::new(&mut ClusterConfig {
        node_stakes: vec![999_990; NUM_NODES],
        cluster_lamports: 200_000_000,
        validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
        native_instruction_processors,
        ..ClusterConfig::default()
    });

    let faucet_keypair = Keypair::new();
    cluster.transfer(
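Both versions of the `ClusterConfig` literal in the hunk above lean on Rust's struct-update syntax: `..ClusterConfig::default()` fills every field not listed explicitly. A minimal sketch with a stand-in config type (field names are illustrative, not the real `ClusterConfig`):

    #[derive(Default)]
    struct DemoClusterConfig {
        node_stakes: Vec<u64>,
        cluster_lamports: u64,
        validators: usize,
    }

    fn main() {
        // Only the fields that differ are spelled out; `..Default::default()`
        // supplies the rest (here, `validators` stays 0).
        let config = DemoClusterConfig {
            node_stakes: vec![999_990],
            cluster_lamports: 200_000_000,
            ..DemoClusterConfig::default()
        };
        assert_eq!(config.validators, 0);
    }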
@@ -1,32 +0,0 @@
[package]
name = "solana-bloom"
version = "1.8.17"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2018"

[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
log = "0.4.14"

[lib]
crate-type = ["lib"]
name = "solana_bloom"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[build-dependencies]
rustc_version = "0.4"
@@ -1 +0,0 @@
../frozen-abi/build.rs
@@ -1,5 +0,0 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;

#[macro_use]
extern crate solana_frozen_abi_macro;
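The deleted manifest and `lib.rs` above belong to the standalone `solana-bloom` crate, which only the newer side splits out of the runtime. For readers unfamiliar with the data structure it packages, a toy bloom filter — deliberately unrelated to the crate's actual implementation:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Toy bloom filter: k seeded hash probes into a fixed bit set.
    // Answers are "definitely not present" or "probably present".
    struct ToyBloom {
        bits: Vec<bool>,
        num_hashes: u64,
    }

    impl ToyBloom {
        fn new(num_bits: usize, num_hashes: u64) -> Self {
            Self { bits: vec![false; num_bits], num_hashes }
        }

        fn index<T: Hash>(&self, item: &T, seed: u64) -> usize {
            let mut hasher = DefaultHasher::new();
            seed.hash(&mut hasher);
            item.hash(&mut hasher);
            (hasher.finish() as usize) % self.bits.len()
        }

        fn add<T: Hash>(&mut self, item: &T) {
            for seed in 0..self.num_hashes {
                let i = self.index(item, seed);
                self.bits[i] = true;
            }
        }

        fn contains<T: Hash>(&self, item: &T) -> bool {
            (0..self.num_hashes).all(|seed| self.bits[self.index(item, seed)])
        }
    }

    fn main() {
        let mut bloom = ToyBloom::new(1024, 3);
        bloom.add(&"tx-signature");
        assert!(bloom.contains(&"tx-signature")); // inserted items always hit
        // Absent items usually miss; false positives are possible in general.
        assert!(!bloom.contains(&"something-else"));
    }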
cargo (9 lines changed)
@@ -3,22 +3,25 @@
# shellcheck source=ci/rust-version.sh
here=$(dirname "$0")

source "${here}"/ci/rust-version.sh all

toolchain=
case "$1" in
stable)
  source "${here}"/ci/rust-version.sh stable
  # shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
  toolchain="$rust_stable"
  shift
  ;;
nightly)
  source "${here}"/ci/rust-version.sh nightly
  # shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
  toolchain="$rust_nightly"
  shift
  ;;
+*)
  toolchain="${1#+}"
  shift
  ;;
*)
  source "${here}"/ci/rust-version.sh stable
  # shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
  toolchain="$rust_stable"
  ;;
@@ -102,8 +102,6 @@ command_step() {
      command: "$2"
      timeout_in_minutes: $3
      artifact_paths: "log-*.txt"
      agents:
        - "queue=solana"
EOF
}

@@ -139,7 +137,7 @@ all_test_steps() {
             ^ci/test-coverage.sh \
             ^scripts/coverage.sh \
      ; then
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
    wait_step
  else
    annotate --style info --context test-coverage \
@@ -150,33 +148,6 @@ all_test_steps() {
  command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
  wait_step

  # BPF test suite
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-bpf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "ci/test-stable-bpf.sh"
    name: "stable-bpf"
    timeout_in_minutes: 20
    artifact_paths: "bpf-dumps.tar.bz2"
    agents:
      - "queue=solana"
EOF
  else
    annotate --style info \
      "Stable-BPF skipped as no relevant files were modified"
  fi

  # Perf test suite
  if affects \
             .rs$ \
@@ -194,7 +165,7 @@ EOF
    cat >> "$output_file" <<"EOF"
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 20
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
@@ -223,8 +194,6 @@ EOF
  - command: "scripts/build-downstream-projects.sh"
    name: "downstream-projects"
    timeout_in_minutes: 30
    agents:
      - "queue=solana"
EOF
  else
    annotate --style info \
@@ -247,15 +216,7 @@ EOF

  command_step "local-cluster" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
    40

  command_step "local-cluster-flakey" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
    10

  command_step "local-cluster-slow" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
    30
    45
}

pull_or_push_steps() {
@@ -3,24 +3,16 @@
# Pull requests to not run these steps.
steps:
  - command: "ci/publish-tarball.sh"
    agents:
      - "queue=release-build"
    timeout_in_minutes: 60
    name: "publish tarball"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
  - wait
  - command: "sdk/docker-solana/build.sh"
    agents:
      - "queue=release-build"
    timeout_in_minutes: 60
    name: "publish docker"
  - command: "ci/publish-crate.sh"
    agents:
      - "queue=release-build"
    timeout_in_minutes: 240
    name: "publish crate"
    branches: "!master"
  - command: "ci/publish-tarball.sh"
    agents:
      - "queue=release-build-aarch64-apple-darwin"
    timeout_in_minutes: 60
    name: "publish tarball (aarch64-apple-darwin)"
@@ -105,18 +105,11 @@ if [[ -z "$CHANNEL" ]]; then
  fi
fi

if [[ $CHANNEL = beta ]]; then
  CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
elif [[ $CHANNEL = stable ]]; then
  CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
fi

echo EDGE_CHANNEL="$EDGE_CHANNEL"
echo BETA_CHANNEL="$BETA_CHANNEL"
echo BETA_CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
echo STABLE_CHANNEL="$STABLE_CHANNEL"
echo STABLE_CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
echo CHANNEL="$CHANNEL"
echo CHANNEL_LATEST_TAG="$CHANNEL_LATEST_TAG"

exit 0
@@ -7,6 +7,8 @@ src_root="$(readlink -f "${here}/..")"

cd "${src_root}"

source ci/rust-version.sh stable

cargo_audit_ignores=(
  # failure is officially deprecated/unmaintained
  #
@@ -28,29 +30,16 @@ cargo_audit_ignores=(
  # Blocked on multiple crates updating `time` to >= 0.2.23
  --ignore RUSTSEC-2020-0071

  # difference is unmaintained
  #
  # Blocked on predicates v1.0.6 removing its dependency on `difference`
  --ignore RUSTSEC-2020-0095

  # generic-array: arr! macro erases lifetimes
  #
  # Blocked on libsecp256k1 releasing with upgraded dependencies
  # https://github.com/paritytech/libsecp256k1/issues/66
  --ignore RUSTSEC-2020-0146

  # hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
  #
  # Blocked on jsonrpc removing dependency on unmaintained `websocket`
  # https://github.com/paritytech/jsonrpc/issues/605
  --ignore RUSTSEC-2021-0078

  # hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
  #
  # Blocked on jsonrpc removing dependency on unmaintained `websocket`
  # https://github.com/paritytech/jsonrpc/issues/605
  --ignore RUSTSEC-2021-0079

  # chrono: Potential segfault in `localtime_r` invocations
  #
  # Blocked due to no safe upgrade
  # https://github.com/chronotope/chrono/issues/499
  --ignore RUSTSEC-2020-0159

)
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"
scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.52.1
FROM solanalabs/rust:1.50.0
ARG date

RUN set -x \
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.52.1
FROM rust:1.50.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
ci/env.sh (21 lines changed)
@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
  elif [[ -n $BUILDKITE ]]; then
    export CI_BRANCH=$BUILDKITE_BRANCH
    export CI_BUILD_ID=$BUILDKITE_BUILD_ID
    if [[ $BUILDKITE_COMMIT = HEAD ]]; then
      BUILDKITE_COMMIT="$(git rev-parse HEAD)"
    fi
    export CI_COMMIT=$BUILDKITE_COMMIT
    export CI_JOB_ID=$BUILDKITE_JOB_ID
    # The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
      export CI_BASE_BRANCH=$BUILDKITE_BRANCH
      export CI_PULL_REQUEST=
    fi

    case "$(uname -s)" in
    Linux)
      export CI_OS_NAME=linux
      ;;
    Darwin)
      export CI_OS_NAME=osx
      ;;
    *)
      ;;
    esac

    export CI_OS_NAME=linux
    if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
      # The solana-secondary pipeline should use the slug of the pipeline that
      # triggered it
@@ -88,13 +74,10 @@ else
  export CI_BUILD_ID=
  export CI_COMMIT=
  export CI_JOB_ID=
  export CI_OS_NAME=
  export CI_PULL_REQUEST=
  export CI_REPO_SLUG=
  export CI_TAG=
  # Don't override ci/run-local.sh
  if [[ -z $CI_LOCAL_RUN ]]; then
    export CI_OS_NAME=
  fi
fi

cat <<EOF
@@ -70,7 +70,7 @@ done

source ci/upload-ci-artifact.sh
source scripts/configure-metrics.sh
source multinode-demo/common.sh --prebuild
source multinode-demo/common.sh

nodes=(
  "multinode-demo/bootstrap-validator.sh \
@@ -127,7 +127,7 @@ startNode() {
waitForNodeToInit() {
  declare initCompleteFile=$1
  while [[ ! -r $initCompleteFile ]]; do
    if [[ $SECONDS -ge 300 ]]; then
    if [[ $SECONDS -ge 240 ]]; then
      echo "^^^ +++"
      echo "Error: $initCompleteFile not found in $SECONDS seconds"
      exit 1
@@ -12,14 +12,10 @@ import json
import subprocess
import sys;

real_file = os.path.realpath(__file__)
ci_path = os.path.dirname(real_file)
src_root = os.path.dirname(ci_path)

def load_metadata():
    cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
    return json.loads(subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
        'cargo metadata --no-deps --format-version=1',
        shell=True, stdout=subprocess.PIPE).communicate()[0])

def get_packages():
    metadata = load_metadata()
ci/publish-bpf-sdk.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"

if [[ -n "$CI_TAG" ]]; then
  CHANNEL_OR_TAG=$CI_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

(
  set -x
  sdk/bpf/scripts/package.sh
  [[ -f bpf-sdk.tar.bz2 ]]
)

source ci/upload-ci-artifact.sh
echo --- AWS S3 Store
if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Skipped
else
  upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
fi

exit 0
@@ -39,11 +39,7 @@ fi

case "$CI_OS_NAME" in
osx)
  _cputype="$(uname -m)"
  if [[ $_cputype = arm64 ]]; then
    _cputype=aarch64
  fi
  TARGET=${_cputype}-apple-darwin
  TARGET=x86_64-apple-darwin
  ;;
linux)
  TARGET=x86_64-unknown-linux-gnu
@@ -87,7 +83,7 @@ echo --- Creating release tarball
  export CHANNEL

  source ci/rust-version.sh stable
  scripts/cargo-install-all.sh stable "${RELEASE_BASENAME}"
  scripts/cargo-install-all.sh +"$rust_stable" "${RELEASE_BASENAME}"

  tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}"
  bzip2 "${TARBALL_BASENAME}"-$TARGET.tar
@@ -150,7 +146,7 @@ elif [[ -n $BUILDKITE ]]; then
  cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
  cat install/solana-install-init.sh >> release.solana.com-install
@@ -1,57 +0,0 @@
#!/usr/bin/env bash

cd "$(dirname "$0")/.."

export CI_LOCAL_RUN=true

set -e

case $(uname -o) in
*/Linux)
  export CI_OS_NAME=linux
  ;;
*)
  echo "local CI runs are only supported on Linux" 1>&2
  exit 1
  ;;
esac

steps=()
steps+=(test-sanity)
steps+=(shellcheck)
steps+=(test-checks)
steps+=(test-coverage)
steps+=(test-stable)
steps+=(test-stable-bpf)
steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)

step_index=0
if [[ -n "$1" ]]; then
  start_step="$1"
  while [[ $step_index -lt ${#steps[@]} ]]; do
    step="${steps[$step_index]}"
    if [[ "$step" = "$start_step" ]]; then
      break
    fi
    step_index=$((step_index + 1))
  done
  if [[ $step_index -eq ${#steps[@]} ]]; then
    echo "unexpected start step: \"$start_step\"" 1>&2
    exit 1
  else
    echo "** starting at step: \"$start_step\" **"
    echo
  fi
fi

while [[ $step_index -lt ${#steps[@]} ]]; do
  step="${steps[$step_index]}"
  cmd="ci/${step}.sh"
  $cmd
  step_index=$((step_index + 1))
done
@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
timeout 120 ./run.sh &
pid=$!

attempts=20
@@ -16,17 +16,14 @@ while [[ ! -f config/run/init-completed ]]; do
  if ((--attempts == 0)); then
    echo "Error: validator failed to boot"
    exit 1
  else
    echo "Checking init"
  fi
done

snapshot_slot=1

# wait a bit longer than snapshot_slot
while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
  sleep 1
  echo "Checking slot"
done

$solana_validator --ledger config/ledger exit --force || true
@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
  stable_version="$RUST_STABLE_VERSION"
else
  stable_version=1.52.1
  stable_version=1.50.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
  nightly_version="$RUST_NIGHTLY_VERSION"
else
  nightly_version=2021-05-18
  nightly_version=2021-02-18
fi
@@ -1,24 +0,0 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#

here="$(dirname "$0")"

SBF_TOOLS_VERSION=unknown

cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
  version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
  if [[ ${version} != '' ]]; then
    SBF_TOOLS_VERSION="${version}"
  else
    echo '--- unable to parse SBF_TOOLS_VERSION'
  fi
else
  echo "--- '${cargo_build_bpf_main}' not present"
fi

echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"
@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=1000000
LimitNOFILE=700000

[Install]
WantedBy=multi-user.target
@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
ensure_env || exit 1

# Allow more files to be opened by a user
echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf
echo "* - nofile 700000" > /etc/security/limits.d/90-solana-nofiles.conf
@@ -27,7 +27,7 @@ BENCH_ARTIFACT=current_bench_results.log
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH

# Clear the C dependency files, if dependency moves these files are not regenerated
# Clear the C dependency files, if dependeny moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

@@ -45,14 +45,6 @@ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
  -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run gossip benches
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
  -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run poh benches
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
  -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run core benches
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
  -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
@@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check

# Disallow uncommitted Cargo.lock changes
(
  _ scripts/cargo-for-all-lock-files.sh tree >/dev/null
  _ scripts/cargo-for-all-lock-files.sh tree
  set +e
  if ! _ git diff --exit-code; then
    echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
@@ -35,10 +35,8 @@ echo --- build environment
  "$cargo" stable clippy --version --verbose
  "$cargo" nightly clippy --version --verbose

  # audit is done only with "$cargo stable"
  # audit is done only with stable
  "$cargo" stable audit --version

  grcov --version
)

export RUST_BACKTRACE=1
@@ -47,7 +45,7 @@ export RUSTFLAGS="-D warnings -A incomplete_features"
# Only force up-to-date lock files on edge
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
  # Exclude --benches as it's not available in rust stable yet
  if _ scripts/cargo-for-all-lock-files.sh stable check --locked --tests --bins --examples; then
  if _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" check --locked --tests --bins --examples; then
    true
  else
    check_status=$?
@@ -58,7 +56,7 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
  fi

  # Ensure nightly and --benches
  _ scripts/cargo-for-all-lock-files.sh nightly check --locked --all-targets
  _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else
  echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi
@@ -67,8 +65,7 @@ _ ci/order-crates-for-publishing.py

# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
  --deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic

_ "$cargo" stable fmt --all -- --check

@@ -82,6 +79,7 @@ _ ci/do-audit.sh
    cd "$project"
    _ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
    _ "$cargo" stable fmt -- --check
    _ "$cargo" nightly test
  )
done
}
@@ -1,9 +0,0 @@
#!/usr/bin/env bash

cd "$(dirname "$0")/.."

export CI_LOCAL_RUN=true

set -ex

scripts/build-downstream-projects.sh
@@ -1 +0,0 @@
test-stable.sh
@@ -1 +0,0 @@
test-stable.sh
@@ -25,29 +25,4 @@ echo
_ ci/nits.sh
_ ci/check-ssh-keys.sh


# Ensure the current channel version is not equal ("greater") than
# the version of the latest tag
if [[ -z $CI_TAG ]]; then
  echo "--- channel version check"
  (
    eval "$(ci/channel-info.sh)"

    if [[ -n $CHANNEL_LATEST_TAG ]]; then
      source scripts/read-cargo-variable.sh

      version=$(readCargoVariable version "version/Cargo.toml")
      echo "version: v$version"
      echo "latest channel tag: $CHANNEL_LATEST_TAG"

      if [[ $CHANNEL_LATEST_TAG = v$version ]]; then
        echo "Error: please run ./scripts/increment-cargo-version.sh"
        exit 1
      fi
    else
      echo "Skipped. CHANNEL_LATEST_TAG (CHANNEL=$CHANNEL) unset"
    fi
  )
fi

echo --- ok
@@ -1 +0,0 @@
test-stable.sh
@@ -21,6 +21,13 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh

# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105

# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
@@ -31,58 +38,25 @@ case $testName in
test-stable)
  _ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  ;;
test-stable-bpf)
  # Clear the C dependency files, if dependency moves these files are not regenerated
  test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
  test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

  # rustfilt required for dumping BPF assembly listings
  "$cargo" install rustfilt

test-stable-perf)
  # solana-keygen required when building C programs
  _ "$cargo" build --manifest-path=keygen/Cargo.toml
  export PATH="$PWD/target/debug":$PATH
  cargo_build_bpf="$(realpath ./cargo-build-bpf)"

  # BPF solana-sdk legacy compile test
  "$cargo_build_bpf" --manifest-path sdk/Cargo.toml
  ./cargo-build-bpf --manifest-path sdk/Cargo.toml

  # BPF Program unit tests
  "$cargo" test --manifest-path programs/bpf/Cargo.toml
  "$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf

  # BPF program system tests
  # BPF program tests
  _ make -C programs/bpf/c tests
  _ "$cargo" stable test \
    --manifest-path programs/bpf/Cargo.toml \
    --no-default-features --features=bpf_c,bpf_rust -- --nocapture

  # Dump BPF program assembly listings
  for bpf_test in programs/bpf/rust/*; do
    if pushd "$bpf_test"; then
      "$cargo_build_bpf" --dump
      popd
    fi
  done

  # BPF program instruction count assertion
  bpf_target_path=programs/bpf/target
  _ "$cargo" stable test \
    --manifest-path programs/bpf/Cargo.toml \
    --no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
    -- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt

  bpf_dump_archive="bpf-dumps.tar.bz2"
  rm -f "$bpf_dump_archive"
  tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
  exit 0
  ;;
test-stable-perf)
  if [[ $(uname) = Linux ]]; then
    # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
    # lengthy and unexpected delay the first time CUDA is involved when the driver
    # is not yet loaded.
    sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true
    sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh

    rm -rf target/perf-libs
    ./fetch-perf-libs.sh
@@ -100,17 +74,7 @@ test-stable-perf)
  ;;
test-local-cluster)
  _ "$cargo" stable build --release --bins ${V:+--verbose}
  _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
  exit 0
  ;;
test-local-cluster-flakey)
  _ "$cargo" stable build --release --bins ${V:+--verbose}
  _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
  exit 0
  ;;
test-local-cluster-slow)
  _ "$cargo" stable build --release --bins ${V:+--verbose}
  _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
  _ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
  exit 0
  ;;
*)
@@ -19,24 +19,13 @@ upload-ci-artifact() {
upload-s3-artifact() {
  echo "--- artifact: $1 to $2"
  (
    args=(
      --rm
      --env AWS_ACCESS_KEY_ID
      --env AWS_SECRET_ACCESS_KEY
      --volume "$PWD:/solana"

    )
    if [[ $(uname -m) = arm64 ]]; then
      # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
      args+=(
        --platform linux/amd64
      )
    fi
    args+=(
      eremite/aws-cli:2018.12.18
      /usr/bin/s3cmd --acl-public put "$1" "$2"
    )
    set -x
    docker run "${args[@]}"
    docker run \
      --rm \
      --env AWS_ACCESS_KEY_ID \
      --env AWS_SECRET_ACCESS_KEY \
      --volume "$PWD:/solana" \
      eremite/aws-cli:2018.12.18 \
      /usr/bin/s3cmd --acl-public put "$1" "$2"
  )
}
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.8.17"
version = "1.6.1"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,18 +12,13 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.1" }
solana-sdk = { path = "../sdk", version = "=1.6.1" }
thiserror = "1.0.21"
tiny-bip39 = "0.8.1"
uriparse = "0.6.3"
tiny-bip39 = "0.8.0"
url = "2.1.0"
chrono = "0.4"

[dev-dependencies]
tempfile = "3.1.0"

[lib]
name = "solana_clap_utils"
@@ -1,7 +1,5 @@
use {
    crate::{input_validators, ArgConstant},
    clap::Arg,
};
use crate::{input_validators, ArgConstant};
use clap::Arg;

pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant {
    name: "fee_payer",
@@ -1,24 +1,19 @@
use {
    crate::keypair::{
        keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
        ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
    },
    chrono::DateTime,
    clap::ArgMatches,
    solana_remote_wallet::remote_wallet::RemoteWalletManager,
    solana_sdk::{
        clock::UnixTimestamp,
        commitment_config::CommitmentConfig,
        genesis_config::ClusterType,
        native_token::sol_to_lamports,
        pubkey::Pubkey,
        signature::{read_keypair_file, Keypair, Signature, Signer},
    },
    std::{str::FromStr, sync::Arc},
use crate::keypair::{
    keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
    ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
};

// Sentinel value used to indicate to write to screen instead of file
pub const STDOUT_OUTFILE_TOKEN: &str = "-";
use chrono::DateTime;
use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
    clock::UnixTimestamp,
    commitment_config::CommitmentConfig,
    genesis_config::ClusterType,
    native_token::sol_to_lamports,
    pubkey::Pubkey,
    signature::{read_keypair_file, Keypair, Signature, Signer},
};
use std::{str::FromStr, sync::Arc};

// Return parsed values from matches at `name`
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
@@ -60,7 +55,7 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
    if let Some(value) = matches.value_of(name) {
        if value == ASK_KEYWORD {
            let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
            keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
            keypair_from_seed_phrase(name, skip_validation, true).ok()
        } else {
            read_keypair_file(value).ok()
        }
@@ -75,7 +70,7 @@ pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>>
        .filter_map(|value| {
            if value == ASK_KEYWORD {
                let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
                keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
                keypair_from_seed_phrase(name, skip_validation, true).ok()
            } else {
                read_keypair_file(value).ok()
            }
@@ -196,12 +191,10 @@ pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentC

#[cfg(test)]
mod tests {
    use {
        super::*,
        clap::{App, Arg},
        solana_sdk::signature::write_keypair_file,
        std::fs,
    };
    use super::*;
    use clap::{App, Arg};
    use solana_sdk::signature::write_keypair_file;
    use std::fs;

    fn app<'ab, 'v>() -> App<'ab, 'v> {
        App::new("test")
@@ -1,14 +1,13 @@
use {
    crate::keypair::{parse_signer_source, SignerSourceKind, ASK_KEYWORD},
    chrono::DateTime,
    solana_sdk::{
        clock::{Epoch, Slot},
        hash::Hash,
        pubkey::{Pubkey, MAX_SEED_LEN},
        signature::{read_keypair_file, Signature},
    },
    std::{fmt::Display, str::FromStr},
use crate::keypair::{parse_keypair_path, KeypairUrl, ASK_KEYWORD};
use chrono::DateTime;
use solana_sdk::{
    clock::{Epoch, Slot},
    hash::Hash,
    pubkey::{Pubkey, MAX_SEED_LEN},
    signature::{read_keypair_file, Signature},
};
use std::fmt::Display;
use std::str::FromStr;

fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
where
@@ -33,29 +32,6 @@ where
    is_parsable_generic::<T, String>(string)
}

// Return an error if string cannot be parsed as numeric type T, and value not within specified
// range
pub fn is_within_range<T>(string: String, range_min: T, range_max: T) -> Result<(), String>
where
    T: FromStr + Copy + std::fmt::Debug + PartialOrd + std::ops::Add<Output = T> + From<usize>,
    T::Err: Display,
{
    match string.parse::<T>() {
        Ok(input) => {
            let range = range_min..range_max + 1.into();
            if !range.contains(&input) {
                Err(format!(
                    "input '{:?}' out of range ({:?}..{:?}]",
                    input, range_min, range_max
                ))
            } else {
                Ok(())
            }
        }
        Err(err) => Err(format!("error parsing '{}': {}", string, err)),
    }
}

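`is_within_range` above follows clap 2.x's validator contract: a function taking the raw argument as a `String` and returning `Result<(), String>`. A hedged usage sketch — the `--count` argument and its bounds are invented for illustration, and the simplified validator below is not the removed generic version:

    use clap::{App, Arg};

    fn is_within_range_u64(s: String, min: u64, max: u64) -> Result<(), String> {
        match s.parse::<u64>() {
            Ok(v) if (min..=max).contains(&v) => Ok(()),
            Ok(v) => Err(format!("input '{}' out of range [{}, {}]", v, min, max)),
            Err(err) => Err(format!("error parsing '{}': {}", s, err)),
        }
    }

    fn main() {
        let matches = App::new("demo")
            .arg(
                Arg::with_name("count")
                    .long("count")
                    .takes_value(true)
                    // clap 2.x calls the validator closure with the raw String.
                    .validator(|s| is_within_range_u64(s, 1, 100)),
            )
            .get_matches_from(vec!["demo", "--count", "42"]);
        assert_eq!(matches.value_of("count"), Some("42"));
    }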
// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey<T>(string: T) -> Result<(), String>
where
@@ -95,26 +71,6 @@ where
        .map_err(|err| format!("{}", err))
}

// Return an error if a `SignerSourceKind::Prompt` cannot be parsed
pub fn is_prompt_signer_source<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if string.as_ref() == ASK_KEYWORD {
        return Ok(());
    }
    match parse_signer_source(string.as_ref())
        .map_err(|err| format!("{}", err))?
        .kind
    {
        SignerSourceKind::Prompt => Ok(()),
        _ => Err(format!(
            "Unable to parse input as `prompt:` URI scheme or `ASK` keyword: {}",
            string
        )),
    }
}

// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
where
@@ -129,11 +85,8 @@ pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match parse_signer_source(string.as_ref())
        .map_err(|err| format!("{}", err))?
        .kind
    {
        SignerSourceKind::Filepath(path) => is_keypair(path),
    match parse_keypair_path(string.as_ref()) {
        KeypairUrl::Filepath(path) => is_keypair(path),
        _ => Ok(()),
    }
}
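The `is_valid_pubkey` hunk above captures the API shift between the two sides: the older code matches directly on a `KeypairUrl` enum from `parse_keypair_path`, while the newer code parses into a source descriptor and matches on its `kind` field. A trimmed-down sketch of the newer shape — the variants and parsing rules here are invented for illustration, not the real implementation:

    enum SignerSourceKind {
        Prompt,
        Filepath(String),
        Pubkey,
    }

    struct SignerSource {
        kind: SignerSourceKind,
    }

    fn parse_signer_source(source: &str) -> Result<SignerSource, String> {
        // Invented classification rules, purely to show the API shape.
        let kind = if source == "ASK" {
            SignerSourceKind::Prompt
        } else if source.contains('/') {
            SignerSourceKind::Filepath(source.to_string())
        } else {
            SignerSourceKind::Pubkey
        };
        Ok(SignerSource { kind })
    }

    // Parse once, then dispatch on the kind, mirroring the newer-side code.
    fn is_valid_pubkey(source: &str) -> Result<(), String> {
        match parse_signer_source(source)?.kind {
            SignerSourceKind::Filepath(path) if !path.ends_with(".json") => {
                Err(format!("not a keypair file: {}", path))
            }
            _ => Ok(()),
        }
    }

    fn main() {
        assert!(is_valid_pubkey("ASK").is_ok());
        assert!(is_valid_pubkey("/tmp/id.json").is_ok());
        assert!(is_valid_pubkey("/tmp/id.txt").is_err());
    }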
@@ -214,8 +167,8 @@
pub fn normalize_to_url_if_moniker<T: AsRef<str>>(url_or_moniker: T) -> String {
    match url_or_moniker.as_ref() {
        "m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
        "t" | "testnet" => "https://api.testnet.solana.com",
        "d" | "devnet" => "https://api.devnet.solana.com",
        "t" | "testnet" => "https://testnet.solana.com",
        "d" | "devnet" => "https://devnet.solana.com",
        "l" | "localhost" => "http://localhost:8899",
        url => url,
    }
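The moniker hunk directly above only swaps endpoint hosts (`testnet.solana.com` → `api.testnet.solana.com`, and likewise for devnet). Since the hunk elides the function's tail, here is a self-contained version of the newer-side resolver; the final `.to_string()` plumbing is reconstructed, not shown in the diff:

    fn normalize_to_url_if_moniker<T: AsRef<str>>(url_or_moniker: T) -> String {
        match url_or_moniker.as_ref() {
            "m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
            "t" | "testnet" => "https://api.testnet.solana.com",
            "d" | "devnet" => "https://api.devnet.solana.com",
            "l" | "localhost" => "http://localhost:8899",
            url => url,
        }
        .to_string()
    }

    fn main() {
        // Monikers expand; full URLs pass through untouched.
        assert_eq!(normalize_to_url_if_moniker("t"), "https://api.testnet.solana.com");
        assert_eq!(normalize_to_url_if_moniker("https://example.com"), "https://example.com");
    }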
@@ -353,27 +306,6 @@
    }
}

pub fn is_niceness_adjustment_valid<T>(value: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    let adjustment = value.as_ref().parse::<i8>().map_err(|err| {
        format!(
            "error parsing niceness adjustment value '{}': {}",
            value, err
        )
    })?;
    if solana_perf::thread::is_renice_allowed(adjustment) {
        Ok(())
    } else {
        Err(String::from(
            "niceness adjustment supported only on Linux; negative adjustment \
            (priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
            for details)",
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -390,11 +322,4 @@ mod tests {
        assert!(is_derivation("a/b").is_err());
        assert!(is_derivation("0/4294967296").is_err());
    }

    #[test]
    fn test_is_niceness_adjustment_valid() {
        assert_eq!(is_niceness_adjustment_valid("0"), Ok(()));
        assert!(is_niceness_adjustment_valid("128").is_err());
        assert!(is_niceness_adjustment_valid("-129").is_err());
    }
}
(File diff suppressed because it is too large.)
(Some files were not shown because too many files have changed in this diff.)