Compare commits


36 Commits

Author SHA1 Message Date
Justin Starry
7782d34bbf Add StakesCache struct to abstract away locking (#21738) (#21796) 2021-12-10 22:38:04 -05:00
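As a rough illustration of the pattern this commit introduces, the sketch below wraps the lock inside a dedicated type so callers never handle the guard directly. The field and method names are illustrative, not the actual StakesCache API:

```rust
use std::sync::RwLock;

// Illustrative stand-in for the runtime's stakes state.
#[derive(Default)]
struct Stakes {
    total_stake: u64,
}

// Hypothetical sketch: locking is an implementation detail of the
// cache, so call sites cannot hold the guard across unrelated work.
#[derive(Default)]
struct StakesCache(RwLock<Stakes>);

impl StakesCache {
    fn total_stake(&self) -> u64 {
        self.0.read().unwrap().total_stake
    }

    fn add_stake(&self, lamports: u64) {
        self.0.write().unwrap().total_stake += lamports;
    }
}
```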
mergify[bot]
2c4765e75a Bump solana_rbpf to version v0.2.18 (#21774) (#21786)
(cherry picked from commit a5a0dabe7b)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-11 02:38:03 +00:00
mergify[bot]
e71ea19e60 adds back ErasureMeta::first_coding_index field (#21623) (#21785)
https://github.com/solana-labs/solana/pull/16646
removed first_coding_index since the field is currently redundant and
always equal to fec_set_index.
However, with upcoming changes to the erasure coding schema, it will no
longer be the same as fec_set_index and so will require a separate field to
represent it.

(cherry picked from commit 49ba09b333)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-10 23:14:10 +00:00
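A sketch of the shape described above; the surrounding fields are assumptions based on the message rather than the exact definition:

```rust
// Under the old schema, first_coding_index was always equal to
// fec_set_index and so could be omitted; the upcoming schema lets
// the two diverge, so the index must be stored explicitly again.
struct ErasureMeta {
    set_index: u64,
    first_coding_index: u64, // may now differ from fec_set_index
}
```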
mergify[bot]
ed0040d555 Update to Rust 1.57.0 (#21779)
(cherry picked from commit 15a9fa6f53)

Co-authored-by: Steven Czabaniuk <steven@solana.com>
2021-12-10 22:23:48 +00:00
mergify[bot]
da9e6826ac Move type alias and use it more broadly (#21763) (#21777)
(cherry picked from commit 350845c513)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 21:44:41 +00:00
mergify[bot]
68fc72a7f4 Add more reporting for invalid stake cache members and prune them (#21654) (#21741)
* Add more reporting for invalid stake cache members

* feedback

(cherry picked from commit 6fc329180b)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:30:16 +00:00
mergify[bot]
2a6bb2b954 Migrate from address maps to address lookup tables (#21634) (#21773)
* Migrate from address maps to address lookup tables

* update sanitize error

* cargo fmt

* update abi

(cherry picked from commit 6c108c8fc3)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:10:37 +00:00
mergify[bot]
ef51778c78 Nits in message-processor (#21755) (#21762)
* Fixup typo

* Simplify types slightly

(cherry picked from commit c1386d66e6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 11:24:20 -05:00
mergify[bot]
abecf292a3 Expand docs for Pubkey::create_program_address (#21750) (#21759)
* Expand docs for Pubkey::create_program_address

* Update sdk/program/src/pubkey.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 6919c4863b)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-10 11:23:54 -05:00
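For readers following along, `Pubkey::create_program_address` derives a program-derived address from seeds plus a bump byte and fails if the result lands on the ed25519 curve. A small usage sketch (the seed values are made up):

```rust
use solana_program::pubkey::Pubkey;

fn derive_pda(program_id: &Pubkey) {
    // In practice the bump comes from Pubkey::find_program_address;
    // 255 is just the first value that search would try.
    let bump: u8 = 255;
    match Pubkey::create_program_address(&[b"vault".as_ref(), &[bump]], program_id) {
        Ok(pda) => println!("derived PDA: {}", pda),
        Err(err) => println!("bump {} lands on the curve: {}", bump, err),
    }
}
```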
Michael Vines
a31660815f rebase 2021-12-09 18:41:47 -08:00
Michael Vines
539ad4bea6 Remove libcurl to prevent wasm-pack segfault in libssl
(cherry picked from commit f32216588d)
2021-12-09 18:41:47 -08:00
Michael Vines
85f601993f Cargo.lock
(cherry picked from commit f4babb7566)

# Conflicts:
#	Cargo.lock
#	programs/bpf/Cargo.lock
2021-12-09 18:41:47 -08:00
Michael Vines
b0754cc575 Add initial wasm bindings for Instruction, SystemProgram and Transaction
(cherry picked from commit a35df1cb02)
2021-12-09 18:41:47 -08:00
Michael Vines
effd0b2547 Add wasm bindings for Hash
(cherry picked from commit 03a956e8d9)
2021-12-09 18:41:47 -08:00
Michael Vines
8836069719 Add wasm bindings for Pubkey and Keypair
(cherry picked from commit 488dc37fec)
2021-12-09 18:41:47 -08:00
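The bindings in the commits above follow the usual wasm-bindgen pattern; here is a minimal sketch of that pattern with a made-up type, not the actual binding code:

```rust
use wasm_bindgen::prelude::*;

// Annotating a type and its impl block lets wasm-pack expose
// them to JavaScript as a class with methods.
#[wasm_bindgen]
pub struct Counter {
    value: u32,
}

#[wasm_bindgen]
impl Counter {
    #[wasm_bindgen(constructor)]
    pub fn new() -> Counter {
        Counter { value: 0 }
    }

    pub fn increment(&mut self) -> u32 {
        self.value += 1;
        self.value
    }
}
```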
mergify[bot]
2698a5c705 AcctIdx: env var to enable testing of disk buckets (#21494) (#21723)
(cherry picked from commit 54862eba0d)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-12-09 23:39:06 +00:00
mergify[bot]
dd157fd47f Fixed minor issues with the cluster overview docs which had confused some new users. (#21744) (#21745)

(cherry picked from commit 6d18b6bab5)

Co-authored-by: bji <bryan@ischo.com>
2021-12-09 20:41:21 +00:00
mergify[bot]
8cacf82cb8 adds more sanity checks to shreds (#21675) (#21734)
(cherry picked from commit 8063273d09)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-09 18:44:43 +00:00
mergify[bot]
8ee5fbc5c0 simulateTransaction now returns the correct error code if accounts are provided as input (#21716)
(cherry picked from commit 824994db69)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-09 01:12:42 +00:00
mergify[bot]
f2a6b94e5c SDK: Add stdlib.h include to pull in abort() (#21700) (#21705)
(cherry picked from commit 923720f529)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-08 17:31:11 +00:00
mergify[bot]
ef970bb14a - Implicitly fixes invoke_context.return_data not being reset between instructions in process_message. (#21671) (#21684)
- Lets InvokeContext::process_cross_program_instruction() handle the first invocation depth too.
- Marks InvokeContext::verify(), InvokeContext::verify_and_update() and InvokeContext::process_executable_chain() private.
- Renames InvokeContext::process_cross_program_instruction() to InvokeContext::process_instruction().
- Removes InvokeContext::new_mock_with_sysvars().

(cherry picked from commit 1df88837c8)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-08 10:48:49 +00:00
Jarred Nicholls
cabd851904 Avoid entropy sources when constructing a solana_program::message::Message.
The solana-program crate can be used in certain embedded environments (HSMs) where
the source of entropy, whether used for cryptographic purposes or not, is tightly
controlled. In these cases, using the default OS source of entropy is not always
acceptable, so seeding the Rust stdlib's default hasher from it is prohibited.
This means any HashMap/HashSet must be constructible and usable with a custom
hasher implementation.

This commit removes the use of Itertools::unique() to dedupe Instructions being
compiled into a new Message, which relies on a default-configured HashMap under
the hood. Instead, we use a BTreeSet, which does not invoke any entropy source
to seed a hash implementation.

(cherry picked from commit 4da435f2a0)
2021-12-07 22:36:21 -08:00
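The swap is easy to demonstrate in isolation: `HashMap`/`HashSet` seed their default hasher from OS randomness, while `BTreeSet` orders elements by `Ord` and touches no entropy source. A self-contained sketch of entropy-free deduplication (not the actual Message-compilation code):

```rust
use std::collections::BTreeSet;

// Dedupe while preserving first-seen order, without HashSet
// (whose default RandomState hasher is seeded from OS entropy).
fn dedupe_preserving_order<T: Ord + Clone>(items: &[T]) -> Vec<T> {
    let mut seen = BTreeSet::new();
    let mut out = Vec::new();
    for item in items {
        // insert() returns true only the first time a value is seen.
        if seen.insert(item.clone()) {
            out.push(item.clone());
        }
    }
    out
}

fn main() {
    let keys = ["alpha", "beta", "alpha", "gamma", "beta"];
    assert_eq!(dedupe_preserving_order(&keys), ["alpha", "beta", "gamma"]);
}
```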
mergify[bot]
2d2ef59550 Ensure we have keys to activate these features (#21669) (#21674)
(cherry picked from commit 45e56c599d)

Co-authored-by: Sean Young <sean@mess.org>
2021-12-07 23:24:11 +00:00
mergify[bot]
b7b56d5016 Docs: Solflare web/app updates (#21540) (#21668)
* Update Solflare description

* Add Solflare to mobile wallets

* Sort mobile wallets alphabetically

* Sort web wallets alphabetically

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Justin Starry <justin.m.starry@gmail.com>
(cherry picked from commit a2477c1f32)

Co-authored-by: Boris Vujicic <turshija@gmail.com>
2021-12-07 16:44:28 +00:00
mergify[bot]
18e3a635b4 docs: Fix SOL staked formula (#21615) (#21667)
Fix the formula on the proposal page: https://docs.solana.com/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards

(cherry picked from commit b57097ef18)

Co-authored-by: Melroy van den Berg <melroy@melroy.org>
2021-12-07 16:01:12 +00:00
mergify[bot]
2b4347d502 Add option to reclaim accounts-cluster-bench accounts/lamports (backport #21656) (#21658)
* Add option to reclaim accounts-cluster-bench accounts/lamports (#21656)

* Add option to reclaim accounts-cluster-bench accounts/lamports

* lint

(cherry picked from commit 205fd95722)

# Conflicts:
#	accounts-cluster-bench/Cargo.toml

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-07 09:18:48 +00:00
mergify[bot]
87accd16d8 Fixup flaky tests (#21617) (#21647)
* Fixup flaky tests

* Fixup listeners

(cherry picked from commit f493a88258)

Co-authored-by: carllin <carl@solana.com>
2021-12-07 03:54:14 +00:00
mergify[bot]
0e969015fc Add offline and fee-payer utilities to CLI vote module (#21579) (#21649)
* create-vote-account: add offline, nonce, fee_payer capabilities

* vote-authorize: add offline, nonce, fee-payer

* vote-update-things: add offline, nonce, fee-payer

* withdraw-vote: add offline, nonce, fee-payer

* close-vote-acct: add fee-payer

* Allow WithdrawVoteAccount to empty the account, since offline operations cannot perform the account-state queries that CloseVoteAccount relies on

* Fix lint

* Update offline-signing docs

* Add some parse unit tests

* Add offline integration test

(cherry picked from commit 873fe81bc0)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-07 01:51:02 +00:00
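Offline signing, as these subcommands use it, boils down to signing against a caller-supplied blockhash (often a durable nonce value) instead of querying a cluster. A hedged sketch with solana-sdk, using a plain transfer rather than the vote instructions these commands actually wrap:

```rust
use solana_sdk::{
    hash::Hash,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

// No RPC involved: the blockhash is supplied by the caller, so this
// can run on an air-gapped machine and be broadcast later.
fn sign_offline(fee_payer: &Keypair, to: &Pubkey, lamports: u64, blockhash: Hash) -> Transaction {
    let ix = system_instruction::transfer(&fee_payer.pubkey(), to, lamports);
    Transaction::new_signed_with_payer(&[ix], Some(&fee_payer.pubkey()), &[fee_payer], blockhash)
}
```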
mergify[bot]
46935c022e Ensure that StakeDelegations and StakeHistory serde (#21640) (#21653)
Add tests to StakeDelegations and StakeHistory to ensure that the outer
types serialize and deserialize correctly to/from the inner types.

(cherry picked from commit da4015a959)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2021-12-07 01:44:49 +00:00
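The shape of such a test is straightforward: serialize the wrapper and the inner type and require identical bytes, then deserialize back. Whether the real wrappers rely on `#[serde(transparent)]` is an assumption here; the names below are stand-ins:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct Delegation {
    stake: u64,
    activation_epoch: u64,
}

// Outer newtype that must serialize exactly like its inner Vec.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
#[serde(transparent)]
struct StakeDelegationsLike(Vec<Delegation>);

fn main() {
    let inner = vec![Delegation { stake: 42, activation_epoch: 7 }];
    let outer = StakeDelegationsLike(inner.clone());

    // Outer and inner must produce identical bytes...
    let outer_bytes = bincode::serialize(&outer).unwrap();
    let inner_bytes = bincode::serialize(&inner).unwrap();
    assert_eq!(outer_bytes, inner_bytes);

    // ...and the outer type must round-trip from those bytes.
    let back: StakeDelegationsLike = bincode::deserialize(&outer_bytes).unwrap();
    assert_eq!(back, outer);
}
```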
mergify[bot]
8a7106bc08 Remove activated feature for filtering invalid stakes from rewards (#21641) (#21651)
(cherry picked from commit a1adcb23b6)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:58:31 +00:00
mergify[bot]
89d2f34a03 Reject vote withdrawals that create non-rent-exempt accounts (backport #21639) (#21645)
* Reject vote withdrawals that create non-rent-exempt accounts (#21639)

* Reject vote withdrawals that create non-rent-exempt accounts

* fix mocked instruction test

(cherry picked from commit e123883b26)

# Conflicts:
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:42:01 +00:00
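The rule being enforced reduces to a small check: a withdrawal must either close the account entirely or leave it rent-exempt. A minimal sketch with assumed names, not the actual vote-program code:

```rust
fn check_withdraw(
    current_lamports: u64,
    withdraw_lamports: u64,
    rent_exempt_minimum: u64,
) -> Result<(), &'static str> {
    let remaining = current_lamports
        .checked_sub(withdraw_lamports)
        .ok_or("insufficient funds")?;
    // Either the account is emptied (closed) or it stays rent-exempt.
    if remaining == 0 || remaining >= rent_exempt_minimum {
        Ok(())
    } else {
        Err("withdrawal would leave a non-rent-exempt account")
    }
}
```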
mergify[bot]
b3fa1e4550 Move transaction error code into new module (#21635) (#21638)
(cherry picked from commit 3dab1e711d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-06 20:11:20 +00:00
mergify[bot]
58c755e1d4 Rework docs for Pubkey::find_program_address and friends (#21528) (#21637)
* Rework docs for Pubkey::find_program_address and friends

* Remove circular dependency

* Minor tweaks

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Sort solana-program dev-dependencies

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d1c101cde2)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 19:04:35 +00:00
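For reference, `Pubkey::find_program_address` searches bump seeds downward from 255 until `create_program_address` yields an off-curve address, returning both the address and the bump. A short usage sketch (the seeds are illustrative):

```rust
use solana_program::pubkey::Pubkey;

fn locate_pda(program_id: &Pubkey, user: &Pubkey) {
    // The returned bump can later be stored and re-checked with
    // Pubkey::create_program_address inside the program.
    let (pda, bump) =
        Pubkey::find_program_address(&[b"vault".as_ref(), user.as_ref()], program_id);
    println!("PDA {} with bump {}", pda, bump);
}
```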
mergify[bot]
60085305b4 Fix spelling of 'Borsh' (#21624)
(cherry picked from commit f3c2803af9)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 05:31:28 +00:00
mergify[bot]
b4c8e095bd adds back position field to coding-shred-header (#21600) (#21620)
https://github.com/solana-labs/solana/pull/17004
removed the position field from coding-shred-header because, as it stands,
the field is redundant and unused.
However, with the upcoming changes to the erasure coding schema, this field
will no longer be redundant and will need to be populated.

(cherry picked from commit cd17f63d81)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-05 16:40:22 +00:00
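A sketch of the header after the change; the field names here match the message but are otherwise assumptions:

```rust
// position records this coding shred's index within its erasure
// set; under the old schema it was derivable and therefore unused.
struct CodingShredHeader {
    num_data_shreds: u16,
    num_coding_shreds: u16,
    position: u16,
}
```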
mergify[bot]
3e28ffa884 Bump RpcClient node versions (#21612) (#21613)
* Bump blockhash/fee api check versions

* Bump snapshot api check version

(cherry picked from commit 3e5a5a834f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-05 01:08:22 +00:00
758 changed files with 31260 additions and 69617 deletions


@@ -1,19 +0,0 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
# steps:
# - command: ".buildkite/pipeline-upload.sh"
#
set -e
cd "$(dirname "$0")"/..
source ci/_
sudo chmod 0777 ci/buildkite-pipeline-in-disk.sh
_ ci/buildkite-pipeline-in-disk.sh pipeline.yml
echo +++ pipeline
cat pipeline.yml
_ buildkite-agent pipeline upload pipeline.yml


@@ -12,14 +12,7 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
eval "$(ci/sbf-tools-info.sh)"
source "ci/rust-version.sh"
HOST_RUST_VERSION="$rust_stable"
pattern='^[0-9]+\.[0-9]+\.[0-9]+$'
if [[ ${HOST_RUST_VERSION} =~ ${pattern} ]]; then
HOST_RUST_VERSION="${rust_stable%.*}"
fi
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"-"$HOST_RUST_VERSION"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes


@@ -1,66 +0,0 @@
name: client_targets
on:
pull_request:
branches:
- master
paths:
- "client/**"
- "sdk/**"
- ".github/workflows/client-targets.yml"
env:
CARGO_TERM_COLOR: always
jobs:
check_compilation:
name: Client compilation
runs-on: ${{ matrix.os }}
strategy:
matrix:
target: [aarch64-apple-ios, x86_64-apple-ios, aarch64-apple-darwin, x86_64-apple-darwin, aarch64-linux-android, armv7-linux-androideabi, i686-linux-android, x86_64-linux-android]
include:
- target: aarch64-apple-ios
platform: ios
os: macos-latest
- target: x86_64-apple-ios
platform: ios
os: macos-latest
- target: aarch64-apple-darwin
platform: ios
os: macos-latest
- target: x86_64-apple-darwin
platform: ios
os: macos-latest
- target: aarch64-linux-android
platform: android
os: ubuntu-latest
- target: armv7-linux-androideabi
platform: android
os: ubuntu-latest
- target: i686-linux-android
platform: android
os: ubuntu-latest
- target: x86_64-linux-android
platform: android
os: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: ${{ matrix.target }}
- name: Install cargo-ndk
if: ${{ matrix.platform == 'android' }}
run: cargo install cargo-ndk
- uses: actions-rs/cargo@v1
if: ${{ matrix.platform == 'android' }}
with:
command: ndk
args: --target ${{ matrix.target }} build -p solana-client
- uses: actions-rs/cargo@v1
if: ${{ matrix.platform == 'ios' }}
with:
command: build
args: -p solana-client --target ${{ matrix.target }}


@@ -10,8 +10,6 @@ jobs:
if: ${{ github.event.workflow_run.conclusion == 'success' }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: amondnet/vercel-action@v20
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
@@ -20,33 +18,3 @@ jobs:
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
working-directory: ./explorer
scope: ${{ secrets.TEAM_ID }}
- name: vercel url
run : |
touch vercelfile.txt
vercel --token ${{secrets.VERCEL_TOKEN}} ls explorer --scope team_8A2WD7p4uR7tmKX9M68loHXI > vercelfile.txt
touch vercelfile1.txt
head -n 2 vercelfile.txt > vercelfile1.txt
touch vercelfile2.txt
tail -n 1 vercelfile1.txt > vercelfile2.txt
filtered_url7=$(cut -f7 -d" " vercelfile2.txt)
echo "filtered_url7 is: $filtered_url7"
touch .env.preview1
echo "$filtered_url7" > .env.preview1
#filtered_url=$(cat vercelfile2.txt )
#echo "$filtered_url" >> .env.preview1
- name: Run tests
uses: mathiasvr/command-output@v1
id: tests2
with:
run: |
echo "$(cat .env.preview1)"
- name: Slack Notification1
uses: rtCamp/action-slack-notify@master
env:
SLACK_MESSAGE: ${{ steps.tests2.outputs.stdout }}
SLACK_TITLE: Vercel "Explorer" Preview Deployment Link
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -65,7 +65,20 @@ jobs:
node-version: ${{ matrix.node }}
cache: 'npm'
cache-dependency-path: web3.js/package-lock.json
- run: npm i -g npm@7
- run: npm ci
- run: npm run lint
- run: |
source .travis/before_install.sh
npm install
source .travis/script.sh
npm run build
ls -l lib
test -r lib/index.iife.js
test -r lib/index.cjs.js
test -r lib/index.esm.js
- run: npm run doc
- run: npm run codecov
- run: |
sh -c "$(curl -sSfL https://release.solana.com/edge/install)"
echo "$HOME/.local/share/solana/install/active_release/bin" >> $GITHUB_PATH
PATH="$HOME/.local/share/solana/install/active_release/bin:$PATH"
solana --version
- run: npm run test:live-with-test-validator

.gitignore vendored

@@ -4,7 +4,6 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/
**/*.rs.bk
.cargo


@@ -113,10 +113,3 @@ pull_request_rules:
ignore_conflicts: true
branches:
- v1.9
commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
conditions:
- author=@core-contributors


@@ -78,24 +78,6 @@ jobs:
# - sudo apt-get install libssl-dev libudev-dev
# docs pull request
# - name: "explorer"
# if: type = pull_request AND branch = master
# language: node_js
# node_js:
# - "lts/*"
# cache:
# directories:
# - ~/.npm
# before_install:
# - .travis/affects.sh explorer/ .travis || travis_terminate 0
# - cd explorer
# script:
# - npm run build
# - npm run format
- name: "docs"
if: type IN (push, pull_request) OR tag IS present
language: node_js


@@ -74,7 +74,7 @@ minutes to execute. Use that time to write a detailed problem description. Once
the description is written and CI succeeds, click the "Ready to Review" button
and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
reviewer engagement. Not only will they be notified and see the PR is not yet
ready for them, they will also be bombarded with additional notifications
ready for them, they will also be bombarded them with additional notifications
each time you push a commit to get past CI or until they "mute" the PR. Once
muted, you'll need to reach out over some other medium, such as Discord, to
request they have another look. When you use draft PRs, no notifications are

Cargo.lock generated: file diff suppressed because it is too large.


@@ -2,6 +2,7 @@
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-streamer",
"bench-tps",
@@ -11,7 +12,6 @@ members = [
"banks-interface",
"banks-server",
"bucket_map",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -46,11 +46,7 @@ members = [
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/ed25519-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/stake",
@@ -81,10 +77,13 @@ members = [
"test-validator",
"rpc-test",
"client-test",
"zk-token-sdk",
"programs/zk-token-proof",
]
exclude = [
"programs/bpf",
]
# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
# dependency is supported on Apple M1. v2 of the feature resolver is needed to
# specify arch-specific features.
resolver = "2"


@@ -1,4 +1,4 @@
Copyright 2022 Solana Foundation.
Copyright 2020 Solana Foundation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -38,6 +38,12 @@ $ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
```
On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
```bash
$ softwareupdate --install-rosetta
```
## **2. Download the source code.**
```bash
@@ -68,7 +74,7 @@ devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://doc
# Benchmarking
First, install the nightly build of rustc. `cargo bench` requires the use of the
First install the nightly build of rustc. `cargo bench` requires use of the
unstable features only available in the nightly build.
```bash
@@ -115,12 +121,12 @@ the reader to check and validate their accuracy and truthfulness.
Furthermore, nothing in this project constitutes a solicitation for
investment.
Any content produced by SF or developer resources that SF provides are
Any content produced by SF or developer resources that SF provides, are
for educational and inspirational purposes only. SF does not encourage,
induce or sanction the deployment, integration or use of any such
applications (including the code comprising the Solana blockchain
protocol) in violation of applicable laws or regulations and hereby
prohibits any such deployment, integration or use. This includes the use of
prohibits any such deployment, integration or use. This includes use of
any such applications by the reader (a) in violation of export control
or sanctions laws of the United States or any other applicable
jurisdiction, (b) if the reader is located in or ordinarily resident in
@@ -133,7 +139,7 @@ prohibitions.
The reader should be aware that U.S. export control and sanctions laws
prohibit U.S. persons (and other persons that are subject to such laws)
from transacting with persons in certain countries and territories or
that are on the SDN list. As a project-based primarily on open-source
that are on the SDN list. As a project based primarily on open-source
software, it is possible that such sanctioned persons may nevertheless
bypass prohibitions, obtain the code comprising the Solana blockchain
protocol (or other project code or applications) and deploy, integrate,


@@ -94,7 +94,7 @@ Alternatively use the Github UI.
```
1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
```
ci/channel-info.sh
ci/channel_info.sh
```
## Steps to Create a Release
@@ -152,5 +152,5 @@ appearing. To check for progress:
[Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job.
If this happens and the error is non-fatal, click "Retry" on the "publish crate" job
### Update software on testnet.solana.com
See the documentation at https://github.com/solana-labs/cluster-ops/. devnet.solana.com and mainnet-beta.solana.com run stable releases that have been tested on testnet. Do not update devnet or mainnet-beta with a beta release.
### Update software on devnet.solana.com/testnet.solana.com/mainnet-beta.solana.com
See the documentation at https://github.com/solana-labs/cluster-ops/


@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.10.0"
version = "1.9.0"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,15 +16,15 @@ bs58 = "0.4.0"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
serde = "1.0.136"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.78"
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.10.0"
zstd = "0.9.0"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -136,13 +136,16 @@ impl UiAccount {
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
}),
UiAccountEncoding::Base64Zstd => base64::decode(blob)
.ok()
.map(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
})
.flatten(),
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;


@@ -5,7 +5,7 @@ use {
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_ids},
parse_token::{parse_token, spl_token_id},
parse_vote::parse_vote,
},
inflector::Inflector,
@@ -21,6 +21,7 @@ lazy_static! {
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
@@ -30,9 +31,7 @@ lazy_static! {
);
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
for spl_token_id in spl_token_ids() {
m.insert(spl_token_id, ParsableAccount::SplToken);
}
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);


@@ -15,31 +15,16 @@ use {
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
fn spl_token_id() -> Pubkey {
pub fn spl_token_id() -> Pubkey {
Pubkey::new_from_array(spl_token::id().to_bytes())
}
// Returns all known SPL Token program ids
pub fn spl_token_ids() -> Vec<Pubkey> {
vec![spl_token_id()]
}
// Check if the provided program id is a known SPL Token program id
pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool {
*program_id == spl_token_id()
}
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
}
// The program id of the `spl_token_native_mint` account
pub fn spl_token_native_mint_program_id() -> Pubkey {
spl_token_id()
}
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::new_from_array(pubkey.to_bytes())


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
clap = "2.33.1"
[package.metadata.docs.rs]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-core = { path = "../core", version = "=1.10.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -116,7 +116,7 @@ fn make_create_message(
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.flat_map(|_| {
.map(|_| {
let program_id = if mint.is_some() {
inline_spl_token::id()
} else {
@@ -148,6 +148,7 @@ fn make_create_message(
instructions
})
.flatten()
.collect();
Message::new(&instructions, Some(&keypair.pubkey()))
@@ -673,7 +674,7 @@ pub mod test {
#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default_for_test();
let validator_config = ValidatorConfig::default();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,


@@ -3,17 +3,17 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
documentation = "https://docs.rs/solana-validator"
[dependencies]
log = "0.4.11"
thiserror = "1.0.30"
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -3,8 +3,8 @@
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
solana_sdk::{clock::UnixTimestamp, signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::{Reward, TransactionStatusMeta},
solana_sdk::{signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::TransactionStatusMeta,
std::{any::Any, error, io},
thiserror::Error,
};
@@ -12,117 +12,54 @@ use {
impl Eq for ReplicaAccountInfo<'_> {}
#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
/// The Pubkey for the account
pub pubkey: &'a [u8],
/// The lamports for the account
pub lamports: u64,
/// The Pubkey of the owner program account
pub owner: &'a [u8],
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: u64,
/// The data held in this account.
pub data: &'a [u8],
/// A global monotonically increasing atomic number, which can be used
/// to tell the order of the account update. For example, when an
/// account is updated in the same slot multiple times, the update
/// with higher write_version should supersede the one with lower
/// write_version.
pub write_version: u64,
}
/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
/// there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}
/// Information about a transaction
#[derive(Clone, Debug)]
pub struct ReplicaTransactionInfo<'a> {
/// The first signature of the transaction, used for identifying the transaction.
pub signature: &'a Signature,
/// Indicates if the transaction is a simple vote transaction.
pub is_vote: bool,
/// The sanitized transaction.
pub transaction: &'a SanitizedTransaction,
/// Metadata of the transaction status.
pub transaction_status_meta: &'a TransactionStatusMeta,
}
/// A wrapper to future-proof ReplicaTransactionInfo handling.
/// If there were a change to the structure of ReplicaTransactionInfo,
/// there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaTransactionInfoVersions<'a> {
V0_0_1(&'a ReplicaTransactionInfo<'a>),
}
#[derive(Clone, Debug)]
pub struct ReplicaBlockInfo<'a> {
pub slot: u64,
pub blockhash: &'a str,
pub rewards: &'a [Reward],
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}
pub enum ReplicaBlockInfoVersions<'a> {
V0_0_1(&'a ReplicaBlockInfo<'a>),
}
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
ConfigFileOpenError(#[from] io::Error),
/// Error in reading the content of the config file or the content
/// is not in the expected format.
#[error("Error reading config file. Error message: ({msg})")]
ConfigFileReadError { msg: String },
/// Error when updating the account.
#[error("Error updating account. Error message: ({msg})")]
AccountsUpdateError { msg: String },
/// Error when updating the slot status
#[error("Error updating slot status. Error message: ({msg})")]
SlotStatusUpdateError { msg: String },
/// Any custom error defined by the plugin.
#[error("Plugin-defined custom error. Error message: ({0})")]
Custom(Box<dyn error::Error + Send + Sync>),
}
/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
/// the fork the validator believes is most likely to finalize.
Processed,
/// The highest slot having reached max vote lockout.
Rooted,
/// The highest slot that has been voted on by a supermajority of the cluster, i.e. is confirmed.
Confirmed,
}
@@ -138,9 +75,6 @@ impl SlotStatus {
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
@@ -159,9 +93,6 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn on_unload(&mut self) {}
/// Called when an account is updated at a slot.
/// When `is_startup` is true, it indicates the account is loaded from
/// snapshots when the validator starts up. When `is_startup` is false,
/// the account is updated during transaction processing.
#[allow(unused_variables)]
fn update_account(
&mut self,
@@ -198,12 +129,6 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
Ok(())
}
/// Called when block's metadata is updated.
#[allow(unused_variables)]
fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
Ok(())
}
/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,17 +12,19 @@ documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
json5 = "0.4.1"
libloading = "0.7.3"
libloading = "0.7.2"
log = "0.4.11"
serde_json = "1.0.78"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]


@@ -2,13 +2,12 @@ use {
crate::{
accounts_update_notifier::AccountsUpdateNotifierImpl,
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier::BlockMetadataNotifierImpl,
block_metadata_notifier_interface::BlockMetadataNotifierLock,
slot_status_notifier::SlotStatusNotifierImpl, slot_status_observer::SlotStatusObserver,
transaction_notifier::TransactionNotifierImpl,
},
crossbeam_channel::Receiver,
log::*,
serde_json,
solana_rpc::{
optimistically_confirmed_bank_tracker::BankNotification,
transaction_notifier_interface::TransactionNotifierLock,
@@ -51,7 +50,6 @@ pub struct AccountsDbPluginService {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
transaction_notifier: Option<TransactionNotifierLock>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
}
impl AccountsDbPluginService {
@@ -104,24 +102,17 @@ impl AccountsDbPluginService {
None
};
let (slot_status_observer, block_metadata_notifier): (
Option<SlotStatusObserver>,
Option<BlockMetadataNotifierLock>,
) = if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
(
let slot_status_observer =
if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
Some(SlotStatusObserver::new(
confirmed_bank_receiver,
slot_status_notifier,
)),
Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new(
plugin_manager.clone(),
)))),
)
} else {
(None, None)
};
))
} else {
None
};
info!("Started AccountsDbPluginService");
Ok(AccountsDbPluginService {
@@ -129,7 +120,6 @@ impl AccountsDbPluginService {
plugin_manager,
accounts_update_notifier,
transaction_notifier,
block_metadata_notifier,
})
}
@@ -155,12 +145,12 @@ impl AccountsDbPluginService {
)));
}
let result: serde_json::Value = match json5::from_str(&contents) {
let result: serde_json::Value = match serde_json::from_str(&contents) {
Ok(value) => value,
Err(err) => {
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
format!(
"The config file {:?} is not in a valid Json5 format, error: {:?}",
"The config file {:?} is not in a valid Json format, error: {:?}",
accountsdb_plugin_config_file, err
),
));
@@ -170,24 +160,13 @@ impl AccountsDbPluginService {
let libpath = result["libpath"]
.as_str()
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
let mut libpath = PathBuf::from(libpath);
if libpath.is_relative() {
let config_dir = accountsdb_plugin_config_file.parent().ok_or_else(|| {
AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
"Failed to resolve parent of {:?}",
accountsdb_plugin_config_file,
))
})?;
libpath = config_dir.join(libpath);
}
let config_file = accountsdb_plugin_config_file
.as_os_str()
.to_str()
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
unsafe {
let result = plugin_manager.load_plugin(libpath.to_str().unwrap(), config_file);
let result = plugin_manager.load_plugin(libpath, config_file);
if let Err(err) = result {
let msg = format!(
"Failed to load the plugin library: {:?}, error: {:?}",
@@ -207,10 +186,6 @@ impl AccountsDbPluginService {
self.transaction_notifier.clone()
}
pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierLock> {
self.block_metadata_notifier.clone()
}
pub fn join(self) -> thread::Result<()> {
if let Some(mut slot_status_observer) = self.slot_status_observer {
slot_status_observer.join()?;


@@ -1,105 +0,0 @@
use {
crate::{
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier_interface::BlockMetadataNotifier,
},
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
ReplicaBlockInfo, ReplicaBlockInfoVersions,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
solana_transaction_status::{Reward, Rewards},
std::sync::{Arc, RwLock},
};
pub(crate) struct BlockMetadataNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}
impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
/// Notify the block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
let rewards = Self::build_rewards(rewards);
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-slot");
let block_info =
Self::build_replica_block_info(slot, blockhash, &rewards, block_time, block_height);
let block_info = ReplicaBlockInfoVersions::V0_0_1(&block_info);
match plugin.notify_block_metadata(block_info) {
Err(err) => {
error!(
"Failed to update block metadata at slot {}, error: {} to plugin {}",
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated block metadata at slot {} to plugin {}",
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-block-metadata-us",
measure.as_us() as usize,
1000,
1000
);
}
}
}
impl BlockMetadataNotifierImpl {
fn build_rewards(rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>) -> Rewards {
let rewards = rewards.read().unwrap();
rewards
.iter()
.map(|(pubkey, reward)| Reward {
pubkey: pubkey.to_string(),
lamports: reward.lamports,
post_balance: reward.post_balance,
reward_type: Some(reward.reward_type),
commission: reward.commission,
})
.collect()
}
fn build_replica_block_info<'a>(
slot: u64,
blockhash: &'a str,
rewards: &'a [Reward],
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) -> ReplicaBlockInfo<'a> {
ReplicaBlockInfo {
slot,
blockhash,
rewards,
block_time,
block_height,
}
}
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
Self { plugin_manager }
}
}


@@ -1,20 +0,0 @@
use {
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
std::sync::{Arc, RwLock},
};
/// Interface for notifying block metadata changes
pub trait BlockMetadataNotifier {
/// Notify the block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
);
}
pub type BlockMetadataNotifierLock = Arc<RwLock<dyn BlockMetadataNotifier + Sync + Send>>;


@@ -1,8 +1,6 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod block_metadata_notifier;
pub mod block_metadata_notifier_interface;
pub mod slot_status_notifier;
pub mod slot_status_observer;
pub mod transaction_notifier;


@@ -8,6 +8,7 @@ use {
solana_measure::measure::Measure,
solana_metrics::*,
solana_rpc::transaction_notifier_interface::TransactionNotifier,
solana_runtime::bank,
solana_sdk::{clock::Slot, signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::TransactionStatusMeta,
std::sync::{Arc, RwLock},
@@ -84,7 +85,7 @@ impl TransactionNotifierImpl {
) -> ReplicaTransactionInfo<'a> {
ReplicaTransactionInfo {
signature,
is_vote: transaction.is_simple_vote_transaction(),
is_vote: bank::is_simple_vote_transaction(transaction),
transaction,
transaction_status_meta,
}


@@ -0,0 +1,39 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"
[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -0,0 +1,5 @@
This is an example implementing the AccountsDb plugin for PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.


@@ -0,0 +1,184 @@
/**
* This plugin implementation for PostgreSQL requires the following tables
*/
-- The table storing accounts
CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status VARCHAR(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- Types for Transactions
CREATE TYPE "TransactionErrorCode" AS ENUM (
'AccountInUse',
'AccountLoadedTwice',
'AccountNotFound',
'ProgramAccountNotFound',
'InsufficientFundsForFee',
'InvalidAccountForFee',
'AlreadyProcessed',
'BlockhashNotFound',
'InstructionError',
'CallChainTooDeep',
'MissingSignatureForFee',
'InvalidAccountIndex',
'SignatureFailure',
'InvalidProgramForExecution',
'SanitizeFailure',
'ClusterMaintenance',
'AccountBorrowOutstanding',
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount'
);
CREATE TYPE "TransactionError" AS (
error_code "TransactionErrorCode",
error_detail VARCHAR(256)
);
CREATE TYPE "CompiledInstruction" AS (
program_id_index SMALLINT,
accounts SMALLINT[],
data BYTEA
);
CREATE TYPE "InnerInstructions" AS (
index SMALLINT,
instructions "CompiledInstruction"[]
);
CREATE TYPE "TransactionTokenBalance" AS (
account_index SMALLINT,
mint VARCHAR(44),
ui_token_amount DOUBLE PRECISION,
owner VARCHAR(44)
);
CREATE TYPE "RewardType" AS ENUM (
'Fee',
'Rent',
'Staking',
'Voting'
);
CREATE TYPE "Reward" AS (
pubkey VARCHAR(44),
lamports BIGINT,
post_balance BIGINT,
reward_type "RewardType",
commission SMALLINT
);
CREATE TYPE "TransactionStatusMeta" AS (
error "TransactionError",
fee BIGINT,
pre_balances BIGINT[],
post_balances BIGINT[],
inner_instructions "InnerInstructions"[],
log_messages TEXT[],
pre_token_balances "TransactionTokenBalance"[],
post_token_balances "TransactionTokenBalance"[],
rewards "Reward"[]
);
CREATE TYPE "TransactionMessageHeader" AS (
num_required_signatures SMALLINT,
num_readonly_signed_accounts SMALLINT,
num_readonly_unsigned_accounts SMALLINT
);
CREATE TYPE "TransactionMessage" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[]
);
CREATE TYPE "TransactionMessageAddressTableLookup" AS (
account_key BYTEA[],
writable_indexes SMALLINT[],
readonly_indexes SMALLINT[]
);
CREATE TYPE "TransactionMessageV0" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_table_lookups "TransactionMessageAddressTableLookup"[]
);
CREATE TYPE "LoadedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);
CREATE TYPE "LoadedMessageV0" AS (
message "TransactionMessageV0",
loaded_addresses "LoadedAddresses"
);
-- The table storing transactions
CREATE TABLE transaction (
slot BIGINT NOT NULL,
signature BYTEA NOT NULL,
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_loaded_message "LoadedMessageV0",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",
updated_on TIMESTAMP NOT NULL,
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);
/**
* The following is for keeping historical data for accounts and is not required for plugin to work.
*/
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();


@@ -0,0 +1,25 @@
/**
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
*/
DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;


@@ -0,0 +1,802 @@
# This is a reference configuration file for the PostgreSQL database version 14.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
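#
# For example (a hypothetical sketch, not shipped with this file):
#   shared_preload_libraries = 'pg_stat_statements'    # (change requires restart)
#   pg_stat_statements.max = 10000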


@@ -0,0 +1,74 @@
use {log::*, std::collections::HashSet};
#[derive(Debug)]
pub(crate) struct AccountsSelector {
pub accounts: HashSet<Vec<u8>>,
pub owners: HashSet<Vec<u8>>,
pub select_all_accounts: bool,
}
impl AccountsSelector {
pub fn default() -> Self {
AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts: true,
}
}
pub fn new(accounts: &[String], owners: &[String]) -> Self {
info!(
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
accounts, owners
);
let select_all_accounts = accounts.iter().any(|key| key == "*");
if select_all_accounts {
return AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts,
};
}
let accounts = accounts
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
let owners = owners
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
AccountsSelector {
accounts,
owners,
select_all_accounts,
}
}
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
}
    /// Check if any account is of interest at all
pub fn is_enabled(&self) -> bool {
self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_create_accounts_selector() {
AccountsSelector::new(
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
&[],
);
AccountsSelector::new(
&[],
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
);
}
}
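A minimal usage sketch of the selector above (an illustrative snippet, not part of the diff; the pubkey is the one used in the tests):

// Select every account owned by a given program.
let owner = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin";
let selector = AccountsSelector::new(&[], &[owner.to_string()]);
assert!(selector.is_enabled());
// Any account with this owner is selected, regardless of its own pubkey.
let owner_bytes = bs58::decode(owner).into_vec().unwrap();
assert!(selector.is_account_selected(&[0u8; 32], &owner_bytes));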


@@ -0,0 +1,437 @@
//! Main entry for the PostgreSQL plugin
use solana_measure::measure::Measure;
use {
crate::{
accounts_selector::AccountsSelector,
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
transaction_selector::TransactionSelector,
},
bs58,
log::*,
serde_derive::{Deserialize, Serialize},
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
ReplicaTransactionInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
thiserror::Error,
};
#[derive(Default)]
pub struct AccountsDbPluginPostgres {
client: Option<ParallelPostgresClient>,
accounts_selector: Option<AccountsSelector>,
transaction_selector: Option<TransactionSelector>,
}
impl std::fmt::Debug for AccountsDbPluginPostgres {
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
pub host: Option<String>,
pub user: Option<String>,
pub port: Option<u16>,
pub connection_str: Option<String>,
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
}
#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
#[error("Error connecting to the backend data store. Error message: ({msg})")]
DataStoreConnectionError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
DataSchemaError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
ConfigurationError { msg: String },
}
impl AccountsDbPlugin for AccountsDbPluginPostgres {
fn name(&self) -> &'static str {
"AccountsDbPluginPostgres"
}
/// Do initialization for the PostgreSQL plugin.
///
/// # Format of the config file:
    /// * The `accounts_selector` section allows the user to control account selection.
/// "accounts_selector" : {
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
/// or:
/// "accounts_selector" = {
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
/// }
    /// Accounts satisfying either the accounts condition or the owners condition will be selected.
    /// When only owners is specified,
    /// all accounts belonging to the owners will be streamed.
    /// The accounts field supports a wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
/// * "host", optional, specifies the PostgreSQL server.
/// * "user", optional, specifies the PostgreSQL user.
/// * "port", optional, specifies the PostgreSQL server's port.
/// * "connection_str", optional, the custom PostgreSQL connection string.
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
/// from restoring a snapshot. The default is '10'.
/// * "panic_on_db_errors", optional, contols if to panic when there are errors replicating data to the
/// PostgreSQL database. The default is 'false'.
/// * "transaction_selector", optional, controls if and what transaction to store. If this field is missing
/// None of the transction is stored.
/// "transaction_selector" : {
/// "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
    /// The `mentions` field supports a wildcard to select all transactions or all 'vote' transactions:
/// For example, to select all transactions:
/// "transaction_selector" : {
/// "mentions" : \["*"\],
/// }
/// To select all vote transactions:
/// "transaction_selector" : {
/// "mentions" : \["all_votes"\],
/// }
/// # Examples
///
/// {
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
/// "host": "host_foo",
/// "user": "solana",
/// "threads": 10,
/// "accounts_selector" : {
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
/// }
/// }
fn on_load(&mut self, config_file: &str) -> Result<()> {
solana_logger::setup_with_default("info");
info!(
"Loading plugin {:?} from config_file {:?}",
self.name(),
config_file
);
let mut file = File::open(config_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
        // Avoid panicking the host process on a malformed config file; report
        // the parse failure through the plugin's error type instead.
        let result: serde_json::Value = match serde_json::from_str(&contents) {
            Ok(value) => value,
            Err(err) => {
                return Err(AccountsDbPluginError::ConfigFileReadError {
                    msg: format!(
                        "The config file is not in the JSON format expected: {:?}",
                        err
                    ),
                })
            }
        };
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
serde_json::from_str(&contents);
match result {
Err(err) => {
return Err(AccountsDbPluginError::ConfigFileReadError {
msg: format!(
"The config file is not in the JSON format expected: {:?}",
err
),
})
}
Ok(config) => {
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
self.client = Some(client);
}
}
Ok(())
}
fn on_unload(&mut self) {
info!("Unloading plugin: {:?}", self.name());
match &mut self.client {
None => {}
Some(client) => {
client.join().unwrap();
}
}
}
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()> {
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
match account {
ReplicaAccountInfoVersions::V0_0_1(account) => {
let mut measure_select =
Measure::start("accountsdb-plugin-postgres-update-account-select");
if let Some(accounts_selector) = &self.accounts_selector {
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
return Ok(());
}
} else {
return Ok(());
}
measure_select.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-select-us",
measure_select.as_us() as usize,
100000,
100000
);
debug!(
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
bs58::encode(account.pubkey).into_string(),
bs58::encode(account.owner).into_string(),
slot,
self.accounts_selector.as_ref().unwrap()
);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database."
.to_string(),
},
)));
}
Some(client) => {
let mut measure_update =
Measure::start("accountsdb-plugin-postgres-update-account-client");
let result = { client.update_account(account, slot, is_startup) };
measure_update.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-client-us",
measure_update.as_us() as usize,
100000,
100000
);
if let Err(err) = result {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
}
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-main-us",
measure_all.as_us() as usize,
100000,
100000
);
Ok(())
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()> {
info!("Updating slot {:?} at with status {:?}", slot, status);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.update_slot_status(slot, parent, status);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<()> {
info!("Notifying the end of startup for accounts notifications");
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.notify_end_of_startup();
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_transaction(
&mut self,
transaction_info: ReplicaTransactionInfoVersions,
slot: u64,
) -> Result<()> {
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => match transaction_info {
ReplicaTransactionInfoVersions::V0_0_1(transaction_info) => {
if let Some(transaction_selector) = &self.transaction_selector {
if !transaction_selector.is_transaction_selected(
transaction_info.is_vote,
transaction_info.transaction.message().account_keys_iter(),
) {
return Ok(());
}
} else {
return Ok(());
}
let result = client.log_transaction_info(transaction_info, slot);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err)
});
}
}
},
}
Ok(())
}
/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.
fn account_data_notifications_enabled(&self) -> bool {
self.accounts_selector
.as_ref()
.map_or_else(|| false, |selector| selector.is_enabled())
}
/// Check if the plugin is interested in transaction data
fn transaction_notifications_enabled(&self) -> bool {
self.transaction_selector
.as_ref()
.map_or_else(|| false, |selector| selector.is_enabled())
}
}
impl AccountsDbPluginPostgres {
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
let accounts_selector = &config["accounts_selector"];
if accounts_selector.is_null() {
AccountsSelector::default()
} else {
let accounts = &accounts_selector["accounts"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
let owners = &accounts_selector["owners"];
let owners: Vec<String> = if owners.is_array() {
owners
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
AccountsSelector::new(&accounts, &owners)
}
}
fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector {
let transaction_selector = &config["transaction_selector"];
if transaction_selector.is_null() {
TransactionSelector::default()
} else {
let accounts = &transaction_selector["mentions"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
TransactionSelector::new(&accounts)
}
}
pub fn new() -> Self {
Self::default()
}
}
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
#[cfg(test)]
pub(crate) mod tests {
use {super::*, serde_json};
#[test]
fn test_accounts_selector_from_config() {
let config = "{\"accounts_selector\" : { \
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
}}";
let config: serde_json::Value = serde_json::from_str(config).unwrap();
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
}
}
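Putting the options documented in `on_load` together, a complete config file might look like the following (a sketch; the `libpath` and the pubkey are placeholders):

{
    "libpath": "/path/to/libsolana_accountsdb_plugin_postgres.so",
    "host": "localhost",
    "user": "solana",
    "port": 5433,
    "threads": 10,
    "batch_size": 10,
    "panic_on_db_errors": true,
    "accounts_selector" : {
        "owners" : ["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin"]
    },
    "transaction_selector" : {
        "mentions" : ["all_votes"]
    }
}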


@@ -0,0 +1,4 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
pub mod transaction_selector;


@@ -0,0 +1,905 @@
#![allow(clippy::integer_arithmetic)]
//! A concurrent implementation for writing accounts into PostgreSQL in parallel.
mod postgres_client_transaction;
use {
crate::accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
chrono::Utc,
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
postgres_client_transaction::LogTransactionRequest,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_sdk::timing::AtomicInterval,
std::{
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc, Mutex,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
},
tokio_postgres::types,
};
/// The maximum number of asynchronous requests allowed in the channel, to avoid excessive
/// memory usage. The downside: calls after this threshold is reached can block.
const MAX_ASYNC_REQUESTS: usize = 40960;
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
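/// Bundles one blocking PostgreSQL connection with the statements the plugin
/// prepares on it, so each statement is prepared once and then reused.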
struct PostgresSqlClientWrapper {
client: Client,
update_account_stmt: Statement,
bulk_account_insert_stmt: Statement,
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
update_transaction_log_stmt: Statement,
}
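/// A client writing over a single PostgreSQL connection. During startup,
/// account updates are buffered in `pending_account_updates` and written in
/// batches of `batch_size`; afterwards they are upserted one at a time.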
pub struct SimplePostgresClient {
batch_size: usize,
pending_account_updates: Vec<DbAccountInfo>,
client: Mutex<PostgresSqlClientWrapper>,
}
struct PostgresClientWorker {
client: SimplePostgresClient,
    /// Indicates whether the accounts notification during startup is done.
is_startup_done: bool,
}
impl Eq for DbAccountInfo {}
#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
pub pubkey: Vec<u8>,
pub lamports: i64,
pub owner: Vec<u8>,
pub executable: bool,
pub rent_epoch: i64,
pub data: Vec<u8>,
pub slot: i64,
pub write_version: i64,
}
pub(crate) fn abort() -> ! {
#[cfg(not(test))]
{
// standard error is usually redirected to a log file, cry for help on standard output as
// well
eprintln!("Validator process aborted. The validator log may contain further details");
std::process::exit(1);
}
#[cfg(test)]
panic!("process::exit(1) is intercepted for friendly test failure...");
}
impl DbAccountInfo {
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
let data = account.data().to_vec();
Self {
pubkey: account.pubkey().to_vec(),
lamports: account.lamports() as i64,
owner: account.owner().to_vec(),
executable: account.executable(),
rent_epoch: account.rent_epoch() as i64,
data,
slot: slot as i64,
write_version: account.write_version(),
}
}
}
pub trait ReadableAccountInfo: Sized {
fn pubkey(&self) -> &[u8];
fn owner(&self) -> &[u8];
fn lamports(&self) -> i64;
fn executable(&self) -> bool;
fn rent_epoch(&self) -> i64;
fn data(&self) -> &[u8];
fn write_version(&self) -> i64;
}
impl ReadableAccountInfo for DbAccountInfo {
fn pubkey(&self) -> &[u8] {
&self.pubkey
}
fn owner(&self) -> &[u8] {
&self.owner
}
fn lamports(&self) -> i64 {
self.lamports
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch
}
fn data(&self) -> &[u8] {
&self.data
}
fn write_version(&self) -> i64 {
self.write_version
}
}
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
fn pubkey(&self) -> &[u8] {
self.pubkey
}
fn owner(&self) -> &[u8] {
self.owner
}
fn lamports(&self) -> i64 {
self.lamports as i64
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch as i64
}
fn data(&self) -> &[u8] {
self.data
}
fn write_version(&self) -> i64 {
self.write_version as i64
}
}
pub trait PostgresClient {
fn join(&mut self) -> thread::Result<()> {
Ok(())
}
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError>;
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError>;
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
fn log_transaction(
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError>;
}
impl SimplePostgresClient {
fn connect_to_db(
config: &AccountsDbPluginPostgresConfig,
) -> Result<Client, AccountsDbPluginError> {
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
let connection_str = if let Some(connection_str) = &config.connection_str {
connection_str.clone()
} else {
if config.host.is_none() || config.user.is_none() {
let msg = format!(
"\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
config.connection_str, config.host, config.user
);
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::ConfigurationError { msg },
)));
}
format!(
"host={} user={} port={}",
config.host.as_ref().unwrap(),
config.user.as_ref().unwrap(),
port
)
};
match Client::connect(&connection_str, NoTls) {
Err(err) => {
let msg = format!(
"Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
err, connection_str
);
error!("{}", msg);
Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
)))
}
Ok(client) => Ok(client),
}
}
fn build_bulk_account_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
for j in 0..batch_size {
let row = j * ACCOUNT_COLUMN_COUNT;
let val_str = format!(
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
row + 1,
row + 2,
row + 3,
row + 4,
row + 5,
row + 6,
row + 7,
row + 8,
row + 9,
);
if j == 0 {
stmt = format!("{} {}", &stmt, val_str);
} else {
stmt = format!("{}, {}", &stmt, val_str);
}
}
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
stmt = format!("{} {}", stmt, handle_conflict);
info!("{}", stmt);
let bulk_stmt = client.prepare(&stmt);
match bulk_stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_single_account_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
VALUES ($1, $2, $3, $4) \
ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
fn build_slot_upsert_statement_without_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, status, updated_on) \
VALUES ($1, $2, $3) \
ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
}
/// Insert accounts in batch to reduce network overhead
fn insert_accounts_in_batch(
&mut self,
account: DbAccountInfo,
) -> Result<(), AccountsDbPluginError> {
self.pending_account_updates.push(account);
if self.pending_account_updates.len() == self.batch_size {
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
let mut values: Vec<&(dyn types::ToSql + Sync)> =
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
let updated_on = Utc::now().naive_utc();
for j in 0..self.batch_size {
let account = &self.pending_account_updates[j];
values.push(&account.pubkey);
values.push(&account.slot);
values.push(&account.owner);
values.push(&account.lamports);
values.push(&account.executable);
values.push(&account.rent_epoch);
values.push(&account.data);
values.push(&account.write_version);
values.push(&updated_on);
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-prepare-values-us",
measure.as_us() as usize,
10000,
10000
);
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
let client = self.client.get_mut().unwrap();
let result = client
.client
.query(&client.bulk_account_insert_stmt, &values);
self.pending_account_updates.clear();
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-us",
measure.as_us() as usize,
10000,
10000
);
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-count",
self.batch_size,
10000,
10000
);
}
Ok(())
}
    /// Flush any leftover buffered accounts that did not fill a complete batch
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
if self.pending_account_updates.is_empty() {
return Ok(());
}
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
}
Ok(())
}
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating SimplePostgresClient...");
let mut client = Self::connect_to_db(config)?;
let bulk_account_insert_stmt =
Self::build_bulk_account_insert_statement(&mut client, config)?;
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
let update_slot_with_parent_stmt =
Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
let update_slot_without_parent_stmt =
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let update_transaction_log_stmt =
Self::build_transaction_info_upsert_statement(&mut client, config)?;
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
pending_account_updates: Vec::with_capacity(batch_size),
client: Mutex::new(PostgresSqlClientWrapper {
client,
update_account_stmt,
bulk_account_insert_stmt,
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
update_transaction_log_stmt,
}),
})
}
}
impl PostgresClient for SimplePostgresClient {
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
trace!(
"Updating account {} with owner {} at slot {}",
bs58::encode(account.pubkey()).into_string(),
bs58::encode(account.owner()).into_string(),
account.slot,
);
if !is_startup {
return self.upsert_account(&account);
}
self.insert_accounts_in_batch(account)
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
info!("Updating slot {:?} at with status {:?}", slot, status);
let slot = slot as i64; // postgres only supports i64
let parent = parent.map(|parent| parent as i64);
let updated_on = Utc::now().naive_utc();
let status_str = status.as_str();
let client = self.client.get_mut().unwrap();
let result = match parent {
Some(parent) => client.client.execute(
&client.update_slot_with_parent_stmt,
&[&slot, &parent, &status_str, &updated_on],
),
None => client.client.execute(
&client.update_slot_without_parent_stmt,
&[&slot, &status_str, &updated_on],
),
};
match result {
Err(err) => {
let msg = format!(
"Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
err
);
error!("{:?}", msg);
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
}
Ok(rows) => {
                assert_eq!(1, rows, "Expected one row to be updated at a time");
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
self.flush_buffered_writes()
}
fn log_transaction(
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError> {
self.log_transaction_impl(transaction_log_info)
}
}
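/// Request to upsert a single account, sent to a worker thread.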
struct UpdateAccountRequest {
account: DbAccountInfo,
is_startup: bool,
}
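/// Request to upsert a slot's status, sent to a worker thread.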
struct UpdateSlotRequest {
slot: u64,
parent: Option<u64>,
slot_status: SlotStatus,
}
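/// Work items consumed by the worker threads; the variants are boxed so the
/// enum itself stays small on the channel.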
#[warn(clippy::large_enum_variant)]
enum DbWorkItem {
UpdateAccount(Box<UpdateAccountRequest>),
UpdateSlot(Box<UpdateSlotRequest>),
LogTransaction(Box<LogTransactionRequest>),
}
impl PostgresClientWorker {
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
let result = SimplePostgresClient::new(&config);
match result {
Ok(client) => Ok(PostgresClientWorker {
client,
is_startup_done: false,
}),
Err(err) => {
error!("Error in creating SimplePostgresClient: {}", err);
Err(err)
}
}
}
fn do_work(
&mut self,
receiver: Receiver<DbWorkItem>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
panic_on_db_errors: bool,
) -> Result<(), AccountsDbPluginError> {
while !exit_worker.load(Ordering::Relaxed) {
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
let work = receiver.recv_timeout(Duration::from_millis(500));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-worker-recv-us",
measure.as_us() as usize,
100000,
100000
);
match work {
Ok(work) => match work {
DbWorkItem::UpdateAccount(request) => {
if let Err(err) = self
.client
.update_account(request.account, request.is_startup)
{
error!("Failed to update account: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::UpdateSlot(request) => {
if let Err(err) = self.client.update_slot_status(
request.slot,
request.parent,
request.slot_status,
) {
error!("Failed to update slot: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::LogTransaction(transaction_log_info) => {
self.client.log_transaction(*transaction_log_info)?;
}
},
Err(err) => match err {
RecvTimeoutError::Timeout => {
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
if let Err(err) = self.client.notify_end_of_startup() {
error!("Error in notifying end of startup: ({})", err);
if panic_on_db_errors {
abort();
}
}
self.is_startup_done = true;
startup_done_count.fetch_add(1, Ordering::Relaxed);
}
continue;
}
_ => {
error!("Error in receiving the item {:?}", err);
if panic_on_db_errors {
abort();
}
break;
}
},
}
}
Ok(())
}
}
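/// Fans work out to a pool of `PostgresClientWorker` threads over a bounded
/// channel of capacity `MAX_ASYNC_REQUESTS`, so senders block instead of
/// growing memory without bound when the database falls behind.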
pub struct ParallelPostgresClient {
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
initialized_worker_count: Arc<AtomicUsize>,
sender: Sender<DbWorkItem>,
last_report: AtomicInterval,
}
impl ParallelPostgresClient {
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating ParallelPostgresClient...");
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
let exit_worker = Arc::new(AtomicBool::new(false));
let mut workers = Vec::default();
let is_startup_done = Arc::new(AtomicBool::new(false));
let startup_done_count = Arc::new(AtomicUsize::new(0));
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
for i in 0..worker_count {
let cloned_receiver = receiver.clone();
let exit_clone = exit_worker.clone();
let is_startup_done_clone = is_startup_done.clone();
let startup_done_count_clone = startup_done_count.clone();
let initialized_worker_count_clone = initialized_worker_count.clone();
let config = config.clone();
let worker = Builder::new()
.name(format!("worker-{}", i))
.spawn(move || -> Result<(), AccountsDbPluginError> {
let panic_on_db_errors = *config
.panic_on_db_errors
.as_ref()
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
let result = PostgresClientWorker::new(config);
match result {
Ok(mut worker) => {
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
worker.do_work(
cloned_receiver,
exit_clone,
is_startup_done_clone,
startup_done_count_clone,
panic_on_db_errors,
)?;
Ok(())
}
Err(err) => {
error!("Error when making connection to database: ({})", err);
if panic_on_db_errors {
abort();
}
Err(err)
}
}
})
.unwrap();
workers.push(worker);
}
info!("Created ParallelPostgresClient.");
Ok(Self {
last_report: AtomicInterval::default(),
workers,
exit_worker,
is_startup_done,
startup_done_count,
initialized_worker_count,
sender,
})
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_worker.store(true, Ordering::Relaxed);
while !self.workers.is_empty() {
let worker = self.workers.pop();
if worker.is_none() {
break;
}
let worker = worker.unwrap();
let result = worker.join().unwrap();
if result.is_err() {
error!("The worker thread has failed: {:?}", result);
}
}
Ok(())
}
pub fn update_account(
&mut self,
account: &ReplicaAccountInfo,
slot: u64,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
if self.last_report.should_update(30000) {
datapoint_debug!(
"postgres-plugin-stats",
("message-queue-length", self.sender.len() as i64, i64),
);
}
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
let wrk_item = DbWorkItem::UpdateAccount(Box::new(UpdateAccountRequest {
account: DbAccountInfo::new(account, slot),
is_startup,
}));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-create-work-item-us",
measure.as_us() as usize,
100000,
100000
);
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
if let Err(err) = self.sender.send(wrk_item) {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!(
"Failed to update the account {:?}, error: {:?}",
bs58::encode(account.pubkey()).into_string(),
err
),
});
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-send-msg-us",
measure.as_us() as usize,
100000,
100000
);
Ok(())
}
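
The Measure/inc_new_counter_debug pairing above follows solana-measure's start/stop API, as exercised throughout this file; a minimal sketch of the timing half:

use solana_measure::measure::Measure;

fn main() {
    let mut measure = Measure::start("example-op");
    // ... the operation being timed goes here ...
    measure.stop();
    println!("example-op took {} us", measure.as_us());
}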
pub fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self
.sender
.send(DbWorkItem::UpdateSlot(Box::new(UpdateSlotRequest {
slot,
parent,
slot_status: status,
})))
{
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
});
}
Ok(())
}
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue have been received by the workers
while !self.sender.is_empty() {
sleep(Duration::from_millis(100));
}
self.is_startup_done.store(true, Ordering::Relaxed);
// Wait for all worker threads to be done with flushing
while self.startup_done_count.load(Ordering::Relaxed)
!= self.initialized_worker_count.load(Ordering::Relaxed)
{
info!(
"Startup done count: {}, good worker thread count: {}",
self.startup_done_count.load(Ordering::Relaxed),
self.initialized_worker_count.load(Ordering::Relaxed)
);
sleep(Duration::from_millis(100));
}
info!("Done with notifying the end of startup");
Ok(())
}
}
pub struct PostgresClientBuilder {}
impl PostgresClientBuilder {
pub fn build_pararallel_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
ParallelPostgresClient::new(config)
}
pub fn build_simple_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
SimplePostgresClient::new(config)
}
}
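
A condensed sketch of the end-of-startup handshake implemented by notify_end_of_startup and do_work above: the producer flips a shared flag, and each worker flushes and acknowledges exactly once by bumping a counter. Thread counts and sleep intervals are illustrative:

use std::{
    sync::{
        atomic::{AtomicBool, AtomicUsize, Ordering},
        Arc,
    },
    thread,
    time::Duration,
};

fn main() {
    let is_startup_done = Arc::new(AtomicBool::new(false));
    let startup_done_count = Arc::new(AtomicUsize::new(0));
    let worker_count: usize = 4;

    let workers: Vec<_> = (0..worker_count)
        .map(|_| {
            let is_startup_done = is_startup_done.clone();
            let startup_done_count = startup_done_count.clone();
            thread::spawn(move || {
                // Each worker polls the flag (the real code piggybacks on its
                // recv timeout), flushes once, then acknowledges.
                while !is_startup_done.load(Ordering::Relaxed) {
                    thread::sleep(Duration::from_millis(10));
                }
                // flush batched writes here, then acknowledge
                startup_done_count.fetch_add(1, Ordering::Relaxed);
            })
        })
        .collect();

    // Producer side: signal, then wait for every worker's acknowledgement.
    is_startup_done.store(true, Ordering::Relaxed);
    while startup_done_count.load(Ordering::Relaxed) != worker_count {
        thread::sleep(Duration::from_millis(10));
    }
    workers.into_iter().for_each(|w| w.join().unwrap());
}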

File diff suppressed because it is too large

View File

@@ -0,0 +1,194 @@
/// The transaction selector is responsible for filtering transactions
/// in the plugin framework.
use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet};
pub(crate) struct TransactionSelector {
pub mentioned_addresses: HashSet<Vec<u8>>,
pub select_all_transactions: bool,
pub select_all_vote_transactions: bool,
}
#[allow(dead_code)]
impl TransactionSelector {
pub fn default() -> Self {
Self {
mentioned_addresses: HashSet::default(),
select_all_transactions: false,
select_all_vote_transactions: false,
}
}
/// Create a selector based on the mentioned addresses.
/// To select all transactions, use ["*"] or ["all"].
/// To select all vote transactions, use ["all_votes"].
/// To select transactions mentioning specific addresses, use ["<pubkey1>", "<pubkey2>", ...].
pub fn new(mentioned_addresses: &[String]) -> Self {
info!(
"Creating TransactionSelector from addresses: {:?}",
mentioned_addresses
);
let select_all_transactions = mentioned_addresses
.iter()
.any(|key| key == "*" || key == "all");
if select_all_transactions {
return Self {
mentioned_addresses: HashSet::default(),
select_all_transactions,
select_all_vote_transactions: true,
};
}
let select_all_vote_transactions = mentioned_addresses.iter().any(|key| key == "all_votes");
if select_all_vote_transactions {
return Self {
mentioned_addresses: HashSet::default(),
select_all_transactions,
select_all_vote_transactions: true,
};
}
let mentioned_addresses = mentioned_addresses
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
Self {
mentioned_addresses,
select_all_transactions: false,
select_all_vote_transactions: false,
}
}
/// Check if a transaction is of interest.
pub fn is_transaction_selected(
&self,
is_vote: bool,
mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>,
) -> bool {
if !self.is_enabled() {
return false;
}
if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) {
return true;
}
for address in mentioned_addresses {
if self.mentioned_addresses.contains(address.as_ref()) {
return true;
}
}
false
}
/// Check if any transaction is of interest at all
pub fn is_enabled(&self) -> bool {
self.select_all_transactions
|| self.select_all_vote_transactions
|| !self.mentioned_addresses.is_empty()
}
}
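
Typical construction, mirroring the selection rules in the doc comment on new above (the address literal is illustrative; the tests below exercise the same paths):

// Select only transactions that mention one of these base58 addresses.
let selector = TransactionSelector::new(&[
    "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string(),
]);
assert!(selector.is_enabled());

// Or select every transaction, votes included.
let all = TransactionSelector::new(&["*".to_string()]);
assert!(all.select_all_transactions);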
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_select_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&[pubkey1.to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_transaction_using_wildcard() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["*".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_transaction_all() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["all".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_vote_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["all_votes".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
}
#[test]
fn test_select_no_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&[]);
assert!(!selector.is_enabled());
let addresses = [pubkey1];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
}
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-poh = { path = "../poh", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-core = { path = "../core", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,7 +1,7 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
crossbeam_channel::{unbounded, Receiver},
crossbeam_channel::unbounded,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
@@ -11,10 +11,9 @@ use {
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packet_batches,
solana_perf::packet::to_packets_chunked,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
@@ -29,7 +28,7 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{atomic::Ordering, Arc, Mutex, RwLock},
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
thread::sleep,
time::{Duration, Instant},
},
@@ -175,11 +174,6 @@ fn main() {
let mut bank_forks = BankForks::new(bank0);
let mut bank = bank_forks.working_bank();
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
info!("threads: {} txs: {}", num_threads, total_num_transactions);
let same_payer = matches.is_present("same_payer");
@@ -218,19 +212,14 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(
&bank,
&blockstore,
None,
Some(leader_schedule_cache.clone()),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
@@ -340,17 +329,9 @@ fn main() {
bank = bank_forks.working_bank();
insert_time.stop();
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker().unwrap().set_limits(
std::u64::MAX,
std::u64::MAX,
std::u64::MAX,
);
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {
leader_schedule_cache.set_root(&bank);
bank_forks.set_root(root, &AbsRequestSender::default(), None);
root += 1;
}
@@ -383,7 +364,7 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.10.0"
version = "1.9.0"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,19 +10,18 @@ documentation = "https://docs.rs/solana-banks-client"
edition = "2021"
[dependencies]
borsh = "0.9.3"
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-program = { path = "../sdk/program", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-program = { path = "../sdk/program", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-banks-server = { path = "../banks-server", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-banks-server = { path = "../banks-server", version = "=1.9.0" }
[lib]
crate-type = ["lib"]

View File

@@ -1,73 +0,0 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
std::io,
tarpc::client::RpcError,
thiserror::Error,
};
/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
#[error("client error: {0}")]
ClientError(&'static str),
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
RpcError(#[from] RpcError),
#[error("transport transaction error: {0}")]
TransactionError(#[from] TransactionError),
#[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
SimulationError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
},
}
impl BanksClientError {
pub fn unwrap(&self) -> TransactionError {
match self {
BanksClientError::TransactionError(err)
| BanksClientError::SimulationError { err, .. } => err.clone(),
_ => panic!("unexpected transport error"),
}
}
}
impl From<BanksClientError> for io::Error {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::Io(err) => err,
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::TransactionError(err) => {
Self::new(io::ErrorKind::Other, err.to_string())
}
BanksClientError::SimulationError { err, .. } => {
Self::new(io::ErrorKind::Other, err.to_string())
}
}
}
}
impl From<BanksClientError> for TransportError {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::Io(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::RpcError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::TransactionError(err) => Self::TransactionError(err),
BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
}
}
}

View File

@@ -5,14 +5,11 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
pub use {
crate::error::BanksClientError,
solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus},
};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt, TryFutureExt},
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
rent::Rent, sysvar::Sysvar,
@@ -23,7 +20,9 @@ use {
message::Message,
signature::Signature,
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
tarpc::{
client::{self, NewClient, RequestDispatch},
context::{self, Context},
@@ -34,8 +33,6 @@ use {
tokio_serde::formats::Bincode,
};
mod error;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -60,10 +57,8 @@ impl BanksClient {
&mut self,
ctx: Context,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
self.inner
.send_transaction_with_context(ctx, transaction)
.map_err(Into::into)
) -> impl Future<Output = io::Result<()>> + '_ {
self.inner.send_transaction_with_context(ctx, transaction)
}
#[deprecated(
@@ -74,41 +69,35 @@ impl BanksClient {
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
#[allow(deprecated)]
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
.map_err(Into::into)
}
pub fn get_transaction_status_with_context(
&mut self,
ctx: Context,
signature: Signature,
) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
self.inner
.get_transaction_status_with_context(ctx, signature)
.map_err(Into::into)
}
pub fn get_slot_with_context(
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
self.inner
.get_slot_with_context(ctx, commitment)
.map_err(Into::into)
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_slot_with_context(ctx, commitment)
}
pub fn get_block_height_with_context(
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
self.inner
.get_block_height_with_context(ctx, commitment)
.map_err(Into::into)
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
}
pub fn process_transaction_with_commitment_and_context(
@@ -116,26 +105,9 @@ impl BanksClient {
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
self.inner
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map_err(Into::into)
}
pub fn process_transaction_with_preflight_and_commitment_and_context(
&mut self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
{
self.inner
.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map_err(Into::into)
}
pub fn get_account_with_commitment_and_context(
@@ -143,10 +115,9 @@ impl BanksClient {
ctx: Context,
address: Pubkey,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.inner
.get_account_with_commitment_and_context(ctx, address, commitment)
.map_err(Into::into)
}
/// Send a transaction and return immediately. The server will resend the
@@ -155,7 +126,7 @@ impl BanksClient {
pub fn send_transaction(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<()>> + '_ {
self.send_transaction_with_context(context::current(), transaction)
}
@@ -168,25 +139,23 @@ impl BanksClient {
)]
pub fn get_fees(
&mut self,
) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
#[allow(deprecated)]
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
}
/// Return the cluster Sysvar
pub fn get_sysvar<T: Sysvar>(
&mut self,
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?;
from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError(
"Failed to deserialize sysvar",
))
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
})
}
/// Return the cluster rent
pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ {
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
self.get_sysvar::<Rent>()
}
@@ -194,11 +163,8 @@ impl BanksClient {
/// transactions with a blockhash that has not yet expired. Use the `get_fees`
/// method to get both a blockhash and the blockhash's last valid slot.
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
pub fn get_recent_blockhash(
&mut self,
) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
#[allow(deprecated)]
self.get_fees().map(|result| Ok(result?.1))
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash()
}
/// Send a transaction and return after the transaction has been rejected or
@@ -207,71 +173,23 @@ impl BanksClient {
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
) -> impl Future<Output = transport::Result<()>> + '_ {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map(|result| match result? {
None => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
None => {
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
}
Some(transaction_result) => Ok(transaction_result?),
})
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been rejected or reached the given level of commitment.
pub fn process_transaction_with_preflight_and_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map(|result| match result? {
BanksTransactionResultWithSimulation {
result: None,
simulation_details: _,
} => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(simulation_details),
} => Err(BanksClientError::SimulationError {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
}),
BanksTransactionResultWithSimulation {
result: Some(result),
simulation_details: _,
} => result.map_err(Into::into),
})
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been finalized or rejected.
pub fn process_transaction_with_preflight(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
self.process_transaction_with_preflight_and_commitment(
transaction,
CommitmentLevel::default(),
)
}
/// Send a transaction and return after the transaction has been finalized or rejected.
pub fn process_transaction(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
) -> impl Future<Output = transport::Result<()>> + '_ {
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
}
@@ -279,7 +197,7 @@ impl BanksClient {
&mut self,
transactions: Vec<Transaction>,
commitment: CommitmentLevel,
) -> Result<(), BanksClientError> {
) -> transport::Result<()> {
let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
let futures = clients
.iter_mut()
@@ -295,21 +213,19 @@ impl BanksClient {
pub fn process_transactions(
&mut self,
transactions: Vec<Transaction>,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
) -> impl Future<Output = transport::Result<()>> + '_ {
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
}
/// Return the most recent rooted slot. All transactions at or below this slot
/// are said to be finalized. The cluster will not fork to a higher slot.
pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_slot_with_context(context::current(), CommitmentLevel::default())
}
/// Return the most recent rooted block height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher block height.
pub fn get_root_block_height(
&mut self,
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
}
@@ -319,7 +235,7 @@ impl BanksClient {
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.get_account_with_commitment_and_context(context::current(), address, commitment)
}
@@ -328,7 +244,7 @@ impl BanksClient {
pub fn get_account(
&mut self,
address: Pubkey,
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.get_account_with_commitment(address, CommitmentLevel::default())
}
@@ -337,11 +253,12 @@ impl BanksClient {
pub fn get_packed_account_data<T: Pack>(
&mut self,
address: Pubkey,
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
T::unpack_from_slice(&account.data)
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
})
}
@@ -350,10 +267,11 @@ impl BanksClient {
pub fn get_account_data_with_borsh<T: BorshDeserialize>(
&mut self,
address: Pubkey,
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
T::try_from_slice(&account.data).map_err(Into::into)
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
})
}
@@ -363,17 +281,14 @@ impl BanksClient {
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<u64>> + '_ {
self.get_account_with_commitment_and_context(context::current(), address, commitment)
.map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
}
/// Return the balance in lamports of an account at the given address at the time
/// of the most recent root slot.
pub fn get_balance(
&mut self,
address: Pubkey,
) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
self.get_balance_with_commitment(address, CommitmentLevel::default())
}
@@ -385,7 +300,7 @@ impl BanksClient {
pub fn get_transaction_status(
&mut self,
signature: Signature,
) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
self.get_transaction_status_with_context(context::current(), signature)
}
@@ -393,7 +308,7 @@ impl BanksClient {
pub async fn get_transaction_statuses(
&mut self,
signatures: Vec<Signature>,
) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> {
) -> io::Result<Vec<Option<TransactionStatus>>> {
// tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
let mut clients_and_signatures: Vec<_> = signatures
.into_iter()
@@ -410,22 +325,19 @@ impl BanksClient {
statuses.into_iter().collect()
}
pub fn get_latest_blockhash(
&mut self,
) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
pub fn get_latest_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash_with_commitment(CommitmentLevel::default())
.map(|result| {
result?
.map(|x| x.0)
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
.map_err(Into::into)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
})
}
pub fn get_latest_blockhash_with_commitment(
&mut self,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment)
}
@@ -433,10 +345,9 @@ impl BanksClient {
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.inner
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
.map_err(Into::into)
}
pub fn get_fee_for_message_with_commitment_and_context(
@@ -444,14 +355,13 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
message: Message,
) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ {
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
self.inner
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
.map_err(Into::into)
}
}
pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError>
pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
where
C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
{
@@ -460,7 +370,7 @@ where
})
}
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> {
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
let transport = tcp::connect(addr, Bincode::default).await?;
Ok(BanksClient {
inner: TarpcClient::new(client::Config::default(), transport).spawn(),
@@ -489,7 +399,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
fn test_banks_server_transfer_via_server() -> io::Result<()> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
@@ -522,7 +432,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
fn test_banks_server_transfer_via_client() -> io::Result<()> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.
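
As the test comment above notes, the preferred pattern is one explicitly created runtime and a single block_on. A sketch under the 1.9-side io::Result signatures shown in this diff, assuming a banks server is already listening at addr:

use {
    solana_banks_client::start_tcp_client,
    solana_sdk::{
        message::Message,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        system_instruction,
        transaction::Transaction,
    },
    std::io,
};

fn transfer_example(addr: std::net::SocketAddr, payer: &Keypair, to: Pubkey) -> io::Result<()> {
    let runtime = tokio::runtime::Runtime::new()?;
    runtime.block_on(async {
        let mut client = start_tcp_client(addr).await?;
        let blockhash = client.get_latest_blockhash().await?;
        let ix = system_instruction::transfer(&payer.pubkey(), &to, 1);
        let message = Message::new(&[ix], Some(&payer.pubkey()));
        let tx = Transaction::new(&[payer], message, blockhash);
        // process_transaction returns transport::Result here, so adapt it.
        client
            .process_transaction(tx)
            .await
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))
    })
}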

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.10.0"
version = "1.9.0"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-banks-interface"
edition = "2021"
[dependencies]
serde = { version = "1.0.136", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
tarpc = { version = "0.27.2", features = ["full"] }
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
[lib]
crate-type = ["lib"]

View File

@@ -30,19 +30,6 @@ pub struct TransactionStatus {
pub confirmation_status: Option<TransactionConfirmationStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
pub result: Option<transaction::Result<()>>,
pub simulation_details: Option<TransactionSimulationDetails>,
}
#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
@@ -57,10 +44,6 @@ pub trait Banks {
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_preflight_and_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.10.0"
version = "1.9.0"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,13 +11,12 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
crossbeam-channel = "0.5"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
tarpc = { version = "0.27.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"

View File

@@ -1,16 +1,10 @@
use {
bincode::{deserialize, serialize},
crossbeam_channel::{unbounded, Receiver, Sender},
futures::{future, prelude::stream::StreamExt},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
},
solana_runtime::{
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
@@ -21,7 +15,7 @@ use {
message::{Message, SanitizedMessage},
pubkey::Pubkey,
signature::Signature,
transaction::{self, SanitizedTransaction, Transaction},
transaction::{self, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
@@ -31,14 +25,17 @@ use {
convert::TryFrom,
io,
net::{Ipv4Addr, SocketAddr},
sync::{Arc, RwLock},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
},
tarpc::{
context::Context,
serde_transport::tcp,
server::{self, incoming::Incoming, Channel},
server::{self, Channel, Incoming},
transport::{self, channel::UnboundedChannel},
ClientMessage, Response,
},
@@ -94,7 +91,7 @@ impl BanksServer {
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
let (transaction_sender, transaction_receiver) = unbounded();
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot();
{
@@ -245,47 +242,6 @@ impl Banks for BanksServer {
self.bank(commitment).block_height()
}
async fn process_transaction_with_preflight_and_commitment_and_context(
self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation {
let sanitized_transaction =
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
Err(err) => {
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: None,
};
}
Ok(tx) => tx,
};
if let TransactionSimulationResult {
result: Err(err),
logs,
post_simulation_accounts: _,
units_consumed,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
{
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
}),
};
}
BanksTransactionResultWithSimulation {
result: self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await,
simulation_details: None,
}
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
@@ -390,7 +346,7 @@ pub async fn start_tcp_server(
// serve is generated by the service attribute. It takes as input any type implementing
// the generated Banks trait.
.map(move |chan| {
let (sender, receiver) = unbounded();
let (sender, receiver) = channel();
SendTransactionService::new::<NullTpuInfo>(
tpu_addr,

View File

@@ -2,18 +2,19 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
crossbeam-channel = "0.5"
clap = "2.33.1"
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,16 +1,16 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
crossbeam_channel::unbounded,
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketReceiver},
},
std::{
cmp::max,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc,
},
thread::{sleep, spawn, JoinHandle, Result},
@@ -20,19 +20,19 @@ use {
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
}
let packet_batch = Arc::new(packet_batch);
let msgs = Arc::new(msgs);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &packet_batch.packets {
for p in &msgs.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
}
})
}
@@ -67,22 +67,13 @@ fn main() -> Result<()> {
.takes_value(true)
.help("Use NUM receive sockets"),
)
.arg(
Arg::with_name("num-producers")
.long("num-producers")
.value_name("NUM")
.takes_value(true)
.help("Use this many producer threads."),
)
.get_matches();
if let Some(n) = matches.value_of("num-recv-sockets") {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
}
let num_producers = value_t!(matches, "num_producers", u64).unwrap_or(4);
let port = 0;
let mut port = 0;
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let mut addr = SocketAddr::new(ip_addr, 0);
@@ -90,18 +81,15 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketBatchRecycler::default();
let (_port, read_sockets) = solana_net_utils::multi_bind_in_range(
ip_addr,
(port, port + num_sockets as u16),
num_sockets,
)
.unwrap();
for read in read_sockets {
let recycler = PacketsRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
addr = read.local_addr().unwrap();
let (s_reader, r_reader) = unbounded();
port = addr.port();
let (s_reader, r_reader) = channel();
read_channels.push(r_reader);
read_threads.push(receiver(
Arc::new(read),
@@ -114,10 +102,9 @@ fn main() -> Result<()> {
));
}
let producer_threads: Vec<_> = (0..num_producers)
.into_iter()
.map(|_| producer(&addr, exit.clone()))
.collect();
let t_producer1 = producer(&addr, exit.clone());
let t_producer2 = producer(&addr, exit.clone());
let t_producer3 = producer(&addr, exit.clone());
let rvs = Arc::new(AtomicUsize::new(0));
let sink_threads: Vec<_> = read_channels
@@ -137,9 +124,9 @@ fn main() -> Result<()> {
for t_reader in read_threads {
t_reader.join()?;
}
for t_producer in producer_threads {
t_producer.join()?;
}
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
for t_sink in sink_threads {
t_sink.join()?;
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,28 +10,27 @@ publish = false
[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.5"
log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.78"
serde_yaml = "0.8.23"
solana-core = { path = "../core", version = "=1.10.0" }
solana-genesis = { path = "../genesis", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.0" }
solana-genesis = { path = "../genesis", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -110,7 +110,7 @@ fn generate_chunked_transfers(
shared_txs: &SharedTransactions,
shared_tx_active_thread_count: Arc<AtomicIsize>,
source_keypair_chunks: Vec<Vec<&Keypair>>,
dest_keypair_chunks: &mut [VecDeque<&Keypair>],
dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
threads: usize,
duration: Duration,
sustained: bool,

View File

@@ -21,7 +21,7 @@ pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
fn main() {
solana_logger::setup_with_default("solana=info");
solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
solana_metrics::set_panic_hook("bench-tps");
let matches = cli::build_args(solana_version::version!()).get_matches();
let cli_config = cli::extract_args(&matches);

View File

@@ -1,6 +1,5 @@
#![allow(clippy::integer_arithmetic)]
use {
crossbeam_channel::unbounded,
serial_test::serial,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
@@ -16,7 +15,10 @@ use {
},
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::{sync::Arc, time::Duration},
std::{
sync::{mpsc::channel, Arc},
time::Duration,
},
};
fn test_bench_tps_local_cluster(config: Config) {
@@ -29,7 +31,7 @@ fn test_bench_tps_local_cluster(config: Config) {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
&ValidatorConfig::default(),
NUM_NODES,
),
native_instruction_processors,
@@ -50,7 +52,7 @@ fn test_bench_tps_local_cluster(config: Config) {
VALIDATOR_PORT_RANGE,
));
let (addr_sender, addr_receiver) = unbounded();
let (addr_sender, addr_receiver) = channel();
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))

View File

@@ -1,32 +0,0 @@
[package]
name = "solana-bloom"
version = "1.10.0"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2021"
[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.136", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
log = "0.4.14"
[lib]
crate-type = ["lib"]
name = "solana_bloom"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"

View File

@@ -1 +0,0 @@
../frozen-abi/build.rs

View File

@@ -1,5 +0,0 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;
#[macro_use]
extern crate solana_frozen_abi_macro;

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.10.0"
version = "1.9.0"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -11,18 +11,15 @@ license = "Apache-2.0"
edition = "2021"
[dependencies]
solana-sdk = { path = "../sdk", version = "=1.10.0" }
memmap2 = "0.5.2"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.10.0" }
rand = "0.7.0"
tempfile = "3.3.0"
modular-bitfield = "0.11.2"
[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.0" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ use {
bucket_item::BucketItem,
bucket_map::BucketMapError,
bucket_stats::BucketMapStats,
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2},
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2, UID_UNLOCKED},
index_entry::IndexEntry,
MaxSearch, RefCount,
},
@@ -17,7 +17,7 @@ use {
ops::RangeBounds,
path::PathBuf,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
},
@@ -81,7 +81,6 @@ impl<T: Clone + Copy> Bucket<T> {
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
let index = BucketStorage::new(
Arc::clone(&drives),
@@ -89,7 +88,6 @@ impl<T: Clone + Copy> Bucket<T> {
std::mem::size_of::<IndexEntry>() as u64,
max_search,
Arc::clone(&stats.index),
count,
);
Self {
random: thread_rng().gen(),
@@ -102,10 +100,14 @@ impl<T: Clone + Copy> Bucket<T> {
}
}
pub fn bucket_len(&self) -> u64 {
self.index.used.load(Ordering::Relaxed)
}
pub fn keys(&self) -> Vec<Pubkey> {
let mut rv = vec![];
for i in 0..self.index.capacity() {
if self.index.is_free(i) {
if self.index.uid(i) == UID_UNLOCKED {
continue;
}
let ix: &IndexEntry = self.index.get(i);
@@ -118,10 +120,10 @@ impl<T: Clone + Copy> Bucket<T> {
where
R: RangeBounds<Pubkey>,
{
let mut result = Vec::with_capacity(self.index.count.load(Ordering::Relaxed) as usize);
let mut result = Vec::with_capacity(self.index.used.load(Ordering::Relaxed) as usize);
for i in 0..self.index.capacity() {
let ii = i % self.index.capacity();
if self.index.is_free(ii) {
if self.index.uid(ii) == UID_UNLOCKED {
continue;
}
let ix: &IndexEntry = self.index.get(ii);
@@ -154,7 +156,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.is_free(ii) {
if index.uid(ii) == UID_UNLOCKED {
continue;
}
let elem: &mut IndexEntry = index.get_mut(ii);
@@ -173,7 +175,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.is_free(ii) {
if index.uid(ii) == UID_UNLOCKED {
continue;
}
let elem: &IndexEntry = index.get(ii);
@@ -185,23 +187,26 @@ impl<T: Clone + Copy> Bucket<T> {
}
fn bucket_create_key(
index: &mut BucketStorage,
index: &BucketStorage,
key: &Pubkey,
elem_uid: Uid,
random: u64,
is_resizing: bool,
) -> Result<u64, BucketMapError> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i as u64 % index.capacity();
if !index.is_free(ii) {
if index.uid(ii) != UID_UNLOCKED {
continue;
}
index.allocate(ii, elem_uid, is_resizing).unwrap();
let elem: &mut IndexEntry = index.get_mut(ii);
// These fields will be overwritten after allocation by callers.
index.allocate(ii, elem_uid).unwrap();
let mut elem: &mut IndexEntry = index.get_mut(ii);
elem.key = *key;
// These will be overwritten after allocation by callers.
// Since this part of the mmapped file could have previously been used by someone else, there can be garbage here.
elem.init(key);
elem.ref_count = 0;
elem.storage_offset = 0;
elem.storage_capacity_when_created_pow2 = 0;
elem.num_slots = 0;
//debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
return Ok(ii);
}
@@ -220,14 +225,8 @@ impl<T: Clone + Copy> Bucket<T> {
Some(elem.ref_count)
}
fn create_key(&mut self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(
&mut self.index,
key,
IndexEntry::key_uid(key),
self.random,
false,
)
fn create_key(&self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(&self.index, key, IndexEntry::key_uid(key), self.random)
}
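
bucket_create_key above is linear probing over the mmapped index: start at a hash-derived slot and attempt at most max_search acquisitions, returning the first slot claimed (on failure the caller grows and re-indexes). A sketch against a plain atomic array, with probe_create as an illustrative stand-in:

use std::sync::atomic::{AtomicU64, Ordering};

const UID_UNLOCKED: u64 = 0;

// Probe up to `max_search` slots starting at the hash-derived index,
// claiming the first free one with a compare-exchange. Returns the
// claimed slot, or None (the caller would then grow and re-index).
fn probe_create(slots: &[AtomicU64], start: u64, uid: u64, max_search: u64) -> Option<u64> {
    let cap = slots.len() as u64;
    for i in start..start + max_search {
        let ii = i % cap;
        if slots[ii as usize]
            .compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
            .is_ok()
        {
            return Some(ii);
        }
    }
    None
}

fn main() {
    let slots: Vec<AtomicU64> = (0..8).map(|_| AtomicU64::new(UID_UNLOCKED)).collect();
    assert_eq!(probe_create(&slots, 5, 42, 4), Some(5));
}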
pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> {
@@ -257,17 +256,16 @@ impl<T: Clone + Copy> Bucket<T> {
Some(res) => res,
};
elem.ref_count = ref_count;
let elem_uid = self.index.uid_unchecked(elem_ix);
let elem_uid = self.index.uid(elem_ix);
let bucket_ix = elem.data_bucket_ix();
let current_bucket = &self.data[bucket_ix as usize];
let num_slots = data.len() as u64;
if best_fit_bucket == bucket_ix && elem.num_slots > 0 {
// in place update
let elem_loc = elem.data_loc(current_bucket);
let slice: &mut [T] = current_bucket.get_mut_cell_slice(elem_loc, data.len() as u64);
assert_eq!(current_bucket.uid(elem_loc), Some(elem_uid));
elem.num_slots = num_slots;
slice.copy_from_slice(data);
assert!(current_bucket.uid(elem_loc) == elem_uid);
elem.num_slots = data.len() as u64;
slice.clone_from_slice(data);
Ok(())
} else {
// need to move the allocation to a best fit spot
@@ -277,21 +275,18 @@ impl<T: Clone + Copy> Bucket<T> {
let pos = thread_rng().gen_range(0, cap);
for i in pos..pos + self.index.max_search() {
let ix = i % cap;
if best_bucket.is_free(ix) {
if best_bucket.uid(ix) == UID_UNLOCKED {
let elem_loc = elem.data_loc(current_bucket);
let old_slots = elem.num_slots;
elem.set_storage_offset(ix);
elem.set_storage_capacity_when_created_pow2(best_bucket.capacity_pow2);
elem.num_slots = num_slots;
if old_slots > 0 {
let current_bucket = &mut self.data[bucket_ix as usize];
if elem.num_slots > 0 {
current_bucket.free(elem_loc, elem_uid);
}
elem.storage_offset = ix;
elem.storage_capacity_when_created_pow2 = best_bucket.capacity_pow2;
elem.num_slots = data.len() as u64;
//debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
if num_slots > 0 {
let best_bucket = &mut self.data[best_fit_bucket as usize];
best_bucket.allocate(ix, elem_uid, false).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, num_slots);
if elem.num_slots > 0 {
best_bucket.allocate(ix, elem_uid).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, data.len() as u64);
slice.copy_from_slice(data);
}
return Ok(());
@@ -303,12 +298,10 @@ impl<T: Clone + Copy> Bucket<T> {
pub fn delete_key(&mut self, key: &Pubkey) {
if let Some((elem, elem_ix)) = self.find_entry(key) {
let elem_uid = self.index.uid_unchecked(elem_ix);
let elem_uid = self.index.uid(elem_ix);
if elem.num_slots > 0 {
let ix = elem.data_bucket_ix() as usize;
let data_bucket = &self.data[ix];
let data_bucket = &self.data[elem.data_bucket_ix() as usize];
let loc = elem.data_loc(data_bucket);
let data_bucket = &mut self.data[ix];
//debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
data_bucket.free(loc, elem_uid);
}
@@ -326,7 +319,7 @@ impl<T: Clone + Copy> Bucket<T> {
//increasing the capacity by ^4 reduces the
//likelihood of a re-index collision of 2^(max_search)^2
//1 in 2^32
let mut index = BucketStorage::new_with_capacity(
let index = BucketStorage::new_with_capacity(
Arc::clone(&self.drives),
1,
std::mem::size_of::<IndexEntry>() as u64,
@@ -334,16 +327,14 @@ impl<T: Clone + Copy> Bucket<T> {
self.index.capacity_pow2 + i, // * 2,
self.index.max_search,
Arc::clone(&self.stats.index),
Arc::clone(&self.index.count),
);
let random = thread_rng().gen();
let mut valid = true;
for ix in 0..self.index.capacity() {
let uid = self.index.uid(ix);
if let Some(uid) = uid {
if UID_UNLOCKED != uid {
let elem: &IndexEntry = self.index.get(ix);
let new_ix =
Self::bucket_create_key(&mut index, &elem.key, uid, random, true);
let new_ix = Self::bucket_create_key(&index, &elem.key, uid, random);
if new_ix.is_err() {
valid = false;
break;
@@ -400,7 +391,6 @@ impl<T: Clone + Copy> Bucket<T> {
Self::elem_size(),
self.index.max_search,
Arc::clone(&self.stats.data),
Arc::default(),
))
}
self.data.push(bucket);


@@ -30,13 +30,14 @@ impl<T: Clone + Copy> BucketApi<T> {
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
Self {
drives,
max_search,
stats,
bucket: RwLock::default(),
count: Arc::default(),
count,
}
}
@@ -72,7 +73,12 @@ impl<T: Clone + Copy> BucketApi<T> {
}
pub fn bucket_len(&self) -> u64 {
self.count.load(Ordering::Relaxed)
self.bucket
.read()
.unwrap()
.as_ref()
.map(|bucket| bucket.bucket_len())
.unwrap_or_default()
}
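The hunk above contrasts two ways to answer `bucket_len`: a separately maintained shared counter read lock-free, versus reading the authoritative count through the `RwLock` on every call. A simplified side-by-side sketch (toy types, not the crate's; it only illustrates the trade-off):

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc, RwLock,
};

struct Bucket {
    len: u64,
}

impl Bucket {
    fn bucket_len(&self) -> u64 {
        self.len
    }
}

struct Api {
    bucket: RwLock<Option<Bucket>>,
    count: Arc<AtomicU64>,
}

impl Api {
    // Cached style: lock-free read of a separately maintained counter.
    fn len_cached(&self) -> u64 {
        self.count.load(Ordering::Relaxed)
    }
    // Read-through style: take the lock and ask the bucket itself.
    fn len_locked(&self) -> u64 {
        self.bucket
            .read()
            .unwrap()
            .as_ref()
            .map(|bucket| bucket.bucket_len())
            .unwrap_or_default()
    }
}

fn main() {
    let api = Api {
        bucket: RwLock::new(Some(Bucket { len: 3 })),
        count: Arc::new(AtomicU64::new(3)),
    };
    assert_eq!(api.len_cached(), api.len_locked());
}
```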
pub fn delete_key(&self, key: &Pubkey) {
@@ -89,11 +95,11 @@ impl<T: Clone + Copy> BucketApi<T> {
Arc::clone(&self.drives),
self.max_search,
Arc::clone(&self.stats),
Arc::clone(&self.count),
));
} else {
let write = bucket.as_mut().unwrap();
write.handle_delayed_grows();
self.count.store(write.bucket_len(), Ordering::Relaxed);
}
bucket
}


@@ -79,14 +79,21 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {
});
let drives = Arc::new(drives);
let stats = Arc::default();
let buckets = (0..config.max_buckets)
.into_iter()
.map(|_| {
let mut per_bucket_count = Vec::with_capacity(config.max_buckets);
per_bucket_count.resize_with(config.max_buckets, Arc::default);
let stats = Arc::new(BucketMapStats {
per_bucket_count,
..BucketMapStats::default()
});
let buckets = stats
.per_bucket_count
.iter()
.map(|per_bucket_count| {
Arc::new(BucketApi::new(
Arc::clone(&drives),
max_search,
Arc::clone(&stats),
Arc::clone(per_bucket_count),
))
})
.collect();
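A compact sketch of the wiring above: the stats struct owns one `AtomicU64` per bucket, and each bucket handle clones the matching `Arc`, so a handle's increments are visible through the stats view. Names are simplified, not the crate's types.

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

#[derive(Default)]
struct Stats {
    per_bucket_count: Vec<Arc<AtomicU64>>,
}

struct BucketHandle {
    count: Arc<AtomicU64>,
}

fn build(max_buckets: usize) -> (Arc<Stats>, Vec<BucketHandle>) {
    let mut per_bucket_count = Vec::with_capacity(max_buckets);
    per_bucket_count.resize_with(max_buckets, Arc::default);
    let stats = Arc::new(Stats { per_bucket_count });
    let handles = stats
        .per_bucket_count
        .iter()
        .map(|count| BucketHandle { count: Arc::clone(count) })
        .collect();
    (stats, handles)
}

fn main() {
    let (stats, handles) = build(4);
    handles[2].count.fetch_add(1, Ordering::Relaxed);
    // Same atomic behind both views.
    assert_eq!(stats.per_bucket_count[2].load(Ordering::Relaxed), 1);
}
```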


@@ -14,4 +14,5 @@ pub struct BucketStats {
pub struct BucketMapStats {
pub index: Arc<BucketStats>,
pub data: Arc<BucketStats>,
pub per_bucket_count: Vec<Arc<AtomicU64>>,
}


@@ -35,42 +35,27 @@ use {
pub const DEFAULT_CAPACITY_POW2: u8 = 5;
/// A Header UID of 0 indicates that the header is unlocked
const UID_UNLOCKED: Uid = 0;
pub(crate) const UID_UNLOCKED: Uid = 0;
pub(crate) type Uid = u64;
#[repr(C)]
struct Header {
lock: u64,
lock: AtomicU64,
}
impl Header {
/// try to lock this entry with 'uid'
/// return true if it could be locked
fn try_lock(&mut self, uid: Uid) -> bool {
if self.lock == UID_UNLOCKED {
self.lock = uid;
true
} else {
false
}
fn try_lock(&self, uid: Uid) -> bool {
Ok(UID_UNLOCKED)
== self
.lock
.compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
}
/// mark this entry as unlocked
fn unlock(&mut self, expected: Uid) {
assert_eq!(expected, self.lock);
self.lock = UID_UNLOCKED;
fn unlock(&self) -> Uid {
self.lock.swap(UID_UNLOCKED, Ordering::Release)
}
/// uid that has locked this entry or None if unlocked
fn uid(&self) -> Option<Uid> {
if self.lock == UID_UNLOCKED {
None
} else {
Some(self.lock)
}
}
/// true if this entry is unlocked
fn is_unlocked(&self) -> bool {
self.lock == UID_UNLOCKED
fn uid(&self) -> Uid {
self.lock.load(Ordering::Acquire)
}
}
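The hunk above moves header locking from `&mut self` plus a plain `u64` to an atomic compare-and-swap, so headers can be locked through a shared reference. A minimal self-contained sketch of that scheme, using the same orderings as the diff (`is_ok()` is equivalent to the `Ok(UID_UNLOCKED) ==` comparison used there):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

const UID_UNLOCKED: u64 = 0;

struct Header {
    lock: AtomicU64,
}

impl Header {
    // Lock succeeds only if the header was unlocked; CAS makes this race-free.
    fn try_lock(&self, uid: u64) -> bool {
        self.lock
            .compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
            .is_ok()
    }
    // Returns the previous owner so callers can assert they held the lock.
    fn unlock(&self) -> u64 {
        self.lock.swap(UID_UNLOCKED, Ordering::Release)
    }
    fn uid(&self) -> u64 {
        self.lock.load(Ordering::Acquire)
    }
}

fn main() {
    let h = Header { lock: AtomicU64::new(UID_UNLOCKED) };
    assert!(h.try_lock(7)); // first locker wins
    assert!(!h.try_lock(8)); // second CAS fails: already owned
    assert_eq!(h.uid(), 7);
    assert_eq!(h.unlock(), 7); // swap reports the previous owner
    assert_eq!(h.uid(), UID_UNLOCKED);
}
```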
@@ -79,7 +64,7 @@ pub struct BucketStorage {
mmap: MmapMut,
pub cell_size: u64,
pub capacity_pow2: u8,
pub count: Arc<AtomicU64>,
pub used: AtomicU64,
pub stats: Arc<BucketStats>,
pub max_search: MaxSearch,
}
@@ -103,7 +88,6 @@ impl BucketStorage {
capacity_pow2: u8,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &stats);
@@ -111,7 +95,7 @@ impl BucketStorage {
path,
mmap,
cell_size,
count,
used: AtomicU64::new(0),
capacity_pow2,
stats,
max_search,
@@ -128,7 +112,6 @@ impl BucketStorage {
elem_size: u64,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
Self::new_with_capacity(
drives,
@@ -137,74 +120,53 @@ impl BucketStorage {
DEFAULT_CAPACITY_POW2,
max_search,
stats,
count,
)
}
/// return ref to header of item 'ix' in mmapped file
fn header_ptr(&self, ix: u64) -> &Header {
pub fn uid(&self, ix: u64) -> Uid {
assert!(ix < self.capacity(), "bad index size");
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
hdr.as_ref().unwrap()
return hdr.as_ref().unwrap().uid();
}
}
/// return ref to header of item 'ix' in mmapped file
fn header_mut_ptr(&mut self, ix: u64) -> &mut Header {
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_mut_ptr() as *mut Header;
hdr.as_mut().unwrap()
}
}
/// return uid allocated at index 'ix' or None if vacant
pub fn uid(&self, ix: u64) -> Option<Uid> {
assert!(ix < self.capacity(), "bad index size");
self.header_ptr(ix).uid()
}
/// true if the entry at index 'ix' is free (as opposed to being allocated)
pub fn is_free(&self, ix: u64) -> bool {
// note that the terminology in the implementation is locked or unlocked.
// but our api is allocate/free
self.header_ptr(ix).is_unlocked()
}
/// caller knows the entry at 'ix' is occupied (uid is not None)
pub fn uid_unchecked(&self, ix: u64) -> Uid {
self.uid(ix).unwrap()
}
/// 'is_resizing' true if caller is resizing the index (so don't increment count)
/// 'is_resizing' false if caller is adding an item to the index (so increment count)
pub fn allocate(
&mut self,
ix: u64,
uid: Uid,
is_resizing: bool,
) -> Result<(), BucketStorageError> {
pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
assert!(ix < self.capacity(), "allocate: bad index size");
assert!(UID_UNLOCKED != uid, "allocate: bad uid");
let mut e = Err(BucketStorageError::AlreadyAllocated);
let ix = (ix * self.cell_size) as usize;
//debug!("ALLOC {} {}", ix, uid);
if self.header_mut_ptr(ix).try_lock(uid) {
e = Ok(());
if !is_resizing {
self.count.fetch_add(1, Ordering::Relaxed);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
if hdr.as_ref().unwrap().try_lock(uid) {
e = Ok(());
self.used.fetch_add(1, Ordering::Relaxed);
}
}
};
e
}
pub fn free(&mut self, ix: u64, uid: Uid) {
pub fn free(&self, ix: u64, uid: Uid) {
assert!(ix < self.capacity(), "bad index size");
assert!(UID_UNLOCKED != uid, "free: bad uid");
self.header_mut_ptr(ix).unlock(uid);
self.count.fetch_sub(1, Ordering::Relaxed);
let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
//debug!("FREE uid: {}", hdr.as_ref().unwrap().uid());
let previous_uid = hdr.as_ref().unwrap().unlock();
assert_eq!(
previous_uid, uid,
"free: unlocked a header with a differet uid: {}",
previous_uid
);
self.used.fetch_sub(1, Ordering::Relaxed);
}
}
pub fn get<T: Sized>(&self, ix: u64) -> &T {
@@ -362,9 +324,6 @@ impl BucketStorage {
capacity_pow_2,
max_search,
Arc::clone(stats),
bucket
.map(|bucket| Arc::clone(&bucket.count))
.unwrap_or_default(),
);
if let Some(bucket) = bucket {
new_bucket.copy_contents(bucket);
@@ -382,43 +341,3 @@ impl BucketStorage {
1 << self.capacity_pow2
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_bucket_storage() {
let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
let paths: Vec<PathBuf> = [tmpdir1]
.iter()
.filter(|x| std::fs::create_dir_all(x).is_ok())
.cloned()
.collect();
assert!(!paths.is_empty());
let mut storage =
BucketStorage::new(Arc::new(paths), 1, 1, 1, Arc::default(), Arc::default());
let ix = 0;
let uid = Uid::MAX;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
let uid = 1;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
}
}


@@ -1,11 +1,9 @@
#![allow(dead_code)]
use {
crate::{
bucket::Bucket,
bucket_storage::{BucketStorage, Uid},
RefCount,
},
modular_bitfield::prelude::*,
solana_sdk::{clock::Slot, pubkey::Pubkey},
std::{
collections::hash_map::DefaultHasher,
@@ -21,42 +19,13 @@ use {
pub struct IndexEntry {
pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
storage_cap_and_offset: PackedStorage,
pub storage_offset: u64, // smaller? since these are variably sized, this could get tricky. well, actually accountinfo is not variable sized...
// if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
pub storage_capacity_when_created_pow2: u8, // see data_location
pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
}
/// Pack the storage offset and capacity-when-created-pow2 fields into a single u64
#[bitfield(bits = 64)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
struct PackedStorage {
capacity_when_created_pow2: B8,
offset: B56,
}
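A hand-rolled equivalent of the `PackedStorage` bitfield, as a sketch only: the real code relies on `modular_bitfield`, which packs the first-declared field into the least significant bits, so the 8-bit capacity lands in the low byte and the 56-bit offset above it.

```rust
const OFFSET_BITS: u32 = 56;
const OFFSET_MAX: u64 = (1u64 << OFFSET_BITS) - 1;

// Low 8 bits: capacity_when_created_pow2; high 56 bits: offset.
fn pack(capacity_pow2: u8, offset: u64) -> u64 {
    assert!(offset <= OFFSET_MAX, "New storage offset must fit into 7 bytes!");
    (offset << 8) | capacity_pow2 as u64
}

fn capacity_when_created_pow2(packed: u64) -> u8 {
    (packed & 0xff) as u8
}

fn offset(packed: u64) -> u64 {
    packed >> 8
}

fn main() {
    let p = pack(5, 1_000_000);
    assert_eq!(capacity_when_created_pow2(p), 5);
    assert_eq!(offset(p), 1_000_000);
}
```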
impl IndexEntry {
pub fn init(&mut self, pubkey: &Pubkey) {
self.key = *pubkey;
self.ref_count = 0;
self.storage_cap_and_offset = PackedStorage::default();
self.num_slots = 0;
}
pub fn set_storage_capacity_when_created_pow2(
&mut self,
storage_capacity_when_created_pow2: u8,
) {
self.storage_cap_and_offset
.set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
}
pub fn set_storage_offset(&mut self, storage_offset: u64) {
self.storage_cap_and_offset
.set_offset_checked(storage_offset)
.expect("New storage offset must fit into 7 bytes!")
}
pub fn data_bucket_from_num_slots(num_slots: Slot) -> u64 {
(num_slots as f64).log2().ceil() as u64 // use int log here?
}
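The inline comment asks whether an integer log would do. A branch-light integer version (a sketch, not the crate's code) agrees with the float expression for every num_slots >= 1:

```rust
// ceil(log2(n)) for n >= 1, without going through f64:
// for n >= 2 it is 64 minus the leading zeros of n - 1.
fn data_bucket_from_num_slots(num_slots: u64) -> u64 {
    if num_slots <= 1 {
        0
    } else {
        64 - u64::from((num_slots - 1).leading_zeros())
    }
}

fn main() {
    for (n, want) in [(1u64, 0u64), (2, 1), (3, 2), (4, 2), (5, 3), (1024, 10)] {
        assert_eq!(data_bucket_from_num_slots(n), want);
        assert_eq!((n as f64).log2().ceil() as u64, want); // matches the float form
    }
}
```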
@@ -69,18 +38,10 @@ impl IndexEntry {
self.ref_count
}
fn storage_capacity_when_created_pow2(&self) -> u8 {
self.storage_cap_and_offset.capacity_when_created_pow2()
}
fn storage_offset(&self) -> u64 {
self.storage_cap_and_offset.offset()
}
// This function maps the original data location into an index in the current bucket storage.
// This is coupled with how we resize bucket storages.
pub fn data_loc(&self, storage: &BucketStorage) -> u64 {
self.storage_offset() << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2())
self.storage_offset << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2)
}
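A worked example of the remapping above, as an illustrative standalone function: an entry created when the bucket held 2^c cells records offset o; after the bucket grows to 2^p cells, the same allocation starts at o << (p - c).

```rust
fn data_loc(storage_offset: u64, created_pow2: u8, current_pow2: u8) -> u64 {
    debug_assert!(current_pow2 >= created_pow2);
    storage_offset << (current_pow2 - created_pow2)
}

fn main() {
    // Created at offset 3 when the bucket held 2^5 cells; after two doublings
    // (2^7 cells) the same allocation starts at 3 << 2 == 12.
    assert_eq!(data_loc(3, 5, 7), 12);
}
```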
pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
@@ -89,7 +50,7 @@ impl IndexEntry {
let slice = if self.num_slots > 0 {
let loc = self.data_loc(data_bucket);
let uid = Self::key_uid(&self.key);
assert_eq!(Some(uid), bucket.data[data_bucket_ix as usize].uid(loc));
assert_eq!(uid, bucket.data[data_bucket_ix as usize].uid(loc));
bucket.data[data_bucket_ix as usize].get_cell_slice(loc, self.num_slots)
} else {
// num_slots is 0. This means we don't have an actual allocation.
@@ -98,59 +59,9 @@ impl IndexEntry {
};
Some((slice, self.ref_count))
}
pub fn key_uid(key: &Pubkey) -> Uid {
let mut s = DefaultHasher::new();
key.hash(&mut s);
s.finish().max(1u64)
}
}
#[cfg(test)]
mod tests {
use super::*;
impl IndexEntry {
pub fn new(key: Pubkey) -> Self {
IndexEntry {
key,
ref_count: 0,
storage_cap_and_offset: PackedStorage::default(),
num_slots: 0,
}
}
}
/// verify that accessors for storage_offset and capacity_when_created are
/// correct and independent
#[test]
fn test_api() {
for offset in [0, 1, u32::MAX as u64] {
let mut index = IndexEntry::new(solana_sdk::pubkey::new_rand());
if offset != 0 {
index.set_storage_offset(offset);
}
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), 0);
for pow in [1, 255, 0] {
index.set_storage_capacity_when_created_pow2(pow);
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), pow);
}
}
}
#[test]
fn test_size() {
assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
}
#[test]
#[should_panic(expected = "New storage offset must fit into 7 bytes!")]
fn test_set_storage_offset_value_too_large() {
let too_big = 1 << 56;
let mut index = IndexEntry::new(Pubkey::new_unique());
index.set_storage_offset(too_big);
}
}


@@ -9,8 +9,5 @@ for a in "$@"; do
fi
done
set -ex
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
fi
set -x
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"


@@ -1,365 +0,0 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#
set -e
cd "$(dirname "$0")"/..
output_file=${1:-/dev/stderr}
if [[ -n $CI_PULL_REQUEST ]]; then
IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
if [[ ${#affected_files[*]} -eq 0 ]]; then
echo "Unable to determine the files affected by this PR"
exit 1
fi
else
affected_files=()
fi
annotate() {
if [[ -n $BUILDKITE ]]; then
buildkite-agent annotate "$@"
fi
}
# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be
# affected, return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
# affects .rs$ -- any file or directory ending in .rs
# affects .rs -- also matches foo.rs.bar
# affects ^snap/ -- anything under the snap/ subdirectory
# affects snap/ -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
# affects !^docs/ -- anything *not* under the docs/ subdirectory
#
affects() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do
if [[ ! $file =~ ${pattern:1} ]]; then
return 0 # affected
fi
done
else
for file in "${affected_files[@]}"; do
if [[ $file =~ $pattern ]]; then
return 0 # affected
fi
done
fi
done
return 1 # not affected
}
# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
for file in "${affected_files[@]}"; do
declare matched=false
for pattern in "$@"; do
if [[ $file =~ $pattern ]]; then
matched=true
fi
done
if ! $matched; then
return 0 # affected
fi
done
return 1 # not affected
}
start_pipeline() {
echo "# $*" > "$output_file"
echo "steps:" >> "$output_file"
}
command_step() {
cat >> "$output_file" <<EOF
- name: "$1"
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
EOF
}
trigger_secondary_step() {
cat >> "$output_file" <<"EOF"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
env:
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}
wait_step() {
echo " - wait" >> "$output_file"
}
all_test_steps() {
command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
wait_step
# Coverage...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Coverage in disk...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
wait_step
# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi
# Perf test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
EOF
else
annotate --style info \
"Stable-perf skipped as no relevant files were modified"
fi
# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^ci/test-bench.sh \
; then
command_step bench "ci/test-bench.sh" 30
else
annotate --style info --context test-bench \
"Bench skipped as no .rs files were modified"
fi
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
}
pull_or_push_steps() {
command_step sanity "ci/test-sanity.sh" 5
wait_step
# Check for any .sh file changes
if affects .sh$; then
command_step shellcheck "ci/shellcheck.sh" 5
wait_step
fi
# Run the full test suite by default, skipping only if modifications are local
# to some particular areas of the tree
if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
all_test_steps
fi
# web3.js, explorer and docs changes run on Travis or Github actions...
}
if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"
annotate --style info --context release-tag \
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
exit 0
fi
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
echo "+++ Affected files in this PR"
for file in "${affected_files[@]}"; do
echo "- $file"
done
start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"
# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
wait_step
fi
pull_or_push_steps
exit 0
fi
start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0


@@ -227,31 +227,6 @@ EOF
"downstream-projects skipped as no relevant files were modified"
fi
# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
@@ -281,15 +256,7 @@ EOF
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
50
}
pull_or_push_steps() {


@@ -19,8 +19,3 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build-aarch64-apple-darwin"
timeout_in_minutes: 60
name: "publish tarball (aarch64-apple-darwin)"


@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.59.0
FROM solanalabs/rust:1.57.0
ARG date
RUN set -x \


@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.59.0
FROM rust:1.57.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0


@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
fi
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
export CI_PULL_REQUEST=
fi
case "$(uname -s)" in
Linux)
export CI_OS_NAME=linux
;;
Darwin)
export CI_OS_NAME=osx
;;
*)
;;
esac
export CI_OS_NAME=linux
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it


@@ -39,11 +39,7 @@ fi
case "$CI_OS_NAME" in
osx)
_cputype="$(uname -m)"
if [[ $_cputype = arm64 ]]; then
_cputype=aarch64
fi
TARGET=${_cputype}-apple-darwin
TARGET=x86_64-apple-darwin
;;
linux)
TARGET=x86_64-unknown-linux-gnu
@@ -150,7 +146,7 @@ elif [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install


@@ -27,8 +27,6 @@ steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)
step_index=0
if [[ -n "$1" ]]; then


@@ -7,7 +7,7 @@ source multinode-demo/common.sh
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
SOLANA_RUN_SH_VALIDATOR_ARGS="--full-snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
pid=$!
attempts=20


@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.59.0
stable_version=1.57.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2022-02-24
nightly_version=2021-12-03
fi


@@ -1,24 +0,0 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#
here="$(dirname "$0")"
SBF_TOOLS_VERSION=unknown
cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
if [[ ${version} != '' ]]; then
SBF_TOOLS_VERSION="${version}"
else
echo '--- unable to parse SBF_TOOLS_VERSION'
fi
else
echo "--- '${cargo_build_bpf_main}' not present"
fi
echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"


@@ -69,14 +69,20 @@ _ ci/order-crates-for-publishing.py
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
_ "$cargo" nightly fmt --all -- --check
_ "$cargo" stable fmt --all -- --check
_ ci/do-audit.sh
{
cd programs/bpf
_ "$cargo" nightly clippy --all -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" nightly fmt --all -- --check
for project in rust/*/ ; do
echo "+++ do_bpf_checks $project"
(
cd "$project"
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" stable fmt -- --check
)
done
}
echo --- ok


@@ -1 +0,0 @@
test-stable.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -21,16 +21,15 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh
# limit jobs to 4gb/thread
JOBS=$(grep MemTotal /proc/meminfo | awk '{printf "%.0f", ($2 / (4 * 1024 * 1024))}')
# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
JOBS=$((JOBS>NPROC ? NPROC : JOBS))
NPROC=$((NPROC>14 ? 14 : NPROC))
echo "Executing $testName"
case $testName in
test-stable)
_ "$cargo" stable test --jobs "$JOBS" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-bpf)
# Clear the C dependency files, if dependency moves these files are not regenerated
@@ -66,9 +65,6 @@ test-stable-bpf)
fi
done
# bpf-tools version
"$cargo_build_bpf" -V
# BPF program instruction count assertion
bpf_target_path=programs/bpf/target
_ "$cargo" stable test \
@@ -104,17 +100,7 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-wasm)


@@ -19,24 +19,13 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"
)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
}


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.10.0"
version = "1.9.0"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0", default-features = false}
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"
@@ -22,7 +22,7 @@ url = "2.2.2"
chrono = "0.4"
[dev-dependencies]
tempfile = "3.3.0"
tempfile = "3.2.0"
[lib]
name = "solana_clap_utils"


@@ -328,7 +328,7 @@ pub fn is_derivation<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let value = value.as_ref().replace('\'', "");
let value = value.as_ref().replace("'", "");
let mut parts = value.split('/');
let account = parts.next().unwrap();
account


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,13 +12,13 @@ documentation = "https://docs.rs/solana-cli-config"
[dependencies]
dirs-next = "2.0.0"
lazy_static = "1.4.0"
serde = "1.0.136"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_yaml = "0.8.23"
serde_yaml = "0.8.21"
url = "2.2.2"
[dev-dependencies]
anyhow = "1.0.53"
anyhow = "1.0.51"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,14 @@ console = "0.15.0"
humantime = "2.0.1"
Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.136"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[package.metadata.docs.rs]


@@ -46,8 +46,6 @@ use {
},
};
static CHECK_MARK: Emoji = Emoji("", "");
static CROSS_MARK: Emoji = Emoji("", "");
static WARNING: Emoji = Emoji("⚠️", "!");
#[derive(PartialEq, Debug)]
@@ -101,7 +99,7 @@ impl OutputFormat {
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing, skip_deserializing)]
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
@@ -393,19 +391,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
" - ".into()
"- ".into()
} else if v == max_v {
format!("{:>9} ( 0)", v)
format!("{:>8} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>9} ", v)
format!("{:>8} ", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
if validator.delinquent {
WARNING.to_string()
} else {
@@ -419,19 +417,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
)
}
@@ -441,13 +439,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",
@@ -2287,7 +2285,6 @@ impl fmt::Display for CliBlock {
let sign = if reward.lamports < 0 { "-" } else { "" };
total_rewards += reward.lamports;
#[allow(clippy::format_in_format_args)]
writeln!(
f,
" {:<44} {:^15} {:>15} {} {}",
@@ -2451,8 +2448,6 @@ pub struct CliGossipNode {
pub rpc_host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub feature_set: Option<u32>,
}
impl CliGossipNode {
@@ -2465,7 +2460,6 @@ impl CliGossipNode {
tpu_port: info.tpu.map(|addr| addr.port()),
rpc_host: info.rpc.map(|addr| addr.to_string()),
version: info.version,
feature_set: info.feature_set,
}
}
}
@@ -2491,7 +2485,7 @@ impl fmt::Display for CliGossipNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{:15} | {:44} | {:6} | {:5} | {:21} | {:8}| {}",
"{:15} | {:44} | {:6} | {:5} | {:21} | {}",
unwrap_to_string_or_none(self.ip_address.as_ref()),
self.identity_label
.as_ref()
@@ -2500,7 +2494,6 @@ impl fmt::Display for CliGossipNode {
unwrap_to_string_or_none(self.tpu_port.as_ref()),
unwrap_to_string_or_none(self.rpc_host.as_ref()),
unwrap_to_string_or_default(self.version.as_ref(), "unknown"),
unwrap_to_string_or_default(self.feature_set.as_ref(), "unknown"),
)
}
}
@@ -2515,10 +2508,10 @@ impl fmt::Display for CliGossipNodes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"IP Address | Identity \
| Gossip | TPU | RPC Address | Version | Feature Set\n\
"IP Address | Node identifier \
| Gossip | TPU | RPC Address | Version\n\
----------------+----------------------------------------------+\
--------+-------+-----------------------+---------+----------------",
--------+-------+-----------------------+----------------",
)?;
for node in self.0.iter() {
writeln!(f, "{}", node)?;
@@ -2530,172 +2523,6 @@ impl fmt::Display for CliGossipNodes {
impl QuietDisplay for CliGossipNodes {}
impl VerboseDisplay for CliGossipNodes {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPing {
pub source_pubkey: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub fixed_blockhash: Option<String>,
#[serde(skip_serializing)]
pub blockhash_from_cluster: bool,
pub pings: Vec<CliPingData>,
pub transaction_stats: CliPingTxStats,
#[serde(skip_serializing_if = "Option::is_none")]
pub confirmation_stats: Option<CliPingConfirmationStats>,
}
impl fmt::Display for CliPing {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Source Account:", &self.source_pubkey)?;
if let Some(fixed_blockhash) = &self.fixed_blockhash {
let blockhash_origin = if self.blockhash_from_cluster {
"fetched from cluster"
} else {
"supplied from cli arguments"
};
writeln!(
f,
"Fixed blockhash is used: {} ({})",
fixed_blockhash, blockhash_origin
)?;
}
writeln!(f)?;
for ping in &self.pings {
write!(f, "{}", ping)?;
}
writeln!(f)?;
writeln!(f, "--- transaction statistics ---")?;
write!(f, "{}", self.transaction_stats)?;
if let Some(confirmation_stats) = &self.confirmation_stats {
write!(f, "{}", confirmation_stats)?;
}
Ok(())
}
}
impl QuietDisplay for CliPing {}
impl VerboseDisplay for CliPing {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingData {
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub signature: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ms: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
#[serde(skip_serializing)]
pub print_timestamp: bool,
pub timestamp: String,
pub sequence: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub lamports: Option<u64>,
}
impl fmt::Display for CliPingData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (mark, msg) = if let Some(signature) = &self.signature {
if self.success {
(
CHECK_MARK,
format!(
"{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
self.lamports.unwrap(),
self.sequence,
self.ms.unwrap(),
signature
),
)
} else if let Some(error) = &self.error {
(
CROSS_MARK,
format!(
"Transaction failed: seq={:<3} error={:?} signature={}",
self.sequence, error, signature
),
)
} else {
(
CROSS_MARK,
format!(
"Confirmation timeout: seq={:<3} signature={}",
self.sequence, signature
),
)
}
} else {
(
CROSS_MARK,
format!(
"Submit failed: seq={:<3} error={:?}",
self.sequence,
self.error.as_ref().unwrap(),
),
)
};
writeln!(
f,
"{}{}{}",
if self.print_timestamp {
&self.timestamp
} else {
""
},
mark,
msg
)
}
}
impl QuietDisplay for CliPingData {}
impl VerboseDisplay for CliPingData {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingTxStats {
pub num_transactions: u32,
pub num_transaction_confirmed: u32,
}
impl fmt::Display for CliPingTxStats {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
self.num_transactions,
self.num_transaction_confirmed,
(100.
- f64::from(self.num_transaction_confirmed) / f64::from(self.num_transactions)
* 100.)
)
}
}
impl QuietDisplay for CliPingTxStats {}
impl VerboseDisplay for CliPingTxStats {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingConfirmationStats {
pub min: f64,
pub mean: f64,
pub max: f64,
pub std_dev: f64,
}
impl fmt::Display for CliPingConfirmationStats {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
self.min, self.mean, self.max, self.std_dev,
)
}
}
impl QuietDisplay for CliPingConfirmationStats {}
impl VerboseDisplay for CliPingConfirmationStats {}
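The structs above lean on a few serde attributes: `rename_all = "camelCase"` for the JSON field names, `skip_serializing_if` to drop absent optional fields, and `skip_serializing` to keep display-only flags out of the output entirely. A minimal sketch of their combined effect, assuming serde with the derive feature plus serde_json:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct PingStats {
    num_transactions: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
    #[serde(skip_serializing)]
    print_timestamp: bool,
}

fn main() {
    let stats = PingStats {
        num_transactions: 2,
        error: None,
        print_timestamp: true,
    };
    // Absent options and display-only flags never reach the JSON output.
    assert_eq!(
        serde_json::to_string(&stats).unwrap(),
        r#"{"numTransactions":2}"#
    );
}
```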
#[cfg(test)]
mod tests {
use {


@@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
} else {
"-"
},
if message.is_writable(index) {
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,40 +17,39 @@ criterion-stats = "0.3.0"
ctrlc = { version = "3.2.1", features = ["termination"] }
console = "0.15.0"
const_format = "0.2.22"
crossbeam-channel = "0.5"
log = "0.4.14"
humantime = "2.0.1"
num-traits = "0.2"
pretty-hex = "0.2.1"
reqwest = { version = "0.11.9", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.5"
serde = "1.0.136"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-cli-config = { path = "../cli-config", version = "=1.10.0" }
solana-cli-output = { path = "../cli-output", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
solana_rbpf = "=0.2.24"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
solana-cli-output = { path = "../cli-output", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana_rbpf = "=0.2.18"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
tempfile = "3.3.0"
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
tempfile = "3.2.0"
[[bin]]
name = "solana"


@@ -98,7 +98,10 @@ pub fn get_fee_for_messages(
) -> Result<u64, CliError> {
Ok(messages
.iter()
.map(|message| rpc_client.get_fee_for_message(message))
.map(|message| {
println!("msg {:?}", message.recent_blockhash);
rpc_client.get_fee_for_message(message)
})
.collect::<Result<Vec<_>, _>>()?
.iter()
.sum())


@@ -83,6 +83,7 @@ pub enum CliCommand {
filter: RpcTransactionLogsFilter,
},
Ping {
lamports: u64,
interval: Duration,
count: Option<u64>,
timeout: Duration,
@@ -972,6 +973,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::LiveSlots => process_live_slots(config),
CliCommand::Logs { filter } => process_logs(config, filter),
CliCommand::Ping {
lamports,
interval,
count,
timeout,
@@ -980,6 +982,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_ping(
&rpc_client,
config,
*lamports,
interval,
count,
timeout,
@@ -1706,7 +1709,7 @@ mod tests {
serde_json::{json, Value},
solana_client::{
blockhash_query,
mock_sender_for_cli::SIGNATURE,
mock_sender::SIGNATURE,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
},


@@ -4,8 +4,7 @@ use {
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
},
clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand},
console::style,
crossbeam_channel::unbounded,
console::{style, Emoji},
serde::{Deserialize, Serialize},
solana_clap_utils::{
input_parsers::*,
@@ -16,7 +15,7 @@ use {
solana_cli_output::{
display::{
build_balance_message, format_labeled_address, new_spinner_progress_bar,
println_transaction, unix_timestamp_to_string, writeln_name_value,
println_name_value, println_transaction, unix_timestamp_to_string, writeln_name_value,
},
*,
},
@@ -44,13 +43,13 @@ use {
message::Message,
native_token::lamports_to_sol,
nonce::State as NonceState,
pubkey::Pubkey,
pubkey::{self, Pubkey},
rent::Rent,
rpc_port::DEFAULT_RPC_PORT_STR,
signature::Signature,
slot_history,
stake::{self, state::StakeState},
system_instruction,
system_instruction, system_program,
sysvar::{
self,
slot_history::SlotHistory,
@@ -75,6 +74,9 @@ use {
thiserror::Error,
};
static CHECK_MARK: Emoji = Emoji("", "");
static CROSS_MARK: Emoji = Emoji("", "");
pub trait ClusterQuerySubCommands {
fn cluster_query_subcommands(self) -> Self;
}
@@ -260,6 +262,15 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(false)
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.value_name("NUMBER")
.takes_value(true)
.default_value("1")
.validator(is_amount)
.help("Number of lamports to transfer for each transaction"),
)
.arg(
Arg::with_name("timeout")
.short("t")
@@ -504,6 +515,7 @@ pub fn parse_cluster_ping(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
let count = if matches.is_present("count") {
Some(value_t_or_exit!(matches, "count", u64))
@@ -515,6 +527,7 @@ pub fn parse_cluster_ping(
let print_timestamp = matches.is_present("print_timestamp");
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
interval,
count,
timeout,
@@ -1345,34 +1358,40 @@ pub fn process_get_transaction_count(rpc_client: &RpcClient, _config: &CliConfig
pub fn process_ping(
rpc_client: &RpcClient,
config: &CliConfig,
lamports: u64,
interval: &Duration,
count: &Option<u64>,
timeout: &Duration,
fixed_blockhash: &Option<Hash>,
print_timestamp: bool,
) -> ProcessResult {
let (signal_sender, signal_receiver) = unbounded();
println_name_value("Source Account:", &config.signers[0].pubkey().to_string());
println!();
let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
ctrlc::set_handler(move || {
let _ = signal_sender.send(());
})
.expect("Error setting Ctrl-C handler");
let mut cli_pings = vec![];
let mut submit_count = 0;
let mut confirmed_count = 0;
let mut confirmation_time: VecDeque<u64> = VecDeque::with_capacity(1024);
let mut blockhash = rpc_client.get_latest_blockhash()?;
let mut lamports = 0;
let mut blockhash_transaction_count = 0;
let mut blockhash_acquired = Instant::now();
let mut blockhash_from_cluster = false;
if let Some(fixed_blockhash) = fixed_blockhash {
if *fixed_blockhash != Hash::default() {
let blockhash_origin = if *fixed_blockhash != Hash::default() {
blockhash = *fixed_blockhash;
"supplied from cli arguments"
} else {
blockhash_from_cluster = true;
}
"fetched from cluster"
};
println!(
"Fixed blockhash is used: {} ({})",
blockhash, blockhash_origin
);
}
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
let now = Instant::now();
@@ -1380,12 +1399,15 @@ pub fn process_ping(
// Fetch a new blockhash every minute
let new_blockhash = rpc_client.get_new_latest_blockhash(&blockhash)?;
blockhash = new_blockhash;
lamports = 0;
blockhash_transaction_count = 0;
blockhash_acquired = Instant::now();
}
let to = config.signers[0].pubkey();
lamports += 1;
let seed =
&format!("{}{}", blockhash_transaction_count, blockhash)[0..pubkey::MAX_SEED_LEN];
let to = Pubkey::create_with_seed(&config.signers[0].pubkey(), seed, &system_program::id())
.unwrap();
blockhash_transaction_count += 1;
let build_message = |lamports| {
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
@@ -1408,7 +1430,11 @@ pub fn process_ping(
.duration_since(UNIX_EPOCH)
.unwrap()
.as_micros();
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
if print_timestamp {
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
} else {
String::new()
}
};
match rpc_client.send_transaction(&tx) {
@@ -1422,51 +1448,35 @@ pub fn process_ping(
Ok(()) => {
let elapsed_time_millis = elapsed_time.as_millis() as u64;
confirmation_time.push_back(elapsed_time_millis);
let cli_ping_data = CliPingData {
success: true,
signature: Some(signature.to_string()),
ms: Some(elapsed_time_millis),
error: None,
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: Some(lamports),
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
println!(
"{}{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
timestamp(),
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
);
confirmed_count += 1;
}
Err(err) => {
let cli_ping_data = CliPingData {
success: false,
signature: Some(signature.to_string()),
ms: None,
error: Some(err.to_string()),
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
println!(
"{}{}Transaction failed: seq={:<3} error={:?} signature={}",
timestamp(),
CROSS_MARK,
seq,
err,
signature
);
}
}
break;
}
if elapsed_time >= *timeout {
let cli_ping_data = CliPingData {
success: false,
signature: Some(signature.to_string()),
ms: None,
error: None,
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
println!(
"{}{}Confirmation timeout: seq={:<3} signature={}",
timestamp(),
CROSS_MARK,
seq,
signature
);
break;
}
@@ -1480,18 +1490,13 @@ pub fn process_ping(
}
}
Err(err) => {
let cli_ping_data = CliPingData {
success: false,
signature: None,
ms: None,
error: Some(err.to_string()),
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
println!(
"{}{}Submit failed: seq={:<3} error={:?}",
timestamp(),
CROSS_MARK,
seq,
err
);
}
}
submit_count += 1;
@@ -1501,34 +1506,28 @@ pub fn process_ping(
}
}
let transaction_stats = CliPingTxStats {
num_transactions: submit_count,
num_transaction_confirmed: confirmed_count,
};
let confirmation_stats = if !confirmation_time.is_empty() {
println!();
println!("--- transaction statistics ---");
println!(
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
submit_count,
confirmed_count,
(100. - f64::from(confirmed_count) / f64::from(submit_count) * 100.)
);
if !confirmation_time.is_empty() {
let samples: Vec<f64> = confirmation_time.iter().map(|t| *t as f64).collect();
let dist = criterion_stats::Distribution::from(samples.into_boxed_slice());
let mean = dist.mean();
Some(CliPingConfirmationStats {
min: dist.min(),
println!(
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
dist.min(),
mean,
max: dist.max(),
std_dev: dist.std_dev(Some(mean)),
})
} else {
None
};
dist.max(),
dist.std_dev(Some(mean))
);
}
let cli_ping = CliPing {
source_pubkey: config.signers[0].pubkey().to_string(),
fixed_blockhash: fixed_blockhash.map(|_| blockhash.to_string()),
blockhash_from_cluster,
pings: cli_pings,
transaction_stats,
confirmation_stats,
};
Ok(config.output_format.formatted_string(&cli_ping))
Ok("".to_string())
}
pub fn parse_logs(
@@ -2129,7 +2128,7 @@ pub fn process_calculate_rent(
timing::years_as_slots(1.0, &seconds_per_tick, clock::DEFAULT_TICKS_PER_SLOT);
let slots_per_epoch = epoch_schedule.slots_per_epoch as f64;
let years_per_epoch = slots_per_epoch / slots_per_year;
let lamports_per_epoch = rent.due(0, data_length, years_per_epoch).lamports();
let (lamports_per_epoch, _) = rent.due(0, data_length, years_per_epoch);
let cli_rent_calculation = CliRentCalculation {
lamports_per_byte_year: rent.lamports_per_byte_year,
lamports_per_epoch,
@@ -2305,6 +2304,7 @@ mod tests {
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,
interval: Duration::from_secs(1),
count: Some(2),
timeout: Duration::from_secs(3),


@@ -5,7 +5,7 @@ use {
},
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
console::style,
serde::{Deserialize, Deserializer, Serialize, Serializer},
serde::{Deserialize, Serialize},
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
solana_cli_output::{QuietDisplay, VerboseDisplay},
solana_client::{client_error::ClientError, rpc_client::RpcClient},
@@ -23,7 +23,6 @@ use {
cmp::Ordering,
collections::{HashMap, HashSet},
fmt,
str::FromStr,
sync::Arc,
},
};
@@ -46,7 +45,7 @@ pub enum FeatureCliCommand {
},
}
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
pub enum CliFeatureStatus {
Inactive,
@@ -54,29 +53,7 @@ pub enum CliFeatureStatus {
Active(Slot),
}
impl PartialOrd for CliFeatureStatus {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeatureStatus {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Self::Inactive, Self::Inactive) => Ordering::Equal,
(Self::Inactive, _) => Ordering::Greater,
(_, Self::Inactive) => Ordering::Less,
(Self::Pending, Self::Pending) => Ordering::Equal,
(Self::Pending, _) => Ordering::Greater,
(_, Self::Pending) => Ordering::Less,
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
self_active_slot.cmp(other_active_slot)
}
}
}
}
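The manual `Ord` above sorts `Active` first (ordered by activation slot), then `Pending`, then `Inactive`. A self-contained sketch demonstrating that order on a simplified enum (same match arms, toy names):

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq)]
enum Status {
    Inactive,
    Pending,
    Active(u64),
}

impl PartialOrd for Status {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Status {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Self::Inactive, Self::Inactive) => Ordering::Equal,
            (Self::Inactive, _) => Ordering::Greater,
            (_, Self::Inactive) => Ordering::Less,
            (Self::Pending, Self::Pending) => Ordering::Equal,
            (Self::Pending, _) => Ordering::Greater,
            (_, Self::Pending) => Ordering::Less,
            (Self::Active(a), Self::Active(b)) => a.cmp(b),
        }
    }
}

fn main() {
    let mut v = vec![
        Status::Inactive,
        Status::Active(9),
        Status::Pending,
        Status::Active(3),
    ];
    v.sort();
    // Ascending order: Active (by slot), then Pending, then Inactive.
    assert_eq!(
        v,
        vec![
            Status::Active(3),
            Status::Active(9),
            Status::Pending,
            Status::Inactive
        ]
    );
}
```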
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
@@ -85,28 +62,11 @@ pub struct CliFeature {
pub status: CliFeatureStatus,
}
impl PartialOrd for CliFeature {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeature {
fn cmp(&self, other: &Self) -> Ordering {
match self.status.cmp(&other.status) {
Ordering::Equal => self.id.cmp(&other.id),
ordering => ordering,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatures {
pub features: Vec<CliFeature>,
pub feature_activation_allowed: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
#[serde(skip)]
pub inactive: bool,
}
@@ -133,16 +93,11 @@ impl fmt::Display for CliFeatures {
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
CliFeatureStatus::Active(activation_slot) =>
style(format!("active since slot {:>9}", activation_slot)).green(),
style(format!("active since slot {}", activation_slot)).green(),
},
feature.description,
)?;
}
if let Some(feature_sets) = &self.cluster_feature_sets {
write!(f, "{}", feature_sets)?;
}
if self.inactive && !self.feature_activation_allowed {
writeln!(
f,
@@ -159,191 +114,6 @@ impl fmt::Display for CliFeatures {
impl QuietDisplay for CliFeatures {}
impl VerboseDisplay for CliFeatures {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliClusterFeatureSets {
pub tool_feature_set: u32,
pub feature_sets: Vec<CliFeatureSet>,
#[serde(skip)]
pub stake_allowed: bool,
#[serde(skip)]
pub rpc_allowed: bool,
}
impl fmt::Display for CliClusterFeatureSets {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut tool_feature_set_matches_cluster = false;
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
let feature_sets: Vec<_> = self
.feature_sets
.iter()
.map(|feature_set_info| {
let me = if self.tool_feature_set == feature_set_info.feature_set {
tool_feature_set_matches_cluster = true;
true
} else {
false
};
let software_versions: Vec<_> = feature_set_info
.software_versions
.iter()
.map(ToString::to_string)
.collect();
let software_versions = software_versions.join(", ");
let feature_set = if feature_set_info.feature_set == 0 {
"unknown".to_string()
} else {
feature_set_info.feature_set.to_string()
};
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
(
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
)
})
.collect();
if !tool_feature_set_matches_cluster {
writeln!(
f,
"\n{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold())?;
} else {
if !self.stake_allowed {
write!(
f,
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
)?;
}
if !self.rpc_allowed {
write!(
f,
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
)?;
}
}
writeln!(
f,
"\n\n{}",
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
)?;
writeln!(
f,
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
)?;
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
writeln!(
f,
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
)?;
}
writeln!(f)
}
}
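Editorial note: the table rows above use positional width parameters in format strings: `{1:<0$}` reads "argument 1, left-aligned, padded to the width given by argument 0", and `{3:>2$}` is the right-aligned equivalent. A standalone illustration:

    let width = 10usize;
    assert_eq!(format!("{1:<0$}|", width, "abc"), "abc       |");
    assert_eq!(format!("{1:>0$}|", width, "abc"), "       abc|");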
impl QuietDisplay for CliClusterFeatureSets {}
impl VerboseDisplay for CliClusterFeatureSets {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatureSet {
software_versions: Vec<CliVersion>,
feature_set: u32,
stake_percent: f64,
rpc_percent: f32,
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct CliVersion(Option<semver::Version>);
impl fmt::Display for CliVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match &self.0 {
None => "unknown".to_string(),
Some(version) => version.to_string(),
};
write!(f, "{}", s)
}
}
impl FromStr for CliVersion {
type Err = semver::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let version_option = if s == "unknown" {
None
} else {
Some(semver::Version::from_str(s)?)
};
Ok(CliVersion(version_option))
}
}
impl Serialize for CliVersion {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for CliVersion {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: &str = Deserialize::deserialize(deserializer)?;
CliVersion::from_str(s).map_err(serde::de::Error::custom)
}
}
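Editorial note: a round-trip sketch for the impls above (serde_json is used here only as an illustrative format). CliVersion serializes as its Display string and deserializes back through FromStr, with "unknown" mapping to None:

    let v: CliVersion = "1.8.5".parse().unwrap();
    assert_eq!(serde_json::to_string(&v).unwrap(), r#""1.8.5""#);

    let unknown: CliVersion = serde_json::from_str(r#""unknown""#).unwrap();
    assert!(unknown.0.is_none());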
pub trait FeatureSubCommands {
fn feature_subcommands(self) -> Self;
}
@@ -560,10 +330,7 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(
rpc_client: &RpcClient,
quiet: bool,
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let feature_set_stats = feature_set_stats(rpc_client)?;
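Editorial note: a minimal sketch of the 95% rule stated in the comment above. The function name and the integer-percent formulation are illustrative, not the crate's API:

    // Activation is gated on >= 95% of active stake (and of RPC nodes) running
    // the tool's feature set; integer math avoids float rounding at the boundary.
    fn supermajority_reached(on_my_feature_set: u64, total: u64) -> bool {
        on_my_feature_set.saturating_mul(100) >= total.saturating_mul(95)
    }

    assert!(supermajority_reached(95, 100));
    assert!(!supermajority_reached(94, 100));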
@@ -579,43 +346,54 @@ fn feature_activation_allowed(
)
.unwrap_or((false, false));
let cluster_feature_sets = if quiet {
None
} else {
let mut feature_sets = feature_set_stats
.into_iter()
.map(
|(
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent: rpc_percent,
software_versions,
},
)| {
CliFeatureSet {
software_versions: software_versions.into_iter().map(CliVersion).collect(),
feature_set,
stake_percent,
rpc_percent,
}
},
)
.collect::<Vec<_>>();
feature_sets.sort_by(|l, r| {
match l.software_versions[0]
.cmp(&r.software_versions[0])
if !quiet {
if feature_set_stats.get(&my_feature_set).is_none() {
println!(
"{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold());
} else {
if !stake_allowed {
print!(
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
);
}
if !rpc_allowed {
print!(
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
);
}
}
println!(
"\n\n{}",
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
);
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
feature_set_stats.sort_by(|l, r| {
match l.1.software_versions[0]
.cmp(&r.1.software_versions[0])
.reverse()
{
Ordering::Equal => {
match l
.1
.stake_percent
.partial_cmp(&r.stake_percent)
.partial_cmp(&r.1.stake_percent)
.unwrap()
.reverse()
{
Ordering::Equal => {
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
l.1.rpc_nodes_percent
.partial_cmp(&r.1.rpc_nodes_percent)
.unwrap()
.reverse()
}
o => o,
}
@@ -623,15 +401,96 @@ fn feature_activation_allowed(
o => o,
}
});
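Editorial note on the comparator above: f64 has no total order, so `partial_cmp` returns an Option, and the `.unwrap()` would panic on NaN; the percentages here are computed from counts, so NaN is not expected. The same descending-sort idiom in isolation:

    let mut stake = vec![0.50_f64, 0.95, 0.25];
    // Descending sort via partial_cmp + reverse, mirroring the comparator above.
    stake.sort_by(|l, r| l.partial_cmp(r).unwrap().reverse());
    assert_eq!(stake, vec![0.95, 0.50, 0.25]);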
Some(CliClusterFeatureSets {
tool_feature_set: my_feature_set,
feature_sets,
stake_allowed,
rpc_allowed,
})
};
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut stats_output = Vec::new();
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
for (
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent,
software_versions,
},
) in feature_set_stats.into_iter()
{
let me = feature_set == my_feature_set;
let feature_set = if feature_set == 0 {
"unknown".to_string()
} else {
feature_set.to_string()
};
let stake_percent = format!("{:.2}%", stake_percent);
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
let mut has_unknown = false;
let mut software_versions = software_versions
.iter()
.filter_map(|v| {
if v.is_none() {
has_unknown = true;
}
v.as_ref()
})
.map(ToString::to_string)
.collect::<Vec<_>>();
if has_unknown {
software_versions.push("unknown".to_string());
}
let software_versions = software_versions.join(", ");
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
stats_output.push((
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
));
}
println!(
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
);
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
println!(
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
);
}
println!();
}
Ok(stake_allowed && rpc_allowed)
}
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
@@ -691,14 +550,10 @@ fn process_status(
});
}
features.sort_unstable();
let (feature_activation_allowed, cluster_feature_sets) =
feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed,
cluster_feature_sets,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
@@ -722,7 +577,7 @@ fn process_activate(
}
}
if !feature_activation_allowed(rpc_client, false)?.0 {
if !feature_activation_allowed(rpc_client, false)? {
match force {
ForceActivation::Almost =>
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),


@@ -334,17 +334,9 @@ pub fn check_nonce_account(
match state_from_account(nonce_account)? {
State::Initialized(ref data) => {
if &data.blockhash != nonce_hash {
Err(Error::InvalidHash {
provided: *nonce_hash,
expected: data.blockhash,
}
.into())
Err(Error::InvalidHash.into())
} else if nonce_authority != &data.authority {
Err(Error::InvalidAuthority {
provided: *nonce_authority,
expected: data.authority,
}
.into())
Err(Error::InvalidAuthority.into())
} else {
Ok(())
}
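Editorial note: the shape of the check above as a minimal sketch (field access follows the code shown; the helper name is illustrative). A durable-nonce account passes only when both its stored blockhash and its authority match the caller's expectations:

    fn nonce_matches(
        data: &nonce::state::Data,
        nonce_hash: &Hash,
        nonce_authority: &Pubkey,
    ) -> bool {
        &data.blockhash == nonce_hash && &data.authority == nonce_authority
    }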
@@ -954,22 +946,15 @@ mod tests {
hash(b"invalid"),
0,
)));
let invalid_hash = Account::new_data(1, &data, &system_program::ID).unwrap();
let invalid_hash = Account::new_data(1, &data, &system_program::ID);
if let CliError::InvalidNonce(err) =
check_nonce_account(&invalid_hash, &nonce_pubkey, &blockhash).unwrap_err()
check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash).unwrap_err()
{
assert_eq!(
err,
Error::InvalidHash {
provided: blockhash,
expected: hash(b"invalid"),
}
);
assert_eq!(err, Error::InvalidHash,);
}
let new_nonce_authority = solana_sdk::pubkey::new_rand();
let data = Versions::new_current(State::Initialized(nonce::state::Data::new(
new_nonce_authority,
solana_sdk::pubkey::new_rand(),
blockhash,
0,
)));
@@ -977,13 +962,7 @@ mod tests {
if let CliError::InvalidNonce(err) =
check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash).unwrap_err()
{
assert_eq!(
err,
Error::InvalidAuthority {
provided: nonce_pubkey,
expected: new_nonce_authority,
}
);
assert_eq!(err, Error::InvalidAuthority,);
}
let data = Versions::new_current(State::Uninitialized);


@@ -42,7 +42,6 @@ use {
system_instruction::{self, SystemError},
system_program,
transaction::{Transaction, TransactionError},
transaction_context::TransactionContext,
},
std::{
fs::File,
@@ -1991,15 +1990,17 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::e
let mut program_data = Vec::new();
file.read_to_end(&mut program_data)
.map_err(|err| format!("Unable to read program file: {}", err))?;
let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1);
let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]);
let mut invoke_context = InvokeContext::new_mock(&[], &[]);
// Verify the program
Executable::<BpfError, ThisInstructionMeter>::from_elf(
&program_data,
Some(verifier::check),
Config {
reject_broken_elfs: true,
reject_unresolved_syscalls: true,
verify_mul64_imm_nonzero: false,
verify_shift32_imm: true,
reject_section_virtual_address_file_offset_mismatch: true,
..Config::default()
},
register_syscalls(&mut invoke_context).unwrap(),
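Editorial note: the file-read prologue of read_and_verify_elf, isolated as a self-contained sketch (the open-error message is assumed to follow the same pattern as the read-error message shown):

    use std::{fs::File, io::Read};

    fn read_elf(program_location: &str) -> Result<Vec<u8>, String> {
        let mut file = File::open(program_location)
            .map_err(|err| format!("Unable to open program file: {}", err))?;
        let mut program_data = Vec::new();
        file.read_to_end(&mut program_data)
            .map_err(|err| format!("Unable to read program file: {}", err))?;
        Ok(program_data)
    }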


@@ -16,7 +16,6 @@ use {
pub enum SpendAmount {
All,
Some(u64),
RentExempt,
}
impl Default for SpendAmount {
@@ -91,7 +90,6 @@ where
0,
from_pubkey,
fee_pubkey,
0,
build_message,
)?;
Ok((message, spend))
@@ -99,12 +97,6 @@ where
let from_balance = rpc_client
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
let data = rpc_client.get_account_data(from_pubkey)?;
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
} else {
0
};
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
rpc_client,
amount,
@@ -112,7 +104,6 @@ where
from_balance,
from_pubkey,
fee_pubkey,
from_rent_exempt_minimum,
build_message,
)?;
if from_pubkey == fee_pubkey {
@@ -149,7 +140,6 @@ fn resolve_spend_message<F>(
from_balance: u64,
from_pubkey: &Pubkey,
fee_pubkey: &Pubkey,
from_rent_exempt_minimum: u64,
build_message: F,
) -> Result<(Message, SpendAndFee), CliError>
where
@@ -186,20 +176,5 @@ where
},
))
}
SpendAmount::RentExempt => {
let mut lamports = if from_pubkey == fee_pubkey {
from_balance.saturating_sub(fee)
} else {
from_balance
};
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
Ok((
build_message(lamports),
SpendAndFee {
spend: lamports,
fee,
},
))
}
}
}
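Editorial note: worked numbers for the SpendAmount::RentExempt branch above, with illustrative balances. When the from account also pays the fee, the fee is subtracted first, then the rent-exempt minimum is held back:

    let from_balance: u64 = 10_000_000; // lamports
    let fee: u64 = 5_000;
    let rent_exempt_minimum: u64 = 2_039_280;

    let spend = from_balance
        .saturating_sub(fee) // only when from_pubkey == fee_pubkey
        .saturating_sub(rent_exempt_minimum);
    assert_eq!(spend, 7_955_720);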


@@ -1384,13 +1384,7 @@ pub fn process_stake_authorize(
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
check_current_authority(&authorized.staker, &authority.pubkey())?;
}
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;
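Editorial note: one side of this hunk chains the two checks with Result::or_else, so either authority may authorize a staker change; the fallback closure runs only when the first check fails. The stdlib behavior in isolation (illustrative types):

    let first: Result<(), &str> = Err("withdrawer mismatch");
    let outcome = first.or_else(|_| Ok::<(), &str>(())); // second check "passes"
    assert!(outcome.is_ok());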


@@ -1,29 +1,23 @@
use {
solana_client::rpc_client::RpcClient,
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig},
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, pubkey::Pubkey},
std::{thread::sleep, time::Duration},
};
#[macro_export]
macro_rules! check_balance {
($expected_balance:expr, $client:expr, $pubkey:expr) => {
(0..5).for_each(|tries| {
let balance = $client
.get_balance_with_commitment($pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == $expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, $expected_balance);
}
std::thread::sleep(std::time::Duration::from_millis(500));
});
};
($expected_balance:expr, $client:expr, $pubkey:expr,) => {
check_balance!($expected_balance, $client, $pubkey)
};
pub fn check_recent_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client
.get_balance_with_commitment(pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
}
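Editorial note: both the macro and the function above are instances of a poll-with-retries pattern; a generic form (an assumed helper, not part of the crate) looks like:

    use std::{thread::sleep, time::Duration};

    /// Retry `f` up to `attempts` times, sleeping `delay` between tries.
    fn eventually(attempts: u32, delay: Duration, f: impl Fn() -> bool) -> bool {
        for _ in 0..attempts {
            if f() {
                return true;
            }
            sleep(delay);
        }
        false
    }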
pub fn check_ready(rpc_client: &RpcClient) {


@@ -35,8 +35,7 @@ use {
transaction::Transaction,
},
solana_vote_program::{
vote_error::VoteError,
vote_instruction::{self, withdraw},
vote_instruction::{self, withdraw, VoteError},
vote_state::{VoteAuthorize, VoteInit, VoteState},
},
std::sync::Arc,
@@ -360,7 +359,7 @@ impl VoteSubCommands for App<'_, '_> {
.takes_value(true)
.required(true)
.validator(is_amount_or_all)
.help("The amount to withdraw, in SOL; accepts keyword ALL, which for this command means account balance minus rent-exempt minimum"),
.help("The amount to withdraw, in SOL; accepts keyword ALL"),
)
.arg(
Arg::with_name("authorized_withdrawer")
@@ -654,13 +653,7 @@ pub fn parse_withdraw_from_vote_account(
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
let mut withdraw_amount = SpendAmount::new_from_matches(matches, "amount");
// As a safeguard for vote accounts for running validators, `ALL` withdraws only the amount in
// excess of the rent-exempt minimum. In order to close the account with this subcommand, a
// validator must specify the withdrawal amount precisely.
if withdraw_amount == SpendAmount::All {
withdraw_amount = SpendAmount::RentExempt;
}
let withdraw_amount = SpendAmount::new_from_matches(matches, "amount");
let (withdraw_authority, withdraw_authority_pubkey) =
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
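Editorial note: an illustrative effect of the safeguard described in the comment above. With a 5 SOL vote account and a hypothetical rent-exempt minimum, `withdraw ALL` drains only the excess, keeping the account alive for a running validator:

    let balance: u64 = 5_000_000_000; // 5 SOL in lamports
    let rent_exempt_minimum: u64 = 27_074_400; // hypothetical value
    let withdrawn = balance.saturating_sub(rent_exempt_minimum);
    assert_eq!(withdrawn, 4_972_925_600); // account stays rent-exempt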
@@ -1997,7 +1990,7 @@ mod tests {
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
destination_account_pubkey: pubkey,
withdraw_authority: 0,
withdraw_amount: SpendAmount::RentExempt,
withdraw_amount: SpendAmount::All,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),


@@ -39,7 +39,7 @@ use {
system_program,
transaction::Transaction,
},
solana_transaction_status::{Encodable, EncodedTransaction, UiTransactionEncoding},
solana_transaction_status::{EncodedTransaction, UiTransactionEncoding},
std::{fmt::Write as FmtWrite, fs::File, io::Write, sync::Arc},
};
@@ -273,14 +273,11 @@ impl WalletSubCommands for App<'_, '_> {
}
fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
matches.value_of(arg_name).and_then(|v| {
let upper = v.to_ascii_uppercase();
match upper.as_str() {
"NONCE" | "SYSTEM" => Some(system_program::id()),
"STAKE" => Some(stake::program::id()),
"VOTE" => Some(solana_vote_program::id()),
_ => pubkey_of(matches, arg_name),
}
matches.value_of(arg_name).and_then(|v| match v {
"NONCE" => Some(system_program::id()),
"STAKE" => Some(stake::program::id()),
"VOTE" => Some(solana_vote_program::id()),
_ => pubkey_of(matches, arg_name),
})
}
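Editorial note: the practical difference between the two sides of this hunk is case handling; one uppercases the argument before matching, the other accepts exact keywords only. A hypothetical stand-in for the case-insensitive side (the real code returns Option<Pubkey> and falls back to pubkey parsing):

    fn resolve_keyword(v: &str) -> Option<&'static str> {
        match v.to_ascii_uppercase().as_str() {
            "NONCE" | "SYSTEM" => Some("system program"),
            "STAKE" => Some("stake program"),
            "VOTE" => Some("vote program"),
            _ => None,
        }
    }

    assert!(resolve_keyword("system").is_some()); // the exact-match side would treat "system" as a pubkey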
@@ -465,27 +462,18 @@ pub fn process_show_account(
let mut account_string = config.output_format.formatted_string(&cli_account);
match config.output_format {
OutputFormat::Json | OutputFormat::JsonCompact => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(account_string.as_bytes())?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
}
if config.output_format == OutputFormat::Display
|| config.output_format == OutputFormat::DisplayVerbose
{
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
OutputFormat::Display | OutputFormat::DisplayVerbose => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
OutputFormat::DisplayQuiet => (),
}
Ok(account_string)
@@ -567,8 +555,10 @@ pub fn process_confirm(
.transaction
.decode()
.expect("Successful decode");
let json_transaction =
decoded_transaction.encode(UiTransactionEncoding::Json);
let json_transaction = EncodedTransaction::encode(
decoded_transaction.clone(),
UiTransactionEncoding::Json,
);
transaction = Some(CliTransaction {
transaction: json_transaction,
@@ -610,7 +600,7 @@ pub fn process_decode_transaction(config: &CliConfig, transaction: &Transaction)
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction);
let decode_transaction = CliTransaction {
decoded_transaction: transaction.clone(),
transaction: transaction.encode(UiTransactionEncoding::Json),
transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json),
meta: None,
block_time: None,
slot: None,
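Editorial note: both sides of the two hunks above produce the same UI-encoded transaction; only the call shape differs. Reusing the names from the hunks (a sketch, not a complete function):

    // Newer shape: Encodable trait method on the decoded transaction.
    let json_transaction = decoded_transaction.encode(UiTransactionEncoding::Json);
    // Older shape: associated function taking the transaction by value.
    let json_transaction =
        EncodedTransaction::encode(decoded_transaction.clone(), UiTransactionEncoding::Json);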

Some files were not shown because too many files have changed in this diff.