Compare commits
311 Commits
08cc140d4a
2120ef5808
c08af09aaa
8b12749f02
e343a17ce9
3fd78ac6ea
41bbc11a46
68934353f2
92543a3f92
a514aff819
8d8525e4fc
2c1cec4e2c
7d0a0a26bb
73016d3ed2
9b1cb5c1b7
94f4748a34
8963724ed6
65df58c64a
380c5da2d0
7d488a6ed8
159cfdae25
1c3d09ed21
2c8cfdb3f3
85570ac207
054b95cbe1
b67a5bb3b9
3e3fb4e296
f66d8551e9
a5cb10666c
76384758d8
4eca26ae50
2d144afec5
781609b27a
5a5244ecf8
2e60f95ab9
55179524bd
4a0785ddcd
4698fbc036
70f76b450e
d64eebb799
71211e0d90
320fbd63c5
0fe00bab7d
00630d9c1b
d05b5b0902
5c69af607d
df16a37ab5
432eafd730
41142a7d76
8047601a7b
85856a73aa
c3890ada8e
ceb253ce90
dd6c365bd9
9ea025315e
c43cef79b5
2605724aa3
539f303eb7
f7091811d4
15ef1827bf
85fef67213
90a70d9b5b
643442e830
69e207ca58
fb8db79e63
237347847b
4706790c20
04281734e5
a98ca9037d
12e40a40f5
c715bc93cf
3aa3cd8852
f83cb74509
6c47a98945
4dfbb4347c
28fc733894
93b44d8a4c
2804204f80
4d891043d1
74498650bc
af3b307734
2368e09d89
6fca541847
15e9cedc0d
d68a40396c
b0e0410003
d1174f677e
cf88542254
99c55dbec3
bc412d51d6
87c3e71bb8
d0cf5bb721
fb54991901
9995a54be7
d9a5f714e1
620a80b581
b354dae249
af7ed83285
8bc4cc90d2
39a4cc95dc
187ed6a387
91bc44931f
35ca3182ba
24345d8e63
bf45f5b88e
2ddb5b27c1
7f10fd6a21
a0a881594a
e9e35fd7bd
66b94b86a9
59f406d78a
dbf9a32883
37e9076db0
f77ea5f324
c9df037dae
2b87d99479
2546ef4ad6
96ae795758
9bddb4e437
4079f12a3e
e121b94524
a7623ad18c
054e475c6c
7a421fe602
2ef0b85829
a6b7a3b7ff
9d69f2b324
4f82a4ba1f
ed0b30efcc
4ee6bc9a93
676c43b9d2
b1d8296498
34984ed16e
f4d1577337
58dcc451a9
f0695ef6d9
41b0d6cca3
ae77a52c97
133314e58c
cb49ae21b4
a9ebba5643
8ce65878da
a4ca18a54d
7cb147fdcd
2d693be9fa
50e716fc80
1f00926874
662c6be51e
9761f5b67f
7b1da62763
2f97fee71a
3ae674dd28
8214bc9db4
1132def37c
7267ebaaf2
4be6e52a4f
e7348243b4
fc0c74d722
687cd4779e
b28d7050ab
6d72acfd6d
840ec0686e
ba0188a36d
05b9a2f203
8578429c4d
87f4a1f4b6
17411f9b4c
fb0e5adc7e
f4ded6fb6b
f89bf7b939
c99aed4abf
edfd8c1717
09dbf069e8
9764d4349b
d84b994451
185f52b712
3b59f67562
7d2589e2ac
77558c315d
464d533da3
f8bf478fde
35fb47d1ce
5bd27dd175
794f28d9ab
d7a673f7f5
b3fa1288aa
3e4e2e9113
fd4754e5a9
0a9460ed8b
478c641cb5
735f000952
264bb903a3
7c5d3e5874
70d5b6aeaf
ca451ea23e
113d261a2c
c6ab915668
d5c0ffc11f
6a2b62de62
4645be3e52
7efd0391e9
6a556c5adb
0cd45400ca
531f36c571
9c9d3e8b6b
74b98c2dd4
9fb67f9b07
401c542d2a
14ed446923
adc584ee22
810ca36eae
16f821ea8c
584e9bfbe7
3ad4c3306c
be0bcd85ed
8708186760
8f3e37c174
7d61935bf1
a70eb098f4
f31593bfbe
8f26c71964
9fbaaa5102
78e7913352
f58b87befe
1a2823b875
75fe0d3ecf
c296a6c9ed
57e5406476
4f57c4a4fe
c4b3b2865d
f58c375b1f
bf41c53f11
e3a4b98432
91657ba8fe
35ee48bec9
02cfa85214
02be3a6568
b20fae5a09
e572678176
f4521002b9
0c5a2bcd5a
c25d16bf0d
301e38044a
bfa6302985
b66e2ae353
3967dc8685
569c83295d
a462c58594
7dba8bb49f
c907d4444d
b4c847557b
de48347078
9f173d3717
dcd76e484f
2246135654
41ea597256
fb955bd4ec
5c3fbb384f
a056fd88cb
2f1816d1db
2cd2f3ba7b
135dfdbf1e
fad4bfdf2a
a9d4728c35
3977bcde63
cf2a9de19c
5e2b12aee5
6c329e2fd3
0376045c7d
c1f54c22ed
0576d133ad
9956afb2bd
01941cf3de
4b63d51e3e
5bf4445ae6
7782d34bbf
2c4765e75a
e71ea19e60
ed0040d555
da9e6826ac
68fc72a7f4
2a6bb2b954
ef51778c78
abecf292a3
a31660815f
539ad4bea6
85f601993f
b0754cc575
effd0b2547
8836069719
2698a5c705
dd157fd47f
8cacf82cb8
8ee5fbc5c0
f2a6b94e5c
ef970bb14a
cabd851904
2d2ef59550
b7b56d5016
18e3a635b4
2b4347d502
87accd16d8
0e969015fc
46935c022e
8a7106bc08
89d2f34a03
b3fa1e4550
58c755e1d4
60085305b4
b4c8e095bd
3e28ffa884
.buildkite/pipeline-upload.sh
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script is used to upload the full buildkite pipeline. The steps defined
-# in the buildkite UI should simply be:
-#
-# steps:
-#   - command: ".buildkite/pipeline-upload.sh"
-#
-
-set -e
-cd "$(dirname "$0")"/..
-source ci/_
-sudo chmod 0777 ci/buildkite-pipeline-in-disk.sh
-
-_ ci/buildkite-pipeline-in-disk.sh pipeline.yml
-echo +++ pipeline
-cat pipeline.yml
-
-_ buildkite-agent pipeline upload pipeline.yml
@@ -13,13 +13,7 @@ export PS4="++"
 #
 eval "$(ci/channel-info.sh)"
-eval "$(ci/sbf-tools-info.sh)"
-source "ci/rust-version.sh"
-HOST_RUST_VERSION="$rust_stable"
-pattern='^[0-9]+\.[0-9]+\.[0-9]+$'
-if [[ ${HOST_RUST_VERSION} =~ ${pattern} ]]; then
-  HOST_RUST_VERSION="${rust_stable%.*}"
-fi
-export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"-"$HOST_RUST_VERSION"
+export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
+
 (
   set -x
   MAX_CACHE_SIZE=18 # gigabytes
.github/workflows/client-targets.yml (66 changes)
@@ -1,66 +0,0 @@
-name: client_targets
-
-on:
-  pull_request:
-    branches:
-      - master
-    paths:
-      - "client/**"
-      - "sdk/**"
-      - ".github/workflows/client-targets.yml"
-
-env:
-  CARGO_TERM_COLOR: always
-
-jobs:
-  check_compilation:
-    name: Client compilation
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        target: [aarch64-apple-ios, x86_64-apple-ios, aarch64-apple-darwin, x86_64-apple-darwin, aarch64-linux-android, armv7-linux-androideabi, i686-linux-android, x86_64-linux-android]
-        include:
-          - target: aarch64-apple-ios
-            platform: ios
-            os: macos-latest
-          - target: x86_64-apple-ios
-            platform: ios
-            os: macos-latest
-          - target: aarch64-apple-darwin
-            platform: ios
-            os: macos-latest
-          - target: x86_64-apple-darwin
-            platform: ios
-            os: macos-latest
-          - target: aarch64-linux-android
-            platform: android
-            os: ubuntu-latest
-          - target: armv7-linux-androideabi
-            platform: android
-            os: ubuntu-latest
-          - target: i686-linux-android
-            platform: android
-            os: ubuntu-latest
-          - target: x86_64-linux-android
-            platform: android
-            os: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          target: ${{ matrix.target }}
-      - name: Install cargo-ndk
-        if: ${{ matrix.platform == 'android' }}
-        run: cargo install cargo-ndk
-      - uses: actions-rs/cargo@v1
-        if: ${{ matrix.platform == 'android' }}
-        with:
-          command: ndk
-          args: --target ${{ matrix.target }} build -p solana-client
-      - uses: actions-rs/cargo@v1
-        if: ${{ matrix.platform == 'ios' }}
-        with:
-          command: build
-          args: -p solana-client --target ${{ matrix.target }}
.github/workflows/explorer_preview.yml (32 changes)
@@ -10,8 +10,6 @@ jobs:
     if: ${{ github.event.workflow_run.conclusion == 'success' }}
     steps:
       - uses: actions/checkout@v2
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
       - uses: amondnet/vercel-action@v20
         with:
          vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
@@ -20,33 +18,3 @@ jobs:
          vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
          working-directory: ./explorer
          scope: ${{ secrets.TEAM_ID }}
-
-      - name: vercel url
-        run : |
-          touch vercelfile.txt
-          vercel --token ${{secrets.VERCEL_TOKEN}} ls explorer --scope team_8A2WD7p4uR7tmKX9M68loHXI > vercelfile.txt
-          touch vercelfile1.txt
-          head -n 2 vercelfile.txt > vercelfile1.txt
-          touch vercelfile2.txt
-          tail -n 1 vercelfile1.txt > vercelfile2.txt
-          filtered_url7=$(cut -f7 -d" " vercelfile2.txt)
-          echo "filtered_url7 is: $filtered_url7"
-          touch .env.preview1
-          echo "$filtered_url7" > .env.preview1
-          #filtered_url=$(cat vercelfile2.txt )
-          #echo "$filtered_url" >> .env.preview1
-
-
-      - name: Run tests
-        uses: mathiasvr/command-output@v1
-        id: tests2
-        with:
-          run: |
-            echo "$(cat .env.preview1)"
-
-      - name: Slack Notification1
-        uses: rtCamp/action-slack-notify@master
-        env:
-          SLACK_MESSAGE: ${{ steps.tests2.outputs.stdout }}
-          SLACK_TITLE: Vercel "Explorer" Preview Deployment Link
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
.github/workflows/web3.yml (19 changes)
@@ -65,7 +65,20 @@ jobs:
          node-version: ${{ matrix.node }}
-          cache: 'npm'
-          cache-dependency-path: web3.js/package-lock.json
      - run: npm i -g npm@7
-      - run: npm ci
      - run: npm run lint
+      - run: |
+          source .travis/before_install.sh
+          npm install
+          source .travis/script.sh
+          npm run build
+          ls -l lib
+          test -r lib/index.iife.js
+          test -r lib/index.cjs.js
+          test -r lib/index.esm.js
+      - run: npm run doc
+      - run: npm run codecov
+      - run: |
+          sh -c "$(curl -sSfL https://release.solana.com/edge/install)"
+          echo "$HOME/.local/share/solana/install/active_release/bin" >> $GITHUB_PATH
+          PATH="$HOME/.local/share/solana/install/active_release/bin:$PATH"
+          solana --version
      - run: npm run test:live-with-test-validator
.travis.yml (18 changes)
@@ -78,24 +78,6 @@ jobs:
      # - sudo apt-get install libssl-dev libudev-dev

-    # docs pull request
-    # - name: "explorer"
-    #   if: type = pull_request AND branch = master
-
-    #   language: node_js
-    #   node_js:
-    #     - "lts/*"
-
-    #   cache:
-    #     directories:
-    #       - ~/.npm
-
-    #   before_install:
-    #     - .travis/affects.sh explorer/ .travis || travis_terminate 0
-    #     - cd explorer
-
-    #   script:
-    #     - npm run build
-    #     - npm run format
    - name: "docs"
      if: type IN (push, pull_request) OR tag IS present
      language: node_js
@@ -74,7 +74,7 @@ minutes to execute. Use that time to write a detailed problem description. Once
 the description is written and CI succeeds, click the "Ready to Review" button
 and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
 reviewer engagement. Not only will they be notified and see the PR is not yet
-ready for them, they will also be bombarded with additional notifications
+ready for them, they will also be bombarded them with additional notifications
 each time you push a commit to get past CI or until they "mute" the PR. Once
 muted, you'll need to reach out over some other medium, such as Discord, to
 request they have another look. When you use draft PRs, no notifications are
Cargo.lock (generated; 2226 changes)
File diff suppressed because it is too large.
@@ -81,10 +81,13 @@ members = [
     "test-validator",
     "rpc-test",
     "client-test",
-    "zk-token-sdk",
-    "programs/zk-token-proof",
 ]
 
 exclude = [
     "programs/bpf",
 ]
+
+# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
+# dependency is supported on Apple M1. v2 of the feature resolver is needed to
+# specify arch-specific features.
+resolver = "2"
LICENSE (2 changes)
@@ -1,4 +1,4 @@
-Copyright 2022 Solana Foundation.
+Copyright 2020 Solana Foundation.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
README.md (14 changes)
@@ -38,6 +38,12 @@ $ sudo apt-get update
 $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
 ```
 
+On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
+
+```bash
+$ softwareupdate --install-rosetta
+```
+
 ## **2. Download the source code.**
 
 ```bash
@@ -68,7 +74,7 @@ devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://doc
 
 # Benchmarking
 
-First, install the nightly build of rustc. `cargo bench` requires the use of the
+First install the nightly build of rustc. `cargo bench` requires use of the
 unstable features only available in the nightly build.
 
 ```bash
@@ -115,12 +121,12 @@ the reader to check and validate their accuracy and truthfulness.
 Furthermore, nothing in this project constitutes a solicitation for
 investment.
 
-Any content produced by SF or developer resources that SF provides are
+Any content produced by SF or developer resources that SF provides, are
 for educational and inspirational purposes only. SF does not encourage,
 induce or sanction the deployment, integration or use of any such
 applications (including the code comprising the Solana blockchain
 protocol) in violation of applicable laws or regulations and hereby
-prohibits any such deployment, integration or use. This includes the use of
+prohibits any such deployment, integration or use. This includes use of
 any such applications by the reader (a) in violation of export control
 or sanctions laws of the United States or any other applicable
 jurisdiction, (b) if the reader is located in or ordinarily resident in
@@ -133,7 +139,7 @@ prohibitions.
 The reader should be aware that U.S. export control and sanctions laws
 prohibit U.S. persons (and other persons that are subject to such laws)
 from transacting with persons in certain countries and territories or
-that are on the SDN list. As a project-based primarily on open-source
+that are on the SDN list. As a project based primarily on open-source
 software, it is possible that such sanctioned persons may nevertheless
 bypass prohibitions, obtain the code comprising the Solana blockchain
 protocol (or other project code or applications) and deploy, integrate,
@@ -94,7 +94,7 @@ Alternatively use the Github UI.
 ```
 1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
 ```
-ci/channel-info.sh
+ci/channel_info.sh
 ```
 
 ## Steps to Create a Release
@@ -152,5 +152,5 @@ appearing. To check for progress:
 [Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job.
 If this happens and the error is non-fatal, click "Retry" on the "publish crate" job
 
-### Update software on testnet.solana.com
-See the documentation at https://github.com/solana-labs/cluster-ops/. devnet.solana.com and mainnet-beta.solana.com run stable releases that have been tested on testnet. Do not update devnet or mainnet-beta with a beta release.
+### Update software on devnet.solana.com/testnet.solana.com/mainnet-beta.solana.com
+See the documentation at https://github.com/solana-labs/cluster-ops/
@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.10.0"
+version = "1.9.7"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -16,15 +16,15 @@ bs58 = "0.4.0"
 bv = "0.11.1"
 Inflector = "0.11.4"
 lazy_static = "1.4.0"
-serde = "1.0.136"
+serde = "1.0.130"
 serde_derive = "1.0.103"
-serde_json = "1.0.78"
-solana-config-program = { path = "../programs/config", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
+serde_json = "1.0.72"
+solana-config-program = { path = "../programs/config", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.7" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
 thiserror = "1.0"
-zstd = "0.10.0"
+zstd = "0.9.0"
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -136,13 +136,16 @@ impl UiAccount {
             UiAccountData::Binary(blob, encoding) => match encoding {
                 UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
                 UiAccountEncoding::Base64 => base64::decode(blob).ok(),
-                UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
-                    let mut data = vec![];
-                    zstd::stream::read::Decoder::new(zstd_data.as_slice())
-                        .and_then(|mut reader| reader.read_to_end(&mut data))
-                        .map(|_| data)
-                        .ok()
-                }),
+                UiAccountEncoding::Base64Zstd => base64::decode(blob)
+                    .ok()
+                    .map(|zstd_data| {
+                        let mut data = vec![];
+                        zstd::stream::read::Decoder::new(zstd_data.as_slice())
+                            .and_then(|mut reader| reader.read_to_end(&mut data))
+                            .map(|_| data)
+                            .ok()
+                    })
+                    .flatten(),
                 UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
             },
         }?;
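Both arms of this hunk decode base64 and then zstd-decompress; they differ only in combinator style, since `Option::and_then(f)` is equivalent to `Option::map(f).flatten()` when `f` returns an `Option`. A minimal standalone sketch of that equivalence, with a hypothetical payload and the `zstd` crate assumed as a dependency:

use std::io::Read;

// Same shape as the diff: io::Result combinators inside, Option outside.
fn decompress(bytes: &[u8]) -> Option<Vec<u8>> {
    let mut data = vec![];
    zstd::stream::read::Decoder::new(bytes)
        .and_then(|mut reader| reader.read_to_end(&mut data))
        .map(|_| data)
        .ok()
}

fn main() {
    // Hypothetical payload: a zstd frame containing the bytes "hello".
    let frame = zstd::encode_all(&b"hello"[..], 0).unwrap();
    let blob = Some(frame);

    // `and_then` (one side of the hunk) and `map(...).flatten()` (the other) agree.
    let a = blob.clone().and_then(|b| decompress(&b));
    let b = blob.map(|b| decompress(&b)).flatten();
    assert_eq!(a, b);
    assert_eq!(a.unwrap(), b"hello");
}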
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-bench"
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
 [dependencies]
 log = "0.4.14"
 rayon = "1.5.1"
-solana-logger = { path = "../logger", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
 clap = "2.33.1"
 
 [package.metadata.docs.rs]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-cluster-bench"
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
 log = "0.4.14"
 rand = "0.7.0"
 rayon = "1.5.1"
-solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
-solana-client = { path = "../client", version = "=1.10.0" }
-solana-faucet = { path = "../faucet", version = "=1.10.0" }
-solana-gossip = { path = "../gossip", version = "=1.10.0" }
-solana-logger = { path = "../logger", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.9.7" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.7" }
+solana-client = { path = "../client", version = "=1.9.7" }
+solana-core = { path = "../core", version = "=1.9.7" }
+solana-faucet = { path = "../faucet", version = "=1.9.7" }
+solana-gossip = { path = "../gossip", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-test-validator = { path = "../test-validator", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
 
 [dev-dependencies]
-solana-core = { path = "../core", version = "=1.10.0" }
-solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
-solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.9.7" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -116,7 +116,7 @@ fn make_create_message(
 
     let instructions: Vec<_> = (0..num_instructions)
         .into_iter()
-        .flat_map(|_| {
+        .map(|_| {
            let program_id = if mint.is_some() {
                inline_spl_token::id()
            } else {
@@ -148,6 +148,7 @@ fn make_create_message(
 
            instructions
        })
+        .flatten()
        .collect();
 
     Message::new(&instructions, Some(&keypair.pubkey()))
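The two hunks above are a behavior-preserving rewrite: `Iterator::flat_map(f)` is shorthand for `map(f)` followed by `flatten()`. A tiny self-contained illustration of that identity (the values are illustrative, not taken from the diff):

fn main() {
    let num_instructions = 3u32;

    // One instruction "batch" per iteration, flattened into a single Vec.
    let a: Vec<u32> = (0..num_instructions).flat_map(|i| vec![i, i + 10]).collect();
    let b: Vec<u32> = (0..num_instructions)
        .map(|i| vec![i, i + 10])
        .flatten()
        .collect();

    assert_eq!(a, b); // [0, 10, 1, 11, 2, 12] either way
}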
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accountsdb-plugin-interface"
 description = "The Solana AccountsDb plugin interface."
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,8 +12,8 @@ documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
 [dependencies]
 log = "0.4.11"
 thiserror = "1.0.30"
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accountsdb-plugin-manager"
 description = "The Solana AccountsDb plugin manager."
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,17 +12,19 @@ documentation = "https://docs.rs/solana-validator"
 [dependencies]
 bs58 = "0.4.0"
 crossbeam-channel = "0.5"
-json5 = "0.4.1"
-libloading = "0.7.3"
+libloading = "0.7.2"
 log = "0.4.11"
-serde_json = "1.0.78"
-solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-metrics = { path = "../metrics", version = "=1.10.0" }
-solana-rpc = { path = "../rpc", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
+serde = "1.0.130"
+serde_derive = "1.0.103"
+serde_json = "1.0.72"
+solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-metrics = { path = "../metrics", version = "=1.9.7" }
+solana-rpc = { path = "../rpc", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
 thiserror = "1.0.30"
 
 [package.metadata.docs.rs]
@@ -9,6 +9,7 @@ use {
     },
     crossbeam_channel::Receiver,
     log::*,
+    serde_json,
     solana_rpc::{
         optimistically_confirmed_bank_tracker::BankNotification,
         transaction_notifier_interface::TransactionNotifierLock,
@@ -155,12 +156,12 @@ impl AccountsDbPluginService {
            )));
        }
 
-        let result: serde_json::Value = match json5::from_str(&contents) {
+        let result: serde_json::Value = match serde_json::from_str(&contents) {
            Ok(value) => value,
            Err(err) => {
                return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
                    format!(
-                        "The config file {:?} is not in a valid Json5 format, error: {:?}",
+                        "The config file {:?} is not in a valid Json format, error: {:?}",
                        accountsdb_plugin_config_file, err
                    ),
                ));
@@ -170,24 +171,13 @@ impl AccountsDbPluginService {
        let libpath = result["libpath"]
            .as_str()
            .ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
-        let mut libpath = PathBuf::from(libpath);
-        if libpath.is_relative() {
-            let config_dir = accountsdb_plugin_config_file.parent().ok_or_else(|| {
-                AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
-                    "Failed to resolve parent of {:?}",
-                    accountsdb_plugin_config_file,
-                ))
-            })?;
-            libpath = config_dir.join(libpath);
-        }
-
        let config_file = accountsdb_plugin_config_file
            .as_os_str()
            .to_str()
            .ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
 
        unsafe {
-            let result = plugin_manager.load_plugin(libpath.to_str().unwrap(), config_file);
+            let result = plugin_manager.load_plugin(libpath, config_file);
            if let Err(err) = result {
                let msg = format!(
                    "Failed to load the plugin library: {:?}, error: {:?}",
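The parser swap above is the substantive change in this file: `json5::from_str` accepts a superset of JSON (comments, trailing commas, unquoted keys), while `serde_json::from_str` is strict. A minimal sketch of the difference, assuming the `json5` and `serde_json` crates as dependencies (the config content is hypothetical):

fn main() {
    let contents = r#"{
        // a comment and the trailing comma below are valid JSON5, not JSON
        "libpath": "libexample_plugin.so",
    }"#;

    // Strict JSON parsing rejects this config...
    assert!(serde_json::from_str::<serde_json::Value>(contents).is_err());

    // ...while the JSON5 parser accepts it.
    let result: serde_json::Value = json5::from_str(contents).unwrap();
    assert_eq!(result["libpath"], "libexample_plugin.so");
}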
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-banking-bench"
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
 log = "0.4.14"
 rand = "0.7.0"
 rayon = "1.5.1"
-solana-core = { path = "../core", version = "=1.10.0" }
-solana-gossip = { path = "../gossip", version = "=1.10.0" }
-solana-ledger = { path = "../ledger", version = "=1.10.0" }
-solana-logger = { path = "../logger", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-perf = { path = "../perf", version = "=1.10.0" }
-solana-poh = { path = "../poh", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+solana-core = { path = "../core", version = "=1.9.7" }
+solana-gossip = { path = "../gossip", version = "=1.9.7" }
+solana-ledger = { path = "../ledger", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-perf = { path = "../perf", version = "=1.9.7" }
+solana-poh = { path = "../poh", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,7 +1,7 @@
 #![allow(clippy::integer_arithmetic)]
 use {
     clap::{crate_description, crate_name, value_t, App, Arg},
-    crossbeam_channel::{unbounded, Receiver},
+    crossbeam_channel::unbounded,
     log::*,
     rand::{thread_rng, Rng},
     rayon::prelude::*,
@@ -29,7 +29,7 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{
-        sync::{atomic::Ordering, Arc, Mutex, RwLock},
+        sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
        thread::sleep,
        time::{Duration, Instant},
     },
@@ -175,11 +175,6 @@ fn main() {
     let mut bank_forks = BankForks::new(bank0);
     let mut bank = bank_forks.working_bank();
 
-    // set cost tracker limits to MAX so it will not filter out TXs
-    bank.write_cost_tracker()
-        .unwrap()
-        .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
-
     info!("threads: {} txs: {}", num_threads, total_num_transactions);
 
     let same_payer = matches.is_present("same_payer");
@@ -340,13 +335,6 @@ fn main() {
            bank = bank_forks.working_bank();
            insert_time.stop();
 
-            // set cost tracker limits to MAX so it will not filter out TXs
-            bank.write_cost_tracker().unwrap().set_limits(
-                std::u64::MAX,
-                std::u64::MAX,
-                std::u64::MAX,
-            );
-
            poh_recorder.lock().unwrap().set_bank(&bank);
            assert!(poh_recorder.lock().unwrap().bank().is_some());
            if bank.slot() > 32 {
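The import churn above swaps `std::sync::mpsc` (v1.9 side) for `crossbeam_channel` (master side). For plain send/receive the two are near drop-in replacements, which is why only the `use` lines and constructors change across these hunks; a minimal sketch, assuming the `crossbeam-channel` crate as a dependency:

fn main() {
    // Standard library channel (v1.9 side).
    let (tx, rx) = std::sync::mpsc::channel::<u64>();
    tx.send(42).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);

    // Crossbeam equivalent (master side). Unlike std's mpsc Receiver,
    // crossbeam's Receiver is also Clone and usable with select!.
    let (tx, rx) = crossbeam_channel::unbounded::<u64>();
    tx.send(42).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
}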
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-client"
-version = "1.10.0"
+version = "1.9.7"
 description = "Solana banks client"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,19 +10,19 @@ documentation = "https://docs.rs/solana-banks-client"
 edition = "2021"
 
 [dependencies]
-borsh = "0.9.3"
+borsh = "0.9.1"
 futures = "0.3"
-solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
-solana-program = { path = "../sdk/program", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.9.7" }
+solana-program = { path = "../sdk/program", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
 tarpc = { version = "0.27.2", features = ["full"] }
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
 
 [dev-dependencies]
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-banks-server = { path = "../banks-server", version = "=1.10.0" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-banks-server = { path = "../banks-server", version = "=1.9.7" }
 
 [lib]
 crate-type = ["lib"]
@@ -5,10 +5,8 @@
 //! but they are undocumented, may change over time, and are generally more
 //! cumbersome to use.
 
-pub use {
-    crate::error::BanksClientError,
-    solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus},
-};
+pub use crate::error::BanksClientError;
+pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
 use {
     borsh::BorshDeserialize,
     futures::{future::join_all, Future, FutureExt, TryFutureExt},
@@ -23,7 +21,9 @@ use {
        message::Message,
        signature::Signature,
        transaction::{self, Transaction},
+        transport,
     },
+    std::io,
     tarpc::{
        client::{self, NewClient, RequestDispatch},
        context::{self, Context},
@@ -60,9 +60,10 @@ impl BanksClient {
        &mut self,
        ctx: Context,
        transaction: Transaction,
-    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<()>> + '_ {
        self.inner
            .send_transaction_with_context(ctx, transaction)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -74,10 +75,11 @@
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
        #[allow(deprecated)]
        self.inner
            .get_fees_with_commitment_and_context(ctx, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -85,9 +87,10 @@
        &mut self,
        ctx: Context,
        signature: Signature,
-    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
        self.inner
            .get_transaction_status_with_context(ctx, signature)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -95,9 +98,10 @@
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.inner
            .get_slot_with_context(ctx, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -105,9 +109,10 @@
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.inner
            .get_block_height_with_context(ctx, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -116,9 +121,10 @@
        ctx: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
        self.inner
            .process_transaction_with_commitment_and_context(ctx, transaction, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -143,9 +149,10 @@
        ctx: Context,
        address: Pubkey,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.inner
            .get_account_with_commitment_and_context(ctx, address, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -155,7 +162,7 @@
     pub fn send_transaction(
        &mut self,
        transaction: Transaction,
-    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<()>> + '_ {
        self.send_transaction_with_context(context::current(), transaction)
     }
 
@@ -168,25 +175,27 @@
     )]
     pub fn get_fees(
        &mut self,
-    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
        #[allow(deprecated)]
        self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
     }
 
     /// Return the cluster Sysvar
-    pub fn get_sysvar<T: Sysvar>(
-        &mut self,
-    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
+    pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(T::id()).map(|result| {
-            let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?;
-            from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError(
-                "Failed to deserialize sysvar",
-            ))
+            let sysvar = result?
+                .ok_or(BanksClientError::ClientError("Sysvar not present"))
+                .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
+            from_account::<T, _>(&sysvar)
+                .ok_or(BanksClientError::ClientError(
+                    "Failed to deserialize sysvar",
+                ))
+                .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
        })
     }
 
     /// Return the cluster rent
-    pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ {
+    pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
        self.get_sysvar::<Rent>()
     }
 
@@ -194,9 +203,7 @@
     /// transactions with a blockhash that has not yet expired. Use the `get_fees`
     /// method to get both a blockhash and the blockhash's last valid slot.
     #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
-    pub fn get_recent_blockhash(
-        &mut self,
-    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
+    pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
        #[allow(deprecated)]
        self.get_fees().map(|result| Ok(result?.1))
     }
@@ -207,7 +214,7 @@
        &mut self,
        transaction: Transaction,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
+    ) -> impl Future<Output = transport::Result<()>> + '_ {
        let mut ctx = context::current();
        ctx.deadline += Duration::from_secs(50);
        self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
@@ -217,6 +224,7 @@
            )),
            Some(transaction_result) => Ok(transaction_result?),
        })
+        .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
     }
 
     /// Send a transaction and return any preflight (sanitization or simulation) errors, or return
@@ -271,7 +279,7 @@
     pub fn process_transaction(
        &mut self,
        transaction: Transaction,
-    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
+    ) -> impl Future<Output = transport::Result<()>> + '_ {
        self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
     }
 
@@ -279,7 +287,7 @@
        &mut self,
        transactions: Vec<Transaction>,
        commitment: CommitmentLevel,
-    ) -> Result<(), BanksClientError> {
+    ) -> transport::Result<()> {
        let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
        let futures = clients
            .iter_mut()
@@ -295,21 +303,19 @@
     pub fn process_transactions(
        &mut self,
        transactions: Vec<Transaction>,
-    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
+    ) -> impl Future<Output = transport::Result<()>> + '_ {
        self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
     }
 
     /// Return the most recent rooted slot. All transactions at or below this slot
     /// are said to be finalized. The cluster will not fork to a higher slot.
-    pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
+    pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_slot_with_context(context::current(), CommitmentLevel::default())
     }
 
     /// Return the most recent rooted block height. All transactions at or below this height
     /// are said to be finalized. The cluster will not fork to a higher block height.
-    pub fn get_root_block_height(
-        &mut self,
-    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
+    pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_block_height_with_context(context::current(), CommitmentLevel::default())
     }
 
@@ -319,7 +325,7 @@
        &mut self,
        address: Pubkey,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.get_account_with_commitment_and_context(context::current(), address, commitment)
     }
 
@@ -328,7 +334,7 @@
     pub fn get_account(
        &mut self,
        address: Pubkey,
-    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.get_account_with_commitment(address, CommitmentLevel::default())
     }
 
@@ -337,11 +343,14 @@
     pub fn get_packed_account_data<T: Pack>(
        &mut self,
        address: Pubkey,
-    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(address).map(|result| {
-            let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
+            let account = result?
+                .ok_or(BanksClientError::ClientError("Account not found"))
+                .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
            T::unpack_from_slice(&account.data)
                .map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
+                .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
        })
     }
 
@@ -350,7 +359,7 @@
     pub fn get_account_data_with_borsh<T: BorshDeserialize>(
        &mut self,
        address: Pubkey,
-    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(address).map(|result| {
            let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
            T::try_from_slice(&account.data).map_err(Into::into)
@@ -363,17 +372,14 @@
        &mut self,
        address: Pubkey,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<u64>> + '_ {
        self.get_account_with_commitment_and_context(context::current(), address, commitment)
            .map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
     }
 
     /// Return the balance in lamports of an account at the given address at the time
     /// of the most recent root slot.
-    pub fn get_balance(
-        &mut self,
-        address: Pubkey,
-    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
+    pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
        self.get_balance_with_commitment(address, CommitmentLevel::default())
     }
 
@@ -385,7 +391,7 @@
     pub fn get_transaction_status(
        &mut self,
        signature: Signature,
-    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
        self.get_transaction_status_with_context(context::current(), signature)
     }
 
@@ -393,7 +399,7 @@
     pub async fn get_transaction_statuses(
        &mut self,
        signatures: Vec<Signature>,
-    ) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> {
+    ) -> io::Result<Vec<Option<TransactionStatus>>> {
        // tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
        let mut clients_and_signatures: Vec<_> = signatures
            .into_iter()
@@ -410,9 +416,7 @@
        statuses.into_iter().collect()
     }
 
-    pub fn get_latest_blockhash(
-        &mut self,
-    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
+    pub fn get_latest_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
        self.get_latest_blockhash_with_commitment(CommitmentLevel::default())
            .map(|result| {
                result?
@@ -425,7 +429,7 @@
     pub fn get_latest_blockhash_with_commitment(
        &mut self,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
        self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment)
     }
 
@@ -433,9 +437,10 @@
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
-    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
        self.inner
            .get_latest_blockhash_with_commitment_and_context(ctx, commitment)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 
@@ -444,14 +449,15 @@
        ctx: Context,
        commitment: CommitmentLevel,
        message: Message,
-    ) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ {
+    ) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
        self.inner
            .get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
+            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
            .map_err(Into::into)
     }
 }
 
-pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError>
+pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
 where
     C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
 {
@@ -460,7 +466,7 @@ where
     })
 }
 
-pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> {
+pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
     let transport = tcp::connect(addr, Bincode::default).await?;
     Ok(BanksClient {
        inner: TarpcClient::new(client::Config::default(), transport).spawn(),
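Nearly every hunk in this file is the same mechanical migration: the master side returns `Result<_, BanksClientError>` directly, while the v1.9 side keeps `io::Result<_>` and bridges with extra `map_err` steps. Both directions hinge on `From` impls so that `?` and `Into::into` perform the conversion. A minimal sketch of that mechanism, using a hypothetical error type rather than the crate's actual definition:

use std::io;

#[derive(Debug)]
enum ClientError {
    Io(io::Error),
}

impl From<io::Error> for ClientError {
    fn from(err: io::Error) -> Self {
        ClientError::Io(err)
    }
}

fn fetch() -> io::Result<u64> {
    Err(io::Error::new(io::ErrorKind::Other, "rpc failed"))
}

// The signature can change from io::Result<u64> to Result<u64, ClientError>
// without touching the call site: `?` inserts the From conversion.
fn fetch_typed() -> Result<u64, ClientError> {
    Ok(fetch()?)
}

fn main() {
    assert!(matches!(fetch_typed(), Err(ClientError::Io(_))));
}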
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-interface"
-version = "1.10.0"
+version = "1.9.7"
 description = "Solana banks RPC interface"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-banks-interface"
 edition = "2021"
 
 [dependencies]
-serde = { version = "1.0.136", features = ["derive"] }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
+serde = { version = "1.0.130", features = ["derive"] }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
 tarpc = { version = "0.27.2", features = ["full"] }
 
 [lib]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-server"
-version = "1.10.0"
+version = "1.9.7"
 description = "Solana banks server"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,11 @@ edition = "2021"
 
 [dependencies]
 bincode = "1.3.3"
-crossbeam-channel = "0.5"
 futures = "0.3"
-solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.7" }
 tarpc = { version = "0.27.2", features = ["full"] }
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
@@ -1,6 +1,5 @@
 use {
     bincode::{deserialize, serialize},
-    crossbeam_channel::{unbounded, Receiver, Sender},
     futures::{future, prelude::stream::StreamExt},
     solana_banks_interface::{
        Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
@@ -31,7 +30,10 @@ use {
        convert::TryFrom,
        io,
        net::{Ipv4Addr, SocketAddr},
-        sync::{Arc, RwLock},
+        sync::{
+            mpsc::{channel, Receiver, Sender},
+            Arc, RwLock,
+        },
        thread::Builder,
        time::Duration,
     },
@@ -94,7 +96,7 @@ impl BanksServer {
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        poll_signature_status_sleep_duration: Duration,
     ) -> Self {
-        let (transaction_sender, transaction_receiver) = unbounded();
+        let (transaction_sender, transaction_receiver) = channel();
        let bank = bank_forks.read().unwrap().working_bank();
        let slot = bank.slot();
        {
@@ -390,7 +392,7 @@ pub async fn start_tcp_server(
        // serve is generated by the service attribute. It takes as input any type implementing
        // the generated Banks trait.
        .map(move |chan| {
-            let (sender, receiver) = unbounded();
+            let (sender, receiver) = channel();
 
            SendTransactionService::new::<NullTpuInfo>(
                tpu_addr,
@@ -2,18 +2,19 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-bench-streamer"
-version = "1.10.0"
+version = "1.9.7"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
-crossbeam-channel = "0.5"
 clap = "2.33.1"
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.7" }
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,7 +1,6 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
clap::{crate_description, crate_name, value_t, App, Arg},
|
||||
crossbeam_channel::unbounded,
|
||||
clap::{crate_description, crate_name, App, Arg},
|
||||
solana_streamer::{
|
||||
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
|
||||
streamer::{receiver, PacketBatchReceiver},
|
||||
@@ -11,6 +10,7 @@ use {
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
mpsc::channel,
|
||||
Arc,
|
||||
},
|
||||
thread::{sleep, spawn, JoinHandle, Result},
|
||||
@@ -67,22 +67,13 @@ fn main() -> Result<()> {
|
||||
.takes_value(true)
|
||||
.help("Use NUM receive sockets"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num-producers")
|
||||
.long("num-producers")
|
||||
.value_name("NUM")
|
||||
.takes_value(true)
|
||||
.help("Use this many producer threads."),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
if let Some(n) = matches.value_of("num-recv-sockets") {
|
||||
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
|
||||
}
|
||||
|
||||
let num_producers = value_t!(matches, "num_producers", u64).unwrap_or(4);
|
||||
|
||||
let port = 0;
let mut port = 0;
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let mut addr = SocketAddr::new(ip_addr, 0);

@@ -91,17 +82,14 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketBatchRecycler::default();
let (_port, read_sockets) = solana_net_utils::multi_bind_in_range(
ip_addr,
(port, port + num_sockets as u16),
num_sockets,
)
.unwrap();
for read in read_sockets {
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

addr = read.local_addr().unwrap();
let (s_reader, r_reader) = unbounded();
port = addr.port();

let (s_reader, r_reader) = channel();
read_channels.push(r_reader);
read_threads.push(receiver(
Arc::new(read),
@@ -114,10 +102,9 @@ fn main() -> Result<()> {
));
}

let producer_threads: Vec<_> = (0..num_producers)
.into_iter()
.map(|_| producer(&addr, exit.clone()))
.collect();
let t_producer1 = producer(&addr, exit.clone());
let t_producer2 = producer(&addr, exit.clone());
let t_producer3 = producer(&addr, exit.clone());

let rvs = Arc::new(AtomicUsize::new(0));
let sink_threads: Vec<_> = read_channels
@@ -137,9 +124,9 @@ fn main() -> Result<()> {
for t_reader in read_threads {
t_reader.join()?;
}
for t_producer in producer_threads {
t_producer.join()?;
}
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
for t_sink in sink_threads {
t_sink.join()?;
}
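Throughout these hunks one side of the diff uses `crossbeam_channel::unbounded()` and the other `std::sync::mpsc::channel()`; the two return the same `(Sender, Receiver)` shape, which is why the surrounding call sites barely change. A minimal stand-alone sketch of that shared pattern, using only `std` (the names and values here are illustrative, not taken from the diff):

```rust
use std::{sync::mpsc, thread};

fn main() {
    // std::sync::mpsc::channel() and crossbeam_channel::unbounded() both
    // return a (Sender, Receiver) pair with send()/recv() semantics.
    let (sender, receiver) = mpsc::channel::<u64>();
    let reader = thread::spawn(move || {
        // The receiver's iterator ends once every sender has been dropped.
        receiver.iter().sum::<u64>()
    });
    for v in 0..4 {
        sender.send(v).unwrap();
    }
    drop(sender); // close the channel so the reader thread can finish
    assert_eq!(reader.join().unwrap(), 6);
}
```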
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.10.0"
version = "1.9.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,28 +10,27 @@ publish = false

[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.5"
log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.78"
serde_yaml = "0.8.23"
solana-core = { path = "../core", version = "=1.10.0" }
solana-genesis = { path = "../genesis", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.7" }
solana-genesis = { path = "../genesis", version = "=1.9.7" }
solana-client = { path = "../client", version = "=1.9.7" }
solana-faucet = { path = "../faucet", version = "=1.9.7" }
solana-gossip = { path = "../gossip", version = "=1.9.7" }
solana-logger = { path = "../logger", version = "=1.9.7" }
solana-metrics = { path = "../metrics", version = "=1.9.7" }
solana-measure = { path = "../measure", version = "=1.9.7" }
solana-net-utils = { path = "../net-utils", version = "=1.9.7" }
solana-runtime = { path = "../runtime", version = "=1.9.7" }
solana-sdk = { path = "../sdk", version = "=1.9.7" }
solana-streamer = { path = "../streamer", version = "=1.9.7" }
solana-version = { path = "../version", version = "=1.9.7" }

[dev-dependencies]
serial_test = "0.6.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.9.7" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -110,7 +110,7 @@ fn generate_chunked_transfers(
shared_txs: &SharedTransactions,
shared_tx_active_thread_count: Arc<AtomicIsize>,
source_keypair_chunks: Vec<Vec<&Keypair>>,
dest_keypair_chunks: &mut [VecDeque<&Keypair>],
dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
threads: usize,
duration: Duration,
sustained: bool,
@@ -1,6 +1,5 @@
#![allow(clippy::integer_arithmetic)]
use {
crossbeam_channel::unbounded,
serial_test::serial,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
@@ -16,7 +15,10 @@ use {
},
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::{sync::Arc, time::Duration},
std::{
sync::{mpsc::channel, Arc},
time::Duration,
},
};

fn test_bench_tps_local_cluster(config: Config) {
@@ -50,7 +52,7 @@ fn test_bench_tps_local_cluster(config: Config) {
VALIDATOR_PORT_RANGE,
));

let (addr_sender, addr_receiver) = unbounded();
let (addr_sender, addr_receiver) = channel();
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))
@@ -1,6 +1,6 @@
[package]
name = "solana-bloom"
version = "1.10.0"
version = "1.9.7"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ edition = "2021"
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.136", features = ["rc"] }
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.7" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.7" }
solana-sdk = { path = "../sdk", version = "=1.9.7" }
log = "0.4.14"

[lib]
@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.10.0"
version = "1.9.7"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -11,18 +11,15 @@ license = "Apache-2.0"
edition = "2021"

[dependencies]
solana-sdk = { path = "../sdk", version = "=1.10.0" }
memmap2 = "0.5.2"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.10.0" }
rand = "0.7.0"
tempfile = "3.3.0"
modular-bitfield = "0.11.2"

[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.9.7" }
solana-sdk = { path = "../sdk", version = "=1.9.7" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.7" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"

[lib]
crate-type = ["lib"]
@@ -3,7 +3,7 @@ use {
bucket_item::BucketItem,
bucket_map::BucketMapError,
bucket_stats::BucketMapStats,
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2},
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2, UID_UNLOCKED},
index_entry::IndexEntry,
MaxSearch, RefCount,
},
@@ -17,7 +17,7 @@ use {
ops::RangeBounds,
path::PathBuf,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
},
@@ -81,7 +81,6 @@ impl<T: Clone + Copy> Bucket<T> {
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
let index = BucketStorage::new(
Arc::clone(&drives),
@@ -89,7 +88,6 @@ impl<T: Clone + Copy> Bucket<T> {
std::mem::size_of::<IndexEntry>() as u64,
max_search,
Arc::clone(&stats.index),
count,
);
Self {
random: thread_rng().gen(),
@@ -102,10 +100,14 @@ impl<T: Clone + Copy> Bucket<T> {
}
}

pub fn bucket_len(&self) -> u64 {
self.index.used.load(Ordering::Relaxed)
}

pub fn keys(&self) -> Vec<Pubkey> {
let mut rv = vec![];
for i in 0..self.index.capacity() {
if self.index.is_free(i) {
if self.index.uid(i) == UID_UNLOCKED {
continue;
}
let ix: &IndexEntry = self.index.get(i);
@@ -118,10 +120,10 @@ impl<T: Clone + Copy> Bucket<T> {
where
R: RangeBounds<Pubkey>,
{
let mut result = Vec::with_capacity(self.index.count.load(Ordering::Relaxed) as usize);
let mut result = Vec::with_capacity(self.index.used.load(Ordering::Relaxed) as usize);
for i in 0..self.index.capacity() {
let ii = i % self.index.capacity();
if self.index.is_free(ii) {
if self.index.uid(ii) == UID_UNLOCKED {
continue;
}
let ix: &IndexEntry = self.index.get(ii);
@@ -154,7 +156,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.is_free(ii) {
if index.uid(ii) == UID_UNLOCKED {
continue;
}
let elem: &mut IndexEntry = index.get_mut(ii);
@@ -173,7 +175,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.is_free(ii) {
if index.uid(ii) == UID_UNLOCKED {
continue;
}
let elem: &IndexEntry = index.get(ii);
@@ -185,23 +187,26 @@ impl<T: Clone + Copy> Bucket<T> {
}

fn bucket_create_key(
index: &mut BucketStorage,
index: &BucketStorage,
key: &Pubkey,
elem_uid: Uid,
random: u64,
is_resizing: bool,
) -> Result<u64, BucketMapError> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i as u64 % index.capacity();
if !index.is_free(ii) {
if index.uid(ii) != UID_UNLOCKED {
continue;
}
index.allocate(ii, elem_uid, is_resizing).unwrap();
let elem: &mut IndexEntry = index.get_mut(ii);
// These fields will be overwritten after allocation by callers.
index.allocate(ii, elem_uid).unwrap();
let mut elem: &mut IndexEntry = index.get_mut(ii);
elem.key = *key;
// These will be overwritten after allocation by callers.
// Since this part of the mmapped file could have previously been used by someone else, there can be garbage here.
elem.init(key);
elem.ref_count = 0;
elem.storage_offset = 0;
elem.storage_capacity_when_created_pow2 = 0;
elem.num_slots = 0;
//debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
return Ok(ii);
}
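For orientation, a stand-alone sketch of the probe loop that `bucket_create_key` performs above: scan at most `max_search` slots starting from a hash-derived position, wrap modulo capacity, and claim the first free slot; failure bubbles up as an error so the caller can grow the index. The `occupied` slice stands in for the mmapped headers, and all names are illustrative:

```rust
// Illustrative sketch, not part of the diff.
fn probe_for_free_slot(
    occupied: &[bool], // stand-in for the mmapped slot headers
    start: u64,        // analogue of Self::bucket_index_ix(index, key, random)
    max_search: u64,
) -> Option<u64> {
    let capacity = occupied.len() as u64;
    for i in start..start + max_search {
        let ii = i % capacity; // wrap around the end of the bucket
        if occupied[ii as usize] {
            continue; // slot already claimed by some other uid
        }
        return Some(ii); // first free slot wins
    }
    None // caller treats this as a grow-and-reindex signal
}

fn main() {
    let occupied = vec![true, true, false, true];
    assert_eq!(probe_for_free_slot(&occupied, 3, 4), Some(2));
}
```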
@@ -220,14 +225,8 @@ impl<T: Clone + Copy> Bucket<T> {
Some(elem.ref_count)
}

fn create_key(&mut self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(
&mut self.index,
key,
IndexEntry::key_uid(key),
self.random,
false,
)
fn create_key(&self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(&self.index, key, IndexEntry::key_uid(key), self.random)
}

pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> {
@@ -257,17 +256,16 @@ impl<T: Clone + Copy> Bucket<T> {
Some(res) => res,
};
elem.ref_count = ref_count;
let elem_uid = self.index.uid_unchecked(elem_ix);
let elem_uid = self.index.uid(elem_ix);
let bucket_ix = elem.data_bucket_ix();
let current_bucket = &self.data[bucket_ix as usize];
let num_slots = data.len() as u64;
if best_fit_bucket == bucket_ix && elem.num_slots > 0 {
// in place update
let elem_loc = elem.data_loc(current_bucket);
let slice: &mut [T] = current_bucket.get_mut_cell_slice(elem_loc, data.len() as u64);
assert_eq!(current_bucket.uid(elem_loc), Some(elem_uid));
elem.num_slots = num_slots;
slice.copy_from_slice(data);
assert!(current_bucket.uid(elem_loc) == elem_uid);
elem.num_slots = data.len() as u64;
slice.clone_from_slice(data);
Ok(())
} else {
// need to move the allocation to a best fit spot
@@ -277,21 +275,18 @@ impl<T: Clone + Copy> Bucket<T> {
let pos = thread_rng().gen_range(0, cap);
for i in pos..pos + self.index.max_search() {
let ix = i % cap;
if best_bucket.is_free(ix) {
if best_bucket.uid(ix) == UID_UNLOCKED {
let elem_loc = elem.data_loc(current_bucket);
let old_slots = elem.num_slots;
elem.set_storage_offset(ix);
elem.set_storage_capacity_when_created_pow2(best_bucket.capacity_pow2);
elem.num_slots = num_slots;
if old_slots > 0 {
let current_bucket = &mut self.data[bucket_ix as usize];
if elem.num_slots > 0 {
current_bucket.free(elem_loc, elem_uid);
}
elem.storage_offset = ix;
elem.storage_capacity_when_created_pow2 = best_bucket.capacity_pow2;
elem.num_slots = data.len() as u64;
//debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
if num_slots > 0 {
let best_bucket = &mut self.data[best_fit_bucket as usize];
best_bucket.allocate(ix, elem_uid, false).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, num_slots);
if elem.num_slots > 0 {
best_bucket.allocate(ix, elem_uid).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, data.len() as u64);
slice.copy_from_slice(data);
}
return Ok(());
@@ -303,12 +298,10 @@ impl<T: Clone + Copy> Bucket<T> {

pub fn delete_key(&mut self, key: &Pubkey) {
if let Some((elem, elem_ix)) = self.find_entry(key) {
let elem_uid = self.index.uid_unchecked(elem_ix);
let elem_uid = self.index.uid(elem_ix);
if elem.num_slots > 0 {
let ix = elem.data_bucket_ix() as usize;
let data_bucket = &self.data[ix];
let data_bucket = &self.data[elem.data_bucket_ix() as usize];
let loc = elem.data_loc(data_bucket);
let data_bucket = &mut self.data[ix];
//debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
data_bucket.free(loc, elem_uid);
}
@@ -326,7 +319,7 @@ impl<T: Clone + Copy> Bucket<T> {
//increasing the capacity by ^4 reduces the
//likelihood of a re-index collision of 2^(max_search)^2
//1 in 2^32
let mut index = BucketStorage::new_with_capacity(
let index = BucketStorage::new_with_capacity(
Arc::clone(&self.drives),
1,
std::mem::size_of::<IndexEntry>() as u64,
@@ -334,16 +327,14 @@ impl<T: Clone + Copy> Bucket<T> {
self.index.capacity_pow2 + i, // * 2,
self.index.max_search,
Arc::clone(&self.stats.index),
Arc::clone(&self.index.count),
);
let random = thread_rng().gen();
let mut valid = true;
for ix in 0..self.index.capacity() {
let uid = self.index.uid(ix);
if let Some(uid) = uid {
if UID_UNLOCKED != uid {
let elem: &IndexEntry = self.index.get(ix);
let new_ix =
Self::bucket_create_key(&mut index, &elem.key, uid, random, true);
let new_ix = Self::bucket_create_key(&index, &elem.key, uid, random);
if new_ix.is_err() {
valid = false;
break;
@@ -400,7 +391,6 @@ impl<T: Clone + Copy> Bucket<T> {
Self::elem_size(),
self.index.max_search,
Arc::clone(&self.stats.data),
Arc::default(),
))
}
self.data.push(bucket);
@@ -30,13 +30,14 @@ impl<T: Clone + Copy> BucketApi<T> {
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
Self {
drives,
max_search,
stats,
bucket: RwLock::default(),
count: Arc::default(),
count,
}
}

@@ -72,7 +73,12 @@ impl<T: Clone + Copy> BucketApi<T> {
}

pub fn bucket_len(&self) -> u64 {
self.count.load(Ordering::Relaxed)
self.bucket
.read()
.unwrap()
.as_ref()
.map(|bucket| bucket.bucket_len())
.unwrap_or_default()
}

pub fn delete_key(&self, key: &Pubkey) {
@@ -89,11 +95,11 @@ impl<T: Clone + Copy> BucketApi<T> {
Arc::clone(&self.drives),
self.max_search,
Arc::clone(&self.stats),
Arc::clone(&self.count),
));
} else {
let write = bucket.as_mut().unwrap();
write.handle_delayed_grows();
self.count.store(write.bucket_len(), Ordering::Relaxed);
}
bucket
}

@@ -79,14 +79,21 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {
});
let drives = Arc::new(drives);

let stats = Arc::default();
let buckets = (0..config.max_buckets)
.into_iter()
.map(|_| {
let mut per_bucket_count = Vec::with_capacity(config.max_buckets);
per_bucket_count.resize_with(config.max_buckets, Arc::default);
let stats = Arc::new(BucketMapStats {
per_bucket_count,
..BucketMapStats::default()
});
let buckets = stats
.per_bucket_count
.iter()
.map(|per_bucket_count| {
Arc::new(BucketApi::new(
Arc::clone(&drives),
max_search,
Arc::clone(&stats),
Arc::clone(per_bucket_count),
))
})
.collect();
@@ -14,4 +14,5 @@ pub struct BucketStats {
pub struct BucketMapStats {
pub index: Arc<BucketStats>,
pub data: Arc<BucketStats>,
pub per_bucket_count: Vec<Arc<AtomicU64>>,
}
@@ -35,42 +35,27 @@ use {
pub const DEFAULT_CAPACITY_POW2: u8 = 5;

/// A Header UID of 0 indicates that the header is unlocked
const UID_UNLOCKED: Uid = 0;
pub(crate) const UID_UNLOCKED: Uid = 0;

pub(crate) type Uid = u64;

#[repr(C)]
struct Header {
lock: u64,
lock: AtomicU64,
}

impl Header {
/// try to lock this entry with 'uid'
/// return true if it could be locked
fn try_lock(&mut self, uid: Uid) -> bool {
if self.lock == UID_UNLOCKED {
self.lock = uid;
true
} else {
false
}
fn try_lock(&self, uid: Uid) -> bool {
Ok(UID_UNLOCKED)
== self
.lock
.compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
}
/// mark this entry as unlocked
fn unlock(&mut self, expected: Uid) {
assert_eq!(expected, self.lock);
self.lock = UID_UNLOCKED;
fn unlock(&self) -> Uid {
self.lock.swap(UID_UNLOCKED, Ordering::Release)
}
/// uid that has locked this entry or None if unlocked
fn uid(&self) -> Option<Uid> {
if self.lock == UID_UNLOCKED {
None
} else {
Some(self.lock)
}
}
/// true if this entry is unlocked
fn is_unlocked(&self) -> bool {
self.lock == UID_UNLOCKED
fn uid(&self) -> Uid {
self.lock.load(Ordering::Acquire)
}
}

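The `Header` rewrite above drops the `&mut` locking path in favor of a single `AtomicU64` claimed with `compare_exchange`, so allocation and free can run through shared references into the mmapped file. A minimal sketch of that handoff pattern, using only `std` and illustrative values; note that `.is_ok()` is equivalent to the `Ok(UID_UNLOCKED) == ...` comparison in the hunk, since a successful exchange always returns the previous value, `UID_UNLOCKED`:

```rust
// Illustrative sketch, not part of the diff.
use std::sync::atomic::{AtomicU64, Ordering};

const UID_UNLOCKED: u64 = 0; // 0 means "slot is free"

struct Header {
    lock: AtomicU64,
}

impl Header {
    fn try_lock(&self, uid: u64) -> bool {
        // Succeeds only if the slot was unlocked; AcqRel orders the claim
        // against subsequent reads/writes of the slot's payload.
        self.lock
            .compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
            .is_ok()
    }
    fn unlock(&self) -> u64 {
        // Returns the uid that held the lock, letting the caller assert on it.
        self.lock.swap(UID_UNLOCKED, Ordering::Release)
    }
}

fn main() {
    let h = Header { lock: AtomicU64::new(UID_UNLOCKED) };
    assert!(h.try_lock(7));
    assert!(!h.try_lock(8)); // a second claim fails until unlock
    assert_eq!(h.unlock(), 7);
}
```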
@@ -79,7 +64,7 @@ pub struct BucketStorage {
mmap: MmapMut,
pub cell_size: u64,
pub capacity_pow2: u8,
pub count: Arc<AtomicU64>,
pub used: AtomicU64,
pub stats: Arc<BucketStats>,
pub max_search: MaxSearch,
}
@@ -103,7 +88,6 @@ impl BucketStorage {
capacity_pow2: u8,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &stats);
@@ -111,7 +95,7 @@ impl BucketStorage {
path,
mmap,
cell_size,
count,
used: AtomicU64::new(0),
capacity_pow2,
stats,
max_search,
@@ -128,7 +112,6 @@ impl BucketStorage {
elem_size: u64,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
Self::new_with_capacity(
drives,
@@ -137,74 +120,53 @@ impl BucketStorage {
DEFAULT_CAPACITY_POW2,
max_search,
stats,
count,
)
}

/// return ref to header of item 'ix' in mmapped file
fn header_ptr(&self, ix: u64) -> &Header {
pub fn uid(&self, ix: u64) -> Uid {
assert!(ix < self.capacity(), "bad index size");
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
hdr.as_ref().unwrap()
return hdr.as_ref().unwrap().uid();
}
}

/// return ref to header of item 'ix' in mmapped file
fn header_mut_ptr(&mut self, ix: u64) -> &mut Header {
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_mut_ptr() as *mut Header;
hdr.as_mut().unwrap()
}
}

/// return uid allocated at index 'ix' or None if vacant
pub fn uid(&self, ix: u64) -> Option<Uid> {
assert!(ix < self.capacity(), "bad index size");
self.header_ptr(ix).uid()
}

/// true if the entry at index 'ix' is free (as opposed to being allocated)
pub fn is_free(&self, ix: u64) -> bool {
// note that the terminology in the implementation is locked or unlocked.
// but our api is allocate/free
self.header_ptr(ix).is_unlocked()
}

/// caller knows id is not empty
pub fn uid_unchecked(&self, ix: u64) -> Uid {
self.uid(ix).unwrap()
}

/// 'is_resizing' true if caller is resizing the index (so don't increment count)
/// 'is_resizing' false if caller is adding an item to the index (so increment count)
pub fn allocate(
&mut self,
ix: u64,
uid: Uid,
is_resizing: bool,
) -> Result<(), BucketStorageError> {
pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
assert!(ix < self.capacity(), "allocate: bad index size");
assert!(UID_UNLOCKED != uid, "allocate: bad uid");
let mut e = Err(BucketStorageError::AlreadyAllocated);
let ix = (ix * self.cell_size) as usize;
//debug!("ALLOC {} {}", ix, uid);
if self.header_mut_ptr(ix).try_lock(uid) {
e = Ok(());
if !is_resizing {
self.count.fetch_add(1, Ordering::Relaxed);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
if hdr.as_ref().unwrap().try_lock(uid) {
e = Ok(());
self.used.fetch_add(1, Ordering::Relaxed);
}
}
};
e
}

pub fn free(&mut self, ix: u64, uid: Uid) {
pub fn free(&self, ix: u64, uid: Uid) {
assert!(ix < self.capacity(), "bad index size");
assert!(UID_UNLOCKED != uid, "free: bad uid");
self.header_mut_ptr(ix).unlock(uid);
self.count.fetch_sub(1, Ordering::Relaxed);
let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
//debug!("FREE uid: {}", hdr.as_ref().unwrap().uid());
let previous_uid = hdr.as_ref().unwrap().unlock();
assert_eq!(
previous_uid, uid,
"free: unlocked a header with a different uid: {}",
previous_uid
);
self.used.fetch_sub(1, Ordering::Relaxed);
}
}

pub fn get<T: Sized>(&self, ix: u64) -> &T {
@@ -362,9 +324,6 @@ impl BucketStorage {
capacity_pow_2,
max_search,
Arc::clone(stats),
bucket
.map(|bucket| Arc::clone(&bucket.count))
.unwrap_or_default(),
);
if let Some(bucket) = bucket {
new_bucket.copy_contents(bucket);
@@ -382,43 +341,3 @@ impl BucketStorage {
1 << self.capacity_pow2
}
}

#[cfg(test)]
mod test {
use super::*;

#[test]
fn test_bucket_storage() {
let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
let paths: Vec<PathBuf> = [tmpdir1]
.iter()
.filter(|x| std::fs::create_dir_all(x).is_ok())
.cloned()
.collect();
assert!(!paths.is_empty());

let mut storage =
BucketStorage::new(Arc::new(paths), 1, 1, 1, Arc::default(), Arc::default());
let ix = 0;
let uid = Uid::MAX;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
let uid = 1;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
}
}
@@ -1,11 +1,9 @@
#![allow(dead_code)]
use {
crate::{
bucket::Bucket,
bucket_storage::{BucketStorage, Uid},
RefCount,
},
modular_bitfield::prelude::*,
solana_sdk::{clock::Slot, pubkey::Pubkey},
std::{
collections::hash_map::DefaultHasher,
@@ -21,42 +19,13 @@ use {
pub struct IndexEntry {
pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
storage_cap_and_offset: PackedStorage,
pub storage_offset: u64, // smaller? since these are variably sized, this could get tricky. well, actually accountinfo is not variable sized...
// if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
pub storage_capacity_when_created_pow2: u8, // see data_location
pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
}

/// Pack the storage offset and capacity-when-created-pow2 fields into a single u64
#[bitfield(bits = 64)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
struct PackedStorage {
capacity_when_created_pow2: B8,
offset: B56,
}

impl IndexEntry {
pub fn init(&mut self, pubkey: &Pubkey) {
self.key = *pubkey;
self.ref_count = 0;
self.storage_cap_and_offset = PackedStorage::default();
self.num_slots = 0;
}

pub fn set_storage_capacity_when_created_pow2(
&mut self,
storage_capacity_when_created_pow2: u8,
) {
self.storage_cap_and_offset
.set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
}

pub fn set_storage_offset(&mut self, storage_offset: u64) {
self.storage_cap_and_offset
.set_offset_checked(storage_offset)
.expect("New storage offset must fit into 7 bytes!")
}

pub fn data_bucket_from_num_slots(num_slots: Slot) -> u64 {
(num_slots as f64).log2().ceil() as u64 // use int log here?
}
@@ -69,18 +38,10 @@ impl IndexEntry {
self.ref_count
}

fn storage_capacity_when_created_pow2(&self) -> u8 {
self.storage_cap_and_offset.capacity_when_created_pow2()
}

fn storage_offset(&self) -> u64 {
self.storage_cap_and_offset.offset()
}

// This function maps the original data location into an index in the current bucket storage.
// This is coupled with how we resize bucket storages.
pub fn data_loc(&self, storage: &BucketStorage) -> u64 {
self.storage_offset() << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2())
self.storage_offset << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2)
}

pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
@@ -89,7 +50,7 @@ impl IndexEntry {
let slice = if self.num_slots > 0 {
let loc = self.data_loc(data_bucket);
let uid = Self::key_uid(&self.key);
assert_eq!(Some(uid), bucket.data[data_bucket_ix as usize].uid(loc));
assert_eq!(uid, bucket.data[data_bucket_ix as usize].uid(loc));
bucket.data[data_bucket_ix as usize].get_cell_slice(loc, self.num_slots)
} else {
// num_slots is 0. This means we don't have an actual allocation.
@@ -98,59 +59,9 @@ impl IndexEntry {
};
Some((slice, self.ref_count))
}

pub fn key_uid(key: &Pubkey) -> Uid {
let mut s = DefaultHasher::new();
key.hash(&mut s);
s.finish().max(1u64)
}
}

#[cfg(test)]
mod tests {
use super::*;

impl IndexEntry {
pub fn new(key: Pubkey) -> Self {
IndexEntry {
key,
ref_count: 0,
storage_cap_and_offset: PackedStorage::default(),
num_slots: 0,
}
}
}

/// verify that accessors for storage_offset and capacity_when_created are
/// correct and independent
#[test]
fn test_api() {
for offset in [0, 1, u32::MAX as u64] {
let mut index = IndexEntry::new(solana_sdk::pubkey::new_rand());
if offset != 0 {
index.set_storage_offset(offset);
}
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), 0);
for pow in [1, 255, 0] {
index.set_storage_capacity_when_created_pow2(pow);
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), pow);
}
}
}

#[test]
fn test_size() {
assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
}

#[test]
#[should_panic(expected = "New storage offset must fit into 7 bytes!")]
fn test_set_storage_offset_value_too_large() {
let too_big = 1 << 56;
let mut index = IndexEntry::new(Pubkey::new_unique());
index.set_storage_offset(too_big);
}
}
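The `PackedStorage` bitfield above folds the 8-bit `capacity_when_created_pow2` and the 56-bit `offset` into one `u64`, shrinking `IndexEntry` accordingly (see `test_size`). A hand-rolled sketch of the same packing, under the assumption that fields are laid out LSB-first as the `modular_bitfield` crate does; in the real code the crate's generated accessors replace these helpers:

```rust
// Illustrative sketch, not the crate's generated code.
const OFFSET_BITS: u32 = 56;
const OFFSET_MAX: u64 = (1u64 << OFFSET_BITS) - 1;

fn pack(capacity_when_created_pow2: u8, offset: u64) -> Option<u64> {
    if offset > OFFSET_MAX {
        return None; // analogue of set_offset_checked's "fit into 7 bytes" failure
    }
    // capacity occupies the low 8 bits, offset the high 56 bits
    Some(u64::from(capacity_when_created_pow2) | (offset << 8))
}

fn unpack(packed: u64) -> (u8, u64) {
    ((packed & 0xff) as u8, packed >> 8)
}

fn main() {
    let packed = pack(5, 1_000_000).unwrap();
    assert_eq!(unpack(packed), (5, 1_000_000));
    assert!(pack(5, 1 << 56).is_none()); // offset too large for 7 bytes
    // data_loc then rescales a stored offset after the bucket grows:
    // offset << (capacity_pow2 - capacity_when_created_pow2)
}
```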
@@ -1,365 +0,0 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#

set -e
cd "$(dirname "$0")"/..

output_file=${1:-/dev/stderr}

if [[ -n $CI_PULL_REQUEST ]]; then
IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
if [[ ${#affected_files[*]} -eq 0 ]]; then
echo "Unable to determine the files affected by this PR"
exit 1
fi
else
affected_files=()
fi

annotate() {
if [[ -n $BUILDKITE ]]; then
buildkite-agent annotate "$@"
fi
}

# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be affected,
# return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
# affects .rs$ -- any file or directory ending in .rs
# affects .rs -- also matches foo.rs.bar
# affects ^snap/ -- anything under the snap/ subdirectory
# affects snap/ -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
# affects !^docs/ -- anything *not* under the docs/ subdirectory
#
affects() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do
if [[ ! $file =~ ${pattern:1} ]]; then
return 0 # affected
fi
done
else
for file in "${affected_files[@]}"; do
if [[ $file =~ $pattern ]]; then
return 0 # affected
fi
done
fi
done

return 1 # not affected
}


# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi

for file in "${affected_files[@]}"; do
declare matched=false
for pattern in "$@"; do
if [[ $file =~ $pattern ]]; then
matched=true
fi
done
if ! $matched; then
return 0 # affected
fi
done

return 1 # not affected
}


start_pipeline() {
echo "# $*" > "$output_file"
echo "steps:" >> "$output_file"
}

command_step() {
cat >> "$output_file" <<EOF
- name: "$1"
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
EOF
}


trigger_secondary_step() {
cat >> "$output_file" <<"EOF"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
env:
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}

wait_step() {
echo " - wait" >> "$output_file"
}

all_test_steps() {
command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
wait_step

# Coverage...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Coverage in disk...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
wait_step

# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi

# Perf test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
EOF
else
annotate --style info \
"Stable-perf skipped as no relevant files were modified"
fi

# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi

# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi

# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi

# Benches...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^ci/test-bench.sh \
; then
command_step bench "ci/test-bench.sh" 30
else
annotate --style info --context test-bench \
"Bench skipped as no .rs files were modified"
fi

command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40

command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10

command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
}

pull_or_push_steps() {
command_step sanity "ci/test-sanity.sh" 5
wait_step

# Check for any .sh file changes
if affects .sh$; then
command_step shellcheck "ci/shellcheck.sh" 5
wait_step
fi

# Run the full test suite by default, skipping only if modifications are local
# to some particular areas of the tree
if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
all_test_steps
fi

# web3.js, explorer and docs changes run on Travis or Github actions...
}


if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"

annotate --style info --context release-tag \
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"

# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
exit 0
fi


if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
echo "+++ Affected files in this PR"
for file in "${affected_files[@]}"; do
echo "- $file"
done

start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"

# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"

if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
wait_step
fi
pull_or_push_steps
exit 0
fi

start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0
@@ -102,8 +102,6 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}

@@ -170,7 +168,7 @@ all_test_steps() {
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=solana"
- "queue=default"
EOF
else
annotate --style info \
@@ -223,41 +221,12 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi

# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
agents:
- "queue=solana"
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi

# Wasm support
if affects \
^ci/test-wasm.sh \
@@ -8,6 +8,11 @@ src_root="$(readlink -f "${here}/..")"
cd "${src_root}"

cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036

# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
@@ -25,10 +30,22 @@ cargo_audit_ignores=(

# generic-array: arr! macro erases lifetimes
#
# Blocked on new spl dependencies on solana-program v1.9
# due to curve25519-dalek dependency
# Blocked on libsecp256k1 releasing with upgraded dependencies
# https://github.com/paritytech/libsecp256k1/issues/66
--ignore RUSTSEC-2020-0146

# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0078

# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0079

# chrono: Potential segfault in `localtime_r` invocations
#
# Blocked due to no safe upgrade
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.59.0
FROM solanalabs/rust:1.57.0
ARG date

RUN set -x \
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.59.0
FROM rust:1.57.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

SOLANA_RUN_SH_VALIDATOR_ARGS="--full-snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
pid=$!

attempts=20
@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.59.0
stable_version=1.57.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2022-02-24
nightly_version=2021-12-03
fi

@@ -69,14 +69,20 @@ _ ci/order-crates-for-publishing.py
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic

_ "$cargo" nightly fmt --all -- --check
_ "$cargo" stable fmt --all -- --check

_ ci/do-audit.sh

{
cd programs/bpf
_ "$cargo" nightly clippy --all -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" nightly fmt --all -- --check
for project in rust/*/ ; do
echo "+++ do_bpf_checks $project"
(
cd "$project"
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" stable fmt -- --check
)
done
}

echo --- ok
@@ -21,16 +21,15 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh

# limit jobs to 4gb/thread
JOBS=$(grep MemTotal /proc/meminfo | awk '{printf "%.0f", ($2 / (4 * 1024 * 1024))}')
# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
JOBS=$((JOBS>NPROC ? NPROC : JOBS))

NPROC=$((NPROC>14 ? 14 : NPROC))

echo "Executing $testName"
case $testName in
test-stable)
_ "$cargo" stable test --jobs "$JOBS" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-bpf)
# Clear the C dependency files, if dependency moves these files are not regenerated
@@ -66,9 +65,6 @@ test-stable-bpf)
fi
done

# bpf-tools version
"$cargo_build_bpf" -V

# BPF program instruction count assertion
bpf_target_path=programs/bpf/target
_ "$cargo" stable test \
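The memory-based side of this hunk sizes `--jobs` as MemTotal (reported in kB) divided by 4 GiB per job, then clamps to the core count, instead of the old flat 14-thread cap. The same arithmetic sketched stand-alone; the 64 GiB / 32-core inputs are made-up for illustration:

```rust
// Illustrative sketch of the JOBS computation, not part of the diff.
fn jobs_for(mem_total_kb: u64, nproc: u64) -> u64 {
    // 4 GiB (expressed in kB) budgeted per concurrent compiler/test job,
    // rounded like awk's printf "%.0f"
    let by_memory = (mem_total_kb as f64 / (4.0 * 1024.0 * 1024.0)).round() as u64;
    by_memory.min(nproc) // never exceed the available cores
}

fn main() {
    // e.g. 64 GiB of RAM and 32 cores -> memory is the binding constraint: 16 jobs
    assert_eq!(jobs_for(64 * 1024 * 1024, 32), 16);
}
```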
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.10.0"
version = "1.9.7"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0", default-features = false}
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.9.7" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.7" }
solana-sdk = { path = "../sdk", version = "=1.9.7" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"
@@ -22,7 +22,7 @@ url = "2.2.2"
chrono = "0.4"

[dev-dependencies]
tempfile = "3.3.0"
tempfile = "3.2.0"

[lib]
name = "solana_clap_utils"
@@ -328,7 +328,7 @@ pub fn is_derivation<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let value = value.as_ref().replace('\'', "");
let value = value.as_ref().replace("'", "");
let mut parts = value.split('/');
let account = parts.next().unwrap();
account
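The only change in this hunk is swapping a one-character string pattern for the char pattern that clippy prefers; the behavior is identical. A quick illustrative check (the derivation-path string is made up):

```rust
// Illustrative sketch, not part of the diff.
fn main() {
    let value = "42'/0'/0'";
    // char pattern and one-character &str pattern strip the same quotes
    assert_eq!(value.replace('\'', ""), value.replace("'", ""));
    assert_eq!(value.replace('\'', ""), "42/0/0");
}
```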
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,13 +12,13 @@ documentation = "https://docs.rs/solana-cli-config"
[dependencies]
dirs-next = "2.0.0"
lazy_static = "1.4.0"
serde = "1.0.136"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_yaml = "0.8.23"
serde_yaml = "0.8.21"
url = "2.2.2"

[dev-dependencies]
anyhow = "1.0.55"
anyhow = "1.0.51"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,14 @@ console = "0.15.0"
humantime = "2.0.1"
Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.136"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.7" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.7" }
solana-client = { path = "../client", version = "=1.9.7" }
solana-sdk = { path = "../sdk", version = "=1.9.7" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.7" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }

[package.metadata.docs.rs]
@@ -393,19 +393,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
" - ".into()
"- ".into()
} else if v == max_v {
format!("{:>9} ( 0)", v)
format!("{:>8} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>9} ", v)
format!("{:>8} ", v)
}
}

writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
if validator.delinquent {
WARNING.to_string()
} else {
@@ -419,19 +419,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
)
}

@@ -441,13 +441,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",
@@ -2287,7 +2287,6 @@ impl fmt::Display for CliBlock {
let sign = if reward.lamports < 0 { "-" } else { "" };

total_rewards += reward.lamports;
#[allow(clippy::format_in_format_args)]
writeln!(
f,
" {:<44} {:^15} {:>15} {} {}",
@@ -2451,8 +2450,6 @@ pub struct CliGossipNode {
pub rpc_host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub feature_set: Option<u32>,
}

impl CliGossipNode {
@@ -2465,7 +2462,6 @@ impl CliGossipNode {
tpu_port: info.tpu.map(|addr| addr.port()),
rpc_host: info.rpc.map(|addr| addr.to_string()),
version: info.version,
feature_set: info.feature_set,
}
}
}
@@ -2491,7 +2487,7 @@ impl fmt::Display for CliGossipNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{:15} | {:44} | {:6} | {:5} | {:21} | {:8}| {}",
"{:15} | {:44} | {:6} | {:5} | {:21} | {}",
unwrap_to_string_or_none(self.ip_address.as_ref()),
self.identity_label
.as_ref()
@@ -2500,7 +2496,6 @@ impl fmt::Display for CliGossipNode {
unwrap_to_string_or_none(self.tpu_port.as_ref()),
unwrap_to_string_or_none(self.rpc_host.as_ref()),
unwrap_to_string_or_default(self.version.as_ref(), "unknown"),
unwrap_to_string_or_default(self.feature_set.as_ref(), "unknown"),
)
}
}
@@ -2515,10 +2510,10 @@ impl fmt::Display for CliGossipNodes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"IP Address | Identity \
| Gossip | TPU | RPC Address | Version | Feature Set\n\
"IP Address | Node identifier \
| Gossip | TPU | RPC Address | Version\n\
----------------+----------------------------------------------+\
--------+-------+-----------------------+---------+----------------",
--------+-------+-----------------------+----------------",
)?;
for node in self.0.iter() {
writeln!(f, "{}", node)?;
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
-version = "1.10.0"
+version = "1.9.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -17,40 +17,39 @@ criterion-stats = "0.3.0"
ctrlc = { version = "3.2.1", features = ["termination"] }
console = "0.15.0"
const_format = "0.2.22"
-crossbeam-channel = "0.5"
log = "0.4.14"
humantime = "2.0.1"
num-traits = "0.2"
pretty-hex = "0.2.1"
-reqwest = { version = "0.11.9", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-semver = "1.0.6"
-serde = "1.0.136"
+reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
+semver = "1.0.4"
+serde = "1.0.130"
serde_derive = "1.0.103"
-serde_json = "1.0.78"
-solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
-solana-cli-config = { path = "../cli-config", version = "=1.10.0" }
-solana-cli-output = { path = "../cli-output", version = "=1.10.0" }
-solana-client = { path = "../client", version = "=1.10.0" }
-solana-config-program = { path = "../programs/config", version = "=1.10.0" }
-solana-faucet = { path = "../faucet", version = "=1.10.0" }
-solana-logger = { path = "../logger", version = "=1.10.0" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
-solana_rbpf = "=0.2.24"
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
+serde_json = "1.0.72"
+solana-account-decoder = { path = "../account-decoder", version = "=1.9.7" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.7" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.7" }
+solana-cli-config = { path = "../cli-config", version = "=1.9.7" }
+solana-cli-output = { path = "../cli-output", version = "=1.9.7" }
+solana-client = { path = "../client", version = "=1.9.7" }
+solana-config-program = { path = "../programs/config", version = "=1.9.7" }
+solana-faucet = { path = "../faucet", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-program-runtime = { path = "../program-runtime", version = "=1.9.7" }
+solana_rbpf = "=0.2.23"
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.7" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"

[dev-dependencies]
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
-tempfile = "3.3.0"
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-test-validator = { path = "../test-validator", version = "=1.9.7" }
+tempfile = "3.2.0"

[[bin]]
name = "solana"
@@ -1706,7 +1706,7 @@ mod tests {
        serde_json::{json, Value},
        solana_client::{
            blockhash_query,
-           mock_sender_for_cli::SIGNATURE,
+           mock_sender::SIGNATURE,
            rpc_request::RpcRequest,
            rpc_response::{Response, RpcResponseContext},
        },

@@ -5,7 +5,6 @@ use {
    },
    clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand},
    console::style,
-   crossbeam_channel::unbounded,
    serde::{Deserialize, Serialize},
    solana_clap_utils::{
        input_parsers::*,

@@ -1351,7 +1350,7 @@ pub fn process_ping(
    fixed_blockhash: &Option<Hash>,
    print_timestamp: bool,
) -> ProcessResult {
-   let (signal_sender, signal_receiver) = unbounded();
+   let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
    ctrlc::set_handler(move || {
        let _ = signal_sender.send(());
    })
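Editor's note: the hunk above swaps a crossbeam unbounded channel for the standard library's mpsc channel in the Ctrl-C path. A minimal, self-contained sketch of the same pattern (the `ctrlc` crate API is real; the loop body is illustrative, not the CLI's actual ping loop):

use std::{sync::mpsc::channel, time::Duration};

fn main() {
    // Like crossbeam's unbounded(), std's channel() never blocks the sender.
    let (signal_sender, signal_receiver) = channel();
    ctrlc::set_handler(move || {
        // Ignore the error if the receiver side has already been dropped.
        let _ = signal_sender.send(());
    })
    .expect("install Ctrl-C handler");

    loop {
        // Wake up periodically so the loop can do work between signals.
        match signal_receiver.recv_timeout(Duration::from_millis(200)) {
            Ok(()) => break,      // Ctrl-C received
            Err(_) => { /* timed out: keep working */ }
        }
    }
}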
@@ -334,17 +334,9 @@ pub fn check_nonce_account(
    match state_from_account(nonce_account)? {
        State::Initialized(ref data) => {
            if &data.blockhash != nonce_hash {
-               Err(Error::InvalidHash {
-                   provided: *nonce_hash,
-                   expected: data.blockhash,
-               }
-               .into())
+               Err(Error::InvalidHash.into())
            } else if nonce_authority != &data.authority {
-               Err(Error::InvalidAuthority {
-                   provided: *nonce_authority,
-                   expected: data.authority,
-               }
-               .into())
+               Err(Error::InvalidAuthority.into())
            } else {
                Ok(())
            }
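Editor's note: `Err(Error::InvalidHash.into())` relies on the blanket conversion from any concrete error type into a boxed error. A standalone sketch of that mechanism (hypothetical enum name; thiserror is the crate the surrounding code already uses):

use thiserror::Error;

#[derive(Debug, Error, PartialEq)]
enum NonceError {
    #[error("query hash does not match stored hash")]
    InvalidHash,
}

fn check(ok: bool) -> Result<(), Box<dyn std::error::Error>> {
    if !ok {
        // `.into()` boxes the concrete error, mirroring `Err(Error::InvalidHash.into())`.
        return Err(NonceError::InvalidHash.into());
    }
    Ok(())
}

fn main() {
    assert!(check(false).is_err());
}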
@@ -954,22 +946,15 @@ mod tests {
            hash(b"invalid"),
            0,
        )));
-       let invalid_hash = Account::new_data(1, &data, &system_program::ID).unwrap();
+       let invalid_hash = Account::new_data(1, &data, &system_program::ID);
        if let CliError::InvalidNonce(err) =
-           check_nonce_account(&invalid_hash, &nonce_pubkey, &blockhash).unwrap_err()
+           check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash).unwrap_err()
        {
-           assert_eq!(
-               err,
-               Error::InvalidHash {
-                   provided: blockhash,
-                   expected: hash(b"invalid"),
-               }
-           );
+           assert_eq!(err, Error::InvalidHash,);
        }

        let new_nonce_authority = solana_sdk::pubkey::new_rand();
        let data = Versions::new_current(State::Initialized(nonce::state::Data::new(
            new_nonce_authority,
            solana_sdk::pubkey::new_rand(),
            blockhash,
            0,
        )));

@@ -977,13 +962,7 @@ mod tests {
        if let CliError::InvalidNonce(err) =
            check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash).unwrap_err()
        {
-           assert_eq!(
-               err,
-               Error::InvalidAuthority {
-                   provided: nonce_pubkey,
-                   expected: new_nonce_authority,
-               }
-           );
+           assert_eq!(err, Error::InvalidAuthority,);
        }

        let data = Versions::new_current(State::Uninitialized);
@@ -42,7 +42,6 @@ use {
        system_instruction::{self, SystemError},
        system_program,
        transaction::{Transaction, TransactionError},
-       transaction_context::TransactionContext,
    },
    std::{
        fs::File,

@@ -1991,8 +1990,7 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::e
    let mut program_data = Vec::new();
    file.read_to_end(&mut program_data)
        .map_err(|err| format!("Unable to read program file: {}", err))?;
-   let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1);
-   let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]);
+   let mut invoke_context = InvokeContext::new_mock(&[], &[]);

    // Verify the program
    Executable::<BpfError, ThisInstructionMeter>::from_elf(
@@ -35,8 +35,7 @@ use {
        transaction::Transaction,
    },
    solana_vote_program::{
-       vote_error::VoteError,
-       vote_instruction::{self, withdraw},
+       vote_instruction::{self, withdraw, VoteError},
        vote_state::{VoteAuthorize, VoteInit, VoteState},
    },
    std::sync::Arc,
@@ -39,7 +39,7 @@ use {
        system_program,
        transaction::Transaction,
    },
-   solana_transaction_status::{Encodable, EncodedTransaction, UiTransactionEncoding},
+   solana_transaction_status::{EncodedTransaction, UiTransactionEncoding},
    std::{fmt::Write as FmtWrite, fs::File, io::Write, sync::Arc},
};

@@ -273,14 +273,11 @@ impl WalletSubCommands for App<'_, '_> {
}

fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
-   matches.value_of(arg_name).and_then(|v| {
-       let upper = v.to_ascii_uppercase();
-       match upper.as_str() {
-           "NONCE" | "SYSTEM" => Some(system_program::id()),
-           "STAKE" => Some(stake::program::id()),
-           "VOTE" => Some(solana_vote_program::id()),
-           _ => pubkey_of(matches, arg_name),
-       }
+   matches.value_of(arg_name).and_then(|v| match v {
+       "NONCE" => Some(system_program::id()),
+       "STAKE" => Some(stake::program::id()),
+       "VOTE" => Some(solana_vote_program::id()),
+       _ => pubkey_of(matches, arg_name),
    })
}
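Editor's note: the master side upper-cases the argument before matching (so "nonce" and "NONCE" both resolve, and "SYSTEM" is also accepted), while the v1.9 side matches the raw string. A standalone sketch of the behavioral difference (hypothetical helper, return values are labels rather than program ids):

fn resolve_case_insensitive(v: &str) -> &'static str {
    // Mirrors the master-side logic: normalize case first.
    match v.to_ascii_uppercase().as_str() {
        "NONCE" | "SYSTEM" => "system program",
        "STAKE" => "stake program",
        "VOTE" => "vote program",
        _ => "treated as a pubkey",
    }
}

fn main() {
    assert_eq!(resolve_case_insensitive("nonce"), "system program");
    // The v1.9 variant (`match v { "NONCE" => ... }`) would fall through to
    // pubkey parsing for "nonce", because the comparison is exact.
}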
@@ -567,8 +564,10 @@ pub fn process_confirm(
                        .transaction
                        .decode()
                        .expect("Successful decode");
-                   let json_transaction =
-                       decoded_transaction.encode(UiTransactionEncoding::Json);
+                   let json_transaction = EncodedTransaction::encode(
+                       decoded_transaction.clone(),
+                       UiTransactionEncoding::Json,
+                   );

                    transaction = Some(CliTransaction {
                        transaction: json_transaction,

@@ -610,7 +609,7 @@ pub fn process_decode_transaction(config: &CliConfig, transaction: &Transaction)
    let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction);
    let decode_transaction = CliTransaction {
        decoded_transaction: transaction.clone(),
-       transaction: transaction.encode(UiTransactionEncoding::Json),
+       transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json),
        meta: None,
        block_time: None,
        slot: None,
@@ -78,7 +78,7 @@ fn test_cli_program_deploy_non_upgradeable() {
    assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
    assert_eq!(account0.owner, bpf_loader::id());
    assert!(account0.executable);
-   let mut file = File::open(noop_path.to_str().unwrap()).unwrap();
+   let mut file = File::open(noop_path.to_str().unwrap().to_string()).unwrap();
    let mut elf = Vec::new();
    file.read_to_end(&mut elf).unwrap();
    assert_eq!(account0.data, elf);
@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
-version = "1.10.0"
+version = "1.9.7"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

@@ -10,28 +10,26 @@ documentation = "https://docs.rs/solana-client-test"
edition = "2021"

[dependencies]
-futures-util = "0.3.21"
-serde_json = "1.0.78"
-serial_test = "0.6.0"
-solana-client = { path = "../client", version = "=1.10.0" }
-solana-ledger = { path = "../ledger", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.0" }
-solana-metrics = { path = "../metrics", version = "=1.10.0" }
-solana-perf = { path = "../perf", version = "=1.10.0" }
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
-solana-rpc = { path = "../rpc", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+serde_json = "1.0.72"
+serial_test = "0.5.1"
+solana-client = { path = "../client", version = "=1.9.7" }
+solana-ledger = { path = "../ledger", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.7" }
+solana-metrics = { path = "../metrics", version = "=1.9.7" }
+solana-perf = { path = "../perf", version = "=1.9.7" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.7" }
+solana-rpc = { path = "../rpc", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-test-validator = { path = "../test-validator", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
systemstat = "0.1.10"
tokio = { version = "1", features = ["full"] }

[dev-dependencies]
-solana-logger = { path = "../logger", version = "=1.10.0" }
+solana-logger = { path = "../logger", version = "=1.9.7" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,16 +1,14 @@
use {
-   futures_util::StreamExt,
    serde_json::{json, Value},
    serial_test::serial,
    solana_client::{
-       nonblocking,
        pubsub_client::PubsubClient,
        rpc_client::RpcClient,
        rpc_config::{
            RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
            RpcProgramAccountsConfig,
        },
-       rpc_response::SlotInfo,
+       rpc_response::{RpcBlockUpdate, SlotInfo},
    },
    solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
    solana_rpc::{

@@ -36,7 +34,7 @@ use {
    },
    solana_streamer::socket::SocketAddrSpace,
    solana_test_validator::TestValidator,
-   solana_transaction_status::{ConfirmedBlock, TransactionDetails, UiTransactionEncoding},
+   solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
    std::{
        collections::HashSet,
        net::{IpAddr, SocketAddr},

@@ -214,7 +212,6 @@ fn test_block_subscription() {
        ..
    } = create_genesis_config(10_000);
    let bank = Bank::new_for_tests(&genesis_config);
-   let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));

    // setup Blockstore

@@ -228,8 +225,6 @@ fn test_block_subscription() {
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
-   bank.transfer(rent_exempt_amount, &alice, &keypair2.pubkey())
-       .unwrap();
    let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
        vec![&alice, &keypair1, &keypair2, &keypair3],
        0,

@@ -280,16 +275,23 @@ fn test_block_subscription() {
        let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
        match maybe_actual {
            Ok(actual) => {
-               let versioned_block = blockstore.get_complete_block(slot, false).unwrap();
-               let legacy_block = ConfirmedBlock::from(versioned_block)
-                   .into_legacy_block()
-                   .unwrap();
-               let block = legacy_block.configure(
+               let complete_block = blockstore.get_complete_block(slot, false).unwrap();
+               let block = complete_block.clone().configure(
                    UiTransactionEncoding::Json,
                    TransactionDetails::Signatures,
                    false,
                );
-               assert_eq!(actual.value.slot, slot);
+               let expected = RpcBlockUpdate {
+                   slot,
+                   block: Some(block),
+                   err: None,
+               };
+               let block = complete_block.configure(
+                   UiTransactionEncoding::Json,
+                   TransactionDetails::Signatures,
+                   false,
+               );
+               assert_eq!(actual.value.slot, expected.slot);
                assert!(block.eq(&actual.value.block.unwrap()));
            }
            Err(e) => {
@@ -511,86 +513,3 @@ fn test_slot_subscription() {

    assert_eq!(errors, [].to_vec());
}
-
-#[tokio::test]
-#[serial]
-async fn test_slot_subscription_async() {
-   let sync_service = Arc::new(AtomicU64::new(0));
-   let sync_client = Arc::clone(&sync_service);
-   fn wait_until(atomic: &Arc<AtomicU64>, value: u64) {
-       while atomic.load(Ordering::Relaxed) != value {
-           sleep(Duration::from_millis(1))
-       }
-   }
-
-   let pubsub_addr = SocketAddr::new(
-       IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
-       rpc_port::DEFAULT_RPC_PUBSUB_PORT,
-   );
-
-   tokio::task::spawn_blocking(move || {
-       let exit = Arc::new(AtomicBool::new(false));
-       let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
-       let bank = Bank::new_for_tests(&genesis_config);
-       let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
-       let optimistically_confirmed_bank =
-           OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
-       let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
-       let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
-           &exit,
-           max_complete_transaction_status_slot,
-           bank_forks,
-           Arc::new(RwLock::new(BlockCommitmentCache::default())),
-           optimistically_confirmed_bank,
-       ));
-       let (trigger, pubsub_service) =
-           PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr);
-       sleep(Duration::from_millis(100));
-       sync_service.store(1, Ordering::Relaxed);
-
-       wait_until(&sync_service, 2);
-       subscriptions.notify_slot(1, 0, 0);
-       sync_service.store(3, Ordering::Relaxed);
-
-       wait_until(&sync_service, 4);
-       subscriptions.notify_slot(2, 1, 1);
-       sync_service.store(5, Ordering::Relaxed);
-
-       wait_until(&sync_service, 6);
-       exit.store(true, Ordering::Relaxed);
-       trigger.cancel();
-       pubsub_service.close().unwrap();
-   });
-
-   wait_until(&sync_client, 1);
-   let url = format!("ws://0.0.0.0:{}/", pubsub_addr.port());
-   let pubsub_client = nonblocking::pubsub_client::PubsubClient::new(&url)
-       .await
-       .unwrap();
-   let (mut notifications, unsubscribe) = pubsub_client.slot_subscribe().await.unwrap();
-   sync_client.store(2, Ordering::Relaxed);
-
-   wait_until(&sync_client, 3);
-   assert_eq!(
-       tokio::time::timeout(Duration::from_millis(25), notifications.next()).await,
-       Ok(Some(SlotInfo {
-           slot: 1,
-           parent: 0,
-           root: 0,
-       }))
-   );
-   sync_client.store(4, Ordering::Relaxed);
-
-   wait_until(&sync_client, 5);
-   assert_eq!(
-       tokio::time::timeout(Duration::from_millis(25), notifications.next()).await,
-       Ok(Some(SlotInfo {
-           slot: 2,
-           parent: 1,
-           root: 1,
-       }))
-   );
-   sync_client.store(6, Ordering::Relaxed);
-
-   unsubscribe().await;
-}
@@ -1,6 +1,6 @@
[package]
name = "solana-client"
-version = "1.10.0"
+version = "1.9.7"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

@@ -10,42 +10,37 @@ license = "Apache-2.0"
edition = "2021"

[dependencies]
-async-trait = "0.1.52"
base64 = "0.13.0"
bincode = "1.3.3"
bs58 = "0.4.0"
clap = "2.33.0"
-crossbeam-channel = "0.5"
-futures-util = "0.3.21"
indicatif = "0.16.2"
jsonrpc-core = "18.0.0"
log = "0.4.14"
rayon = "1.5.1"
-reqwest = { version = "0.11.9", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-semver = "1.0.6"
-serde = "1.0.136"
+reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
+semver = "1.0.4"
+serde = "1.0.130"
serde_derive = "1.0.103"
-serde_json = "1.0.78"
-solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
-solana-faucet = { path = "../faucet", version = "=1.10.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
+serde_json = "1.0.72"
+solana-account-decoder = { path = "../account-decoder", version = "=1.9.7" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.7" }
+solana-faucet = { path = "../faucet", version = "=1.9.7" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.7" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
-tokio-stream = "0.1.8"
-tokio-tungstenite = { version = "0.17.1", features = ["rustls-tls-webpki-roots"] }
-tungstenite = { version = "0.17.2", features = ["rustls-tls-webpki-roots"] }
+tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
url = "2.2.2"

[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
-solana-logger = { path = "../logger", version = "=1.10.0" }
+solana-logger = { path = "../logger", version = "=1.9.7" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,4 +1,4 @@
-//! Nonblocking [`RpcSender`] over HTTP.
+//! The standard [`RpcSender`] over HTTP.

use {
    crate::{

@@ -8,7 +8,6 @@ use {
        rpc_response::RpcSimulateTransactionResult,
        rpc_sender::*,
    },
-   async_trait::async_trait,
    log::*,
    reqwest::{
        self,

@@ -26,13 +25,13 @@ use {
};

pub struct HttpSender {
-   client: Arc<reqwest::Client>,
+   client: Arc<reqwest::blocking::Client>,
    url: String,
    request_id: AtomicU64,
    stats: RwLock<RpcTransportStats>,
}

-/// Nonblocking [`RpcSender`] over HTTP.
+/// The standard [`RpcSender`] over HTTP.
impl HttpSender {
    /// Create an HTTP RPC sender.
    ///

@@ -46,11 +45,15 @@ impl HttpSender {
    ///
    /// The URL is an HTTP URL, usually for port 8899.
    pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
+       // `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the
+       // request to a different tokio thread to avoid this
        let client = Arc::new(
-           reqwest::Client::builder()
-               .timeout(timeout)
-               .build()
-               .expect("build rpc client"),
+           tokio::task::block_in_place(move || {
+               reqwest::blocking::Client::builder()
+                   .timeout(timeout)
+                   .build()
+           })
+           .expect("build rpc client"),
        );

        Self {

@@ -63,9 +66,9 @@ impl HttpSender {
}

#[derive(Deserialize, Debug)]
-pub(crate) struct RpcErrorObject {
-   pub code: i64,
-   pub message: String,
+struct RpcErrorObject {
+   code: i64,
+   message: String,
}

struct StatsUpdater<'a> {

@@ -97,17 +100,12 @@ impl<'a> Drop for StatsUpdater<'a> {
    }
}

-#[async_trait]
impl RpcSender for HttpSender {
    fn get_transport_stats(&self) -> RpcTransportStats {
        self.stats.read().unwrap().clone()
    }

-   async fn send(
-       &self,
-       request: RpcRequest,
-       params: serde_json::Value,
-   ) -> Result<serde_json::Value> {
+   fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
        let mut stats_updater = StatsUpdater::new(&self.stats);

        let request_id = self.request_id.fetch_add(1, Ordering::Relaxed);

@@ -115,15 +113,18 @@ impl RpcSender for HttpSender {

        let mut too_many_requests_retries = 5;
        loop {
+           // `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the
+           // request to a different tokio thread to avoid this
            let response = {
                let client = self.client.clone();
                let request_json = request_json.clone();
-               client
-                   .post(&self.url)
-                   .header(CONTENT_TYPE, "application/json")
-                   .body(request_json)
-                   .send()
-                   .await
+               tokio::task::block_in_place(move || {
+                   client
+                       .post(&self.url)
+                       .header(CONTENT_TYPE, "application/json")
+                       .body(request_json)
+                       .send()
+               })
            }?;

            if !response.status().is_success() {

@@ -154,7 +155,8 @@ impl RpcSender for HttpSender {
                return Err(response.error_for_status().unwrap_err().into());
            }

-           let mut json = response.json::<serde_json::Value>().await?;
+           let mut json =
+               tokio::task::block_in_place(move || response.json::<serde_json::Value>())?;
            if json["error"].is_object() {
                return match serde_json::from_value::<RpcErrorObject>(json["error"].clone()) {
                    Ok(rpc_error_object) => {

@@ -206,16 +208,14 @@ mod tests {
    #[tokio::test(flavor = "multi_thread")]
    async fn http_sender_on_tokio_multi_thread() {
        let http_sender = HttpSender::new("http://localhost:1234".to_string());
-       let _ = http_sender
-           .send(RpcRequest::GetVersion, serde_json::Value::Null)
-           .await;
+       let _ = http_sender.send(RpcRequest::GetVersion, serde_json::Value::Null);
    }

    #[tokio::test(flavor = "current_thread")]
-   async fn http_sender_on_tokio_current_thread() {
-       let http_sender = HttpSender::new("http://localhost:1234".to_string());
-       let _ = http_sender
-           .send(RpcRequest::GetVersion, serde_json::Value::Null)
-           .await;
+   #[should_panic(expected = "can call blocking only when running on the multi-threaded runtime")]
+   async fn http_sender_ontokio_current_thread_should_panic() {
+       // RpcClient::new() will panic in the tokio current-thread runtime due to `tokio::task::block_in_place()` usage, and there
+       // doesn't seem to be a way to detect whether the tokio runtime is multi_thread or current_thread...
+       let _ = HttpSender::new("http://localhost:1234".to_string());
    }
}
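Editor's note: the v1.9 sender wraps blocking reqwest calls in `tokio::task::block_in_place`, which is only legal on tokio's multi-threaded runtime; that is exactly the panic the new `#[should_panic]` test pins down. A minimal, self-contained reproduction (assumes tokio with the test macros as a dev-dependency):

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn block_in_place_ok_on_multi_thread() {
    // Fine: another worker can take over while this one runs blocking code.
    let v = tokio::task::block_in_place(|| 42);
    assert_eq!(v, 42);
}

#[tokio::test] // defaults to the current-thread flavor
#[should_panic(expected = "can call blocking only when running on the multi-threaded runtime")]
async fn block_in_place_panics_on_current_thread() {
    // Panics: a current-thread runtime has no spare worker to hand off to.
    let _ = tokio::task::block_in_place(|| 42);
}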
@@ -4,9 +4,8 @@ extern crate serde_derive;

pub mod blockhash_query;
pub mod client_error;
-pub(crate) mod http_sender;
-pub(crate) mod mock_sender;
-pub mod nonblocking;
+pub mod http_sender;
+pub mod mock_sender;
pub mod nonce_utils;
pub mod perf_utils;
pub mod pubsub_client;

@@ -18,15 +17,8 @@ pub mod rpc_deprecated_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
-pub(crate) mod rpc_sender;
+pub mod rpc_sender;
pub mod spinner;
pub mod thin_client;
pub mod tpu_client;
pub mod transaction_executor;
-
-pub mod mock_sender_for_cli {
-   /// Magic `SIGNATURE` value used by `solana-cli` unit tests.
-   /// Please don't use this constant.
-   pub const SIGNATURE: &str =
-       "43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";
-}
@@ -1,4 +1,4 @@
-//! A nonblocking [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient).
+//! An [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient).

use {
    crate::{

@@ -15,7 +15,6 @@ use {
        },
        rpc_sender::*,
    },
-   async_trait::async_trait,
    serde_json::{json, Number, Value},
    solana_account_decoder::{UiAccount, UiAccountEncoding},
    solana_sdk::{

@@ -31,7 +30,7 @@ use {
        transaction::{self, Transaction, TransactionError},
    },
    solana_transaction_status::{
-       EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, EncodedTransaction,
+       EncodedConfirmedBlock, EncodedConfirmedTransaction, EncodedTransaction,
        EncodedTransactionWithStatusMeta, Rewards, TransactionConfirmationStatus,
        TransactionStatus, UiCompiledInstruction, UiMessage, UiRawMessage, UiTransaction,
        UiTransactionEncoding, UiTransactionStatusMeta,

@@ -41,6 +40,8 @@ use {
};

pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
+pub const SIGNATURE: &str =
+   "43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";

pub type Mocks = HashMap<RpcRequest, Value>;
pub struct MockSender {

@@ -86,17 +87,12 @@ impl MockSender {
    }
}

-#[async_trait]
impl RpcSender for MockSender {
    fn get_transport_stats(&self) -> RpcTransportStats {
        RpcTransportStats::default()
    }

-   async fn send(
-       &self,
-       request: RpcRequest,
-       params: serde_json::Value,
-   ) -> Result<serde_json::Value> {
+   fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
        if let Some(value) = self.mocks.write().unwrap().remove(&request) {
            return Ok(value);
        }

@@ -189,7 +185,7 @@ impl RpcSender for MockSender {
                value: statuses,
            })?
        }
-       "getTransaction" => serde_json::to_value(EncodedConfirmedTransactionWithStatusMeta {
+       "getTransaction" => serde_json::to_value(EncodedConfirmedTransaction {
            slot: 2,
            transaction: EncodedTransactionWithStatusMeta {
                transaction: EncodedTransaction::Json(

@@ -390,7 +386,7 @@ impl RpcSender for MockSender {
            "getBlocksWithLimit" => serde_json::to_value(vec![1, 2, 3])?,
            "getSignaturesForAddress" => {
                serde_json::to_value(vec![RpcConfirmedTransactionStatusWithSignature {
-                   signature: crate::mock_sender_for_cli::SIGNATURE.to_string(),
+                   signature: SIGNATURE.to_string(),
                    slot: 123,
                    err: None,
                    memo: None,

@@ -439,7 +435,7 @@ impl RpcSender for MockSender {
                value: vec![Value::Null, Value::Null]
            })?,
            "getProgramAccounts" => {
-               let pubkey = Pubkey::from_str(PUBKEY).unwrap();
+               let pubkey = Pubkey::from_str(&PUBKEY.to_string()).unwrap();
                let account = Account {
                    lamports: 1_000_000,
                    data: vec![],
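Editor's note: with `mock_sender` public again, tests can build an RpcClient whose responses are canned. A hedged sketch against the 1.9-era API (the constructors `RpcClient::new_mock` and `RpcClient::new_mock_with_mocks` and the `Mocks` alias follow the diff above; the GetBalance response shape is an assumption about the Response<u64> wire format):

use serde_json::json;
use solana_client::{mock_sender::Mocks, rpc_client::RpcClient, rpc_request::RpcRequest};

fn main() {
    // "succeeds" selects MockSender's happy-path canned responses.
    let client = RpcClient::new_mock("succeeds".to_string());
    let _ = client.get_transaction_count();

    // Individual requests can be overridden with explicit JSON values.
    let mut mocks = Mocks::default();
    mocks.insert(
        RpcRequest::GetBalance,
        json!({"context": {"slot": 1}, "value": 50}), // assumed Response<u64> shape
    );
    let client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
    assert_eq!(client.get_balance(&solana_sdk::pubkey::new_rand()).unwrap(), 50);
}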
@@ -1,2 +0,0 @@
-pub mod pubsub_client;
-pub mod rpc_client;
@@ -1,341 +0,0 @@
-use {
-   crate::{
-       http_sender::RpcErrorObject,
-       rpc_config::{
-           RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
-           RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
-           RpcTransactionLogsFilter,
-       },
-       rpc_response::{
-           Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
-           RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
-       },
-   },
-   futures_util::{
-       future::{ready, BoxFuture, FutureExt},
-       sink::SinkExt,
-       stream::{BoxStream, StreamExt},
-   },
-   log::*,
-   serde::de::DeserializeOwned,
-   serde_json::{json, Map, Value},
-   solana_account_decoder::UiAccount,
-   solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature},
-   std::collections::BTreeMap,
-   thiserror::Error,
-   tokio::{
-       net::TcpStream,
-       sync::{mpsc, oneshot},
-       task::JoinHandle,
-       time::{sleep, Duration},
-   },
-   tokio_stream::wrappers::UnboundedReceiverStream,
-   tokio_tungstenite::{
-       connect_async,
-       tungstenite::{
-           protocol::frame::{coding::CloseCode, CloseFrame},
-           Message,
-       },
-       MaybeTlsStream, WebSocketStream,
-   },
-   url::Url,
-};
-
-pub type PubsubClientResult<T = ()> = Result<T, PubsubClientError>;
-
-#[derive(Debug, Error)]
-pub enum PubsubClientError {
-   #[error("url parse error")]
-   UrlParseError(#[from] url::ParseError),
-
-   #[error("unable to connect to server")]
-   ConnectionError(tokio_tungstenite::tungstenite::Error),
-
-   #[error("websocket error")]
-   WsError(#[from] tokio_tungstenite::tungstenite::Error),
-
-   #[error("connection closed (({0})")]
-   ConnectionClosed(String),
-
-   #[error("json parse error")]
-   JsonParseError(#[from] serde_json::error::Error),
-
-   #[error("subscribe failed: {reason}")]
-   SubscribeFailed { reason: String, message: String },
-}
-
-type UnsubscribeFn = Box<dyn FnOnce() -> BoxFuture<'static, ()> + Send>;
-type SubscribeResponseMsg =
-   Result<(mpsc::UnboundedReceiver<Value>, UnsubscribeFn), PubsubClientError>;
-type SubscribeRequestMsg = (String, Value, oneshot::Sender<SubscribeResponseMsg>);
-type SubscribeResult<'a, T> = PubsubClientResult<(BoxStream<'a, T>, UnsubscribeFn)>;
-
-#[derive(Debug)]
-pub struct PubsubClient {
-   subscribe_tx: mpsc::UnboundedSender<SubscribeRequestMsg>,
-   shutdown_tx: oneshot::Sender<()>,
-   ws: JoinHandle<PubsubClientResult>,
-}
-
-impl PubsubClient {
-   pub async fn new(url: &str) -> PubsubClientResult<Self> {
-       let url = Url::parse(url)?;
-       let (ws, _response) = connect_async(url)
-           .await
-           .map_err(PubsubClientError::ConnectionError)?;
-
-       let (subscribe_tx, subscribe_rx) = mpsc::unbounded_channel();
-       let (shutdown_tx, shutdown_rx) = oneshot::channel();
-
-       Ok(Self {
-           subscribe_tx,
-           shutdown_tx,
-           ws: tokio::spawn(PubsubClient::run_ws(ws, subscribe_rx, shutdown_rx)),
-       })
-   }
-
-   pub async fn shutdown(self) -> PubsubClientResult {
-       let _ = self.shutdown_tx.send(());
-       self.ws.await.unwrap() // WS future should not be cancelled or panicked
-   }
-
-   async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T>
-   where
-       T: DeserializeOwned + Send + 'a,
-   {
-       let (response_tx, response_rx) = oneshot::channel();
-       self.subscribe_tx
-           .send((operation.to_string(), params, response_tx))
-           .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))?;
-
-       let (notifications, unsubscribe) = response_rx
-           .await
-           .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))??;
-       Ok((
-           UnboundedReceiverStream::new(notifications)
-               .filter_map(|value| ready(serde_json::from_value::<T>(value).ok()))
-               .boxed(),
-           unsubscribe,
-       ))
-   }
-
-   pub async fn account_subscribe(
-       &self,
-       pubkey: &Pubkey,
-       config: Option<RpcAccountInfoConfig>,
-   ) -> SubscribeResult<'_, RpcResponse<UiAccount>> {
-       let params = json!([pubkey.to_string(), config]);
-       self.subscribe("account", params).await
-   }
-
-   pub async fn block_subscribe(
-       &self,
-       filter: RpcBlockSubscribeFilter,
-       config: Option<RpcBlockSubscribeConfig>,
-   ) -> SubscribeResult<'_, RpcResponse<RpcBlockUpdate>> {
-       self.subscribe("block", json!([filter, config])).await
-   }
-
-   pub async fn logs_subscribe(
-       &self,
-       filter: RpcTransactionLogsFilter,
-       config: RpcTransactionLogsConfig,
-   ) -> SubscribeResult<'_, RpcResponse<RpcLogsResponse>> {
-       self.subscribe("logs", json!([filter, config])).await
-   }
-
-   pub async fn program_subscribe(
-       &self,
-       pubkey: &Pubkey,
-       config: Option<RpcProgramAccountsConfig>,
-   ) -> SubscribeResult<'_, RpcResponse<RpcKeyedAccount>> {
-       let params = json!([pubkey.to_string(), config]);
-       self.subscribe("program", params).await
-   }
-
-   pub async fn vote_subscribe(&self) -> SubscribeResult<'_, RpcVote> {
-       self.subscribe("vote", json!([])).await
-   }
-
-   pub async fn root_subscribe(&self) -> SubscribeResult<'_, Slot> {
-       self.subscribe("root", json!([])).await
-   }
-
-   pub async fn signature_subscribe(
-       &self,
-       signature: &Signature,
-       config: Option<RpcSignatureSubscribeConfig>,
-   ) -> SubscribeResult<'_, RpcResponse<RpcSignatureResult>> {
-       let params = json!([signature.to_string(), config]);
-       self.subscribe("signature", params).await
-   }
-
-   pub async fn slot_subscribe(&self) -> SubscribeResult<'_, SlotInfo> {
-       self.subscribe("slot", json!([])).await
-   }
-
-   pub async fn slot_updates_subscribe(&self) -> SubscribeResult<'_, SlotUpdate> {
-       self.subscribe("slotsUpdates", json!([])).await
-   }
-
-   async fn run_ws(
-       mut ws: WebSocketStream<MaybeTlsStream<TcpStream>>,
-       mut subscribe_rx: mpsc::UnboundedReceiver<SubscribeRequestMsg>,
-       mut shutdown_rx: oneshot::Receiver<()>,
-   ) -> PubsubClientResult {
-       let mut request_id: u64 = 0;
-
-       let mut requests_subscribe = BTreeMap::new();
-       let mut requests_unsubscribe = BTreeMap::<u64, oneshot::Sender<()>>::new();
-       let mut subscriptions = BTreeMap::new();
-       let (unsubscribe_tx, mut unsubscribe_rx) = mpsc::unbounded_channel();
-
-       loop {
-           tokio::select! {
-               // Send close on shutdown signal
-               _ = (&mut shutdown_rx) => {
-                   let frame = CloseFrame { code: CloseCode::Normal, reason: "".into() };
-                   ws.send(Message::Close(Some(frame))).await?;
-                   ws.flush().await?;
-                   break;
-               },
-               // Send `Message::Ping` each 10s if no any other communication
-               () = sleep(Duration::from_secs(10)) => {
-                   ws.send(Message::Ping(Vec::new())).await?;
-               },
-               // Read message for subscribe
-               Some((operation, params, response_tx)) = subscribe_rx.recv() => {
-                   request_id += 1;
-                   let method = format!("{}Subscribe", operation);
-                   let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":params}).to_string();
-                   ws.send(Message::Text(text)).await?;
-                   requests_subscribe.insert(request_id, (operation, response_tx));
-               },
-               // Read message for unsubscribe
-               Some((operation, sid, response_tx)) = unsubscribe_rx.recv() => {
-                   subscriptions.remove(&sid);
-                   request_id += 1;
-                   let method = format!("{}Unsubscribe", operation);
-                   let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":[sid]}).to_string();
-                   ws.send(Message::Text(text)).await?;
-                   requests_unsubscribe.insert(request_id, response_tx);
-               },
-               // Read incoming WebSocket message
-               next_msg = ws.next() => {
-                   let msg = match next_msg {
-                       Some(msg) => msg?,
-                       None => break,
-                   };
-                   trace!("ws.next(): {:?}", &msg);
-
-                   // Get text from the message
-                   let text = match msg {
-                       Message::Text(text) => text,
-                       Message::Binary(_data) => continue, // Ignore
-                       Message::Ping(data) => {
-                           ws.send(Message::Pong(data)).await?;
-                           continue
-                       },
-                       Message::Pong(_data) => continue,
-                       Message::Close(_frame) => break,
-                       Message::Frame(_frame) => continue,
-                   };
-
-                   let mut json: Map<String, Value> = serde_json::from_str(&text)?;
-
-                   // Subscribe/Unsubscribe response, example:
-                   // `{"jsonrpc":"2.0","result":5308752,"id":1}`
-                   if let Some(id) = json.get("id") {
-                       let id = id.as_u64().ok_or_else(|| {
-                           PubsubClientError::SubscribeFailed { reason: "invalid `id` field".into(), message: text.clone() }
-                       })?;
-
-                       let err = json.get("error").map(|error_object| {
-                           match serde_json::from_value::<RpcErrorObject>(error_object.clone()) {
-                               Ok(rpc_error_object) => {
-                                   format!("{} ({})", rpc_error_object.message, rpc_error_object.code)
-                               }
-                               Err(err) => format!(
-                                   "Failed to deserialize RPC error response: {} [{}]",
-                                   serde_json::to_string(error_object).unwrap(),
-                                   err
-                               )
-                           }
-                       });
-
-                       if let Some(response_tx) = requests_unsubscribe.remove(&id) {
-                           let _ = response_tx.send(()); // do not care if receiver is closed
-                       } else if let Some((operation, response_tx)) = requests_subscribe.remove(&id) {
-                           match err {
-                               Some(reason) => {
-                                   let _ = response_tx.send(Err(PubsubClientError::SubscribeFailed { reason, message: text.clone()}));
-                               },
-                               None => {
-                                   // Subscribe Id
-                                   let sid = json.get("result").and_then(Value::as_u64).ok_or_else(|| {
-                                       PubsubClientError::SubscribeFailed { reason: "invalid `result` field".into(), message: text.clone() }
-                                   })?;
-
-                                   // Create notifications channel and unsubscribe function
-                                   let (notifications_tx, notifications_rx) = mpsc::unbounded_channel();
-                                   let unsubscribe_tx = unsubscribe_tx.clone();
-                                   let unsubscribe = Box::new(move || async move {
-                                       let (response_tx, response_rx) = oneshot::channel();
-                                       // do nothing if ws already closed
-                                       if unsubscribe_tx.send((operation, sid, response_tx)).is_ok() {
-                                           let _ = response_rx.await; // channel can be closed only if ws is closed
-                                       }
-                                   }.boxed());
-
-                                   if response_tx.send(Ok((notifications_rx, unsubscribe))).is_err() {
-                                       break;
-                                   }
-                                   subscriptions.insert(sid, notifications_tx);
-                               }
-                           }
-                       } else {
-                           error!("Unknown request id: {}", id);
-                           break;
-                       }
-                       continue;
-                   }
-
-                   // Notification, example:
-                   // `{"jsonrpc":"2.0","method":"logsNotification","params":{"result":{...},"subscription":3114862}}`
-                   if let Some(Value::Object(params)) = json.get_mut("params") {
-                       if let Some(sid) = params.get("subscription").and_then(Value::as_u64) {
-                           let mut unsubscribe_required = false;
-
-                           if let Some(notifications_tx) = subscriptions.get(&sid) {
-                               if let Some(result) = params.remove("result") {
-                                   if notifications_tx.send(result).is_err() {
-                                       unsubscribe_required = true;
-                                   }
-                               }
-                           } else {
-                               unsubscribe_required = true;
-                           }
-
-                           if unsubscribe_required {
-                               if let Some(Value::String(method)) = json.remove("method") {
-                                   if let Some(operation) = method.strip_suffix("Notification") {
-                                       let (response_tx, _response_rx) = oneshot::channel();
-                                       let _ = unsubscribe_tx.send((operation.to_string(), sid, response_tx));
-                                   }
-                               }
-                           }
-                       }
-                   }
-               }
-           }
-       }
-
-       Ok(())
-   }
-}
-
-#[cfg(test)]
-mod tests {
-   // see client-test/test/client.rs
-}
(File diff suppressed because it is too large)
@@ -4,7 +4,6 @@ use {
        account::{Account, ReadableAccount},
        account_utils::StateMut,
        commitment_config::CommitmentConfig,
-       hash::Hash,
        nonce::{
            state::{Data, Versions},
            State,

@@ -22,10 +21,10 @@ pub enum Error {
    InvalidAccountData,
    #[error("unexpected account data size")]
    UnexpectedDataSize,
-   #[error("provided hash ({provided}) does not match nonce hash ({expected})")]
-   InvalidHash { provided: Hash, expected: Hash },
-   #[error("provided authority ({provided}) does not match nonce authority ({expected})")]
-   InvalidAuthority { provided: Pubkey, expected: Pubkey },
+   #[error("query hash does not match stored hash")]
+   InvalidHash,
+   #[error("query authority does not match account authority")]
+   InvalidAuthority,
    #[error("invalid state for requested operation")]
    InvalidStateForOperation,
    #[error("client error: {0}")]
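Editor's note: the master-side variants embed the offending values in the error message via thiserror's field interpolation, which the v1.9 unit variants cannot do. A standalone illustration of the difference (hypothetical enum names; String stands in for Hash to keep the sketch self-contained):

use thiserror::Error;

#[derive(Debug, Error)]
enum Rich {
    #[error("provided hash ({provided}) does not match nonce hash ({expected})")]
    InvalidHash { provided: String, expected: String },
}

#[derive(Debug, Error)]
enum Plain {
    #[error("query hash does not match stored hash")]
    InvalidHash,
}

fn main() {
    let rich = Rich::InvalidHash { provided: "1111".into(), expected: "2222".into() };
    // prints "provided hash (1111) does not match nonce hash (2222)"
    println!("{}", rich);
    // prints "query hash does not match stored hash"
    println!("{}", Plain::InvalidHash);
}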
@@ -43,16 +43,18 @@ pub fn sample_txs<T>(
        total_elapsed = start_time.elapsed();
        let elapsed = now.elapsed();
        now = Instant::now();
-       let mut txs =
-           match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
-               Err(e) => {
-                   // ThinClient with multiple options should pick a better one now.
-                   info!("Couldn't get transaction count {:?}", e);
-                   sleep(Duration::from_secs(sample_period));
-                   continue;
-               }
-               Ok(tx_count) => tx_count,
-           };
+       let mut txs;
+       match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
+           Err(e) => {
+               // ThinClient with multiple options should pick a better one now.
+               info!("Couldn't get transaction count {:?}", e);
+               sleep(Duration::from_secs(sample_period));
+               continue;
+           }
+           Ok(tx_count) => {
+               txs = tx_count;
+           }
+       }

        if txs < last_txs {
            info!("Expected txs({}) >= last_txs({})", txs, last_txs);
@@ -10,7 +10,6 @@ use {
            RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
        },
    },
-   crossbeam_channel::{unbounded, Receiver, Sender},
    log::*,
    serde::de::DeserializeOwned,
    serde_json::{

@@ -25,6 +24,7 @@ use {
        net::TcpStream,
        sync::{
            atomic::{AtomicBool, Ordering},
+           mpsc::{channel, Receiver, Sender},
            Arc, RwLock,
        },
        thread::{sleep, JoinHandle},

@@ -242,7 +242,7 @@ impl PubsubClient {
    ) -> Result<AccountSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -283,7 +283,7 @@ impl PubsubClient {
    ) -> Result<BlockSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -322,7 +322,7 @@ impl PubsubClient {
    ) -> Result<LogsSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -361,7 +361,7 @@ impl PubsubClient {
    ) -> Result<ProgramSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -398,7 +398,7 @@ impl PubsubClient {
    pub fn vote_subscribe(url: &str) -> Result<VoteSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -431,7 +431,7 @@ impl PubsubClient {
    pub fn root_subscribe(url: &str) -> Result<RootSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -468,7 +468,7 @@ impl PubsubClient {
    ) -> Result<SignatureSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded();
+       let (sender, receiver) = channel();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -506,7 +506,7 @@ impl PubsubClient {
    pub fn slot_subscribe(url: &str) -> Result<SlotsSubscription, PubsubClientError> {
        let url = Url::parse(url)?;
        let socket = connect_with_retry(url)?;
-       let (sender, receiver) = unbounded::<SlotInfo>();
+       let (sender, receiver) = channel::<SlotInfo>();

        let socket = Arc::new(RwLock::new(socket));
        let socket_clone = socket.clone();

@@ -615,5 +615,5 @@ impl PubsubClient {

#[cfg(test)]
mod tests {
-   // see client-test/test/client.rs
+   // see core/tests/client.rs#test_slot_subscription()
}
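Editor's note: the blocking subscriptions now hand back a std receiver instead of a crossbeam one, and the call pattern is unchanged for callers. A hedged usage sketch (the tuple layout follows the v1.9 API, where each *_subscribe returns a subscription handle plus a Receiver; the URL is a placeholder):

use solana_client::pubsub_client::PubsubClient;
use std::time::Duration;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (mut subscription, receiver) = PubsubClient::slot_subscribe("ws://localhost:8900/")?;

    // recv_timeout behaves the same on std::sync::mpsc::Receiver as it did on
    // crossbeam_channel::Receiver, so the channel swap is invisible here.
    match receiver.recv_timeout(Duration::from_secs(5)) {
        Ok(slot_info) => println!("slot: {}", slot_info.slot),
        Err(err) => eprintln!("no slot update: {}", err),
    }

    subscription.shutdown().expect("subscription thread panicked");
    Ok(())
}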
(File diff suppressed because it is too large)
@@ -20,7 +20,6 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
pub const JSON_RPC_SCAN_ERROR: i64 = -32012;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013;
pub const JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET: i64 = -32014;
-pub const JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION: i64 = -32015;

#[derive(Error, Debug)]
pub enum RpcCustomError {

@@ -58,8 +57,6 @@ pub enum RpcCustomError {
    TransactionSignatureLenMismatch,
    #[error("BlockStatusNotAvailableYet")]
    BlockStatusNotAvailableYet { slot: Slot },
-   #[error("UnsupportedTransactionVersion")]
-   UnsupportedTransactionVersion,
}

#[derive(Debug, Serialize, Deserialize)]

@@ -172,11 +169,6 @@ impl From<RpcCustomError> for Error {
                message: format!("Block status not yet available for slot {}", slot),
                data: None,
            },
-           RpcCustomError::UnsupportedTransactionVersion => Self {
-               code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION),
-               message: "Versioned transactions are not supported".to_string(),
-               data: None,
-           },
        }
    }
}
@@ -431,9 +431,6 @@ pub struct RpcInflationReward {
pub enum RpcBlockUpdateError {
    #[error("block store error")]
    BlockStoreError,
-
-   #[error("unsupported transaction version")]
-   UnsupportedTransactionVersion,
}

#[derive(Serialize, Deserialize, Debug)]
@@ -1,7 +1,7 @@
//! A transport for RPC calls.

use {
    crate::{client_error::Result, rpc_request::RpcRequest},
-   async_trait::async_trait,
+   std::time::Duration,
};

@@ -26,14 +26,10 @@ pub struct RpcTransportStats {
/// It is typically implemented by [`HttpSender`] in production, and
/// [`MockSender`] in unit tests.
///
/// [`RpcClient`]: crate::rpc_client::RpcClient
/// [`HttpSender`]: crate::http_sender::HttpSender
/// [`MockSender`]: crate::mock_sender::MockSender
-#[async_trait]
pub trait RpcSender {
-   async fn send(
-       &self,
-       request: RpcRequest,
-       params: serde_json::Value,
-   ) -> Result<serde_json::Value>;
+   fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value>;
    fn get_transport_stats(&self) -> RpcTransportStats;
}
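Editor's note: with the async-trait layer stripped, a sender is again a plain synchronous trait object. A hedged sketch of a custom implementation against the v1.9 trait shape shown above (the struct and its field are hypothetical):

use solana_client::{
    client_error::Result,
    rpc_request::RpcRequest,
    rpc_sender::{RpcSender, RpcTransportStats},
};

/// A sender that answers every request with one fixed JSON value; handy for
/// tests that don't need MockSender's full per-method response table.
struct FixedSender {
    response: serde_json::Value,
}

impl RpcSender for FixedSender {
    fn send(&self, _request: RpcRequest, _params: serde_json::Value) -> Result<serde_json::Value> {
        // No I/O at all: just clone the canned response.
        Ok(self.response.clone())
    }

    fn get_transport_stats(&self) -> RpcTransportStats {
        RpcTransportStats::default()
    }
}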
@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
-version = "1.10.0"
+version = "1.9.7"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"

@@ -21,64 +21,64 @@ bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] }
-etcd-client = { version = "0.8.3", features = ["tls"]}
+etcd-client = { version = "0.8.1", features = ["tls"]}
fs_extra = "1.2.0"
histogram = "0.6.9"
-itertools = "0.10.3"
+itertools = "0.10.1"
log = "0.4.14"
-lru = "0.7.2"
+lru = "0.7.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.6.4"
rayon = "1.5.1"
retain_mut = "0.1.5"
-serde = "1.0.136"
+serde = "1.0.130"
serde_derive = "1.0.103"
-solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.0" }
-solana-bloom = { path = "../bloom", version = "=1.10.0" }
-solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.10.0" }
-solana-client = { path = "../client", version = "=1.10.0" }
-solana-entry = { path = "../entry", version = "=1.10.0" }
-solana-gossip = { path = "../gossip", version = "=1.10.0" }
-solana-ledger = { path = "../ledger", version = "=1.10.0" }
-solana-measure = { path = "../measure", version = "=1.10.0" }
-solana-metrics = { path = "../metrics", version = "=1.10.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
-solana-perf = { path = "../perf", version = "=1.10.0" }
-solana-poh = { path = "../poh", version = "=1.10.0" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
-solana-rpc = { path = "../rpc", version = "=1.10.0" }
-solana-replica-lib = { path = "../replica-lib", version = "=1.10.0" }
-solana-runtime = { path = "../runtime", version = "=1.10.0" }
-solana-sdk = { path = "../sdk", version = "=1.10.0" }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
-solana-streamer = { path = "../streamer", version = "=1.10.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
-tempfile = "3.3.0"
+solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.7" }
+solana-bloom = { path = "../bloom", version = "=1.9.7" }
+solana-client = { path = "../client", version = "=1.9.7" }
+solana-entry = { path = "../entry", version = "=1.9.7" }
+solana-gossip = { path = "../gossip", version = "=1.9.7" }
+solana-ledger = { path = "../ledger", version = "=1.9.7" }
+solana-logger = { path = "../logger", version = "=1.9.7" }
+solana-measure = { path = "../measure", version = "=1.9.7" }
+solana-metrics = { path = "../metrics", version = "=1.9.7" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.7" }
+solana-perf = { path = "../perf", version = "=1.9.7" }
+solana-poh = { path = "../poh", version = "=1.9.7" }
+solana-program-runtime = { path = "../program-runtime", version = "=1.9.7" }
+solana-rpc = { path = "../rpc", version = "=1.9.7" }
+solana-replica-lib = { path = "../replica-lib", version = "=1.9.7" }
+solana-runtime = { path = "../runtime", version = "=1.9.7" }
+solana-sdk = { path = "../sdk", version = "=1.9.7" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.7" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.7" }
+solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.7" }
+solana-streamer = { path = "../streamer", version = "=1.9.7" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.7" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.7" }
+tempfile = "3.2.0"
thiserror = "1.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.7" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"

[dev-dependencies]
jsonrpc-core = "18.0.0"
jsonrpc-core-client = { version = "18.0.0", features = ["ipc", "ws"] }
jsonrpc-derive = "18.0.0"
jsonrpc-pubsub = "18.0.0"
matches = "0.1.9"
-raptorq = "1.6.5"
-reqwest = { version = "0.11.9", default-features = false, features = ["blocking", "rustls-tls", "json"] }
-serde_json = "1.0.78"
-serial_test = "0.6.0"
-solana-logger = { path = "../logger", version = "=1.10.0" }
-solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.10.0" }
-solana-version = { path = "../version", version = "=1.10.0" }
+reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
+serde_json = "1.0.72"
+serial_test = "0.5.1"
+solana-program-runtime = { path = "../program-runtime", version = "=1.9.7" }
+solana-stake-program = { path = "../programs/stake", version = "=1.9.7" }
+solana-version = { path = "../version", version = "=1.9.7" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

[target."cfg(unix)".dependencies]
sysctl = "0.4.3"

[build-dependencies]
rustc_version = "0.4"
@@ -4,7 +4,7 @@
 extern crate test;

 use {
-    crossbeam_channel::{unbounded, Receiver},
+    crossbeam_channel::unbounded,
     log::*,
     rand::{thread_rng, Rng},
     rayon::prelude::*,
@@ -37,7 +37,7 @@ use {
     solana_streamer::socket::SocketAddrSpace,
     std::{
         collections::VecDeque,
-        sync::{atomic::Ordering, Arc, RwLock},
+        sync::{atomic::Ordering, mpsc::Receiver, Arc, RwLock},
         time::{Duration, Instant},
     },
     test::Bencher,
@@ -98,7 +98,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
         None::<Box<dyn Fn()>>,
         &BankingStageStats::default(),
         &recorder,
-        &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1),
+        &Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
         &mut LeaderSlotMetricsTracker::new(0),
     );
 });
@@ -170,8 +170,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
     let (vote_sender, vote_receiver) = unbounded();
     let mut bank = Bank::new_for_benches(&genesis_config);
-    // Allow arbitrary transaction processing time for the purposes of this bench
-    bank.ns_per_slot = u128::MAX;
-    let bank = Arc::new(bank);
+    bank.ns_per_slot = std::u128::MAX;
+    let bank = Arc::new(Bank::new_for_benches(&genesis_config));

     // set cost tracker limits to MAX so it will not filter out TXs
     bank.write_cost_tracker()
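Note: the hunks above show a recurring theme in this compare — the v1.9 branch uses std::sync::mpsc where the newer code uses crossbeam-channel. A minimal standalone sketch (assuming the crossbeam-channel 0.5 crate pinned in the manifest above) of how close the two APIs are for this single-consumer usage:

fn std_mpsc() {
    // std channel: one producer handle is clonable, the receiver is not
    let (tx, rx) = std::sync::mpsc::channel();
    tx.send(42u64).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
}

fn crossbeam() {
    // crossbeam's unbounded channel exposes the same send/recv surface
    let (tx, rx) = crossbeam_channel::unbounded();
    tx.send(42u64).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
    // both ends are Clone here, unlike std mpsc, so receivers can be shared
    let _rx2 = rx.clone();
}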
@@ -1,112 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use {
-    rand::{seq::SliceRandom, Rng},
-    solana_core::{
-        cluster_nodes::{make_test_cluster, new_cluster_nodes, ClusterNodes},
-        retransmit_stage::RetransmitStage,
-    },
-    solana_gossip::contact_info::ContactInfo,
-    solana_sdk::{clock::Slot, hash::hashv, pubkey::Pubkey, signature::Signature},
-    test::Bencher,
-};
-
-const NUM_SIMULATED_SHREDS: usize = 4;
-
-fn make_cluster_nodes<R: Rng>(
-    rng: &mut R,
-    unstaked_ratio: Option<(u32, u32)>,
-) -> (Vec<ContactInfo>, ClusterNodes<RetransmitStage>) {
-    let (nodes, stakes, cluster_info) = make_test_cluster(rng, 5_000, unstaked_ratio);
-    let cluster_nodes = new_cluster_nodes::<RetransmitStage>(&cluster_info, &stakes);
-    (nodes, cluster_nodes)
-}
-
-fn get_retransmit_peers_deterministic(
-    cluster_nodes: &ClusterNodes<RetransmitStage>,
-    slot: &Slot,
-    slot_leader: &Pubkey,
-    num_simulated_shreds: usize,
-) {
-    for i in 0..num_simulated_shreds {
-        // see Shred::seed
-        let shred_seed = hashv(&[
-            &slot.to_le_bytes(),
-            &(i as u32).to_le_bytes(),
-            &slot_leader.to_bytes(),
-        ])
-        .to_bytes();
-
-        let (_neighbors, _children) = cluster_nodes.get_retransmit_peers_deterministic(
-            shred_seed,
-            solana_gossip::cluster_info::DATA_PLANE_FANOUT,
-            *slot_leader,
-        );
-    }
-}
-
-fn get_retransmit_peers_compat(
-    cluster_nodes: &ClusterNodes<RetransmitStage>,
-    slot_leader: &Pubkey,
-    signatures: &[Signature],
-) {
-    for signature in signatures.iter() {
-        // see Shred::seed
-        let signature = signature.as_ref();
-        let offset = signature.len().checked_sub(32).unwrap();
-        let shred_seed = signature[offset..].try_into().unwrap();
-
-        let (_neighbors, _children) = cluster_nodes.get_retransmit_peers_compat(
-            shred_seed,
-            solana_gossip::cluster_info::DATA_PLANE_FANOUT,
-            *slot_leader,
-        );
-    }
-}
-
-fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: Option<(u32, u32)>) {
-    let mut rng = rand::thread_rng();
-    let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
-    let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
-    let slot = rand::random::<u64>();
-    b.iter(|| {
-        get_retransmit_peers_deterministic(
-            &cluster_nodes,
-            &slot,
-            &slot_leader,
-            NUM_SIMULATED_SHREDS,
-        )
-    });
-}
-
-fn get_retransmit_peers_compat_wrapper(b: &mut Bencher, unstaked_ratio: Option<(u32, u32)>) {
-    let mut rng = rand::thread_rng();
-    let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
-    let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
-    let signatures: Vec<_> = std::iter::repeat_with(Signature::new_unique)
-        .take(NUM_SIMULATED_SHREDS)
-        .collect();
-    b.iter(|| get_retransmit_peers_compat(&cluster_nodes, &slot_leader, &signatures));
-}
-
-#[bench]
-fn bench_get_retransmit_peers_deterministic_unstaked_ratio_1_2(b: &mut Bencher) {
-    get_retransmit_peers_deterministic_wrapper(b, Some((1, 2)));
-}
-
-#[bench]
-fn bench_get_retransmit_peers_compat_unstaked_ratio_1_2(b: &mut Bencher) {
-    get_retransmit_peers_compat_wrapper(b, Some((1, 2)));
-}
-
-#[bench]
-fn bench_get_retransmit_peers_deterministic_unstaked_ratio_1_32(b: &mut Bencher) {
-    get_retransmit_peers_deterministic_wrapper(b, Some((1, 32)));
-}
-
-#[bench]
-fn bench_get_retransmit_peers_compat_unstaked_ratio_1_32(b: &mut Bencher) {
-    get_retransmit_peers_compat_wrapper(b, Some((1, 32)));
-}
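Note: the deleted bench above contrasts two shred-seed derivations — a deterministic hash of (slot, shred index, leader) versus the legacy last-32-bytes-of-signature path. A standalone sketch of both, with a stand-in byte mixer in place of solana_sdk::hash::hashv:

use std::convert::TryInto; // in the prelude from edition 2021 onward

fn seed_compat(signature: &[u8]) -> [u8; 32] {
    // legacy path: the last 32 bytes of the shred signature, as above
    let offset = signature.len().checked_sub(32).unwrap();
    signature[offset..].try_into().unwrap()
}

fn seed_deterministic(slot: u64, index: u32, leader: &[u8; 32]) -> [u8; 32] {
    // deterministic path: mix (slot, shred index, leader); the real code
    // uses hashv over the same inputs, this XOR mixer is illustrative only
    let mut seed = [0u8; 32];
    let bytes = slot
        .to_le_bytes()
        .into_iter()
        .chain(index.to_le_bytes())
        .chain(leader.iter().copied());
    for (i, b) in bytes.enumerate() {
        seed[i % 32] ^= b;
    }
    seed
}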
@@ -4,7 +4,6 @@ extern crate solana_core;
 extern crate test;

 use {
-    crossbeam_channel::unbounded,
     log::*,
     solana_core::retransmit_stage::retransmitter,
     solana_entry::entry::Entry,
@@ -31,6 +30,7 @@ use {
     net::UdpSocket,
     sync::{
         atomic::{AtomicUsize, Ordering},
+        mpsc::channel,
         Arc, RwLock,
     },
     thread::{sleep, Builder},
@@ -77,7 +77,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     let bank_forks = BankForks::new(bank0);
     let bank = bank_forks.working_bank();
     let bank_forks = Arc::new(RwLock::new(bank_forks));
-    let (shreds_sender, shreds_receiver) = unbounded();
+    let (shreds_sender, shreds_receiver) = channel();
     const NUM_THREADS: usize = 2;
     let sockets = (0..NUM_THREADS)
         .map(|_| UdpSocket::bind("0.0.0.0:0").unwrap())
@@ -9,17 +9,17 @@ use {
     log::*,
     rand::{thread_rng, Rng},
     solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
-    solana_perf::{
-        packet::{to_packet_batches, PacketBatch},
-        test_tx::test_tx,
-    },
+    solana_perf::{packet::to_packet_batches, packet::PacketBatch, test_tx::test_tx},
     solana_sdk::{
         hash::Hash,
         signature::{Keypair, Signer},
         system_transaction,
         timing::duration_as_ms,
     },
-    std::time::{Duration, Instant},
+    std::{
+        sync::mpsc::channel,
+        time::{Duration, Instant},
+    },
     test::Bencher,
 };

@@ -140,7 +140,7 @@ fn gen_batches(use_same_tx: bool) -> Vec<PacketBatch> {
 fn bench_sigverify_stage(bencher: &mut Bencher) {
     solana_logger::setup();
     trace!("start");
-    let (packet_s, packet_r) = unbounded();
+    let (packet_s, packet_r) = channel();
     let (verified_s, verified_r) = unbounded();
     let verifier = TransactionSigVerifier::default();
     let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
@@ -5,7 +5,6 @@
 // set and halt the node if a mismatch is detected.

 use {
-    crossbeam_channel::RecvTimeoutError,
     rayon::ThreadPool,
     solana_gossip::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
     solana_measure::measure::Measure,
@@ -25,6 +24,7 @@ use {
     path::{Path, PathBuf},
     sync::{
         atomic::{AtomicBool, Ordering},
+        mpsc::RecvTimeoutError,
         Arc,
     },
     thread::{self, Builder, JoinHandle},
@@ -3,7 +3,6 @@ use {
         cluster_slots::ClusterSlots,
         duplicate_repair_status::{DeadSlotAncestorRequestStatus, DuplicateAncestorDecision},
         outstanding_requests::OutstandingRequests,
-        packet_threshold::DynamicPacketToProcessThreshold,
         repair_response::{self},
         repair_service::{DuplicateSlotsResetSender, RepairInfo, RepairStatsGroup},
         replay_stage::DUPLICATE_THRESHOLD,
@@ -13,6 +12,7 @@ use {
     crossbeam_channel::{unbounded, Receiver, Sender},
     dashmap::{mapref::entry::Entry::Occupied, DashMap},
     solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE},
+    solana_measure::measure::Measure,
     solana_perf::{
         packet::{limited_deserialize, Packet, PacketBatch},
         recycler::Recycler,
@@ -29,6 +29,7 @@ use {
     net::UdpSocket,
     sync::{
         atomic::{AtomicBool, Ordering},
+        mpsc::channel,
         Arc, RwLock,
     },
     thread::{self, sleep, Builder, JoinHandle},
@@ -146,7 +147,7 @@ impl AncestorHashesService {
    ) -> Self {
        let outstanding_requests: Arc<RwLock<OutstandingAncestorHashesRepairs>> =
            Arc::new(RwLock::new(OutstandingAncestorHashesRepairs::default()));
-        let (response_sender, response_receiver) = unbounded();
+        let (response_sender, response_receiver) = channel();
        let t_receiver = streamer::receiver(
            ancestor_hashes_request_socket.clone(),
            &exit,
@@ -208,7 +209,7 @@ impl AncestorHashesService {
            .spawn(move || {
                let mut last_stats_report = Instant::now();
                let mut stats = AncestorHashesResponsesStats::default();
-                let mut packet_threshold = DynamicPacketToProcessThreshold::default();
+                let mut max_packets = 1024;
                loop {
                    let result = Self::process_new_packets_from_channel(
                        &ancestor_hashes_request_statuses,
@@ -216,13 +217,13 @@ impl AncestorHashesService {
                        &blockstore,
                        &outstanding_requests,
                        &mut stats,
-                        &mut packet_threshold,
+                        &mut max_packets,
                        &duplicate_slots_reset_sender,
                        &retryable_slots_sender,
                    );
                    match result {
                        Err(Error::RecvTimeout(_)) | Ok(_) => {}
-                        Err(err) => info!("ancestors hashes responses listener error: {:?}", err),
+                        Err(err) => info!("ancestors hashes reponses listener error: {:?}", err),
                    };
                    if exit.load(Ordering::Relaxed) {
                        return;
@@ -243,7 +244,7 @@ impl AncestorHashesService {
        blockstore: &Blockstore,
        outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
        stats: &mut AncestorHashesResponsesStats,
-        packet_threshold: &mut DynamicPacketToProcessThreshold,
+        max_packets: &mut usize,
        duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
        retryable_slots_sender: &RetryableSlotsSender,
    ) -> Result<()> {
@@ -254,17 +255,18 @@ impl AncestorHashesService {
        let mut dropped_packets = 0;
        while let Ok(batch) = response_receiver.try_recv() {
            total_packets += batch.packets.len();
-            if packet_threshold.should_drop(total_packets) {
-                dropped_packets += batch.packets.len();
-            } else {
+            if total_packets < *max_packets {
+                // Drop the rest in the channel in case of DOS
                packet_batches.push(batch);
+            } else {
+                dropped_packets += batch.packets.len();
            }
        }

        stats.dropped_packets += dropped_packets;
        stats.total_packets += total_packets;

-        let timer = Instant::now();
+        let mut time = Measure::start("ancestor_hashes::handle_packets");
        for packet_batch in packet_batches {
            Self::process_packet_batch(
                ancestor_hashes_request_statuses,
@@ -276,7 +278,14 @@ impl AncestorHashesService {
                retryable_slots_sender,
            );
        }
-        packet_threshold.update(total_packets, timer.elapsed());
+        time.stop();
+        if total_packets >= *max_packets {
+            if time.as_ms() > 1000 {
+                *max_packets = (*max_packets * 9) / 10;
+            } else {
+                *max_packets = (*max_packets * 10) / 9;
+            }
+        }
        Ok(())
    }
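Note: the v1.9 side above sizes its packet budget adaptively — shrink by ~10% when a pass took over a second, grow by ~11% otherwise. A minimal standalone sketch of that multiplicative adjust (illustrative names; the newer side hides the same idea behind DynamicPacketToProcessThreshold):

struct PacketBudget {
    max_packets: usize,
}

impl PacketBudget {
    fn update(&mut self, total_packets: usize, elapsed_ms: u64) {
        // only adjust when the budget was actually saturated
        if total_packets >= self.max_packets {
            if elapsed_ms > 1_000 {
                // too slow: shed load by shrinking the budget ~10%
                self.max_packets = (self.max_packets * 9) / 10;
            } else {
                // keeping up: cautiously grow the budget ~11%
                self.max_packets = (self.max_packets * 10) / 9;
            }
        }
    }
}

// e.g. starting from 1024: one slow pass drops the budget to 921,
// one fast saturated pass raises it to 1137.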
@@ -694,7 +703,7 @@ mod test {
         solana_runtime::{accounts_background_service::AbsRequestSender, bank_forks::BankForks},
         solana_sdk::{hash::Hash, signature::Keypair},
         solana_streamer::socket::SocketAddrSpace,
-        std::collections::HashMap,
+        std::{collections::HashMap, sync::mpsc::channel},
         trees::tr,
     };

@@ -887,8 +896,8 @@ mod test {
         // Set up thread to give us responses
         let ledger_path = get_tmp_ledger_path!();
         let exit = Arc::new(AtomicBool::new(false));
-        let (requests_sender, requests_receiver) = unbounded();
-        let (response_sender, response_receiver) = unbounded();
+        let (requests_sender, requests_receiver) = channel();
+        let (response_sender, response_receiver) = channel();

         // Set up blockstore for responses
         let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

(File diff suppressed because it is too large.)
@@ -12,14 +12,17 @@ use {
         cluster_nodes::{ClusterNodes, ClusterNodesCache},
         result::{Error, Result},
     },
-    crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender},
+    crossbeam_channel::{
+        Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
+        Sender as CrossbeamSender,
+    },
     itertools::Itertools,
     solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT},
     solana_ledger::{blockstore::Blockstore, shred::Shred},
     solana_measure::measure::Measure,
     solana_metrics::{inc_new_counter_error, inc_new_counter_info},
     solana_poh::poh_recorder::WorkingBankEntry,
-    solana_runtime::bank_forks::BankForks,
+    solana_runtime::{bank::Bank, bank_forks::BankForks},
     solana_sdk::{
         clock::Slot,
         pubkey::Pubkey,
@@ -31,11 +34,12 @@ use {
         socket::SocketAddrSpace,
     },
     std::{
-        collections::{HashMap, HashSet},
+        collections::HashMap,
         iter::repeat,
         net::UdpSocket,
         sync::{
             atomic::{AtomicBool, Ordering},
+            mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender},
             Arc, Mutex, RwLock,
         },
         thread::{self, Builder, JoinHandle},
@@ -54,8 +58,8 @@ const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
 const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);

 pub(crate) const NUM_INSERT_THREADS: usize = 2;
-pub(crate) type RetransmitSlotsSender = Sender<Slot>;
-pub(crate) type RetransmitSlotsReceiver = Receiver<Slot>;
+pub(crate) type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
+pub(crate) type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
 pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
 pub(crate) type TransmitReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;

@@ -207,11 +211,13 @@ impl BroadcastStage {
                 match e {
                     Error::RecvTimeout(RecvTimeoutError::Disconnected)
                     | Error::Send
-                    | Error::Recv(RecvError) => {
+                    | Error::Recv(RecvError)
+                    | Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Disconnected) => {
                         return Some(BroadcastStageReturnType::ChannelDisconnected);
                     }
                     Error::RecvTimeout(RecvTimeoutError::Timeout)
-                    | Error::ClusterInfo(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
+                    | Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Timeout) => (),
+                    Error::ClusterInfo(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
                     _ => {
                         inc_new_counter_error!("streamer-broadcaster-error", 1, 1);
                         error!("{} broadcaster error: {:?}", name, e);
@@ -250,8 +256,8 @@ impl BroadcastStage {
     ) -> Self {
         let btree = blockstore.clone();
         let exit = exit_sender.clone();
-        let (socket_sender, socket_receiver) = unbounded();
-        let (blockstore_sender, blockstore_receiver) = unbounded();
+        let (socket_sender, socket_receiver) = channel();
+        let (blockstore_sender, blockstore_receiver) = channel();
         let bs_run = broadcast_stage_run.clone();

         let socket_sender_ = socket_sender.clone();
@@ -336,34 +342,33 @@ impl BroadcastStage {
         retransmit_slots_receiver: &RetransmitSlotsReceiver,
         socket_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
     ) -> Result<()> {
-        const RECV_TIMEOUT: Duration = Duration::from_millis(100);
-        let retransmit_slots: HashSet<Slot> =
-            std::iter::once(retransmit_slots_receiver.recv_timeout(RECV_TIMEOUT)?)
-                .chain(retransmit_slots_receiver.try_iter())
-                .collect();
+        let timer = Duration::from_millis(100);

-        for new_retransmit_slot in retransmit_slots {
+        // Check for a retransmit signal
+        let mut retransmit_slots = retransmit_slots_receiver.recv_timeout(timer)?;
+        while let Ok(new_retransmit_slots) = retransmit_slots_receiver.try_recv() {
+            retransmit_slots.extend(new_retransmit_slots);
+        }
+
+        for (_, bank) in retransmit_slots.iter() {
+            let slot = bank.slot();
             let data_shreds = Arc::new(
                 blockstore
-                    .get_data_shreds_for_slot(new_retransmit_slot, 0)
+                    .get_data_shreds_for_slot(slot, 0)
                     .expect("My own shreds must be reconstructable"),
             );
-            debug_assert!(data_shreds
-                .iter()
-                .all(|shred| shred.slot() == new_retransmit_slot));
+            debug_assert!(data_shreds.iter().all(|shred| shred.slot() == slot));
             if !data_shreds.is_empty() {
                 socket_sender.send((data_shreds, None))?;
             }

             let coding_shreds = Arc::new(
                 blockstore
-                    .get_coding_shreds_for_slot(new_retransmit_slot, 0)
+                    .get_coding_shreds_for_slot(slot, 0)
                     .expect("My own shreds must be reconstructable"),
             );

-            debug_assert!(coding_shreds
-                .iter()
-                .all(|shred| shred.slot() == new_retransmit_slot));
+            debug_assert!(coding_shreds.iter().all(|shred| shred.slot() == slot));
             if !coding_shreds.is_empty() {
                 socket_sender.send((coding_shreds, None))?;
             }
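Note: the newer side of check_retransmit_signals above blocks for one signal, then drains whatever else is queued and collapses duplicates. A standalone sketch of that drain-then-dedupe pattern over a std mpsc channel:

use std::{collections::HashSet, sync::mpsc, time::Duration};

fn drain_slots(rx: &mpsc::Receiver<u64>) -> Result<HashSet<u64>, mpsc::RecvTimeoutError> {
    // block briefly for the first signal, then sweep up the backlog;
    // collecting into a HashSet collapses duplicate retransmit requests
    let first = rx.recv_timeout(Duration::from_millis(100))?;
    Ok(std::iter::once(first).chain(rx.try_iter()).collect())
}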
@@ -468,7 +473,7 @@ pub mod test {
     },
     std::{
         path::Path,
-        sync::{atomic::AtomicBool, Arc},
+        sync::{atomic::AtomicBool, mpsc::channel, Arc},
         thread::sleep,
     },
 };
@@ -540,8 +545,10 @@ pub mod test {
         // Setup
         let ledger_path = get_tmp_ledger_path!();
         let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
-        let (transmit_sender, transmit_receiver) = unbounded();
+        let (transmit_sender, transmit_receiver) = channel();
         let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
+        let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));

         // Make some shreds
         let updated_slot = 0;
@@ -561,8 +568,12 @@ pub mod test {

         // Insert duplicate retransmit signal, blocks should
         // only be retransmitted once
-        retransmit_slots_sender.send(updated_slot).unwrap();
-        retransmit_slots_sender.send(updated_slot).unwrap();
+        retransmit_slots_sender
+            .send(vec![(updated_slot, bank0.clone())].into_iter().collect())
+            .unwrap();
+        retransmit_slots_sender
+            .send(vec![(updated_slot, bank0)].into_iter().collect())
+            .unwrap();
         BroadcastStage::check_retransmit_signals(
             &blockstore,
             &retransmit_slots_receiver,
@@ -645,7 +656,7 @@ pub mod test {
         // Create the leader scheduler
         let leader_keypair = Keypair::new();

-        let (entry_sender, entry_receiver) = unbounded();
+        let (entry_sender, entry_receiver) = channel();
         let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
         let broadcast_service = setup_dummy_broadcast_service(
             &leader_keypair.pubkey(),
@@ -260,7 +260,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
             .map(|(node, _)| node)
             .collect();

-        // Create cluster partition.
+        // Creat cluster partition.
         let cluster_partition: HashSet<Pubkey> = {
             let mut cumilative_stake = 0;
             let epoch = root_bank.get_leader_schedule_epoch(slot);
@@ -287,7 +287,7 @@ mod test {
         let slot_broadcast_stats = Arc::new(Mutex::new(SlotBroadcastStats::default()));
         let num_threads = 5;
         let slot = 0;
-        let (sender, receiver) = unbounded();
+        let (sender, receiver) = channel();
         let thread_handles: Vec<_> = (0..num_threads)
             .map(|i| {
                 let slot_broadcast_stats = slot_broadcast_stats.clone();
@@ -1,13 +1,12 @@
 use {
     crate::result::Result,
-    crossbeam_channel::Receiver,
     solana_entry::entry::Entry,
     solana_ledger::shred::Shred,
     solana_poh::poh_recorder::WorkingBankEntry,
     solana_runtime::bank::Bank,
     solana_sdk::clock::Slot,
     std::{
-        sync::Arc,
+        sync::{mpsc::Receiver, Arc},
         time::{Duration, Instant},
     },
 };
@@ -85,12 +84,12 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result
 mod tests {
     use {
         super::*,
-        crossbeam_channel::unbounded,
         solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo},
         solana_sdk::{
             genesis_config::GenesisConfig, pubkey::Pubkey, system_transaction,
             transaction::Transaction,
         },
+        std::sync::mpsc::channel,
     };

     fn setup_test() -> (GenesisConfig, Arc<Bank>, Transaction) {
@@ -115,7 +114,7 @@ mod tests {
         let (genesis_config, bank0, tx) = setup_test();

         let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
-        let (s, r) = unbounded();
+        let (s, r) = channel();
         let mut last_hash = genesis_config.hash();

         assert!(bank1.max_tick_height() > 1);
@@ -145,7 +144,7 @@ mod tests {

         let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
         let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 2));
-        let (s, r) = unbounded();
+        let (s, r) = channel();

         let mut last_hash = genesis_config.hash();
         assert!(bank1.max_tick_height() > 1);
@@ -166,8 +166,8 @@ impl StandardBroadcastRun {
         receive_results: ReceiveResults,
         bank_forks: &Arc<RwLock<BankForks>>,
     ) -> Result<()> {
-        let (bsend, brecv) = unbounded();
-        let (ssend, srecv) = unbounded();
+        let (bsend, brecv) = channel();
+        let (ssend, srecv) = channel();
         self.process_receive_results(keypair, blockstore, &ssend, &bsend, receive_results)?;
         let srecv = Arc::new(Mutex::new(srecv));
         let brecv = Arc::new(Mutex::new(brecv));
@@ -763,8 +763,8 @@ mod test {
         let num_shreds_per_slot = 2;
         let (blockstore, genesis_config, _cluster_info, bank, leader_keypair, _socket, _bank_forks) =
             setup(num_shreds_per_slot);
-        let (bsend, brecv) = unbounded();
-        let (ssend, _srecv) = unbounded();
+        let (bsend, brecv) = channel();
+        let (ssend, _srecv) = channel();
         let mut last_tick_height = 0;
         let mut standard_broadcast_run = StandardBroadcastRun::new(0);
         let mut process_ticks = |num_ticks| {
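Note: the Arc::new(Mutex::new(srecv)) wrapping above is forced by std::sync::mpsc — its Receiver is single-consumer and cannot be cloned, unlike a crossbeam receiver. A standalone sketch of sharing one mpsc receiver across workers this way:

use std::sync::{mpsc, Arc, Mutex};
use std::thread;

fn fan_out(n_workers: usize, items: Vec<u64>) {
    let (tx, rx) = mpsc::channel();
    let rx = Arc::new(Mutex::new(rx));
    let workers: Vec<_> = (0..n_workers)
        .map(|_| {
            let rx = Arc::clone(&rx);
            thread::spawn(move || loop {
                // take the lock only long enough to pull the next item;
                // the guard is dropped before the item is processed
                let next = { rx.lock().unwrap().recv() };
                match next {
                    Ok(item) => drop(item), // process item here
                    Err(_) => break,        // all senders gone: shut down
                }
            })
        })
        .collect();
    for item in items {
        tx.send(item).unwrap();
    }
    drop(tx); // close the channel so workers exit
    for w in workers {
        w.join().unwrap();
    }
}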
@@ -9,7 +9,10 @@ use {
         },
         vote_stake_tracker::VoteStakeTracker,
     },
-    crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Select, Sender},
+    crossbeam_channel::{
+        unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select,
+        Sender as CrossbeamSender,
+    },
     log::*,
     solana_gossip::{
         cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
@@ -29,9 +32,8 @@ use {
         bank_forks::BankForks,
         commitment::VOTE_THRESHOLD_SIZE,
         epoch_stakes::EpochStakes,
-        vote_parser::{self, ParsedVote},
-        vote_sender_types::ReplayVoteReceiver,
-        vote_transaction::VoteTransaction,
+        vote_parser,
+        vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
     },
     solana_sdk::{
         clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
@@ -41,6 +43,7 @@ use {
         slot_hashes,
         transaction::Transaction,
     },
+    solana_vote_program::vote_state::Vote,
     std::{
         collections::{HashMap, HashSet},
         iter::repeat,
@@ -55,16 +58,16 @@ use {

 // Map from a vote account to the authorized voter for an epoch
 pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>;
-pub type VerifiedLabelVotePacketsSender = Sender<Vec<VerifiedVoteMetadata>>;
-pub type VerifiedLabelVotePacketsReceiver = Receiver<Vec<VerifiedVoteMetadata>>;
-pub type VerifiedVoteTransactionsSender = Sender<Vec<Transaction>>;
-pub type VerifiedVoteTransactionsReceiver = Receiver<Vec<Transaction>>;
-pub type VerifiedVoteSender = Sender<(Pubkey, Vec<Slot>)>;
-pub type VerifiedVoteReceiver = Receiver<(Pubkey, Vec<Slot>)>;
-pub type GossipVerifiedVoteHashSender = Sender<(Pubkey, Slot, Hash)>;
-pub type GossipVerifiedVoteHashReceiver = Receiver<(Pubkey, Slot, Hash)>;
-pub type GossipDuplicateConfirmedSlotsSender = Sender<ThresholdConfirmedSlots>;
-pub type GossipDuplicateConfirmedSlotsReceiver = Receiver<ThresholdConfirmedSlots>;
+pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<VerifiedVoteMetadata>>;
+pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<VerifiedVoteMetadata>>;
+pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
+pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
+pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec<Slot>)>;
+pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec<Slot>)>;
+pub type GossipVerifiedVoteHashSender = CrossbeamSender<(Pubkey, Slot, Hash)>;
+pub type GossipVerifiedVoteHashReceiver = CrossbeamReceiver<(Pubkey, Slot, Hash)>;
+pub type GossipDuplicateConfirmedSlotsSender = CrossbeamSender<ThresholdConfirmedSlots>;
+pub type GossipDuplicateConfirmedSlotsReceiver = CrossbeamReceiver<ThresholdConfirmedSlots>;

 const THRESHOLDS_TO_CHECK: [f64; 2] = [DUPLICATE_THRESHOLD, VOTE_THRESHOLD_SIZE];
 const BANK_SEND_VOTES_LOOP_SLEEP_MS: u128 = 10;
@@ -190,7 +193,7 @@ impl ClusterInfoVoteListener {
     pub fn new(
         exit: Arc<AtomicBool>,
         cluster_info: Arc<ClusterInfo>,
-        verified_packets_sender: Sender<Vec<PacketBatch>>,
+        verified_packets_sender: CrossbeamSender<Vec<PacketBatch>>,
         poh_recorder: Arc<Mutex<PohRecorder>>,
         vote_tracker: Arc<VoteTracker>,
         bank_forks: Arc<RwLock<BankForks>>,
@@ -291,11 +294,7 @@ impl ClusterInfoVoteListener {
         let mut packet_batches = packet::to_packet_batches(&votes, 1);

         // Votes should already be filtered by this point.
-        sigverify::ed25519_verify_cpu(
-            &mut packet_batches,
-            /*reject_non_vote=*/ false,
-            votes.len(),
-        );
+        sigverify::ed25519_verify_cpu(&mut packet_batches, /*reject_non_vote=*/ false);
         let root_bank = bank_forks.read().unwrap().root_bank();
         let epoch_schedule = root_bank.epoch_schedule();
         votes
@@ -333,7 +332,7 @@ impl ClusterInfoVoteListener {
         exit: Arc<AtomicBool>,
         verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
         poh_recorder: Arc<Mutex<PohRecorder>>,
-        verified_packets_sender: &Sender<Vec<PacketBatch>>,
+        verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
     ) -> Result<()> {
         let mut verified_vote_packets = VerifiedVotePackets::default();
         let mut time_since_lock = Instant::now();
@@ -354,8 +353,8 @@ impl ClusterInfoVoteListener {
                 would_be_leader,
             ) {
                 match e {
-                    Error::RecvTimeout(RecvTimeoutError::Disconnected)
-                    | Error::RecvTimeout(RecvTimeoutError::Timeout) => (),
+                    Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected)
+                    | Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (),
                     _ => {
                         error!("thread {:?} error {:?}", thread::current().name(), e);
                     }
@@ -381,7 +380,7 @@ impl ClusterInfoVoteListener {
     fn check_for_leader_bank_and_send_votes(
         bank_vote_sender_state_option: &mut Option<BankVoteSenderState>,
         current_working_bank: Arc<Bank>,
-        verified_packets_sender: &Sender<Vec<PacketBatch>>,
+        verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
         verified_vote_packets: &VerifiedVotePackets,
     ) -> Result<()> {
         // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS`
@@ -485,7 +484,7 @@ impl ClusterInfoVoteListener {
                         .add_new_optimistic_confirmed_slots(confirmed_slots.clone());
                 }
                 Err(e) => match e {
-                    Error::RecvTimeout(RecvTimeoutError::Disconnected) => {
+                    Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => {
                         return Ok(());
                     }
                     Error::ReadyTimeout => (),
@@ -567,7 +566,7 @@ impl ClusterInfoVoteListener {

     #[allow(clippy::too_many_arguments)]
     fn track_new_votes_and_notify_confirmations(
-        vote: VoteTransaction,
+        vote: Vote,
         vote_pubkey: &Pubkey,
         vote_tracker: &VoteTracker,
         root_bank: &Bank,
@@ -580,17 +579,17 @@ impl ClusterInfoVoteListener {
         bank_notification_sender: &Option<BankNotificationSender>,
         cluster_confirmed_slot_sender: &Option<GossipDuplicateConfirmedSlotsSender>,
     ) {
-        if vote.is_empty() {
+        if vote.slots.is_empty() {
             return;
         }

-        let (last_vote_slot, last_vote_hash) = vote.last_voted_slot_hash().unwrap();
+        let last_vote_slot = *vote.slots.last().unwrap();
+        let last_vote_hash = vote.hash;

         let root = root_bank.slot();
         let mut is_new_vote = false;
-        let vote_slots = vote.slots();
         // If slot is before the root, ignore it
-        for slot in vote_slots.iter().filter(|slot| **slot > root).rev() {
+        for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
             let slot = *slot;

             // if we don't have stake information, ignore it
@@ -603,9 +602,6 @@ impl ClusterInfoVoteListener {

             // The last vote slot, which is the greatest slot in the stack
             // of votes in a vote transaction, qualifies for optimistic confirmation.
-            // We cannot count any other slots in this vote toward optimistic confirmation because:
-            // 1) There may have been a switch between the earlier vote and the last vote
-            // 2) We do not know the hash of the earlier slot
             if slot == last_vote_slot {
                 let vote_accounts = epoch_stakes.stakes().vote_accounts();
                 let stake = vote_accounts
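Note: per the comments above, only the last (greatest) slot in a vote's stack is tallied toward optimistic confirmation, and confirmation fires once voted stake crosses a supermajority fraction of total stake. A back-of-the-envelope sketch of that threshold check (illustrative names, not the VoteStakeTracker API):

fn crosses_threshold(voted_stake: u64, total_stake: u64, threshold: f64) -> bool {
    // optimistically confirmed once strictly more than `threshold` of the
    // epoch's total stake has voted on this (slot, hash)
    voted_stake as f64 / total_stake as f64 > threshold
}

fn main() {
    // with a 2/3-style supermajority threshold:
    assert!(crosses_threshold(67, 100, 2.0 / 3.0));
    assert!(!crosses_threshold(66, 100, 2.0 / 3.0));
}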
@@ -677,15 +673,15 @@ impl ClusterInfoVoteListener {
         }

         if is_new_vote {
-            subscriptions.notify_vote(*vote_pubkey, vote);
-            let _ = verified_vote_sender.send((*vote_pubkey, vote_slots));
+            subscriptions.notify_vote(*vote_pubkey, &vote);
+            let _ = verified_vote_sender.send((*vote_pubkey, vote.slots));
         }
     }

     fn filter_and_confirm_with_new_votes(
         vote_tracker: &VoteTracker,
         gossip_vote_txs: Vec<Transaction>,
-        replayed_votes: Vec<ParsedVote>,
+        replayed_votes: Vec<ReplayedVote>,
         root_bank: &Bank,
         subscriptions: &RpcSubscriptions,
         gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender,
@@ -1015,7 +1011,7 @@ mod tests {
             replay_votes_sender
                 .send((
                     vote_keypair.pubkey(),
-                    VoteTransaction::from(replay_vote.clone()),
+                    replay_vote.clone(),
                     switch_proof_hash,
                 ))
                 .unwrap();
@@ -1262,8 +1258,7 @@ mod tests {
         let (votes_sender, votes_receiver) = unbounded();
         let (verified_vote_sender, _verified_vote_receiver) = unbounded();
         let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded();
-        let (replay_votes_sender, replay_votes_receiver): (ReplayVoteSender, ReplayVoteReceiver) =
-            unbounded();
+        let (replay_votes_sender, replay_votes_receiver) = unbounded();

         let vote_slot = 1;
         let vote_bank_hash = Hash::default();
@@ -1303,7 +1298,7 @@ mod tests {
             replay_votes_sender
                 .send((
                     vote_keypair.pubkey(),
-                    VoteTransaction::from(Vote::new(vec![vote_slot], Hash::default())),
+                    Vote::new(vec![vote_slot], Hash::default()),
                     switch_proof_hash,
                 ))
                 .unwrap();
@@ -1398,7 +1393,7 @@ mod tests {
             // Add gossip vote for same slot, should not affect outcome
             vec![(
                 validator0_keypairs.vote_keypair.pubkey(),
-                VoteTransaction::from(Vote::new(vec![voted_slot], Hash::default())),
+                Vote::new(vec![voted_slot], Hash::default()),
                 None,
             )],
             &bank,
@@ -1443,7 +1438,7 @@ mod tests {
             vote_txs,
             vec![(
                 validator_keypairs[1].vote_keypair.pubkey(),
-                VoteTransaction::from(Vote::new(vec![first_slot_in_new_epoch], Hash::default())),
+                Vote::new(vec![first_slot_in_new_epoch], Hash::default()),
                 None,
             )],
             &new_root_bank,
@@ -2,14 +2,12 @@ use {
     crate::{broadcast_stage::BroadcastStage, retransmit_stage::RetransmitStage},
     itertools::Itertools,
     lru::LruCache,
-    rand::{seq::SliceRandom, Rng, SeedableRng},
+    rand::SeedableRng,
     rand_chacha::ChaChaRng,
     solana_gossip::{
         cluster_info::{compute_retransmit_peers, ClusterInfo},
         contact_info::ContactInfo,
-        crds::GossipRoute,
         crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
-        crds_value::{CrdsData, CrdsValue},
         weighted_shuffle::{weighted_best, weighted_shuffle, WeightedShuffle},
     },
     solana_ledger::shred::Shred,
@@ -18,7 +16,6 @@ use {
         clock::{Epoch, Slot},
         feature_set,
         pubkey::Pubkey,
-        signature::Keypair,
         timing::timestamp,
     },
     solana_streamer::socket::SocketAddrSpace,
@@ -26,7 +23,6 @@ use {
         any::TypeId,
         cmp::Reverse,
         collections::HashMap,
-        iter::repeat_with,
         marker::PhantomData,
         net::SocketAddr,
         ops::Deref,
@@ -43,7 +39,7 @@ enum NodeId {
     Pubkey(Pubkey),
 }

-pub struct Node {
+struct Node {
     node: NodeId,
     stake: u64,
 }
@@ -237,18 +233,6 @@ impl ClusterNodes<RetransmitStage> {
         if !enable_turbine_peers_shuffle_patch(shred.slot(), root_bank) {
             return self.get_retransmit_peers_compat(shred_seed, fanout, slot_leader);
         }
-        self.get_retransmit_peers_deterministic(shred_seed, fanout, slot_leader)
-    }
-
-    pub fn get_retransmit_peers_deterministic(
-        &self,
-        shred_seed: [u8; 32],
-        fanout: usize,
-        slot_leader: Pubkey,
-    ) -> (
-        Vec<&Node>, // neighbors
-        Vec<&Node>, // children
-    ) {
         let mut weighted_shuffle = self.weighted_shuffle.clone();
         // Exclude slot leader from list of nodes.
         if slot_leader == self.pubkey {
@@ -272,7 +256,7 @@ impl ClusterNodes<RetransmitStage> {
         (neighbors, children)
     }

-    pub fn get_retransmit_peers_compat(
+    fn get_retransmit_peers_compat(
         &self,
         shred_seed: [u8; 32],
         fanout: usize,
@@ -313,7 +297,7 @@ impl ClusterNodes<RetransmitStage> {
     }
 }

-pub fn new_cluster_nodes<T: 'static>(
+fn new_cluster_nodes<T: 'static>(
     cluster_info: &ClusterInfo,
     stakes: &HashMap<Pubkey, u64>,
 ) -> ClusterNodes<T> {
@@ -478,61 +462,22 @@ impl From<Pubkey> for NodeId {
     }
 }

-pub fn make_test_cluster<R: Rng>(
-    rng: &mut R,
-    num_nodes: usize,
-    unstaked_ratio: Option<(u32, u32)>,
-) -> (
-    Vec<ContactInfo>,
-    HashMap<Pubkey, u64>, // stakes
-    ClusterInfo,
-) {
-    let (unstaked_numerator, unstaked_denominator) = unstaked_ratio.unwrap_or((1, 7));
-    let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
-        .take(num_nodes)
-        .collect();
-    nodes.shuffle(rng);
-    let this_node = nodes[0].clone();
-    let mut stakes: HashMap<Pubkey, u64> = nodes
-        .iter()
-        .filter_map(|node| {
-            if rng.gen_ratio(unstaked_numerator, unstaked_denominator) {
-                None // No stake for some of the nodes.
-            } else {
-                Some((node.id, rng.gen_range(0, 20)))
-            }
-        })
-        .collect();
-    // Add some staked nodes with no contact-info.
-    stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
-    let cluster_info = ClusterInfo::new(
-        this_node,
-        Arc::new(Keypair::new()),
-        SocketAddrSpace::Unspecified,
-    );
-    {
-        let now = timestamp();
-        let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();
-        // First node is pushed to crds table by ClusterInfo constructor.
-        for node in nodes.iter().skip(1) {
-            let node = CrdsData::ContactInfo(node.clone());
-            let node = CrdsValue::new_unsigned(node);
-            assert_eq!(
-                gossip_crds.insert(node, now, GossipRoute::LocalMessage),
-                Ok(())
-            );
-        }
-    }
-    (nodes, stakes, cluster_info)
-}
-
 #[cfg(test)]
 mod tests {
     use {
         super::*,
-        solana_gossip::deprecated::{
-            shuffle_peers_and_index, sorted_retransmit_peers_and_stakes, sorted_stakes_with_index,
-        },
+        rand::{seq::SliceRandom, Rng},
+        solana_gossip::{
+            crds::GossipRoute,
+            crds_value::{CrdsData, CrdsValue},
+            deprecated::{
+                shuffle_peers_and_index, sorted_retransmit_peers_and_stakes,
+                sorted_stakes_with_index,
+            },
+        },
+        solana_sdk::{signature::Keypair, timing::timestamp},
+        solana_streamer::socket::SocketAddrSpace,
+        std::{iter::repeat_with, sync::Arc},
     };

     // Legacy methods copied for testing backward compatibility.
@@ -554,10 +499,55 @@ mod tests {
         sorted_stakes_with_index(peers, stakes)
     }

+    fn make_cluster<R: Rng>(
+        rng: &mut R,
+    ) -> (
+        Vec<ContactInfo>,
+        HashMap<Pubkey, u64>, // stakes
+        ClusterInfo,
+    ) {
+        let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
+            .take(1000)
+            .collect();
+        nodes.shuffle(rng);
+        let this_node = nodes[0].clone();
+        let mut stakes: HashMap<Pubkey, u64> = nodes
+            .iter()
+            .filter_map(|node| {
+                if rng.gen_ratio(1, 7) {
+                    None // No stake for some of the nodes.
+                } else {
+                    Some((node.id, rng.gen_range(0, 20)))
+                }
+            })
+            .collect();
+        // Add some staked nodes with no contact-info.
+        stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
+        let cluster_info = ClusterInfo::new(
+            this_node,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
+        {
+            let now = timestamp();
+            let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();
+            // First node is pushed to crds table by ClusterInfo constructor.
+            for node in nodes.iter().skip(1) {
+                let node = CrdsData::ContactInfo(node.clone());
+                let node = CrdsValue::new_unsigned(node);
+                assert_eq!(
+                    gossip_crds.insert(node, now, GossipRoute::LocalMessage),
+                    Ok(())
+                );
+            }
+        }
+        (nodes, stakes, cluster_info)
+    }
+
     #[test]
     fn test_cluster_nodes_retransmit() {
         let mut rng = rand::thread_rng();
-        let (nodes, stakes, cluster_info) = make_test_cluster(&mut rng, 1_000, None);
+        let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
         let this_node = cluster_info.my_contact_info();
         // ClusterInfo::tvu_peers excludes the node itself.
         assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
@@ -638,7 +628,7 @@ mod tests {
     #[test]
     fn test_cluster_nodes_broadcast() {
         let mut rng = rand::thread_rng();
-        let (nodes, stakes, cluster_info) = make_test_cluster(&mut rng, 1_000, None);
+        let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
         // ClusterInfo::tvu_peers excludes the node itself.
         assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
         let cluster_nodes = ClusterNodes::<BroadcastStage>::new(&cluster_info, &stakes);
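Note: make_cluster / make_test_cluster above assign stakes with rand 0.7 semantics — gen_ratio(n, d) is true with probability n/d, and gen_range takes two arguments. A standalone sketch of the same stake-assignment trick (assumes rand = "0.7" as pinned in the manifest above):

use rand::Rng;
use std::collections::HashMap;

fn random_stakes<R: Rng>(
    rng: &mut R,
    node_ids: &[u64],
    unstaked_ratio: (u32, u32),
) -> HashMap<u64, u64> {
    let (num, den) = unstaked_ratio;
    node_ids
        .iter()
        .filter_map(|&id| {
            if rng.gen_ratio(num, den) {
                None // leave this node unstaked with probability num/den
            } else {
                Some((id, rng.gen_range(0, 20))) // rand 0.7 two-argument form
            }
        })
        .collect()
}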
@@ -1,6 +1,5 @@
 use {
     crate::consensus::Stake,
-    crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender},
     solana_measure::measure::Measure,
     solana_metrics::datapoint_info,
     solana_rpc::rpc_subscriptions::RpcSubscriptions,
@@ -15,6 +14,7 @@ use {
     collections::HashMap,
     sync::{
         atomic::{AtomicBool, Ordering},
+        mpsc::{channel, Receiver, RecvTimeoutError, Sender},
         Arc, RwLock,
     },
     thread::{self, Builder, JoinHandle},
@@ -63,7 +63,7 @@ impl AggregateCommitmentService {
         let (sender, receiver): (
             Sender<CommitmentAggregationData>,
             Receiver<CommitmentAggregationData>,
-        ) = unbounded();
+        ) = channel();
         let exit_ = exit.clone();
         (
             sender,
@@ -3,8 +3,7 @@ use {
         heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
         latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
         progress_map::{LockoutIntervals, ProgressMap},
-        tower1_7_14::Tower1_7_14,
-        tower_storage::{SavedTower, SavedTowerVersions, TowerStorage},
+        tower_storage::{SavedTower, TowerStorage},
     },
     chrono::prelude::*,
     solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db},
@@ -14,7 +13,6 @@ use {
     },
     solana_sdk::{
         clock::{Slot, UnixTimestamp},
-        feature_set,
         hash::Hash,
         instruction::Instruction,
         pubkey::Pubkey,
@@ -23,10 +21,7 @@ use {
     },
     solana_vote_program::{
         vote_instruction,
-        vote_state::{
-            BlockTimestamp, Lockout, Vote, VoteState, VoteStateUpdate, VoteTransaction,
-            MAX_LOCKOUT_HISTORY,
-        },
+        vote_state::{BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY},
     },
     std::{
         cmp::Ordering,
@@ -50,43 +45,29 @@ pub enum SwitchForkDecision {
 impl SwitchForkDecision {
     pub fn to_vote_instruction(
         &self,
-        vote: VoteTransaction,
+        vote: Vote,
         vote_account_pubkey: &Pubkey,
         authorized_voter_pubkey: &Pubkey,
     ) -> Option<Instruction> {
-        match (self, vote) {
-            (SwitchForkDecision::FailedSwitchThreshold(_, total_stake), _) => {
+        match self {
+            SwitchForkDecision::FailedSwitchThreshold(_, total_stake) => {
                 assert_ne!(*total_stake, 0);
                 None
             }
-            (SwitchForkDecision::FailedSwitchDuplicateRollback(_), _) => None,
-            (SwitchForkDecision::SameFork, VoteTransaction::Vote(v)) => Some(
-                vote_instruction::vote(vote_account_pubkey, authorized_voter_pubkey, v),
-            ),
-            (SwitchForkDecision::SameFork, VoteTransaction::VoteStateUpdate(v)) => {
-                Some(vote_instruction::update_vote_state(
-                    vote_account_pubkey,
-                    authorized_voter_pubkey,
-                    v,
-                ))
-            }
-            (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::Vote(v)) => {
+            SwitchForkDecision::FailedSwitchDuplicateRollback(_) => None,
+            SwitchForkDecision::SameFork => Some(vote_instruction::vote(
+                vote_account_pubkey,
+                authorized_voter_pubkey,
+                vote,
+            )),
+            SwitchForkDecision::SwitchProof(switch_proof_hash) => {
                 Some(vote_instruction::vote_switch(
                     vote_account_pubkey,
                     authorized_voter_pubkey,
-                    v,
+                    vote,
                     *switch_proof_hash,
                 ))
             }
-            (
-                SwitchForkDecision::SwitchProof(switch_proof_hash),
-                VoteTransaction::VoteStateUpdate(v),
-            ) => Some(vote_instruction::update_vote_state_switch(
-                vote_account_pubkey,
-                authorized_voter_pubkey,
-                v,
-                *switch_proof_hash,
-            )),
         }
     }

@@ -121,47 +102,14 @@ pub(crate) struct ComputedBankState {
     pub my_latest_landed_vote: Option<Slot>,
 }

-#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
-pub enum TowerVersions {
-    V1_17_14(Tower1_7_14),
-    Current(Tower),
-}
-
-impl TowerVersions {
-    pub fn new_current(tower: Tower) -> Self {
-        Self::Current(tower)
-    }
-
-    pub fn convert_to_current(self) -> Tower {
-        match self {
-            TowerVersions::V1_17_14(tower) => {
-                let box_last_vote = VoteTransaction::from(tower.last_vote.clone());
-
-                Tower {
-                    node_pubkey: tower.node_pubkey,
-                    threshold_depth: tower.threshold_depth,
-                    threshold_size: tower.threshold_size,
-                    vote_state: tower.vote_state,
-                    last_vote: box_last_vote,
-                    last_vote_tx_blockhash: tower.last_vote_tx_blockhash,
-                    last_timestamp: tower.last_timestamp,
-                    stray_restored_slot: tower.stray_restored_slot,
-                    last_switch_threshold_check: tower.last_switch_threshold_check,
-                }
-            }
-            TowerVersions::Current(tower) => tower,
-        }
-    }
-}
-
-#[frozen_abi(digest = "BfeSJNsfQeX6JU7dmezv1s1aSvR5SoyxKRRZ4ubTh2mt")]
+#[frozen_abi(digest = "GMs1FxKteU7K4ZFRofMBqNhBpM4xkPVxfYod6R8DQmpT")]
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
 pub struct Tower {
-    pub node_pubkey: Pubkey,
+    pub(crate) node_pubkey: Pubkey,
     threshold_depth: usize,
     threshold_size: f64,
-    pub(crate) vote_state: VoteState,
-    last_vote: VoteTransaction,
+    vote_state: VoteState,
+    last_vote: Vote,
     #[serde(skip)]
     // The blockhash used in the last vote transaction, may or may not equal the
     // blockhash of the voted block itself, depending if the vote slot was refreshed.
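Note: the TowerVersions enum removed above is a versioned-serialization pattern — old on-disk layouts stay around as enum variants, and convert_to_current migrates them so the rest of the code only ever sees the latest shape. A distilled standalone sketch of the pattern (illustrative fields, not the real Tower):

struct TowerV1 {
    root: u64,
}

struct TowerCurrent {
    root: u64,
    restored: bool,
}

enum Versions {
    V1(TowerV1),
    Current(TowerCurrent),
}

impl Versions {
    fn convert_to_current(self) -> TowerCurrent {
        match self {
            // migrate old fields; default anything the old layout lacked
            Versions::V1(t) => TowerCurrent {
                root: t.root,
                restored: false,
            },
            Versions::Current(t) => t,
        }
    }
}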
@@ -188,7 +136,7 @@ impl Default for Tower {
             threshold_depth: VOTE_THRESHOLD_DEPTH,
             threshold_size: VOTE_THRESHOLD_SIZE,
             vote_state: VoteState::default(),
-            last_vote: VoteTransaction::from(VoteStateUpdate::default()),
+            last_vote: Vote::default(),
             last_timestamp: BlockTimestamp::default(),
             last_vote_tx_blockhash: Hash::default(),
             stray_restored_slot: Option::default(),
@@ -411,31 +359,31 @@ impl Tower {
         self.last_vote_tx_blockhash = new_vote_tx_blockhash;
     }

-    // Returns true if we have switched the new vote instruction that directly sets vote state
-    pub(crate) fn is_direct_vote_state_update_enabled(bank: &Bank) -> bool {
-        bank.feature_set
-            .is_active(&feature_set::allow_votes_to_directly_update_vote_state::id())
-    }
-
     fn apply_vote_and_generate_vote_diff(
         local_vote_state: &mut VoteState,
         slot: Slot,
         hash: Hash,
         last_voted_slot_in_bank: Option<Slot>,
-    ) -> VoteTransaction {
+    ) -> Vote {
         let vote = Vote::new(vec![slot], hash);
-        local_vote_state.process_vote_unchecked(vote);
-        let slots = if let Some(last_voted_slot) = last_voted_slot_in_bank {
+        local_vote_state.process_vote_unchecked(&vote);
+        let slots = if let Some(last_voted_slot_in_bank) = last_voted_slot_in_bank {
             local_vote_state
                 .votes
                 .iter()
                 .map(|v| v.slot)
-                .skip_while(|s| *s <= last_voted_slot)
+                .skip_while(|s| *s <= last_voted_slot_in_bank)
                 .collect()
         } else {
             local_vote_state.votes.iter().map(|v| v.slot).collect()
         };
-        VoteTransaction::from(Vote::new(slots, hash))
+        trace!(
+            "new vote with {:?} {:?} {:?}",
+            last_voted_slot_in_bank,
+            slots,
+            local_vote_state.votes
+        );
+        Vote::new(slots, hash)
     }

     pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
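Note: apply_vote_and_generate_vote_diff above re-sends only the slots strictly newer than the last vote already landed in the bank, via skip_while over the sorted local vote stack. A standalone sketch of that filtering:

fn vote_diff(local_slots: &[u64], last_landed: Option<u64>) -> Vec<u64> {
    match last_landed {
        // only slots strictly newer than the last landed vote are re-sent
        Some(last) => local_slots
            .iter()
            .copied()
            .skip_while(|s| *s <= last)
            .collect(),
        None => local_slots.to_vec(),
    }
}

// e.g. vote_diff(&[3, 4, 5], Some(4)) == vec![5]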
@@ -449,12 +397,7 @@ impl Tower {
|
||||
|
||||
// Returns the new root if one is made after applying a vote for the given bank to
|
||||
// `self.vote_state`
|
||||
self.record_bank_vote_and_update_lockouts(
|
||||
bank.slot(),
|
||||
bank.hash(),
|
||||
last_voted_slot_in_bank,
|
||||
Self::is_direct_vote_state_update_enabled(bank),
|
||||
)
|
||||
self.record_bank_vote_and_update_lockouts(bank.slot(), bank.hash(), last_voted_slot_in_bank)
|
||||
}
|
||||
|
||||
fn record_bank_vote_and_update_lockouts(
|
||||
@@ -462,29 +405,17 @@ impl Tower {
|
||||
vote_slot: Slot,
|
||||
vote_hash: Hash,
|
||||
last_voted_slot_in_bank: Option<Slot>,
|
||||
is_direct_vote_state_update_enabled: bool,
|
||||
) -> Option<Slot> {
|
||||
trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
|
||||
let old_root = self.root();
|
||||
let mut new_vote = Self::apply_vote_and_generate_vote_diff(
|
||||
&mut self.vote_state,
|
||||
vote_slot,
|
||||
vote_hash,
|
||||
last_voted_slot_in_bank,
|
||||
);
|
||||
|
||||
let mut new_vote = if is_direct_vote_state_update_enabled {
|
||||
let vote = Vote::new(vec![vote_slot], vote_hash);
|
||||
self.vote_state.process_vote_unchecked(vote);
|
||||
VoteTransaction::from(VoteStateUpdate::new(
|
||||
self.vote_state.votes.clone(),
|
||||
self.vote_state.root_slot,
|
||||
vote_hash,
|
||||
))
|
||||
} else {
|
||||
Self::apply_vote_and_generate_vote_diff(
|
||||
&mut self.vote_state,
|
||||
vote_slot,
|
||||
vote_hash,
|
||||
last_voted_slot_in_bank,
|
||||
)
|
||||
};
|
||||
|
||||
new_vote.set_timestamp(self.maybe_timestamp(self.last_voted_slot().unwrap_or_default()));
|
||||
new_vote.timestamp = self.maybe_timestamp(self.last_vote.last_voted_slot().unwrap_or(0));
|
||||
self.last_vote = new_vote;
|
||||
|
||||
let new_root = self.root();
|
||||
@@ -503,33 +434,22 @@ impl Tower {
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
|
||||
self.record_bank_vote_and_update_lockouts(slot, hash, self.last_voted_slot(), true)
|
||||
}
|
||||
|
||||
/// Used for tests
|
||||
pub fn increase_lockout(&mut self, confirmation_count_increase: u32) {
|
||||
for vote in self.vote_state.votes.iter_mut() {
|
||||
vote.confirmation_count += confirmation_count_increase;
|
||||
}
|
||||
self.record_bank_vote_and_update_lockouts(slot, hash, self.last_voted_slot())
|
||||
}
|
||||
|
||||
pub fn last_voted_slot(&self) -> Option<Slot> {
|
||||
if self.last_vote.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(self.last_vote.slot(self.last_vote.len() - 1))
|
||||
}
|
||||
self.last_vote.last_voted_slot()
|
||||
}
|
||||
|
||||
pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> {
|
||||
Some((self.last_voted_slot()?, self.last_vote.hash()))
|
||||
self.last_vote.last_voted_slot_hash()
|
||||
}
|
||||
|
||||
pub fn stray_restored_slot(&self) -> Option<Slot> {
|
||||
self.stray_restored_slot
|
||||
}
|
||||
|
||||
pub fn last_vote(&self) -> VoteTransaction {
|
||||
pub fn last_vote(&mut self) -> Vote {
|
||||
self.last_vote.clone()
|
||||
}
|
||||
|
||||
@@ -560,17 +480,12 @@ impl Tower {
|
||||
self.vote_state.root_slot.unwrap()
|
||||
}
|
||||
|
||||
// a slot is recent if it's newer than the last vote we have. If we haven't voted yet
|
||||
// but have a root (hard forks situation) then comparre it to the root
|
||||
// a slot is recent if it's newer than the last vote we have
|
||||
pub fn is_recent(&self, slot: Slot) -> bool {
|
||||
if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
|
||||
if slot <= last_voted_slot {
|
||||
return false;
|
||||
}
|
||||
} else if let Some(root) = self.vote_state.root_slot {
|
||||
if slot <= root {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
@@ -666,9 +581,9 @@ impl Tower {
|
||||
// `switch < last` is needed not to warn! this message just because of using
|
||||
// newer snapshots on validator restart
|
||||
let message = format!(
|
||||
"bank_forks doesn't have corresponding data for the stray restored \
|
||||
"bank_forks doesn't have corresponding data for the stray restored \
|
||||
last vote({}), meaning some inconsistency between saved tower and ledger.",
|
||||
last_voted_slot
|
||||
last_voted_slot
|
||||
);
|
||||
warn!("{}", message);
|
||||
datapoint_warn!("tower_warn", ("warn", message, String));
|
||||
@@ -710,56 +625,42 @@ impl Tower {
            // TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
            // heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
            // None, but the last vote will be persisted in tower.
            let switch_hash = progress
                .get_hash(switch_slot)
                .expect("Slot we're trying to switch to must exist AND be frozen in progress map");
            if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice
                .latest_invalid_ancestor(&(last_voted_slot, last_voted_hash))
            {
                // We're rolling back because one of the ancestors of the last vote was a duplicate. In this
                // case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
                // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
                // should then have a special case continue building an alternate fork from this ancestor, NOT
                // the `last_voted_slot`. This is in contrast to usual SwitchFailure where ReplayStage continues to build blocks
                // on latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
                if heaviest_subtree_fork_choice.is_strict_ancestor(
                    &(switch_slot, switch_hash),
                    &(last_voted_slot, last_voted_hash),
                ) {
                    return rollback_due_to_to_duplicate_ancestor(latest_duplicate_ancestor);
                } else if progress
                    .get_hash(last_voted_slot)
                    .map(|current_slot_hash| current_slot_hash != last_voted_hash)
                    .unwrap_or(true)
                {
                    // Our last vote slot was purged because it was on a duplicate fork, don't continue below
                    // where checks may panic. We allow a freebie vote here that may violate switching
                    // thresholds
                    // TODO: Properly handle this case
                    info!(
                        "Allowing switch vote on {:?} because last vote {:?} was rolled back",
                        (switch_slot, switch_hash),
                        (last_voted_slot, last_voted_hash)
                    );
                    return SwitchForkDecision::SwitchProof(Hash::default());
                }
            }

            let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| {
                if self.is_stray_last_vote() {
                    // Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
                    // return Some(_), justifying to panic! here.
                    // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
                    // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
                    // touched in that case as well.
                    // In other words, except being stray, all other slots have been voted on while
                    // this validator has been running, so we must be able to fetch ancestors for
                    // all of them.
                    empty_ancestors_due_to_minor_unsynced_ledger()
                } else {
                    panic!("no ancestors found with slot: {}", last_voted_slot);
                }
            });

            let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();

@@ -831,76 +732,22 @@ impl Tower {
            // Find any locked out intervals for vote accounts in this bank with
            // `lockout_interval_end` >= `last_vote`, which implies they are locked out at
            // `last_vote` on another fork.
            for (_lockout_interval_end, intervals_keyed_by_end) in
                lockout_intervals.range((Included(last_voted_slot), Unbounded))
            {
                for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
                    if locked_out_vote_accounts.contains(vote_account_pubkey) {
                        continue;
                    }

                    // Only count lockouts on slots that are:
                    // 1) Not ancestors of `last_vote`, meaning being on different fork
                    // 2) Not from before the current root as we can't determine if
                    // anything before the root was an ancestor of `last_vote` or not
                    if !last_vote_ancestors.contains(lockout_interval_start)
                        // Given a `lockout_interval_start` < root that appears in a
                        // bank for a `candidate_slot`, it must be that `lockout_interval_start`
                        // is an ancestor of the current root, because `candidate_slot` is a
                        // descendant of the current root
                        && *lockout_interval_start > root
                    {
                        let stake = epoch_vote_accounts
                            .get(vote_account_pubkey)
                            .map(|(stake, _)| *stake)
                            .unwrap_or(0);
                        locked_out_stake += stake;
                        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                            return SwitchForkDecision::SwitchProof(switch_proof);
                        }
                        locked_out_vote_accounts.insert(vote_account_pubkey);
                    }
                }
            }

@@ -912,13 +759,58 @@ impl Tower {
            // Check the latest votes for potentially gossip votes that haven't landed yet
            for (
                vote_account_pubkey,
                (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash),
            ) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes()
            {
                if locked_out_vote_accounts.contains(&vote_account_pubkey) {
                    continue;
                }

                if *candidate_latest_frozen_vote > last_voted_slot
                    &&
                    // Because `candidate_latest_frozen_vote` is the last vote made by some validator
                    // in the cluster for a frozen bank `B` observed through gossip, we may have cleared
                    // that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
                    // like so:
                    //
                    //    |----------X ------candidate_latest_frozen_vote (frozen)
                    // old root
                    //    |----------new root ----last_voted_slot
                    //
                    // In most cases, because `last_voted_slot` must be a descendant of `root`, then
                    // if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
                    // directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
                    // because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
                    // should be safe to count towards the switching proof:
                    //
                    // However, there is also the possibility that `last_voted_slot` is a stray, in which
                    // case we cannot make this conclusion as we do not know the ancestors/descendants
                    // of strays. Hence we err on the side of caution here and ignore this vote. This
                    // is ok because validators voting on different unrooted forks should eventually vote
                    // on some descendant of the root, at which time they can be included in switching proofs.
                    !Self::is_candidate_slot_descendant_of_last_vote(
                        *candidate_latest_frozen_vote, last_voted_slot, ancestors)
                    .unwrap_or(true)
                {
                    let stake = epoch_vote_accounts
                        .get(vote_account_pubkey)
                        .map(|(stake, _)| *stake)
                        .unwrap_or(0);
                    locked_out_stake += stake;
                    if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                        return SwitchForkDecision::SwitchProof(switch_proof);
                    }
                    locked_out_vote_accounts.insert(vote_account_pubkey);
                }
            }

            // We have not detected sufficient lockout past the last voted slot to generate
            // a switching proof
            SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
        })
        .unwrap_or(SwitchForkDecision::SameFork)
    }
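Both loops above reduce to the same rule: accumulate stake that is provably locked out on a conflicting fork, and emit a switching proof as soon as it exceeds SWITCH_FORK_THRESHOLD of total stake. A self-contained sketch of just that arithmetic, with a simplified decision enum and the 0.38 threshold assumed to mirror Solana's constant (not code from this diff):

    const SWITCH_FORK_THRESHOLD: f64 = 0.38; // assumed value mirroring Solana's constant

    #[derive(Debug, PartialEq)]
    enum Decision {
        SwitchProof,
        FailedSwitchThreshold(u64, u64),
    }

    // Walk hypothetical (stake, conflicts_with_last_vote) pairs and switch as
    // soon as conflicting stake crosses the threshold fraction of total stake.
    fn check_switch(candidate_stakes: &[(u64, bool)], total_stake: u64) -> Decision {
        let mut locked_out_stake = 0u64;
        for &(stake, conflicts_with_last_vote) in candidate_stakes {
            if conflicts_with_last_vote {
                locked_out_stake += stake;
                if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
                    return Decision::SwitchProof;
                }
            }
        }
        Decision::FailedSwitchThreshold(locked_out_stake, total_stake)
    }

    fn main() {
        // 30% conflicting: not enough to switch.
        assert_eq!(
            check_switch(&[(30, true), (70, false)], 100),
            Decision::FailedSwitchThreshold(30, 100)
        );
        // 40% conflicting: crosses the 38% threshold.
        assert_eq!(check_switch(&[(25, true), (15, true)], 100), Decision::SwitchProof);
    }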
    #[allow(clippy::too_many_arguments)]

@@ -1039,7 +931,13 @@ impl Tower {
    }

    pub fn is_stray_last_vote(&self) -> bool {
-        self.stray_restored_slot == self.last_voted_slot()
+        if let Some(last_voted_slot) = self.last_voted_slot() {
+            if let Some(stray_restored_slot) = self.stray_restored_slot {
+                return stray_restored_slot == last_voted_slot;
+            }
+        }
+
+        false
    }
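The expanded form in this hunk makes the None/None case explicit: comparing two Options directly treats None == None as true, so the one-line version would report a tower with neither a stray slot nor a last vote as stray. A quick demonstration (not from this diff):

    fn main() {
        let stray_restored_slot: Option<u64> = None;
        let last_voted_slot: Option<u64> = None;

        // Direct Option comparison: None == None is true, so the one-line
        // version claims "stray" even though there is no stray slot at all.
        assert!(stray_restored_slot == last_voted_slot);

        // The expanded version only answers true when both sides are Some and equal.
        let is_stray = match (stray_restored_slot, last_voted_slot) {
            (Some(stray), Some(last)) => stray == last,
            _ => false,
        };
        assert!(!is_stray);
    }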
    // The tower root can be older/newer if the validator booted from a newer/older snapshot, so

@@ -1061,10 +959,8 @@ impl Tower {
        assert_eq!(slot_history.check(replayed_root), Check::Found);

        assert!(
-            self.last_vote == VoteTransaction::from(VoteStateUpdate::default())
-                && self.vote_state.votes.is_empty()
-                || self.last_vote != VoteTransaction::from(VoteStateUpdate::default())
-                    && !self.vote_state.votes.is_empty(),
+            self.last_vote == Vote::default() && self.vote_state.votes.is_empty()
+                || self.last_vote != Vote::default() && !self.vote_state.votes.is_empty(),
            "last vote: {:?} vote_state.votes: {:?}",
            self.last_vote,
            self.vote_state.votes
@@ -1218,7 +1114,7 @@ impl Tower {
            info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
            // we might not have banks for those votes so just reset.
            // That's because the votes may well be past replayed_root
-            self.last_vote = VoteTransaction::from(Vote::default());
+            self.last_vote = Vote::default();
        } else {
            info!(
                "{} restored votes (out of {}) were on different fork or are upcoming votes on unrooted slots: {:?}!",
@@ -1227,8 +1123,11 @@ impl Tower {
                self.voted_slots()
            );

-            assert_eq!(self.last_voted_slot(), self.voted_slots().last().copied());
-            self.stray_restored_slot = self.last_vote.last_voted_slot();
+            assert_eq!(
+                self.last_vote.last_voted_slot().unwrap(),
+                *self.voted_slots().last().unwrap()
+            );
+            self.stray_restored_slot = Some(self.last_vote.last_voted_slot().unwrap());
        }

        Ok(())
@@ -1275,12 +1174,13 @@ impl Tower {

    pub fn save(&self, tower_storage: &dyn TowerStorage, node_keypair: &Keypair) -> Result<()> {
        let saved_tower = SavedTower::new(self, node_keypair)?;
-        tower_storage.store(&SavedTowerVersions::from(saved_tower))?;
+        tower_storage.store(&saved_tower)?;
        Ok(())
    }

    pub fn restore(tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey) -> Result<Self> {
-        tower_storage.load(node_pubkey)
+        let saved_tower = tower_storage.load(node_pubkey)?;
+        saved_tower.try_into_tower(node_pubkey)
    }
}
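Both sides of the save/restore hunk share the same shape: serialize and sign on save, then load and verify against the node's identity on restore. A minimal sketch of that round trip; TowerStorage, SavedTower, and the signer check here are simplified stand-ins, not the real API:

    struct SavedTower { root: u64, signer: String }

    trait TowerStorage {
        fn store(&mut self, saved: SavedTower);
        fn load(&self, node: &str) -> Option<SavedTower>;
    }

    struct MemStorage { slot: Option<SavedTower> }

    impl TowerStorage for MemStorage {
        fn store(&mut self, saved: SavedTower) { self.slot = Some(saved); }
        fn load(&self, node: &str) -> Option<SavedTower> {
            // Only hand back a tower recorded for the requesting node, loosely
            // mimicking the signature check done by the real try_into_tower.
            self.slot.as_ref().filter(|s| s.signer == node).map(|s| SavedTower {
                root: s.root,
                signer: s.signer.clone(),
            })
        }
    }

    fn main() {
        let mut storage = MemStorage { slot: None };
        storage.store(SavedTower { root: 42, signer: "node-a".to_string() });
        // Restore succeeds for the saving node, fails for anyone else.
        assert_eq!(storage.load("node-a").map(|s| s.root), Some(42));
        assert!(storage.load("node-b").is_none());
    }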
@@ -1388,7 +1288,7 @@ pub mod test {
        },
        solana_vote_program::vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY},
        std::{
-            collections::{HashMap, VecDeque},
+            collections::HashMap,
            fs::{remove_file, OpenOptions},
            io::{Read, Seek, SeekFrom, Write},
            path::PathBuf,
@@ -1429,29 +1329,17 @@ pub mod test {
        let vote = Vote::default();
        let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
        assert!(decision
-            .to_vote_instruction(
-                VoteTransaction::from(vote.clone()),
-                &Pubkey::default(),
-                &Pubkey::default()
-            )
+            .to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
            .is_none());

        decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
        assert!(decision
-            .to_vote_instruction(
-                VoteTransaction::from(vote.clone()),
-                &Pubkey::default(),
-                &Pubkey::default()
-            )
+            .to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
            .is_none());

        decision = SwitchForkDecision::SameFork;
        assert_eq!(
-            decision.to_vote_instruction(
-                VoteTransaction::from(vote.clone()),
-                &Pubkey::default(),
-                &Pubkey::default()
-            ),
+            decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
            Some(vote_instruction::vote(
                &Pubkey::default(),
                &Pubkey::default(),
@@ -1461,11 +1349,7 @@ pub mod test {

        decision = SwitchForkDecision::SwitchProof(Hash::default());
        assert_eq!(
-            decision.to_vote_instruction(
-                VoteTransaction::from(vote.clone()),
-                &Pubkey::default(),
-                &Pubkey::default()
-            ),
+            decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
            Some(vote_instruction::vote_switch(
                &Pubkey::default(),
                &Pubkey::default(),
@@ -1487,7 +1371,7 @@ pub mod test {

        // Set the voting behavior
        let mut cluster_votes = HashMap::new();
-        let votes = vec![1, 2, 3, 4, 5];
+        let votes = vec![0, 1, 2, 3, 4, 5];
        cluster_votes.insert(node_pubkey, votes.clone());
        vote_simulator.fill_bank_forks(forks, &cluster_votes, true);
@@ -1498,12 +1382,9 @@ pub mod test {
            .is_empty());
        }

-        for i in 1..5 {
-            assert_eq!(tower.vote_state.votes[i - 1].slot as usize, i);
-            assert_eq!(
-                tower.vote_state.votes[i - 1].confirmation_count as usize,
-                6 - i
-            );
+        for i in 0..5 {
+            assert_eq!(tower.vote_state.votes[i].slot as usize, i);
+            assert_eq!(tower.vote_state.votes[i].confirmation_count as usize, 6 - i);
        }
    }
@@ -1963,7 +1844,7 @@ pub mod test {
                    / (tr(44)
                        // Minor fork 2
                        / (tr(45) / (tr(46))))
                    / (tr(110)))));

        // Have two validators, each representing 20% of the stake vote on
        // minor fork 2 at slots 46 + 47
@@ -2035,7 +1916,7 @@ pub mod test {
        let mut my_votes: Vec<Slot> = vec![];
        let next_unlocked_slot = 110;
        // Vote on the first minor fork
-        my_votes.extend(1..=14);
+        my_votes.extend(0..=14);
        // Come back to the main fork
        my_votes.extend(43..=44);
        // Vote on the second minor fork
@@ -2224,8 +2105,8 @@ pub mod test {
    #[test]
    fn test_is_locked_out_empty() {
        let tower = Tower::new_for_tests(0, 0.67);
-        let ancestors = HashSet::from([0]);
-        assert!(!tower.is_locked_out(1, &ancestors));
+        let ancestors = HashSet::new();
+        assert!(!tower.is_locked_out(0, &ancestors));
    }
    #[test]

@@ -2256,7 +2137,7 @@ pub mod test {
    #[test]
    fn test_check_recent_slot() {
        let mut tower = Tower::new_for_tests(0, 0.67);
-        assert!(tower.is_recent(1));
+        assert!(tower.is_recent(0));
        assert!(tower.is_recent(32));
        for i in 0..64 {
            tower.record_vote(i, Hash::default());
@@ -2371,7 +2252,7 @@ pub mod test {
        let mut local = VoteState::default();
        let vote = Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), None);
        assert_eq!(local.votes.len(), 1);
-        assert_eq!(vote.slots(), vec![0]);
+        assert_eq!(vote.slots, vec![0]);
        assert_eq!(local.tower(), vec![0]);
    }

@@ -2382,7 +2263,7 @@ pub mod test {
        // another vote for slot 0 should return an empty vote as the diff.
        let vote =
            Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), Some(0));
-        assert!(vote.is_empty());
+        assert!(vote.slots.is_empty());
    }
    #[test]

@@ -2393,11 +2274,11 @@ pub mod test {
            hash: Hash::default(),
            timestamp: None,
        };
-        local.process_vote_unchecked(vote);
+        local.process_vote_unchecked(&vote);
        assert_eq!(local.votes.len(), 1);
        let vote =
            Tower::apply_vote_and_generate_vote_diff(&mut local, 1, Hash::default(), Some(0));
-        assert_eq!(vote.slots(), vec![1]);
+        assert_eq!(vote.slots, vec![1]);
        assert_eq!(local.tower(), vec![0, 1]);
    }

@@ -2409,7 +2290,7 @@ pub mod test {
            hash: Hash::default(),
            timestamp: None,
        };
-        local.process_vote_unchecked(vote);
+        local.process_vote_unchecked(&vote);
        assert_eq!(local.votes.len(), 1);

        // First vote expired, so should be evicted from tower. Thus even with
@@ -2417,7 +2298,7 @@ pub mod test {
        // observable in any of the results.
        let vote =
            Tower::apply_vote_and_generate_vote_diff(&mut local, 3, Hash::default(), Some(0));
-        assert_eq!(vote.slots(), vec![3]);
+        assert_eq!(vote.slots, vec![3]);
        assert_eq!(local.tower(), vec![3]);
    }
@@ -2490,26 +2371,17 @@ pub mod test {
    fn vote_and_check_recent(num_votes: usize) {
        let mut tower = Tower::new_for_tests(1, 0.67);
        let slots = if num_votes > 0 {
-            { 0..num_votes }
-                .map(|i| Lockout {
-                    slot: i as u64,
-                    confirmation_count: (num_votes as u32) - (i as u32),
-                })
-                .collect()
+            vec![num_votes as u64 - 1]
        } else {
            vec![]
        };
-        let mut expected = VoteStateUpdate::new(
-            VecDeque::from(slots),
-            if num_votes > 0 { Some(0) } else { None },
-            Hash::default(),
-        );
+        let mut expected = Vote::new(slots, Hash::default());
        for i in 0..num_votes {
            tower.record_vote(i as u64, Hash::default());
        }

-        expected.timestamp = tower.last_vote.timestamp();
-        assert_eq!(VoteTransaction::from(expected), tower.last_vote)
+        expected.timestamp = tower.last_vote.timestamp;
+        assert_eq!(expected, tower.last_vote)
    }
    #[test]

@@ -2809,12 +2681,10 @@ pub mod test {
                    .write(true)
                    .open(path)
                    .unwrap();
-                // 4 is the offset into SavedTowerVersions for the signature
-                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
                let mut buf = [0u8];
                assert_eq!(file.read(&mut buf).unwrap(), 1);
                buf[0] = !buf[0];
-                assert_eq!(file.seek(SeekFrom::Start(4)).unwrap(), 4);
+                assert_eq!(file.seek(SeekFrom::Start(0)).unwrap(), 0);
                assert_eq!(file.write(&buf).unwrap(), 1);
            },
        );
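The hunk above moves the corrupted byte from offset 4 (past the removed SavedTowerVersions enum tag) back to offset 0, but the test pattern itself is generic: flip one serialized byte in place, then expect the signature check on reload to fail. A std-only sketch of the seek/flip/write dance over a hypothetical temp file:

    use std::fs::OpenOptions;
    use std::io::{Read, Seek, SeekFrom, Write};

    fn main() -> std::io::Result<()> {
        let path = std::env::temp_dir().join("tower_corruption_demo.bin");
        std::fs::write(&path, b"signed-tower-bytes")?;

        let mut file = OpenOptions::new().read(true).write(true).open(&path)?;

        // Read one byte at the chosen offset, invert it, write it back in place.
        let offset = 0; // byte to corrupt; 4 in the old test, 0 in the new one
        file.seek(SeekFrom::Start(offset))?;
        let mut buf = [0u8];
        assert_eq!(file.read(&mut buf)?, 1);
        buf[0] = !buf[0];
        file.seek(SeekFrom::Start(offset))?;
        assert_eq!(file.write(&buf)?, 1);
        drop(file);

        // Any signature verification over the file contents would now fail.
        let corrupted = std::fs::read(&path)?;
        assert_ne!(corrupted, b"signed-tower-bytes");
        std::fs::remove_file(&path)?;
        Ok(())
    }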
@@ -3153,7 +3023,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(1));
        tower.vote_state.votes.push_back(Lockout::new(0));
        let vote = Vote::new(vec![0], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(0);
@@ -3171,7 +3041,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(1));
        tower.vote_state.votes.push_back(Lockout::new(2));
        let vote = Vote::new(vec![2], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(0);
@@ -3196,7 +3066,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(0));
        tower.vote_state.votes.push_back(Lockout::new(1));
        let vote = Vote::new(vec![1], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(MAX_ENTRIES);
@@ -3215,7 +3085,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(2));
        tower.vote_state.votes.push_back(Lockout::new(1));
        let vote = Vote::new(vec![1], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(0);
@@ -3234,7 +3104,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(3));
        tower.vote_state.votes.push_back(Lockout::new(3));
        let vote = Vote::new(vec![3], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(0);
@@ -3253,7 +3123,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(43));
        tower.vote_state.votes.push_back(Lockout::new(44));
        let vote = Vote::new(vec![44], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(42);
@@ -3267,7 +3137,7 @@ pub mod test {
        let mut tower = Tower::new_for_tests(10, 0.9);
        tower.vote_state.votes.push_back(Lockout::new(0));
        let vote = Vote::new(vec![0], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;

        let mut slot_history = SlotHistory::default();
        slot_history.add(0);
@@ -3281,7 +3151,7 @@ pub mod test {
        tower.vote_state.votes.push_back(Lockout::new(13));
        tower.vote_state.votes.push_back(Lockout::new(14));
        let vote = Vote::new(vec![14], Hash::default());
-        tower.last_vote = VoteTransaction::from(vote);
+        tower.last_vote = vote;
        tower.initialize_root(12);

        let mut slot_history = SlotHistory::default();
@@ -4,14 +4,13 @@
//! table to blockstore.

use {
-    crossbeam_channel::Receiver,
    solana_ledger::blockstore::Blockstore,
    solana_measure::measure::Measure,
    solana_program_runtime::timings::ExecuteTimings,
    solana_runtime::{bank::Bank, cost_model::CostModel},
    solana_sdk::timing::timestamp,
    std::{
-        sync::{Arc, RwLock},
+        sync::{mpsc::Receiver, Arc, RwLock},
        thread::{self, Builder, JoinHandle},
    },
};
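The remaining files in this diff all toggle between crossbeam_channel and std::sync::mpsc, which works as pure import churn because the two expose near-identical constructors and receive calls. A std-only sketch of the shared surface, with the crossbeam side shown in comments (it would assume a crossbeam-channel dependency):

    use std::sync::mpsc::{channel, RecvTimeoutError};
    use std::time::Duration;

    fn main() {
        // std::sync::mpsc, as used on one side of this diff:
        let (sender, receiver) = channel();
        sender.send(42u64).unwrap();
        assert_eq!(receiver.recv_timeout(Duration::from_millis(10)), Ok(42));
        drop(sender);
        assert_eq!(
            receiver.recv_timeout(Duration::from_millis(10)),
            Err(RecvTimeoutError::Disconnected)
        );

        // The crossbeam_channel side is nearly call-for-call identical:
        //
        //     let (sender, receiver) = crossbeam_channel::unbounded();
        //     sender.send(42u64).unwrap();
        //     receiver.recv_timeout(Duration::from_millis(10)).unwrap();
        //
        // The practical differences are that crossbeam receivers are Clone
        // (multi-consumer) and generally faster, not the call signatures.
    }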
@@ -1,9 +1,8 @@
use {
-    crossbeam_channel::Receiver,
    solana_measure::measure::Measure,
    solana_runtime::bank::Bank,
    std::{
-        sync::Arc,
+        sync::{mpsc::Receiver, Arc},
        thread::{self, Builder, JoinHandle},
    },
};
@@ -5,7 +5,6 @@ use {
        banking_stage::HOLD_TRANSACTIONS_SLOT_OFFSET,
        result::{Error, Result},
    },
-    crossbeam_channel::{unbounded, RecvTimeoutError},
    solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
    solana_perf::{packet::PacketBatchRecycler, recycler::Recycler},
    solana_poh::poh_recorder::PohRecorder,
@@ -16,7 +15,11 @@ use {
    solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
    std::{
        net::UdpSocket,
-        sync::{atomic::AtomicBool, Arc, Mutex},
+        sync::{
+            atomic::AtomicBool,
+            mpsc::{channel, RecvTimeoutError},
+            Arc, Mutex,
+        },
        thread::{self, Builder, JoinHandle},
    },
};
@@ -35,8 +38,8 @@ impl FetchStage {
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        coalesce_ms: u64,
    ) -> (Self, PacketBatchReceiver, PacketBatchReceiver) {
-        let (sender, receiver) = unbounded();
-        let (vote_sender, vote_receiver) = unbounded();
+        let (sender, receiver) = channel();
+        let (vote_sender, vote_receiver) = channel();
        (
            Self::new_with_sender(
                sockets,
@@ -144,7 +147,7 @@ impl FetchStage {
            )
        });

-        let (forward_sender, forward_receiver) = unbounded();
+        let (forward_sender, forward_receiver) = channel();
        let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| {
            streamer::receiver(
                socket,
@@ -2715,8 +2715,7 @@ mod test {
                stake
            );
        }
-        {
-            let slot = &17;
+        for slot in &[17] {
            assert_eq!(
                tree1
                    .stake_voted_subtree(&(*slot, Hash::default()))
@@ -1,5 +1,4 @@
use {
-    crate::leader_slot_banking_stage_timing_metrics::*,
    solana_poh::poh_recorder::BankStart,
    solana_sdk::{clock::Slot, saturating_add_assign},
    std::time::Instant,
@@ -39,12 +38,41 @@ pub(crate) struct ProcessTransactionsSummary {

    // The number of transactions filtered out by the cost model
    pub cost_model_throttled_transactions_count: usize,

-    // Total amount of time spent running the cost model
-    pub cost_model_us: u64,
-
-    // Breakdown of time spent executing and committing transactions
-    pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
}

+// Metrics capturing wallclock time spent in various parts of BankingStage during this
+// validator's leader slot
+#[derive(Debug)]
+struct LeaderSlotTimingMetrics {
+    bank_detected_time: Instant,
+
+    // Delay from when the bank was created to when this thread detected it
+    bank_detected_delay_us: u64,
+}
+
+impl LeaderSlotTimingMetrics {
+    fn new(bank_creation_time: &Instant) -> Self {
+        Self {
+            bank_detected_time: Instant::now(),
+            bank_detected_delay_us: bank_creation_time.elapsed().as_micros() as u64,
+        }
+    }
+
+    fn report(&self, id: u32, slot: Slot) {
+        let bank_detected_to_now = self.bank_detected_time.elapsed().as_micros() as u64;
+        datapoint_info!(
+            "banking_stage-leader_slot_loop_timings",
+            ("id", id as i64, i64),
+            ("slot", slot as i64, i64),
+            ("bank_detected_to_now_us", bank_detected_to_now, i64),
+            (
+                "bank_creation_to_now_us",
+                bank_detected_to_now + self.bank_detected_delay_us,
+                i64
+            ),
+            ("bank_detected_delay_us", self.bank_detected_delay_us, i64),
+        );
+    }
+}

// Metrics describing packets ingested/processed in various parts of BankingStage during this
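LeaderSlotTimingMetrics above captures one subtle point: the bank may have existed for a while before this thread noticed it, so "creation to now" is detection-to-now plus the creation-to-detection delay captured once at construction. A toy reconstruction of that bookkeeping (std only, no datapoint macro, not code from this diff):

    use std::time::Instant;

    struct TimingMetrics {
        bank_detected_time: Instant,
        bank_detected_delay_us: u64, // creation-to-detection delay, captured once
    }

    impl TimingMetrics {
        fn new(bank_creation_time: &Instant) -> Self {
            Self {
                bank_detected_time: Instant::now(),
                bank_detected_delay_us: bank_creation_time.elapsed().as_micros() as u64,
            }
        }

        // creation-to-now = detection-to-now + creation-to-detection
        fn bank_creation_to_now_us(&self) -> u64 {
            self.bank_detected_time.elapsed().as_micros() as u64 + self.bank_detected_delay_us
        }
    }

    fn main() {
        let bank_creation_time = Instant::now();
        // ... time passes before the banking thread detects the new bank ...
        let metrics = TimingMetrics::new(&bank_creation_time);
        // The total can never be smaller than the detection delay alone.
        assert!(metrics.bank_creation_to_now_us() >= metrics.bank_detected_delay_us);
    }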
@@ -334,8 +362,6 @@ impl LeaderSlotMetricsTracker {
            failed_commit_count,
            ref retryable_transaction_indexes,
            cost_model_throttled_transactions_count,
-            cost_model_us,
-            ref execute_and_commit_timings,
            ..
        } = process_transactions_summary;

@@ -389,23 +415,9 @@ impl LeaderSlotMetricsTracker {
                .cost_model_throttled_transactions_count,
            *cost_model_throttled_transactions_count as u64
        );

-        saturating_add_assign!(
-            leader_slot_metrics
-                .timing_metrics
-                .process_packets_timings
-                .cost_model_us,
-            *cost_model_us as u64
-        );
-
-        leader_slot_metrics
-            .timing_metrics
-            .execute_and_commit_timings
-            .accumulate(execute_and_commit_timings);
        }
    }

    // Packet inflow/outflow/processing metrics
    pub(crate) fn increment_total_new_valid_packets(&mut self, count: u64) {
        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
            saturating_add_assign!(
@@ -515,166 +527,6 @@ impl LeaderSlotMetricsTracker {
            );
        }
    }

-    // Outermost banking thread's loop timing metrics
-    pub(crate) fn increment_process_buffered_packets_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .outer_loop_timings
-                    .process_buffered_packets_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_slot_metrics_check_slot_boundary_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .outer_loop_timings
-                    .slot_metrics_check_slot_boundary_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_receive_and_buffer_packets_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .outer_loop_timings
-                    .receive_and_buffer_packets_us,
-                us
-            );
-        }
-    }
-
-    // Processing buffer timing metrics
-    pub(crate) fn increment_make_decision_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_buffered_packets_timings
-                    .make_decision_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_consume_buffered_packets_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_buffered_packets_timings
-                    .consume_buffered_packets_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_forward_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_buffered_packets_timings
-                    .forward_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_buffered_packets_timings
-                    .forward_and_hold_us,
-                us
-            );
-        }
-    }
-
-    // Consuming buffered packets timing metrics
-    pub(crate) fn increment_end_of_slot_filtering_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .consume_buffered_packets_timings
-                    .end_of_slot_filtering_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_consume_buffered_packets_poh_recorder_lock_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .consume_buffered_packets_timings
-                    .poh_recorder_lock_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .consume_buffered_packets_timings
-                    .process_packets_transactions_us,
-                us
-            );
-        }
-    }
-
-    // Processing packets timing metrics
-    pub(crate) fn increment_transactions_from_packets_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_packets_timings
-                    .transactions_from_packets_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_process_transactions_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_packets_timings
-                    .process_transactions_us,
-                us
-            );
-        }
-    }
-
-    pub(crate) fn increment_filter_retryable_packets_us(&mut self, us: u64) {
-        if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
-            saturating_add_assign!(
-                leader_slot_metrics
-                    .timing_metrics
-                    .process_packets_timings
-                    .filter_retryable_packets_us,
-                us
-            );
-        }
-    }
}

#[cfg(test)]
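All of the increment_* methods removed above share one shape: if the tracker currently holds leader-slot metrics, saturating-add the elapsed microseconds into one nested counter. A condensed sketch of that pattern, with a local macro standing in for solana_sdk::saturating_add_assign:

    // Local stand-in for solana_sdk's saturating_add_assign macro.
    macro_rules! saturating_add_assign {
        ($lhs:expr, $rhs:expr) => {
            $lhs = $lhs.saturating_add($rhs)
        };
    }

    #[derive(Default)]
    struct TimingCounters {
        process_buffered_packets_us: u64,
    }

    #[derive(Default)]
    struct Tracker {
        // Present only while this validator is the leader, hence the Option.
        leader_slot_metrics: Option<TimingCounters>,
    }

    impl Tracker {
        fn increment_process_buffered_packets_us(&mut self, us: u64) {
            if let Some(metrics) = &mut self.leader_slot_metrics {
                saturating_add_assign!(metrics.process_buffered_packets_us, us);
            }
        }
    }

    fn main() {
        let mut tracker = Tracker::default();
        tracker.increment_process_buffered_packets_us(5); // no-op: not leader
        tracker.leader_slot_metrics = Some(TimingCounters::default());
        tracker.increment_process_buffered_packets_us(u64::MAX);
        tracker.increment_process_buffered_packets_us(7); // saturates, never overflows
        assert_eq!(
            tracker.leader_slot_metrics.as_ref().unwrap().process_buffered_packets_us,
            u64::MAX
        );
    }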