Compare commits
307 Commits
SHA1
---
03b930515b
c9f763ea6e
422044f375
7584262f47
894f121d0e
4b133509d9
e8040a828d
78086329be
6deeedd886
1a0146f21d
7d0a9e0381
0f7b84197f
bb06502d24
b7f1f19d8e
0707290bbf
8934a3961f
b142ef5f8b
f2dc9dd96e
20ad3005b5
eacc69efba
3c200ae45a
74e7c7cbd0
6bd6d6212c
4d2e66cf9c
65fe00b7ad
02c509390a
49b0d1792b
9511031490
04ee86e93c
82f25f982e
1cc32c9cda
aedcab846c
548ddff7ed
7aced9e772
1cc8de0fed
f08c7b2294
cc58d36de6
128393da54
e7964a0b89
77dc3746a3
e88f4d689c
f1858c74a4
7427dafc36
1cdcabf7cf
ea192b3c83
927057df26
19049ca91b
3542348c1e
df9061b933
98658ebed5
2a93147b1b
4145c629c0
551dc0a74c
85bbcdad9a
336c1c1d37
1570afe493
c7c650fccc
9b7fba69f4
0bd355c166
0d8c8d013d
7a57f7b8cc
940c4731c4
4332f0ca05
e0e6e20e02
0fdaa1438e
8a111229f7
e3eb9c195a
8dd5ec6fbd
e5d60bc56d
cba97d576a
0670213365
ed5c11b3aa
4b8c4704b0
c5374095e6
6abd5fbc3e
a6a302f41f
4c764829da
9900c1ad8a
3fabbab417
6c99c1ae13
c2320fceab
8b87d86358
5d0e1d62d5
7e613c7a78
2a30436e45
5315a89e7d
8c328316ae
030a97d098
c40e71dc03
2f633cdfb7
edd6ae588a
2a73ba2fbb
5eadb86500
c98ab6c6dc
5321463892
d668a7694f
9bb482e46f
c534c928a7
0d0478c4a4
cda681a2f0
72ed4f28b1
6afeaac7a5
75a2b66206
5966053a6d
e500b79858
428c20c79f
bf84bc17ea
30fa9cbee7
eefca613ad
f6d943aec7
88462e67b5
eb683dd402
c7d0aea5f4
8f08953100
09b009abd9
c37e481a43
72cf55b8c3
6cb24ae7b6
eeaf0234f0
0200740d70
1268eef3b2
b433048003
daf2c3c155
03d213d764
99f9481b5d
10bd14bca6
d26533e370
820abacf49
4466aa39c4
f4e43731ef
884ef211f7
896ef5a15f
4ff482cd47
6e27360fbc
a10a0824eb
2fdda2ec1b
57f76a2111
287daa9248
7e40a051a4
bd0a7730b6
0d1c87e650
56cd963fd7
d1b26cb267
6a0a03415c
41d50adbf3
328b52c4d6
b7d04cf7b9
7ed2cf30a5
78fe5576a9
41179b1282
f82d99b4c2
967f0d07f2
940dbe99e9
98e1c68a70
d8e250e9b0
c8be8510ba
c99460ba15
769fcb7f50
60812790e1
a01e44f3b9
0917370bd5
09b612b130
26fdf3fb07
f41f3f6b51
677664e71d
a8071f1039
bc08351a0a
088b3893c3
d9d6dd9ba6
aca66674d3
597429ab3e
715360c1e7
fcabaa7eff
b14af989b8
1a919e0c3e
9c1a6bed7b
c48ec02f42
6f376489a5
0cbf7bef1e
0f87e598f6
363b75619f
090c801cc6
b711778d4a
d52569d66f
c676b7a473
71c49bc8cd
97a7f747fb
cef9e0de0c
5637acb799
3d3bdcb966
c9f02ae020
0e7512a225
c330016109
cb13cdec85
c65c580b20
d159ae9342
a540af1ca7
e9c234d89f
b472dac6b3
523dac1be3
962a2126b5
5c495ad1b0
f633f34e43
bacf1b9acc
4df9da5c48
30bbc1350d
2f0f1fd5f5
c28e6ebc4c
6479c11e9a
4ed0fcdde6
296a8ade63
a84953ccfd
8492031fd0
bff7259111
67e1814581
12e92dd59d
4ee366edfa
61573756f8
fe5fed1182
0c90307677
cdd2a51f1f
0dbe3434f0
ef205593c5
8b5ba771ad
991f99b239
d6f17517cb
15b2f280e3
60b43a1ddf
d6b83e3b0a
0446f89d22
54bc3e606e
fed90cfbe8
e2e41a29eb
3b813db42f
16b1a4d003
b51ea3ca0c
dc76675644
274a238a00
10507f0ade
af2a6106da
120a7e433f
98e34f07df
738df79394
98e9b6e70b
c9318f8dc2
78f3606e30
97f4d098e1
144a13b096
af0869c66c
48e565038a
cd6e1d921c
fb767f4612
9f35db28e5
ab19543dff
3d5f333a3b
d06ca605cf
334e11e4b9
fd68b4e7a8
e5ea16fad8
6d5a4b5cce
ffb6b5a23b
e247625025
67c07aa5d3
0de1ce0c2c
59f4fba05c
c0c764377c
c9bc059637
78147d48e4
5e7db52087
ae42413d57
d433bd3d84
58dd6dc227
893df9b277
17dc13760b
498bf911eb
96de58b3a4
9b61fa24c7
e9be3cf6bc
ea44a71914
a00fbbf5ca
eb1a04af65
d1fbf77f3f
04fbf73a29
db70eb3160
cd974c26b6
53e0f5d61c
e864bf4898
81d12b7644
309fcd6270
938112e449
3e012ea69e
975c942ea7
2798271da0
dc258cebab
4b8c5194c7
3c7c6dacfb
e36337a764
a49856b898
8ca2f52041
2f7f243022
7e443770d7
8ec09884b8
88c7e636d6
add3fd479d
70410536b9
6f3f9b485c
bd9ce3590d
```
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
# steps:
#   - command: ".buildkite/pipeline-upload.sh"
#

set -e
cd "$(dirname "$0")"/..
source ci/_
sudo chmod 0777 ci/buildkite-pipeline-in-disk.sh

_ ci/buildkite-pipeline-in-disk.sh pipeline.yml
echo +++ pipeline
cat pipeline.yml

_ buildkite-agent pipeline upload pipeline.yml
```
```
@@ -12,14 +12,7 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
eval "$(ci/sbf-tools-info.sh)"
source "ci/rust-version.sh"
HOST_RUST_VERSION="$rust_stable"
pattern='^[0-9]+\.[0-9]+\.[0-9]+$'
if [[ ${HOST_RUST_VERSION} =~ ${pattern} ]]; then
HOST_RUST_VERSION="${rust_stable%.*}"
fi
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"-"$HOST_RUST_VERSION"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes
```
.github/ISSUE_TEMPLATE.md (vendored, new file): 6 lines

```
@@ -0,0 +1,6 @@
#### Problem



#### Proposed Solution

```
.github/ISSUE_TEMPLATE/0-general.md (vendored): 12 changed lines

```
@@ -1,12 +0,0 @@
---
name: General Issue
about: Create a report describing a problem and a proposed solution
title: ''
assignees: ''
---

#### Problem



#### Proposed Solution
```
.github/ISSUE_TEMPLATE/1-feature-gate.yml (vendored): 70 changed lines

```
@@ -1,70 +0,0 @@
name: Feature Gate Tracker
description: Track the development and status of an on-chain feature
title: "Feature Gate: "
labels: ["feature-gate"]
body:
- type: markdown
attributes:
value: >
Steps to add a new feature are outlined below. Note that these steps only cover
the process of getting a feature into the core Solana code.

- For features that are unambiguously good (ie bug fixes), these steps are sufficient.

- For features that should go up for community vote (ie fee structure changes), more
information on the additional steps to follow can be found at:
<https://spl.solana.com/feature-proposal#feature-proposal-life-cycle>

1. Generate a new keypair with `solana-keygen new --outfile feature.json --no-passphrase`
- Keypairs should be held by core contributors only. If you're a non-core contirbutor going
through these steps, the PR process will facilitate a keypair holder being picked. That
person will generate the keypair, provide pubkey for PR, and ultimately enable the feature.

2. Add a public module for the feature, specifying keypair pubkey as the id with
`solana_sdk::declare_id!()` within the module. Additionally, add an entry to `FEATURE_NAMES` map.

3. Add desired logic to check for and switch on feature availability.
- type: textarea
id: description
attributes:
label: Description
placeholder: Describe why the new feature gate is needed and any necessary conditions for its activation
validations:
required: true
- type: input
id: id
attributes:
label: Feature ID
description: The public key of the feature account
validations:
required: true
- type: dropdown
id: activation-method
attributes:
label: Activation Method
options:
- Single Core Contributor
- Staked Validator Vote
validations:
required: true
- type: input
id: testnet
attributes:
label: Testnet Activation Epoch
placeholder: Edit this response when feature is activated on this cluster
validations:
required: false
- type: input
id: devnet
attributes:
label: Devnet Activation Epoch
placeholder: Edit this response when feature is activated on this cluster
validations:
required: false
- type: input
id: mainnet-beta
attributes:
label: Mainnet-Beta Activation Epoch
placeholder: Edit this response when feature is activated on this cluster
validations:
required: false
```
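Step 2 of the deleted template above is terse, so here is a minimal sketch of what it describes: declaring a feature module with `solana_sdk::declare_id!()` and registering it in the `FEATURE_NAMES` map. This is an illustration only; the module name, description string, and pubkey below are placeholders (the base58 string is a stand-in, not a real feature account), and the exact shape of `FEATURE_NAMES` may differ between releases.

```rust
use {lazy_static::lazy_static, solana_sdk::pubkey::Pubkey, std::collections::HashMap};

// Hypothetical feature-gate module (step 2 of the template above).
// The pubkey is a placeholder, not a real feature account.
pub mod my_new_feature {
    solana_sdk::declare_id!("Feature111111111111111111111111111111111111");
}

lazy_static! {
    // One entry per feature gate: feature id -> human-readable description.
    pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
        (my_new_feature::id(), "description of my new feature"),
        // ...entries for the other feature gates...
    ]
    .iter()
    .cloned()
    .collect();
}
```

Gating logic (step 3) then typically checks something like `feature_set.is_active(&my_new_feature::id())` before switching on the new behavior.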
.github/PULL_REQUEST_TEMPLATE.md (vendored): 4 changed lines

```
@@ -1,9 +1,5 @@
#### Problem


#### Summary of Changes


Fixes #
<!-- OPTIONAL: Feature Gate Issue: # -->
<!-- Don't forget to add the "feature-gate" label -->
```
.github/workflows/autolock_bot_PR.txt (vendored): 37 changed lines

```
@@ -1,37 +0,0 @@
name: 'Autolock RitBot for for PR'

on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:

permissions:
issues: write
pull-requests: write

concurrency:
group: lock

jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v3
with:

github-token: ${{ github.token }}
pr-inactive-days: '14'
exclude-pr-created-before: ''
exclude-pr-created-after: ''
exclude-pr-created-between: ''
exclude-pr-closed-before: ''
exclude-pr-closed-after: ''
exclude-pr-closed-between: ''
include-any-pr-labels: 'automerge'
include-all-pr-labels: ''
exclude-any-pr-labels: ''
add-pr-labels: 'locked PR'
remove-pr-labels: ''
pr-comment: 'This PR has been automatically locked since there has not been any activity in past 14 days after it was merged.'
pr-lock-reason: 'resolved'
log-output: true
```
.github/workflows/autolock_bot_closed_issue.txt (vendored): 38 changed lines

```
@@ -1,38 +0,0 @@
name: 'Autolock NaviBot for closed issue'

on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:

permissions:
issues: write
pull-requests: write

concurrency:
group: lock

jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v3
with:

github-token: ${{ github.token }}
issue-inactive-days: '7'
exclude-issue-created-before: ''
exclude-issue-created-after: ''
exclude-issue-created-between: ''
exclude-issue-closed-before: ''
exclude-issue-closed-after: ''
exclude-issue-closed-between: ''
include-any-issue-labels: ''
include-all-issue-labels: ''
exclude-any-issue-labels: ''
add-issue-labels: 'locked issue'
remove-issue-labels: ''
issue-comment: 'This issue has been automatically locked since there has not been any activity in past 7 days after it was closed. Please open a new issue for related bugs.'
issue-lock-reason: 'resolved'
process-only: 'issues'
log-output: true
```
.github/workflows/client-targets.yml (vendored): 66 changed lines

```
@@ -1,66 +0,0 @@
name: client_targets

on:
pull_request:
branches:
- master
paths:
- "client/**"
- "sdk/**"
- ".github/workflows/client-targets.yml"

env:
CARGO_TERM_COLOR: always

jobs:
check_compilation:
name: Client compilation
runs-on: ${{ matrix.os }}
strategy:
matrix:
target: [aarch64-apple-ios, x86_64-apple-ios, aarch64-apple-darwin, x86_64-apple-darwin, aarch64-linux-android, armv7-linux-androideabi, i686-linux-android, x86_64-linux-android]
include:
- target: aarch64-apple-ios
platform: ios
os: macos-latest
- target: x86_64-apple-ios
platform: ios
os: macos-latest
- target: aarch64-apple-darwin
platform: ios
os: macos-latest
- target: x86_64-apple-darwin
platform: ios
os: macos-latest
- target: aarch64-linux-android
platform: android
os: ubuntu-latest
- target: armv7-linux-androideabi
platform: android
os: ubuntu-latest
- target: i686-linux-android
platform: android
os: ubuntu-latest
- target: x86_64-linux-android
platform: android
os: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: ${{ matrix.target }}
- name: Install cargo-ndk
if: ${{ matrix.platform == 'android' }}
run: cargo install cargo-ndk
- uses: actions-rs/cargo@v1
if: ${{ matrix.platform == 'android' }}
with:
command: ndk
args: --target ${{ matrix.target }} build -p solana-client
- uses: actions-rs/cargo@v1
if: ${{ matrix.platform == 'ios' }}
with:
command: build
args: -p solana-client --target ${{ matrix.target }}
```
.github/workflows/explorer.yml (vendored): 28 changed lines

```
@@ -1,28 +0,0 @@
name: Explorer_build&test_on_PR
on:
pull_request:
branches:
- master
paths:
- 'explorer/**'
jobs:
check-explorer:
runs-on: ubuntu-latest

defaults:
run:
working-directory: explorer
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v2
with:
node-version: '14'
cache: 'npm'
cache-dependency-path: explorer/package-lock.json
- run: npm i -g npm@7
- run: npm ci
- run: npm run format
- run: npm run build
- run: npm run test
```
.github/workflows/explorer_preview.yml (vendored): 58 changed lines

```
@@ -1,58 +0,0 @@
name : explorer_preview
on:
workflow_run:
workflows: ["Explorer_build&test_on_PR"]
types:
- completed
jobs:
explorer_preview:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: amondnet/vercel-action@v20
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
vercel-org-id: ${{ secrets.ORG_ID}} #Required
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
scope: ${{ secrets.TEAM_ID }}

- name: vercel url
run : |
touch vercelfile.txt
vercel --token ${{secrets.VERCEL_TOKEN}} ls explorer --scope team_8A2WD7p4uR7tmKX9M68loHXI > vercelfile.txt
touch vercelfile1.txt
head -n 2 vercelfile.txt > vercelfile1.txt
touch vercelfile2.txt
tail -n 1 vercelfile1.txt > vercelfile2.txt
filtered_url7=$(cut -f7 -d" " vercelfile2.txt)
echo "filtered_url7 is: $filtered_url7"
touch .env.preview1
echo "$filtered_url7" > .env.preview1
#filtered_url=$(cat vercelfile2.txt )
#echo "$filtered_url" >> .env.preview1

- name: Fetching Vercel Preview Deployment Link
uses: mathiasvr/command-output@v1
id: test1
with:
run: |
echo "$(cat .env.preview1)"
- name: Fetching PR commit URL
uses: mathiasvr/command-output@v1
id: test2
with:
run: |
HEAD_SHA=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.sha)
USER_NAME=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.user.login)
echo "github.com/$USER_NAME/solana/commit/$HEAD_SHA"

- name: Slack Notification4
uses: rtCamp/action-slack-notify@master
env:
SLACK_MESSAGE: ' Vercel Link: ${{ steps.test1.outputs.stdout }} PR Commit: ${{steps.test2.outputs.stdout}}'
SLACK_TITLE: Vercel "Explorer" Preview Deployment Link , PR Commit
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
```
.github/workflows/explorer_production.yml (vendored): 46 changed lines

```
@@ -1,46 +0,0 @@
name: Explorer_production_build&test
on:
push:
branches: [master]
paths:
- 'explorer/**'
jobs:
Explorer_production_build_test:
runs-on: ubuntu-latest
defaults:
run:
working-directory: explorer
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v2
with:
node-version: '14'
cache: 'npm'
cache-dependency-path: explorer/package-lock.json
- run: npm i -g npm@7
- run: npm ci
- run: npm run format
- run: npm run build
- run: npm run test

Explorer_production_deploy:
needs: Explorer_production_build_test
runs-on: ubuntu-latest
defaults:
run:
working-directory: explorer

steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: amondnet/vercel-action@v20
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
github-token: ${{ secrets.PAT }} #Optional
vercel-args: '--prod' #for production
vercel-org-id: ${{ secrets.ORG_ID}} #Required
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
scope: ${{ secrets.TEAM_ID }}
```
.github/workflows/solana-action.yml.txt (vendored): 208 changed lines

```
@@ -1,208 +0,0 @@
name : minimal

on:
push:
branches: [master]
pull_request:
branches: [master]

jobs:
Export_Github_Repositories:
runs-on: ubuntu-latest
env:
VERCEL_TOKEN: ${{secrets.VERCEL_TOKEN}}
GITHUB_TOKEN: ${{secrets.PAT_ANM}}
COMMIT_RANGE: ${{ github.event.before}}...${{ github.event.after}}
steps:
- name: Checkout repo
uses: actions/checkout@v2
with:
fetch-depth: 2

- run: echo "COMMIT_DIFF_RANGE=$(echo $COMMIT_RANGE)" >> $GITHUB_ENV
# - run: echo "$COMMIT_DIFF_RANGE"
- name: Set up Python
uses: actions/setup-python@v2
with:
GITHUB_TOKEN: ${{secrets.PAT_ANM}}
if: ${{ github.event_name == 'push' && 'cron'&& github.ref == 'refs/heads/master'}}

- name: cmd
run : |
.travis/export-github-repo.sh web3.js/ solana-web3.js

macos-artifacts:
needs: [Export_Github_Repositories]
strategy:
fail-fast: false
runs-on: macos-latest
if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}}
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Setup | Rust
uses: ATiltedTree/setup-rust@v1
with:
rust-version: stable
- name: release artifact
run: |
source ci/rust-version.sh
brew install coreutils
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
greadlink -f .
source ci/env.sh
rustup set profile default
ci/publish-tarball.sh
shell: bash

- name: Cache modules
uses: actions/cache@master
id: yarn-cache
with:
path: node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: ${{ runner.os }}-yarn-


# - To stop from uploading on the production
# - uses: ochanje210/simple-s3-upload-action@master
# with:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
# SOURCE_DIR: 'travis-s3-upload1'
# DEST_DIR: 'giitsol'

# - uses: ochanje210/simple-s3-upload-action@master
# with:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
# SOURCE_DIR: './docs/'
# DEST_DIR: 'giitsol'


windows-artifact:
needs: [Export_Github_Repositories]
strategy:
fail-fast: false
runs-on: windows-latest
if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}}
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Setup | Rust
uses: ATiltedTree/setup-rust@v1
with:
rust-version: stable
release-artifact:
needs: windows-artifact
runs-on: windows-latest
if : ${{ github.event_name == 'api' && 'cron' || github.ref == 'refs/heads/master'}}
steps:
- name: release artifact
run: |
git clone git://git.openssl.org/openssl.git
cd openssl
make
make test
make install
openssl version
# choco install openssl
# vcpkg integrate install
# refreshenv

- name: Checkout repository
uses: actions/checkout@v2
- uses: actions/checkout@v2
- run: choco install msys2
- uses: actions/checkout@v2
- run: |
openssl version
bash ci/rust-version.sh
readlink -f .
bash ci/env.sh
rustup set profile default
bash ci/publish-tarball.sh
shell: bash

- name: Cache modules
uses: actions/cache@v1
id: yarn-cache
with:
path: node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: ${{ runner.os }}-yarn-

# - To stop from uploading on the production
# - name: Config. aws cred
# uses: aws-actions/configure-aws-credentials@v1
# with:
# aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# aws-region: us-east-2
# - name: Deploy
# uses: shallwefootball/s3-upload-action@master
# with:
# folder: build
# aws_bucket: ${{ secrets.AWS_S3_BUCKET }}
# aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# destination_dir: /
# bucket-region: us-east-2
# delete-removed: true
# no-cache: true
# private: true

# Docs:
# needs: [windows-artifact,release-artifact]
# runs-on: ubuntu-latest
# env:
# GITHUB_TOKEN: ${{secrets.PAT_NEW}}
# GITHUB_EVENT_BEFORE: ${{ github.event.before }}
# GITHUB_EVENT_AFTER: ${{ github.event.after }}
# COMMIT_RANGE: ${{ github.event.before}}...${{ github.event.after}}
# steps:
# - name: Checkout repo
# uses: actions/checkout@v2
# with:
# fetch-depth: 2
# - name: docs
# if: ${{github.event_name == 'pull_request' || startsWith(github.ref, 'refs/tags/v')}}
# run: |
# touch .env
# echo "COMMIT_RANGE=($COMMIT_RANGE)" > .env
# source ci/env.sh
# .travis/channel_restriction.sh edge beta || exit 0
# .travis/affects.sh docs/ .travis || exit 0
# cd docs/
# source .travis/before_install.sh
# source .travis/script.sh
# - name: setup-node
# uses: actions/checkout@v2
# - name: setup-node
# uses: actions/setup-node@v2
# with:
# node-version: 'lts/*'
# - name: Cache
# uses: actions/cache@v1
# with:
# path: ~/.npm
# key: ${{ runner.OS }}-npm-cache-${{ hashFiles('**/package-lock.json') }}
# restore-keys: |
# ${{ runner.OS }}-npm-cache-2

# auto_bump:
# needs: [windows-artifact,release-artifact,Docs]
# runs-on: ubuntu-latest
# steps:
# - name : checkout repo
# uses: actions/checkout@v2
# with:
# fetch-depth: '0'
# - name: Bump version and push tag
# uses: anothrNick/github-tag-action@1.26.0
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# WITH_V: true
# DEFAULT_BUMP: patch
```
.github/workflows/web3.yml (vendored): 71 changed lines

```
@@ -1,71 +0,0 @@
name: Web3

on:
push:
branches: [ master ]
paths:
- "web3.js/**"
pull_request:
branches: [ master ]
paths:
- "web3.js/**"

jobs:
# needed for grouping check-web3 strategies into one check for mergify
all-web3-checks:
runs-on: ubuntu-latest
needs:
- check-web3
steps:
- run: echo "Done"

web3-commit-lint:
runs-on: ubuntu-latest
# Set to true in order to avoid cancelling other workflow jobs.
# Mergify will still require web3-commit-lint for automerge
continue-on-error: true

defaults:
run:
working-directory: web3.js

steps:
- uses: actions/checkout@v2
with:
# maybe needed for base sha below
fetch-depth: 0
- uses: actions/setup-node@v2
with:
node-version: '16'
cache: 'npm'
cache-dependency-path: web3.js/package-lock.json
- run: npm ci
- name: commit-lint
if: ${{ github.event_name == 'pull_request' }}
run: bash commitlint.sh
env:
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}

check-web3:
runs-on: ubuntu-latest

defaults:
run:
working-directory: web3.js

strategy:
matrix:
node: [ '12', '14', '16' ]

name: Node ${{ matrix.node }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: ${{ matrix.node }}
cache: 'npm'
cache-dependency-path: web3.js/package-lock.json
- run: |
source .travis/before_install.sh
npm install
source .travis/script.sh
```
.gitignore (vendored): 3 changed lines

```
@@ -4,7 +4,6 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/

**/*.rs.bk
.cargo
@@ -28,5 +27,3 @@ log-*/
/spl_*.so

.DS_Store
# scripts that may be generated by cargo *-bpf commands
**/cargo-*-bpf-child-script-*.sh
```
.mergify.yml: 129 changed lines

```
@@ -4,74 +4,24 @@
#
# https://doc.mergify.io/
pull_request_rules:
- name: label changes from community
conditions:
- author≠@core-contributors
- author≠mergify[bot]
- author≠dependabot[bot]
actions:
label:
add:
- community
- name: request review for community changes
conditions:
- author≠@core-contributors
- author≠mergify[bot]
- author≠dependabot[bot]
# Only request reviews from the pr subscribers group if no one
# has reviewed the community PR yet. These checks only match
# reviewers with admin, write or maintain permission on the repository.
- "#approved-reviews-by=0"
- "#commented-reviews-by=0"
- "#changes-requested-reviews-by=0"
- "#review-requested=0"
actions:
request_reviews:
teams:
- "@solana-labs/community-pr-subscribers"
- name: automatic merge (squash) on CI success
conditions:
- and:
- status-success=buildkite/solana
- status-success=ci-gate
- label=automerge
- label!=no-automerge
- author≠@dont-squash-my-commits
- or:
# only require travis success if docs files changed
- status-success=Travis CI - Pull Request
- -files~=^docs/
- or:
# only require explorer checks if explorer files changed
- status-success=check-explorer
- -files~=^explorer/
- or:
- and:
- status-success=all-web3-checks
- status-success=web3-commit-lint
# only require web3 checks if web3.js files changed
- -files~=^web3.js/
- status-success=buildkite/solana
- status-success=Travis CI - Pull Request
- status-success=ci-gate
- label=automerge
- author≠@dont-squash-my-commits
actions:
merge:
method: squash
# Join the dont-squash-my-commits group if you won't like your commits squashed
- name: automatic merge (rebase) on CI success
conditions:
- and:
- status-success=buildkite/solana
- status-success=Travis CI - Pull Request
- status-success=ci-gate
- label=automerge
- label!=no-automerge
- author=@dont-squash-my-commits
- or:
# only require explorer checks if explorer files changed
- status-success=check-explorer
- -files~=^explorer/
- or:
# only require web3 checks if web3.js files changed
- status-success=all-web3-checks
- -files~=^web3.js/
- status-success=buildkite/solana
- status-success=Travis CI - Pull Request
- status-success=ci-gate
- label=automerge
- author=@dont-squash-my-commits
actions:
merge:
method: rebase
@@ -96,56 +46,39 @@ pull_request_rules:
- author=mergify[bot]
- head~=^mergify/bp/
- "#status-failure=0"
- "-merged"
- label!=no-automerge
actions:
label:
add:
- automerge
- name: v1.9 feature-gate backport
- name: v1.4 backport
conditions:
- label=v1.9
- label=feature-gate
actions:
backport:
ignore_conflicts: true
labels:
- feature-gate
branches:
- v1.9
- name: v1.9 non-feature-gate backport
conditions:
- label=v1.9
- label!=feature-gate
- label=v1.4
actions:
backport:
ignore_conflicts: true
branches:
- v1.9
- name: v1.10 feature-gate backport
- v1.4
- name: v1.5 backport
conditions:
- label=v1.10
- label=feature-gate
actions:
backport:
ignore_conflicts: true
labels:
- feature-gate
branches:
- v1.10
- name: v1.10 non-feature-gate backport
conditions:
- label=v1.10
- label!=feature-gate
- label=v1.5
actions:
backport:
ignore_conflicts: true
branches:
- v1.10

commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
- v1.5
- name: v1.6 backport
conditions:
- author=@core-contributors
- label=v1.6
actions:
backport:
ignore_conflicts: true
branches:
- v1.6
- name: v1.7 backport
conditions:
- label=v1.7
actions:
backport:
ignore_conflicts: true
branches:
- v1.7
```
.travis.yml: 75 changed lines

```
@@ -23,12 +23,12 @@ jobs:
depth: false
script:
- .travis/export-github-repo.sh web3.js/ solana-web3.js
- .travis/export-github-repo.sh explorer/ explorer

- &release-artifacts
if: type IN (api, cron) OR tag IS present
name: "macOS release artifacts"
os: osx
osx_image: xcode12
language: rust
rust:
- stable
@@ -36,9 +36,6 @@
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
- brew install gnu-tar
- PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
- tar --version
script:
- source ci/env.sh
- rustup set profile default
@@ -64,12 +61,6 @@
- <<: *release-artifacts
name: "Windows release artifacts"
os: windows
install:
- choco install openssl
- export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
# Linux release artifacts are still built by ci/buildkite-secondary.yml
#- <<: *release-artifacts
#  name: "Linux release artifacts"
@@ -77,30 +68,56 @@
#  before_install:
#    - sudo apt-get install libssl-dev libudev-dev

# explorer pull request
- name: "explorer"
if: type = pull_request AND branch = master

language: node_js
node_js:
- "lts/*"

cache:
directories:
- ~/.npm

before_install:
- .travis/affects.sh explorer/ .travis || travis_terminate 0
- cd explorer

script:
- npm run build
- npm run format

# web3.js pull request
- name: "web3.js"
if: type = pull_request AND branch = master

language: node_js
node_js:
- "lts/*"

services:
- docker

cache:
directories:
- ~/.npm

before_install:
- .travis/affects.sh web3.js/ .travis || travis_terminate 0
- cd web3.js/
- source .travis/before_install.sh

script:
- ../.travis/commitlint.sh
- source .travis/script.sh

# docs pull request
# - name: "explorer"
#   if: type = pull_request AND branch = master

#   language: node_js
#   node_js:
#     - "lts/*"

#   cache:
#     directories:
#       - ~/.npm

#   before_install:
#     - .travis/affects.sh explorer/ .travis || travis_terminate 0
#     - cd explorer

#   script:
#     - npm run build
#     - npm run format
- name: "docs"
if: type IN (push, pull_request) OR tag IS present
language: node_js
node_js:
- "lts/*"
- "node"

services:
- docker
```
```
@@ -20,13 +20,13 @@ if [[ ! -f "$basedir"/commitlint.config.js ]]; then
exit 1
fi

if [[ -z $COMMIT_RANGE ]]; then
echo "Error: COMMIT_RANGE not defined"
if [[ -z $TRAVIS_COMMIT_RANGE ]]; then
echo "Error: TRAVIS_COMMIT_RANGE not defined"
exit 1
fi

cd "$basedir"
echo "Checking commits in COMMIT_RANGE: $COMMIT_RANGE"
echo "Checking commits in TRAVIS_COMMIT_RANGE: $TRAVIS_COMMIT_RANGE"
while IFS= read -r line; do
echo "$line" | npx commitlint
done < <(git log "$COMMIT_RANGE" --format=%s -- .)
done < <(git log "$TRAVIS_COMMIT_RANGE" --format=%s -- .)
```
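The hunk above swaps the script's commit-range variable between `COMMIT_RANGE` and `TRAVIS_COMMIT_RANGE`. As a hypothetical manual invocation of the `TRAVIS_COMMIT_RANGE` variant (in CI the variable is exported by the environment instead; Travis sets `TRAVIS_COMMIT_RANGE` itself, and the web3.yml workflow above sets `COMMIT_RANGE` explicitly):

```bash
# Hypothetical local run from the web3.js directory; the path and range
# are illustrative assumptions, not taken from the diff.
cd web3.js
TRAVIS_COMMIT_RANGE="origin/master..HEAD" bash commitlint.sh
```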
```
@@ -74,7 +74,7 @@ minutes to execute. Use that time to write a detailed problem description. Once
the description is written and CI succeeds, click the "Ready to Review" button
and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
reviewer engagement. Not only will they be notified and see the PR is not yet
ready for them, they will also be bombarded with additional notifications
ready for them, they will also be bombarded them with additional notifications
each time you push a commit to get past CI or until they "mute" the PR. Once
muted, you'll need to reach out over some other medium, such as Discord, to
request they have another look. When you use draft PRs, no notifications are
@@ -146,31 +146,6 @@ the subject lines of the git commits contained in the PR. It's especially
generous (and not expected) to rebase or reword commits such that each change
matches the logical flow in your PR description.

### The PR / Issue Labels
Labels make it easier to manage and track PRs / issues. Below some common labels
that we use in Solana. For the complete list of labels, please refer to the
[label page](https://github.com/solana-labs/solana/issues/labels):

* "feature-gate": when you add a new feature gate or modify the behavior of
an existing feature gate, please add the "feature-gate" label to your PR.
New feature gates should also always have a corresponding tracking issue
(go to "New Issue" -> "Feature Gate Tracker [Get Started](https://github.com/solana-labs/solana/issues/new?assignees=&labels=feature-gate&template=1-feature-gate.yml&title=Feature+Gate%3A+)")
and should be updated each time the feature is activated on a cluster.

* "automerge": When a PR is labelled with "automerge", the PR will be
automically merged once CI passes. In general, this label should only
be used for small hot-fix (fewer than 100 lines) or automatic generated
PRs. If you're uncertain, it's usually the case that the PR is not
qualified as "automerge".

* "good first issue": If you happen to find an issue that is non-urgent and
self-contained with moderate scope, you might want to consider attaching
"good first issue" to it as it might be a good practice for newcomers.

* "rust": this pull request updates Rust code.

* "javascript": this pull request updates Javascript code.

### When will my PR be reviewed?

PRs are typically reviewed and merged in under 7 days. If your PR has been open
```
Cargo.lock (generated): 5707 changed lines; file diff suppressed because it is too large.
Cargo.toml: 60 changed lines

```
@@ -1,91 +1,85 @@
[workspace]
members = [
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"bench-exchange",
"bench-streamer",
"bench-tps",
"accounts-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"bench-streamer",
"bench-tps",
"bloom",
"bucket_map",
"clap-utils",
"cli",
"cli-config",
"cli-output",
"client",
"client-test",
"core",
"dos",
"download-utils",
"entry",
"faucet",
"frozen-abi",
"perf",
"validator",
"genesis",
"genesis-utils",
"geyser-plugin-interface",
"geyser-plugin-manager",
"gossip",
"install",
"keygen",
"ledger",
"ledger-tool",
"local-cluster",
"log-analyzer",
"logger",
"measure",
"log-analyzer",
"merkle-root-bench",
"merkle-tree",
"storage-bigtable",
"storage-proto",
"streamer",
"measure",
"metrics",
"net-shaper",
"net-utils",
"notifier",
"perf",
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/ed25519-tests",
"programs/exchange",
"programs/failure",
"programs/noop",
"programs/ownable",
"programs/secp256k1",
"programs/stake",
"programs/vote",
"programs/zk-token-proof",
"rayon-threadlimit",
"rbpf-cli",
"remote-wallet",
"rpc",
"rpc-test",
"runtime",
"runtime/store-tool",
"sdk",
"sdk/cargo-build-bpf",
"sdk/cargo-test-bpf",
"send-transaction-service",
"scripts",
"stake-accounts",
"storage-bigtable",
"storage-proto",
"streamer",
"sys-tuner",
"test-validator",
"tokens",
"transaction-dos",
"transaction-status",
"account-decoder",
"upload-perf",
"validator",
"net-utils",
"version",
"cli",
"rayon-threadlimit",
"watchtower",
"zk-token-sdk",
]

exclude = [
"programs/bpf",
]

# This prevents a Travis CI error when building for Windows.
# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
# dependency is supported on Apple M1. v2 of the feature resolver is needed to
# specify arch-specific features.
resolver = "2"

[profile.dev]
split-debuginfo = "unpacked"
```
LICENSE: 2 changed lines

```
@@ -1,4 +1,4 @@
Copyright 2022 Solana Foundation.
Copyright 2020 Solana Foundation.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
```
README.md: 37 changed lines

````
@@ -1,6 +1,6 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
<img alt="Solana" src="https://i.imgur.com/uBVzyX3.png" width="250" />
</a>
</p>

@@ -19,23 +19,23 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```

When building the master branch, please make sure you are using the latest stable rust version by running:
Please make sure you are always using the latest stable rust version by running:

```bash
$ rustup update
```

When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
```bash
$ rustup install VERSION
```
Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.

On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:

```bash
$ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
```

On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:

```bash
$ softwareupdate --install-rosetta
```

## **2. Download the source code.**
@@ -51,6 +51,11 @@ $ cd solana
$ cargo build
```

## **4. Run a minimal local cluster.**
```bash
$ ./run.sh
```

# Testing

**Run the test suite:**
@@ -68,7 +73,7 @@ devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://doc

# Benchmarking

First, install the nightly build of rustc. `cargo bench` requires the use of the
First install the nightly build of rustc. `cargo bench` requires use of the
unstable features only available in the nightly build.

```bash
@@ -110,17 +115,17 @@ send us that patch!

All claims, content, designs, algorithms, estimates, roadmaps,
specifications, and performance measurements described in this project
are done with the Solana Foundation's ("SF") good faith efforts. It is up to
are done with the Solana Foundation's ("SF") best efforts. It is up to
the reader to check and validate their accuracy and truthfulness.
Furthermore, nothing in this project constitutes a solicitation for
Furthermore nothing in this project constitutes a solicitation for
investment.

Any content produced by SF or developer resources that SF provides are
for educational and inspirational purposes only. SF does not encourage,
Any content produced by SF or developer resources that SF provides, are
for educational and inspiration purposes only. SF does not encourage,
induce or sanction the deployment, integration or use of any such
applications (including the code comprising the Solana blockchain
protocol) in violation of applicable laws or regulations and hereby
prohibits any such deployment, integration or use. This includes the use of
prohibits any such deployment, integration or use. This includes use of
any such applications by the reader (a) in violation of export control
or sanctions laws of the United States or any other applicable
jurisdiction, (b) if the reader is located in or ordinarily resident in
@@ -133,7 +138,7 @@ prohibitions.
The reader should be aware that U.S. export control and sanctions laws
prohibit U.S. persons (and other persons that are subject to such laws)
from transacting with persons in certain countries and territories or
that are on the SDN list. As a project-based primarily on open-source
that are on the SDN list. As a project based primarily on open-source
software, it is possible that such sanctioned persons may nevertheless
bypass prohibitions, obtain the code comprising the Solana blockchain
protocol (or other project code or applications) and deploy, integrate,
````
````
@@ -94,7 +94,7 @@ Alternatively use the Github UI.
```
1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
```
ci/channel-info.sh
ci/channel_info.sh
```

## Steps to Create a Release
@@ -152,5 +152,5 @@ appearing. To check for progress:
[Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job.
If this happens and the error is non-fatal, click "Retry" on the "publish crate" job

### Update software on testnet.solana.com
See the documentation at https://github.com/solana-labs/cluster-ops/. devnet.solana.com and mainnet-beta.solana.com run stable releases that have been tested on testnet. Do not update devnet or mainnet-beta with a beta release.
### Update software on devnet.solana.com/testnet.solana.com/mainnet-beta.solana.com
See the documentation at https://github.com/solana-labs/cluster-ops/
````
SECURITY.md: 10 changed lines

```
@@ -11,31 +11,31 @@
email to security@solana.com and provide your github username so we can add you
to a new draft security advisory for further discussion.

Expect a response as fast as possible, typically within 72 hours.
Expect a response as fast as possible, within one business day at the latest.

<a name="bounty"></a>
## Security Bug Bounties
We offer bounties for critical security issues. Please see below for more details.

Loss of Funds:
$2,000,000 USD in locked SOL tokens (locked for 12 months)
$500,000 USD in locked SOL tokens (locked for 12 months)
* Theft of funds without users signature from any account
* Theft of funds without users interaction in system, token, stake, vote programs
* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes.

Consensus/Safety Violations:
$1,000,000 USD in locked SOL tokens (locked for 12 months)
$250,000 USD in locked SOL tokens (locked for 12 months)
* Consensus safety violation
* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc..

Other Attacks:
$400,000 USD in locked SOL tokens (locked for 12 months)
$100,000 USD in locked SOL tokens (locked for 12 months)
* Protocol liveness attacks,
* Eclipse attacks,
* Remote attacks that partition the network,

DoS Attacks:
$100,000 USD in locked SOL tokens (locked for 12 months)
$25,000 USD in locked SOL tokens (locked for 12 months)
* Remote resource exaustion via Non-RPC protocols

RPC DoS/Crashes:
```
```
@@ -1,30 +1,30 @@
[package]
name = "solana-account-decoder"
version = "1.11.0"
version = "1.7.10"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-account-decoder"
license = "Apache-2.0"
edition = "2021"
edition = "2018"

[dependencies]
Inflector = "0.11.4"
base64 = "0.13.0"
bincode = "1.3.3"
bs58 = "0.4.0"
base64 = "0.12.3"
bincode = "1.3.1"
bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
serde = "1.0.136"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
solana-vote-program = { path = "../programs/vote", version = "=1.7.10" }
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.11.1"
zstd = "0.5.1"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
```
```
@@ -17,10 +17,8 @@ pub mod validator_info;
use {
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
solana_sdk::{
account::{ReadableAccount, WritableAccount},
clock::Epoch,
fee_calculator::FeeCalculator,
pubkey::Pubkey,
account::ReadableAccount, account::WritableAccount, clock::Epoch,
fee_calculator::FeeCalculator, pubkey::Pubkey,
},
std::{
io::{Read, Write},
@@ -30,10 +28,9 @@ use {

pub type StringAmount = String;
pub type StringDecimals = String;
pub const MAX_BASE58_BYTES: usize = 128;

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct UiAccount {
pub lamports: u64,
@@ -51,7 +48,7 @@ pub enum UiAccountData {
Binary(String, UiAccountEncoding),
}

#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary, // Legacy. Retained for RPC backwards compatibility
@@ -63,17 +60,6 @@ pub enum UiAccountEncoding {
}

impl UiAccount {
fn encode_bs58<T: ReadableAccount>(
account: &T,
data_slice_config: Option<UiDataSliceConfig>,
) -> String {
if account.data().len() <= MAX_BASE58_BYTES {
bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
} else {
"error: data too large for bs58 encoding".to_string()
}
}

pub fn encode<T: ReadableAccount>(
pubkey: &Pubkey,
account: &T,
@@ -82,14 +68,13 @@ impl UiAccount {
data_slice_config: Option<UiDataSliceConfig>,
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::LegacyBinary(data)
}
UiAccountEncoding::Base58 => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::Binary(data, encoding)
}
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(account.data(), data_slice_config)),
encoding,
@@ -136,13 +121,16 @@ impl UiAccount {
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
}),
UiAccountEncoding::Base64Zstd => base64::decode(blob)
.ok()
.map(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
})
.flatten(),
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;
@@ -178,7 +166,7 @@ impl Default for UiFeeCalculator {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
pub offset: usize,
@@ -201,10 +189,8 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8

#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::account::{Account, AccountSharedData},
};
use super::*;
use solana_sdk::account::{Account, AccountSharedData};

#[test]
fn test_slice_data() {
```
@@ -1,19 +1,17 @@
use {
    crate::{
        parse_bpf_loader::parse_bpf_upgradeable_loader,
        parse_config::parse_config,
        parse_nonce::parse_nonce,
        parse_stake::parse_stake,
        parse_sysvar::parse_sysvar,
        parse_token::{parse_token, spl_token_ids},
        parse_vote::parse_vote,
    },
    inflector::Inflector,
    serde_json::Value,
    solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
    std::collections::HashMap,
    thiserror::Error,
use crate::{
    parse_bpf_loader::parse_bpf_upgradeable_loader,
    parse_config::parse_config,
    parse_nonce::parse_nonce,
    parse_stake::parse_stake,
    parse_sysvar::parse_sysvar,
    parse_token::{parse_token, spl_token_id_v2_0},
    parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;

lazy_static! {
    static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
@@ -21,6 +19,7 @@ lazy_static! {
    static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
    static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
    static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
    static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
    static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
    pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
        let mut m = HashMap::new();
@@ -30,9 +29,7 @@ lazy_static! {
        );
        m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
        m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
        for spl_token_id in spl_token_ids() {
            m.insert(spl_token_id, ParsableAccount::SplToken);
        }
        m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
        m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
        m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
        m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
@@ -115,14 +112,12 @@ pub fn parse_account_data(

#[cfg(test)]
mod test {
    use {
        super::*,
        solana_sdk::nonce::{
            state::{Data, Versions},
            State,
        },
        solana_vote_program::vote_state::{VoteState, VoteStateVersions},
    use super::*;
    use solana_sdk::nonce::{
        state::{Data, Versions},
        State,
    };
    use solana_vote_program::vote_state::{VoteState, VoteStateVersions};

    #[test]
    fn test_parse_account_data() {
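The new `for spl_token_id in spl_token_ids()` loop future-proofs the parser table: any number of token program ids can map to the same `ParsableAccount::SplToken` variant instead of a single hard-coded entry. A dependency-free sketch of that registration pattern (the names and key type here are stand-ins, not the crate's own):

```rust
// Hypothetical sketch: register every known token program id under one variant.
use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ParsableAccount {
    SplToken,
    Stake,
}

fn build_parser_table(
    token_ids: &[[u8; 32]], // all known token program ids
    stake_id: [u8; 32],
) -> HashMap<[u8; 32], ParsableAccount> {
    let mut m = HashMap::new();
    for id in token_ids {
        // One entry per known token program, all dispatching to the same parser.
        m.insert(*id, ParsableAccount::SplToken);
    }
    m.insert(stake_id, ParsableAccount::Stake);
    m
}
```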
@@ -1,11 +1,9 @@
use {
    crate::{
        parse_account_data::{ParsableAccount, ParseAccountError},
        UiAccountData, UiAccountEncoding,
    },
    bincode::{deserialize, serialized_size},
    solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    UiAccountData, UiAccountEncoding,
};
use bincode::{deserialize, serialized_size};
use solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey};

pub fn parse_bpf_upgradeable_loader(
    data: &[u8],
@@ -92,7 +90,9 @@ pub struct UiProgramData {

#[cfg(test)]
mod test {
    use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
    use super::*;
    use bincode::serialize;
    use solana_sdk::pubkey::Pubkey;

    #[test]
    fn test_parse_bpf_upgradeable_loader_accounts() {
@@ -1,16 +1,12 @@
use {
    crate::{
        parse_account_data::{ParsableAccount, ParseAccountError},
        validator_info,
    },
    bincode::deserialize,
    serde_json::Value,
    solana_config_program::{get_config_data, ConfigKeys},
    solana_sdk::{
        pubkey::Pubkey,
        stake::config::{self as stake_config, Config as StakeConfig},
    },
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    validator_info,
};
use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::stake::config::{self as stake_config, Config as StakeConfig};

pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
    let parsed_account = if pubkey == &stake_config::id() {
@@ -91,10 +87,11 @@ pub struct UiConfig<T> {

#[cfg(test)]
mod test {
    use {
        super::*, crate::validator_info::ValidatorInfo, serde_json::json,
        solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
    };
    use super::*;
    use crate::validator_info::ValidatorInfo;
    use serde_json::json;
    use solana_config_program::create_config_account;
    use solana_sdk::account::ReadableAccount;

    #[test]
    fn test_parse_config() {
@@ -1,9 +1,7 @@
use {
    crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
    solana_sdk::{
        instruction::InstructionError,
        nonce::{state::Versions, State},
    },
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
    instruction::InstructionError,
    nonce::{state::Versions, State},
};

pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
@@ -44,16 +42,14 @@ pub struct UiNonceData {

#[cfg(test)]
mod test {
    use {
        super::*,
        solana_sdk::{
            hash::Hash,
            nonce::{
                state::{Data, Versions},
                State,
            },
            pubkey::Pubkey,
    use super::*;
    use solana_sdk::{
        hash::Hash,
        nonce::{
            state::{Data, Versions},
            State,
        },
        pubkey::Pubkey,
    };

    #[test]
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
},
|
||||
bincode::deserialize,
|
||||
solana_sdk::{
|
||||
clock::{Epoch, UnixTimestamp},
|
||||
stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
|
||||
},
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use solana_sdk::clock::{Epoch, UnixTimestamp};
|
||||
use solana_sdk::stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
|
||||
|
||||
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
||||
let stake_state: StakeState = deserialize(data)
|
||||
@ -136,7 +132,8 @@ impl From<Delegation> for UiDelegation {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use {super::*, bincode::serialize};
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
|
||||
#[test]
|
||||
fn test_parse_stake() {
|
||||
|
@@ -1,26 +1,21 @@
#[allow(deprecated)]
use solana_sdk::sysvar::{fees::Fees, recent_blockhashes::RecentBlockhashes};
use {
    crate::{
        parse_account_data::{ParsableAccount, ParseAccountError},
        StringAmount, UiFeeCalculator,
    },
    bincode::deserialize,
    bv::BitVec,
    solana_sdk::{
        clock::{Clock, Epoch, Slot, UnixTimestamp},
        epoch_schedule::EpochSchedule,
        pubkey::Pubkey,
        rent::Rent,
        slot_hashes::SlotHashes,
        slot_history::{self, SlotHistory},
        stake_history::{StakeHistory, StakeHistoryEntry},
        sysvar::{self, rewards::Rewards},
    },
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    StringAmount, UiFeeCalculator,
};
use bincode::deserialize;
use bv::BitVec;
use solana_sdk::{
    clock::{Clock, Epoch, Slot, UnixTimestamp},
    epoch_schedule::EpochSchedule,
    pubkey::Pubkey,
    rent::Rent,
    slot_hashes::SlotHashes,
    slot_history::{self, SlotHistory},
    stake_history::{StakeHistory, StakeHistoryEntry},
    sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
};

pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
    #[allow(deprecated)]
    let parsed_account = {
        if pubkey == &sysvar::clock::id() {
            deserialize::<Clock>(data)
@@ -96,9 +91,7 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, P
pub enum SysvarAccountType {
    Clock(UiClock),
    EpochSchedule(EpochSchedule),
    #[allow(deprecated)]
    Fees(UiFees),
    #[allow(deprecated)]
    RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
    Rent(UiRent),
    Rewards(UiRewards),
@@ -134,7 +127,6 @@ impl From<Clock> for UiClock {
pub struct UiFees {
    pub fee_calculator: UiFeeCalculator,
}
#[allow(deprecated)]
impl From<Fees> for UiFees {
    fn from(fees: Fees) -> Self {
        Self {
@@ -220,17 +212,14 @@ pub struct UiStakeHistoryEntry {

#[cfg(test)]
mod test {
    #[allow(deprecated)]
    use solana_sdk::sysvar::recent_blockhashes::IterItem;
    use {
        super::*,
        solana_sdk::{account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash},
    use super::*;
    use solana_sdk::{
        account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
        sysvar::recent_blockhashes::IterItem,
    };

    #[test]
    fn test_parse_sysvars() {
        let hash = Hash::new(&[1; 32]);

        let clock_sysvar = create_account_for_test(&Clock::default());
        assert_eq!(
            parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
@@ -250,29 +239,31 @@ mod test {
            SysvarAccountType::EpochSchedule(epoch_schedule),
        );

        #[allow(deprecated)]
        {
            let fees_sysvar = create_account_for_test(&Fees::default());
            assert_eq!(
                parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
                SysvarAccountType::Fees(UiFees::default()),
            );
        let fees_sysvar = create_account_for_test(&Fees::default());
        assert_eq!(
            parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
            SysvarAccountType::Fees(UiFees::default()),
        );

            let recent_blockhashes: RecentBlockhashes =
                vec![IterItem(0, &hash, 10)].into_iter().collect();
            let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
            assert_eq!(
                parse_sysvar(
                    &recent_blockhashes_sysvar.data,
                    &sysvar::recent_blockhashes::id()
                )
                .unwrap(),
                SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
                    blockhash: hash.to_string(),
                    fee_calculator: FeeCalculator::new(10).into(),
                }]),
            );
        }
        let hash = Hash::new(&[1; 32]);
        let fee_calculator = FeeCalculator {
            lamports_per_signature: 10,
        };
        let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
            .into_iter()
            .collect();
        let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
        assert_eq!(
            parse_sysvar(
                &recent_blockhashes_sysvar.data,
                &sysvar::recent_blockhashes::id()
            )
            .unwrap(),
            SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
                blockhash: hash.to_string(),
                fee_calculator: fee_calculator.into(),
            }]),
        );

        let rent = Rent {
            lamports_per_byte_year: 10,
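This hunk shows how the module keeps building cleanly while the `Fees` and `RecentBlockhashes` sysvars are deprecated: the deprecated imports, enum variants, impls, and test code are each confined to `#[allow(deprecated)]` scopes. A reduced sketch of that gating pattern with a stand-in type (`Legacy` is hypothetical, not a Solana type):

```rust
// Sketch: confine every touch of a deprecated item to an allow-gated scope,
// so the rest of the module still compiles under `-D warnings`.
#[deprecated(note = "kept only for RPC backwards compatibility")]
struct Legacy;

fn parse_all() {
    // New, non-deprecated code paths go here, unannotated.

    #[allow(deprecated)]
    {
        // The only place the deprecated type may be constructed or used.
        let _fees = Legacy;
    }
}

fn main() {
    parse_all();
}
```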
@@ -1,52 +1,35 @@
use {
    crate::{
        parse_account_data::{ParsableAccount, ParseAccountError},
        StringAmount, StringDecimals,
    },
    solana_sdk::pubkey::Pubkey,
    spl_token::{
        solana_program::{
            program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
        },
        state::{Account, AccountState, Mint, Multisig},
    },
    std::str::FromStr,
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    StringAmount, StringDecimals,
};
use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{
    solana_program::{
        program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
    },
    state::{Account, AccountState, Mint, Multisig},
};
use std::str::FromStr;

// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
fn spl_token_id() -> Pubkey {
    Pubkey::new_from_array(spl_token::id().to_bytes())
pub fn spl_token_id_v2_0() -> Pubkey {
    Pubkey::new_from_array(spl_token_v2_0::id().to_bytes())
}

// Returns all known SPL Token program ids
pub fn spl_token_ids() -> Vec<Pubkey> {
    vec![spl_token_id()]
}

// Check if the provided program id is a known SPL Token program id
pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool {
    *program_id == spl_token_id()
}

// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
    Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
}

// The program id of the `spl_token_native_mint` account
pub fn spl_token_native_mint_program_id() -> Pubkey {
    spl_token_id()
pub fn spl_token_v2_0_native_mint() -> Pubkey {
    Pubkey::new_from_array(spl_token_v2_0::native_mint::id().to_bytes())
}

// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
    SplTokenPubkey::new_from_array(pubkey.to_bytes())
}

// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
    Pubkey::new_from_array(pubkey.to_bytes())
}

@@ -133,7 +116,6 @@ pub fn parse_token(

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
#[allow(clippy::large_enum_variant)]
pub enum TokenAccountType {
    Account(UiTokenAccount),
    Mint(UiMint),
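All of the renamed conversion helpers above do the same thing: round-trip the raw 32-byte array between two `Pubkey` types that are structurally identical but defined in different crates, so no re-encoding is ever needed. A self-contained sketch with stand-in newtypes (neither type name is from the real crates):

```rust
// Sketch: converting between two byte-identical pubkey types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct SdkPubkey([u8; 32]);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct SplPubkey([u8; 32]);

fn spl_token_pubkey(pubkey: &SdkPubkey) -> SplPubkey {
    // Byte-for-byte copy; the types differ only in which crate defines them.
    SplPubkey(pubkey.0)
}

fn pubkey_from_spl_token(pubkey: &SplPubkey) -> SdkPubkey {
    SdkPubkey(pubkey.0)
}

fn main() {
    let sdk = SdkPubkey([7; 32]);
    // The conversion is lossless in both directions.
    assert_eq!(pubkey_from_spl_token(&spl_token_pubkey(&sdk)), sdk);
}
```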
@@ -1,11 +1,9 @@
use {
    crate::{parse_account_data::ParseAccountError, StringAmount},
    solana_sdk::{
        clock::{Epoch, Slot},
        pubkey::Pubkey,
    },
    solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
    clock::{Epoch, Slot},
    pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
    let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
@@ -123,7 +121,8 @@ struct UiEpochCredits {

#[cfg(test)]
mod test {
    use {super::*, solana_vote_program::vote_state::VoteStateVersions};
    use super::*;
    use solana_vote_program::vote_state::VoteStateVersions;

    #[test]
    fn test_parse_vote() {
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.11.0"
|
||||
version = "1.7.10"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.7.10" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.10" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.10" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.10" }
|
||||
solana-version = { path = "../version", version = "=1.7.10" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.1"
|
||||
log = "0.4.14"
|
||||
rayon = "1.5.1"
|
||||
solana-logger = { path = "../logger", version = "=1.11.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.11.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.11.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.11.0" }
|
||||
solana-version = { path = "../version", version = "=1.11.0" }
|
||||
crossbeam-channel = "0.4"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,25 +1,17 @@
#![allow(clippy::integer_arithmetic)]
#[macro_use]
extern crate log;
use {
    clap::{crate_description, crate_name, value_t, App, Arg},
    rayon::prelude::*,
    solana_measure::measure::Measure,
    solana_runtime::{
        accounts::{
            test_utils::{create_test_accounts, update_accounts_bench},
            Accounts,
        },
        accounts_db::AccountShrinkThreshold,
        accounts_index::AccountSecondaryIndexes,
        ancestors::Ancestors,
        rent_collector::RentCollector,
    },
    solana_sdk::{
        genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule,
    },
    std::{env, fs, path::PathBuf},
use clap::{crate_description, crate_name, value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
    accounts::{create_test_accounts, update_accounts_bench, Accounts},
    accounts_db::AccountShrinkThreshold,
    accounts_index::AccountSecondaryIndexes,
    ancestors::Ancestors,
};
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
use std::{env, fs, path::PathBuf};

fn main() {
    solana_logger::setup();
@@ -68,7 +60,7 @@ fn main() {
    if fs::remove_dir_all(path.clone()).is_err() {
        println!("Warning: Couldn't remove {:?}", path);
    }
    let accounts = Accounts::new_with_config_for_benches(
    let accounts = Accounts::new_with_config(
        vec![path],
        &ClusterType::Testnet,
        AccountSecondaryIndexes::default(),
@@ -110,7 +102,7 @@ fn main() {
    for x in 0..iterations {
        if clean {
            let mut time = Measure::start("clean");
            accounts.accounts_db.clean_accounts(None, false, None);
            accounts.accounts_db.clean_accounts(None, false);
            time.stop();
            println!("{}", time);
            for slot in 0..num_slots {
@@ -120,12 +112,7 @@ fn main() {
        } else {
            let mut pubkeys: Vec<Pubkey> = vec![];
            let mut time = Measure::start("hash");
            let results = accounts.accounts_db.update_accounts_hash(
                0,
                &ancestors,
                &EpochSchedule::default(),
                &RentCollector::default(),
            );
            let results = accounts.accounts_db.update_accounts_hash(0, &ancestors);
            time.stop();
            let mut time_store = Measure::start("hash using store");
            let results_store = accounts.accounts_db.update_accounts_hash_with_index_option(
@@ -135,9 +122,6 @@ fn main() {
                &ancestors,
                None,
                false,
                &EpochSchedule::default(),
                &RentCollector::default(),
                false,
            );
            time_store.stop();
            if results != results_store {
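Each phase of the bench loop above is bracketed by `Measure::start`/`stop` and then printed. A dependency-free sketch of that timer shape, for illustration only (this is not `solana_measure`'s actual implementation):

```rust
// Sketch of the start/stop timing pattern used by the bench loop.
use std::time::Instant;

struct Measure {
    name: &'static str,
    start: Instant,
    duration_ms: u128,
}

impl Measure {
    fn start(name: &'static str) -> Self {
        Self { name, start: Instant::now(), duration_ms: 0 }
    }

    fn stop(&mut self) {
        // Freeze the elapsed time so it can be reported later.
        self.duration_ms = self.start.elapsed().as_millis();
    }
}

fn main() {
    let mut time = Measure::start("clean");
    // ... the phase being measured runs here ...
    time.stop();
    println!("{} took {}ms", time.name, time.duration_ms);
}
```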
@@ -1,8 +1,8 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
edition = "2018"
name = "solana-accounts-cluster-bench"
version = "1.11.0"
version = "1.7.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,28 +10,27 @@ publish = false

[dependencies]
clap = "2.33.1"
log = "0.4.14"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
rayon = "1.4.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.7.10" }
solana-clap-utils = { path = "../clap-utils", version = "=1.7.10" }
solana-client = { path = "../client", version = "=1.7.10" }
solana-core = { path = "../core", version = "=1.7.10" }
solana-faucet = { path = "../faucet", version = "=1.7.10" }
solana-gossip = { path = "../gossip", version = "=1.7.10" }
solana-logger = { path = "../logger", version = "=1.7.10" }
solana-measure = { path = "../measure", version = "=1.7.10" }
solana-net-utils = { path = "../net-utils", version = "=1.7.10" }
solana-runtime = { path = "../runtime", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
solana-streamer = { path = "../streamer", version = "=1.7.10" }
solana-transaction-status = { path = "../transaction-status", version = "=1.7.10" }
solana-version = { path = "../version", version = "=1.7.10" }
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }

[dev-dependencies]
solana-core = { path = "../core", version = "=1.11.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.7.10" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,39 +1,41 @@
#![allow(clippy::integer_arithmetic)]
use {
    clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
    log::*,
    rand::{thread_rng, Rng},
    rayon::prelude::*,
    solana_account_decoder::parse_token::spl_token_pubkey,
    solana_clap_utils::input_parsers::pubkey_of,
    solana_client::{rpc_client::RpcClient, transaction_executor::TransactionExecutor},
    solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
    solana_gossip::gossip_service::discover,
    solana_runtime::inline_spl_token,
    solana_sdk::{
        commitment_config::CommitmentConfig,
        instruction::{AccountMeta, Instruction},
        message::Message,
        pubkey::Pubkey,
        rpc_port::DEFAULT_RPC_PORT,
        signature::{read_keypair_file, Keypair, Signer},
        system_instruction, system_program,
        transaction::Transaction,
    },
    solana_streamer::socket::SocketAddrSpace,
    solana_transaction_status::parse_token::spl_token_instruction,
    std::{
        cmp::min,
        net::SocketAddr,
        process::exit,
        sync::{
            atomic::{AtomicU64, Ordering},
            Arc,
        },
        thread::sleep,
        time::{Duration, Instant},
    },
use clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg};
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
use solana_clap_utils::input_parsers::pubkey_of;
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
use solana_gossip::gossip_service::discover;
use solana_measure::measure::Measure;
use solana_runtime::inline_spl_token_v2_0;
use solana_sdk::{
    commitment_config::CommitmentConfig,
    message::Message,
    pubkey::Pubkey,
    rpc_port::DEFAULT_RPC_PORT,
    signature::{read_keypair_file, Keypair, Signature, Signer},
    system_instruction, system_program,
    timing::timestamp,
    transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
use std::{
    net::SocketAddr,
    process::exit,
    sync::{
        atomic::{AtomicBool, AtomicU64, Ordering},
        Arc, RwLock,
    },
    thread::{sleep, Builder, JoinHandle},
    time::{Duration, Instant},
};

// Create and close messages both require 2 signatures; if transaction construction changes, update
// this magic number
const NUM_SIGNATURES: u64 = 2;

pub fn airdrop_lamports(
    client: &RpcClient,
@@ -53,7 +55,7 @@ pub fn airdrop_lamports(
        id.pubkey(),
    );

    let blockhash = client.get_latest_blockhash().unwrap();
    let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
    match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
        Ok(transaction) => {
            let mut tries = 0;
@@ -98,6 +100,160 @@ pub fn airdrop_lamports(
    true
}

// signature, timestamp, id
type PendingQueue = Vec<(Signature, u64, u64)>;

struct TransactionExecutor {
    sig_clear_t: JoinHandle<()>,
    sigs: Arc<RwLock<PendingQueue>>,
    cleared: Arc<RwLock<Vec<u64>>>,
    exit: Arc<AtomicBool>,
    counter: AtomicU64,
    client: RpcClient,
}

impl TransactionExecutor {
    fn new(entrypoint_addr: SocketAddr) -> Self {
        let sigs = Arc::new(RwLock::new(Vec::new()));
        let cleared = Arc::new(RwLock::new(Vec::new()));
        let exit = Arc::new(AtomicBool::new(false));
        let sig_clear_t = Self::start_sig_clear_thread(&exit, &sigs, &cleared, entrypoint_addr);
        let client =
            RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
        Self {
            sigs,
            cleared,
            sig_clear_t,
            exit,
            counter: AtomicU64::new(0),
            client,
        }
    }

    fn num_outstanding(&self) -> usize {
        self.sigs.read().unwrap().len()
    }

    fn push_transactions(&self, txs: Vec<Transaction>) -> Vec<u64> {
        let mut ids = vec![];
        let new_sigs = txs.into_iter().filter_map(|tx| {
            let id = self.counter.fetch_add(1, Ordering::Relaxed);
            ids.push(id);
            match self.client.send_transaction(&tx) {
                Ok(sig) => {
                    return Some((sig, timestamp(), id));
                }
                Err(e) => {
                    info!("error: {:#?}", e);
                }
            }
            None
        });
        let mut sigs_w = self.sigs.write().unwrap();
        sigs_w.extend(new_sigs);
        ids
    }

    fn drain_cleared(&self) -> Vec<u64> {
        std::mem::take(&mut *self.cleared.write().unwrap())
    }

    fn close(self) {
        self.exit.store(true, Ordering::Relaxed);
        self.sig_clear_t.join().unwrap();
    }

    fn start_sig_clear_thread(
        exit: &Arc<AtomicBool>,
        sigs: &Arc<RwLock<PendingQueue>>,
        cleared: &Arc<RwLock<Vec<u64>>>,
        entrypoint_addr: SocketAddr,
    ) -> JoinHandle<()> {
        let sigs = sigs.clone();
        let exit = exit.clone();
        let cleared = cleared.clone();
        Builder::new()
            .name("sig_clear".to_string())
            .spawn(move || {
                let client = RpcClient::new_socket_with_commitment(
                    entrypoint_addr,
                    CommitmentConfig::confirmed(),
                );
                let mut success = 0;
                let mut error_count = 0;
                let mut timed_out = 0;
                let mut last_log = Instant::now();
                while !exit.load(Ordering::Relaxed) {
                    let sigs_len = sigs.read().unwrap().len();
                    if sigs_len > 0 {
                        let mut sigs_w = sigs.write().unwrap();
                        let mut start = Measure::start("sig_status");
                        let statuses: Vec<_> = sigs_w
                            .chunks(200)
                            .flat_map(|sig_chunk| {
                                let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
                                client
                                    .get_signature_statuses(&only_sigs)
                                    .expect("status fail")
                                    .value
                            })
                            .collect();
                        let mut num_cleared = 0;
                        let start_len = sigs_w.len();
                        let now = timestamp();
                        let mut new_ids = vec![];
                        let mut i = 0;
                        let mut j = 0;
                        while i != sigs_w.len() {
                            let mut retain = true;
                            let sent_ts = sigs_w[i].1;
                            if let Some(e) = &statuses[j] {
                                debug!("error: {:?}", e);
                                if e.status.is_ok() {
                                    success += 1;
                                } else {
                                    error_count += 1;
                                }
                                num_cleared += 1;
                                retain = false;
                            } else if now - sent_ts > 30_000 {
                                retain = false;
                                timed_out += 1;
                            }
                            if !retain {
                                new_ids.push(sigs_w.remove(i).2);
                            } else {
                                i += 1;
                            }
                            j += 1;
                        }
                        let final_sigs_len = sigs_w.len();
                        drop(sigs_w);
                        cleared.write().unwrap().extend(new_ids);
                        start.stop();
                        debug!(
                            "sigs len: {:?} success: {} took: {}ms cleared: {}/{}",
                            final_sigs_len,
                            success,
                            start.as_ms(),
                            num_cleared,
                            start_len,
                        );
                        if last_log.elapsed().as_millis() > 5000 {
                            info!(
                                "success: {} error: {} timed_out: {}",
                                success, error_count, timed_out,
                            );
                            last_log = Instant::now();
                        }
                    }
                    sleep(Duration::from_millis(200));
                }
            })
            .unwrap()
    }
}
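The `TransactionExecutor` above is used as a fire-and-poll pipeline: callers push signed transactions, the background `sig_clear` thread resolves their statuses, and callers periodically drain the ids that have cleared. A toy sketch of that contract with stand-in types and no RPC involved, just to show the intended call sequence:

```rust
// Toy sketch of the push / drain / close contract; FakeExecutor is hypothetical.
use std::collections::HashSet;

struct FakeExecutor {
    outstanding: usize,
}

impl FakeExecutor {
    fn push_transactions(&mut self, txs: Vec<&'static str>) -> Vec<u64> {
        // Assign one id per submitted transaction, like the real counter does.
        self.outstanding += txs.len();
        (0..txs.len() as u64).collect()
    }

    fn drain_cleared(&mut self) -> Vec<u64> {
        // In the real executor, a background thread moves ids here over time.
        let done = self.outstanding;
        self.outstanding = 0;
        (0..done as u64).collect()
    }
}

fn main() {
    let mut executor = FakeExecutor { outstanding: 0 };
    let ids: HashSet<u64> = executor.push_transactions(vec!["tx0", "tx1"]).into_iter().collect();
    let cleared: HashSet<u64> = executor.drain_cleared().into_iter().collect();
    // Every pushed id eventually comes back cleared (or timed out).
    assert_eq!(ids, cleared);
}
```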
struct SeedTracker {
    max_created: Arc<AtomicU64>,
    max_closed: Arc<AtomicU64>,
@@ -116,9 +272,9 @@ fn make_create_message(

    let instructions: Vec<_> = (0..num_instructions)
        .into_iter()
        .flat_map(|_| {
        .map(|_| {
            let program_id = if mint.is_some() {
                inline_spl_token::id()
                inline_spl_token_v2_0::id()
            } else {
                system_program::id()
            };
@@ -135,12 +291,12 @@ fn make_create_message(
                &program_id,
            )];
            if let Some(mint_address) = mint {
                instructions.push(spl_token_instruction(
                    spl_token::instruction::initialize_account(
                        &spl_token::id(),
                        &spl_token_pubkey(&to_pubkey),
                        &spl_token_pubkey(&mint_address),
                        &spl_token_pubkey(&base_keypair.pubkey()),
                instructions.push(spl_token_v2_0_instruction(
                    spl_token_v2_0::instruction::initialize_account(
                        &spl_token_v2_0::id(),
                        &spl_token_v2_0_pubkey(&to_pubkey),
                        &spl_token_v2_0_pubkey(&mint_address),
                        &spl_token_v2_0_pubkey(&base_keypair.pubkey()),
                    )
                    .unwrap(),
                ));
@@ -149,6 +305,7 @@ fn make_create_message(
            instructions
        })
        .collect();
    let instructions: Vec<_> = instructions.into_iter().flatten().collect();

    Message::new(&instructions, Some(&keypair.pubkey()))
}
@@ -156,48 +313,42 @@ fn make_create_message(
fn make_close_message(
    keypair: &Keypair,
    base_keypair: &Keypair,
    max_created: Arc<AtomicU64>,
    max_closed: Arc<AtomicU64>,
    max_closed_seed: Arc<AtomicU64>,
    num_instructions: usize,
    balance: u64,
    spl_token: bool,
) -> Message {
    let instructions: Vec<_> = (0..num_instructions)
        .into_iter()
        .filter_map(|_| {
        .map(|_| {
            let program_id = if spl_token {
                inline_spl_token::id()
                inline_spl_token_v2_0::id()
            } else {
                system_program::id()
            };
            let max_created_seed = max_created.load(Ordering::Relaxed);
            let max_closed_seed = max_closed.load(Ordering::Relaxed);
            if max_closed_seed >= max_created_seed {
                return None;
            }
            let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
            let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
            let address =
                Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
            if spl_token {
                Some(spl_token_instruction(
                    spl_token::instruction::close_account(
                        &spl_token::id(),
                        &spl_token_pubkey(&address),
                        &spl_token_pubkey(&keypair.pubkey()),
                        &spl_token_pubkey(&base_keypair.pubkey()),
                spl_token_v2_0_instruction(
                    spl_token_v2_0::instruction::close_account(
                        &spl_token_v2_0::id(),
                        &spl_token_v2_0_pubkey(&address),
                        &spl_token_v2_0_pubkey(&keypair.pubkey()),
                        &spl_token_v2_0_pubkey(&base_keypair.pubkey()),
                        &[],
                    )
                    .unwrap(),
                ))
                )
            } else {
                Some(system_instruction::transfer_with_seed(
                system_instruction::transfer_with_seed(
                    &address,
                    &base_keypair.pubkey(),
                    seed,
                    &program_id,
                    &keypair.pubkey(),
                    balance,
                ))
                )
            }
        })
        .collect();
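`make_close_message` never stores the addresses it must close; it re-derives each one from the base keypair plus a monotonically increasing seed counter, which is why the `SeedTracker` atomics are all the bookkeeping the bench needs. A sketch of that derivation step, assuming the `solana-sdk` crate (`next_close_target` itself is a hypothetical helper):

```rust
// Sketch: derive the next account to reclaim from a shared seed counter.
use solana_sdk::pubkey::Pubkey;
use std::sync::atomic::{AtomicU64, Ordering};

fn next_close_target(
    base: &Pubkey,
    owner: &Pubkey,
    max_created: &AtomicU64,
    max_closed: &AtomicU64,
) -> Option<Pubkey> {
    // Stop once every created seed has been closed.
    if max_closed.load(Ordering::Relaxed) >= max_created.load(Ordering::Relaxed) {
        return None;
    }
    // Claim the next seed; the address is deterministic given base + seed + owner.
    let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
    Pubkey::create_with_seed(base, &seed, owner).ok()
}
```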
@@ -217,7 +368,6 @@ fn run_accounts_bench(
    maybe_lamports: Option<u64>,
    num_instructions: usize,
    mint: Option<Pubkey>,
    reclaim_accounts: bool,
) {
    assert!(num_instructions > 0);
    let client =
@@ -225,10 +375,10 @@ fn run_accounts_bench(

    info!("Targeting {}", entrypoint_addr);

    let mut latest_blockhash = Instant::now();
    let mut last_blockhash = Instant::now();
    let mut last_log = Instant::now();
    let mut count = 0;
    let mut blockhash = client.get_latest_blockhash().expect("blockhash");
    let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash");
    let mut tx_sent_count = 0;
    let mut total_accounts_created = 0;
    let mut total_accounts_closed = 0;
@@ -256,33 +406,16 @@ fn run_accounts_bench(

    let executor = TransactionExecutor::new(entrypoint_addr);

    // Create and close messages both require 2 signatures, fake a 2 signature message to calculate fees
    let mut message = Message::new(
        &[
            Instruction::new_with_bytes(
                Pubkey::new_unique(),
                &[],
                vec![AccountMeta::new(Pubkey::new_unique(), true)],
            ),
            Instruction::new_with_bytes(
                Pubkey::new_unique(),
                &[],
                vec![AccountMeta::new(Pubkey::new_unique(), true)],
            ),
        ],
        None,
    );

    loop {
        if latest_blockhash.elapsed().as_millis() > 10_000 {
            blockhash = client.get_latest_blockhash().expect("blockhash");
            latest_blockhash = Instant::now();
        if last_blockhash.elapsed().as_millis() > 10_000 {
            recent_blockhash = client.get_recent_blockhash().expect("blockhash");
            last_blockhash = Instant::now();
        }

        message.recent_blockhash = blockhash;
        let fee = client
            .get_fee_for_message(&message)
            .expect("get_fee_for_message");
        let fee = recent_blockhash
            .1
            .lamports_per_signature
            .saturating_mul(NUM_SIGNATURES);
        let lamports = min_balance + fee;

        for (i, balance) in balances.iter_mut().enumerate() {
@@ -331,7 +464,7 @@ fn run_accounts_bench(
                        mint,
                    );
                    let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
                    Transaction::new(&signers, message, blockhash)
                    Transaction::new(&signers, message, recent_blockhash.0)
                })
                .collect();
            balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
@@ -357,14 +490,13 @@ fn run_accounts_bench(
                    let message = make_close_message(
                        payer_keypairs[0],
                        &base_keypair,
                        seed_tracker.max_created.clone(),
                        seed_tracker.max_closed.clone(),
                        1,
                        min_balance,
                        mint.is_some(),
                    );
                    let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
                    Transaction::new(&signers, message, blockhash)
                    Transaction::new(&signers, message, recent_blockhash.0)
                })
                .collect();
            balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
@@ -380,7 +512,7 @@ fn run_accounts_bench(
        }

        count += 1;
        if last_log.elapsed().as_millis() > 3000 || count >= iterations {
        if last_log.elapsed().as_millis() > 3000 {
            info!(
                "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
                total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
@@ -395,83 +527,6 @@ fn run_accounts_bench(
        }
    }
    executor.close();

    if reclaim_accounts {
        let executor = TransactionExecutor::new(entrypoint_addr);
        loop {
            let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
            let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);

            if latest_blockhash.elapsed().as_millis() > 10_000 {
                blockhash = client.get_latest_blockhash().expect("blockhash");
                latest_blockhash = Instant::now();
            }
            message.recent_blockhash = blockhash;
            let fee = client
                .get_fee_for_message(&message)
                .expect("get_fee_for_message");

            let sigs_len = executor.num_outstanding();
            if sigs_len < batch_size && max_closed_seed < max_created_seed {
                let num_to_close = min(
                    batch_size - sigs_len,
                    (max_created_seed - max_closed_seed) as usize,
                );
                if num_to_close >= payer_keypairs.len() {
                    info!("closing {} accounts", num_to_close);
                    let chunk_size = num_to_close / payer_keypairs.len();
                    info!("{:?} chunk_size", chunk_size);
                    if chunk_size > 0 {
                        for (i, keypair) in payer_keypairs.iter().enumerate() {
                            let txs: Vec<_> = (0..chunk_size)
                                .into_par_iter()
                                .filter_map(|_| {
                                    let message = make_close_message(
                                        keypair,
                                        &base_keypair,
                                        seed_tracker.max_created.clone(),
                                        seed_tracker.max_closed.clone(),
                                        num_instructions,
                                        min_balance,
                                        mint.is_some(),
                                    );
                                    if message.instructions.is_empty() {
                                        return None;
                                    }
                                    let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
                                    Some(Transaction::new(&signers, message, blockhash))
                                })
                                .collect();
                            balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
                            info!("close txs: {}", txs.len());
                            let new_ids = executor.push_transactions(txs);
                            info!("close ids: {}", new_ids.len());
                            tx_sent_count += new_ids.len();
                            total_accounts_closed += (num_instructions * new_ids.len()) as u64;
                        }
                    }
                }
            } else {
                let _ = executor.drain_cleared();
            }
            count += 1;
            if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
                info!(
                    "total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
                    total_accounts_closed, tx_sent_count, count, balances
                );
                last_log = Instant::now();
            }

            if max_closed_seed >= max_created_seed {
                break;
            }
            if executor.num_outstanding() >= batch_size {
                sleep(Duration::from_millis(500));
            }
        }
        executor.close();
    }
}

fn main() {
@@ -547,7 +602,7 @@ fn main() {
                .long("iterations")
                .takes_value(true)
                .value_name("NUM")
                .help("Number of iterations to make. 0 = unlimited iterations."),
                .help("Number of iterations to make"),
        )
        .arg(
            Arg::with_name("check_gossip")
@@ -560,12 +615,6 @@ fn main() {
                .takes_value(true)
                .help("Mint address to initialize account"),
        )
        .arg(
            Arg::with_name("reclaim_accounts")
                .long("reclaim-accounts")
                .takes_value(false)
                .help("Reclaim accounts after session ends; incompatible with --iterations 0"),
        )
        .get_matches();

    let skip_gossip = !matches.is_present("check_gossip");
@@ -647,33 +696,23 @@ fn main() {
        lamports,
        num_instructions,
        mint,
        matches.is_present("reclaim_accounts"),
    );
}

#[cfg(test)]
pub mod test {
    use {
        super::*,
        solana_core::validator::ValidatorConfig,
        solana_faucet::faucet::run_local_faucet,
        solana_local_cluster::{
            local_cluster::{ClusterConfig, LocalCluster},
            validator_configs::make_identical_validator_configs,
        },
        solana_measure::measure::Measure,
        solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
        solana_test_validator::TestValidator,
        spl_token::{
            solana_program::program_pack::Pack,
            state::{Account, Mint},
        },
    use super::*;
    use solana_core::validator::ValidatorConfig;
    use solana_local_cluster::{
        local_cluster::{ClusterConfig, LocalCluster},
        validator_configs::make_identical_validator_configs,
    };
    use solana_sdk::poh_config::PohConfig;

    #[test]
    fn test_accounts_cluster_bench() {
        solana_logger::setup();
        let validator_config = ValidatorConfig::default_for_test();
        let validator_config = ValidatorConfig::default();
        let num_nodes = 1;
        let mut config = ClusterConfig {
            cluster_lamports: 10_000_000,
@@ -703,108 +742,6 @@ pub mod test {
            maybe_lamports,
            num_instructions,
            None,
            false,
        );
        start.stop();
        info!("{}", start);
    }

    #[test]
    fn test_create_then_reclaim_spl_token_accounts() {
        solana_logger::setup();
        let mint_keypair = Keypair::new();
        let mint_pubkey = mint_keypair.pubkey();
        let faucet_addr = run_local_faucet(mint_keypair, None);
        let test_validator = TestValidator::with_custom_fees(
            mint_pubkey,
            1,
            Some(faucet_addr),
            SocketAddrSpace::Unspecified,
        );
        let rpc_client =
            RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

        // Created funder
        let funder = Keypair::new();
        let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
        let signature = rpc_client
            .request_airdrop_with_blockhash(
                &funder.pubkey(),
                sol_to_lamports(1.0),
                &latest_blockhash,
            )
            .unwrap();
        rpc_client
            .confirm_transaction_with_spinner(
                &signature,
                &latest_blockhash,
                CommitmentConfig::confirmed(),
            )
            .unwrap();

        // Create Mint
        let spl_mint_keypair = Keypair::new();
        let spl_mint_len = Mint::get_packed_len();
        let spl_mint_rent = rpc_client
            .get_minimum_balance_for_rent_exemption(spl_mint_len)
            .unwrap();
        let transaction = Transaction::new_signed_with_payer(
            &[
                system_instruction::create_account(
                    &funder.pubkey(),
                    &spl_mint_keypair.pubkey(),
                    spl_mint_rent,
                    spl_mint_len as u64,
                    &inline_spl_token::id(),
                ),
                spl_token_instruction(
                    spl_token::instruction::initialize_mint(
                        &spl_token::id(),
                        &spl_token_pubkey(&spl_mint_keypair.pubkey()),
                        &spl_token_pubkey(&spl_mint_keypair.pubkey()),
                        None,
                        2,
                    )
                    .unwrap(),
                ),
            ],
            Some(&funder.pubkey()),
            &[&funder, &spl_mint_keypair],
            latest_blockhash,
        );
        let _sig = rpc_client
            .send_and_confirm_transaction(&transaction)
            .unwrap();

        let account_len = Account::get_packed_len();
        let minimum_balance = rpc_client
            .get_minimum_balance_for_rent_exemption(account_len)
            .unwrap();

        let iterations = 5;
        let batch_size = 100;
        let close_nth_batch = 0;
        let num_instructions = 4;
        let mut start = Measure::start("total accounts run");
        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        run_accounts_bench(
            test_validator
                .rpc_url()
                .replace("http://", "")
                .parse()
                .unwrap(),
            faucet_addr,
            &[&keypair0, &keypair1, &keypair2],
            iterations,
            Some(account_len as u64),
            batch_size,
            close_nth_batch,
            Some(minimum_balance),
            num_instructions,
            Some(spl_mint_keypair.pubkey()),
            true,
        );
        start.stop();
        info!("{}", start);
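The two fee paths in the hunks above differ in where the price comes from: one side asks the cluster to price a representative two-signature message (`get_fee_for_message`), while the other multiplies `lamports_per_signature` from the cached recent blockhash by the hard-coded `NUM_SIGNATURES`. The older path's arithmetic, as a self-contained sketch:

```rust
// Sketch of the per-account funding math used by the bench's older fee path.
const NUM_SIGNATURES: u64 = 2; // create and close messages both carry 2 signatures

fn lamports_per_new_account(lamports_per_signature: u64, min_balance: u64) -> u64 {
    // Fee for one transaction, then the rent-exempt minimum on top.
    let fee = lamports_per_signature.saturating_mul(NUM_SIGNATURES);
    min_balance.saturating_add(fee)
}

fn main() {
    // e.g. 10 lamports/signature and a 1_000-lamport rent-exempt minimum:
    assert_eq!(lamports_per_new_account(10, 1_000), 1_020);
}
```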
@@ -1,8 +1,8 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
edition = "2018"
name = "solana-banking-bench"
version = "1.11.0"
version = "1.7.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,21 +10,22 @@ publish = false

[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.5"
log = "0.4.14"
crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-poh = { path = "../poh", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
rayon = "1.5.0"
solana-core = { path = "../core", version = "=1.7.10" }
solana-clap-utils = { path = "../clap-utils", version = "=1.7.10" }
solana-gossip = { path = "../gossip", version = "=1.7.10" }
solana-ledger = { path = "../ledger", version = "=1.7.10" }
solana-logger = { path = "../logger", version = "=1.7.10" }
solana-measure = { path = "../measure", version = "=1.7.10" }
solana-perf = { path = "../perf", version = "=1.7.10" }
solana-poh = { path = "../poh", version = "=1.7.10" }
solana-runtime = { path = "../runtime", version = "=1.7.10" }
solana-streamer = { path = "../streamer", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
solana-version = { path = "../version", version = "=1.7.10" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,38 +1,35 @@
#![allow(clippy::integer_arithmetic)]
use {
    clap::{crate_description, crate_name, value_t, App, Arg},
    crossbeam_channel::{unbounded, Receiver},
    log::*,
    rand::{thread_rng, Rng},
    rayon::prelude::*,
    solana_core::banking_stage::BankingStage,
    solana_gossip::cluster_info::{ClusterInfo, Node},
    solana_ledger::{
        blockstore::Blockstore,
        genesis_utils::{create_genesis_config, GenesisConfigInfo},
        get_tmp_ledger_path,
        leader_schedule_cache::LeaderScheduleCache,
    },
    solana_measure::measure::Measure,
    solana_perf::packet::to_packet_batches,
    solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
    solana_runtime::{
        accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
        cost_model::CostModel,
    },
    solana_sdk::{
        hash::Hash,
        signature::{Keypair, Signature},
        system_transaction,
        timing::{duration_as_us, timestamp},
        transaction::Transaction,
    },
    solana_streamer::socket::SocketAddrSpace,
    std::{
        sync::{atomic::Ordering, Arc, Mutex, RwLock},
        thread::sleep,
        time::{Duration, Instant},
    },
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::banking_stage::BankingStage;
use solana_gossip::{cluster_info::ClusterInfo, cluster_info::Node};
use solana_ledger::{
    blockstore::Blockstore,
    genesis_utils::{create_genesis_config, GenesisConfigInfo},
    get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry};
use solana_runtime::{
    accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_sdk::{
    hash::Hash,
    signature::Keypair,
    signature::Signature,
    system_transaction,
    timing::{duration_as_us, timestamp},
    transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
    sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
    thread::sleep,
    time::{Duration, Instant},
};

fn check_txs(
@@ -169,17 +166,11 @@ fn main() {

    let (verified_sender, verified_receiver) = unbounded();
    let (vote_sender, vote_receiver) = unbounded();
    let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
    let (replay_vote_sender, _replay_vote_receiver) = unbounded();
    let bank0 = Bank::new_for_benches(&genesis_config);
    let bank0 = Bank::new(&genesis_config);
    let mut bank_forks = BankForks::new(bank0);
    let mut bank = bank_forks.working_bank();

    // set cost tracker limits to MAX so it will not filter out TXs
    bank.write_cost_tracker()
        .unwrap()
        .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);

    info!("threads: {} txs: {}", num_threads, total_num_transactions);

    let same_payer = matches.is_present("same_payer");
@@ -210,27 +201,21 @@ fn main() {
        });
        bank.clear_signatures();
        //sanity check, make sure all the transactions can execute in parallel

        let res = bank.process_transactions(transactions.iter());
        let res = bank.process_transactions(&transactions);
        for r in res {
            assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
        }
        bank.clear_signatures();
    }

    let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
    let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Arc::new(
            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(
            &bank,
            &blockstore,
            None,
            Some(leader_schedule_cache.clone()),
        );
        let (exit, poh_recorder, poh_service, signal_receiver) =
            create_test_recorder(&bank, &blockstore, None);
        let cluster_info = ClusterInfo::new(
            Node::new_localhost().info,
            Arc::new(Keypair::new()),
@@ -241,11 +226,9 @@ fn main() {
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            tpu_vote_receiver,
            vote_receiver,
            None,
            replay_vote_sender,
            Arc::new(RwLock::new(CostModel::default())),
        );
        poh_recorder.lock().unwrap().set_bank(&bank);

@@ -325,10 +308,11 @@ fn main() {
            tx_total_us += duration_as_us(&now.elapsed());

            let mut poh_time = Measure::start("poh_time");
            poh_recorder
                .lock()
                .unwrap()
                .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1)));
            poh_recorder.lock().unwrap().reset(
                bank.last_blockhash(),
                bank.slot(),
                Some((bank.slot(), bank.slot() + 1)),
            );
            poh_time.stop();

            let mut new_bank_time = Measure::start("new_bank");
@@ -340,17 +324,9 @@ fn main() {
            bank = bank_forks.working_bank();
            insert_time.stop();

            // set cost tracker limits to MAX so it will not filter out TXs
            bank.write_cost_tracker().unwrap().set_limits(
                std::u64::MAX,
                std::u64::MAX,
                std::u64::MAX,
            );

            poh_recorder.lock().unwrap().set_bank(&bank);
            assert!(poh_recorder.lock().unwrap().bank().is_some());
            if bank.slot() > 32 {
                leader_schedule_cache.set_root(&bank);
                bank_forks.set_root(root, &AbsRequestSender::default(), None);
                root += 1;
            }
@@ -383,7 +359,7 @@ fn main() {
                let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
                tx.signatures[0] = Signature::new(&sig[0..64]);
            }
            verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
            verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
        }

        start += chunk_len;
@@ -405,7 +381,6 @@ fn main() {
    );

    drop(verified_sender);
    drop(tpu_vote_sender);
    drop(vote_sender);
    exit.store(true, Ordering::Relaxed);
    banking_stage.join().unwrap();
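The banking-bench hunks above move from `std::sync::mpsc::Receiver` to `crossbeam_channel::Receiver`, so the senders created by `unbounded()` and the receivers handed to the stage come from one channel family end to end, and dropping the senders is what terminates the consumers. A minimal sketch of that unbounded-channel wiring, assuming the `crossbeam-channel` crate:

```rust
// Sketch: crossbeam unbounded channel, producer/consumer shutdown by drop.
use crossbeam_channel::{unbounded, Receiver};

fn consume(rx: Receiver<u64>) -> u64 {
    // iter() blocks for messages and ends once every sender is dropped.
    rx.iter().sum()
}

fn main() {
    let (tx, rx) = unbounded();
    for v in 0..4 {
        tx.send(v).unwrap();
    }
    drop(tx); // close the channel so the consumer's iterator terminates
    assert_eq!(consume(rx), 6);
}
```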
@@ -1,28 +1,30 @@
[package]
name = "solana-banks-client"
version = "1.11.0"
version = "1.7.10"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-client"
edition = "2021"
edition = "2018"

[dependencies]
borsh = "0.9.3"
bincode = "1.3.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-program = { path = "../sdk/program", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.10" }
solana-program = { path = "../sdk/program", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

[dev-dependencies]
solana-banks-server = { path = "../banks-server", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.7.10" }
solana-banks-server = { path = "../banks-server", version = "=1.7.10" }

[lib]
crate-type = ["lib"]
@@ -1,77 +0,0 @@
use {
    solana_sdk::{
        transaction::TransactionError, transaction_context::TransactionReturnData,
        transport::TransportError,
    },
    std::io,
    tarpc::client::RpcError,
    thiserror::Error,
};

/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
    #[error("client error: {0}")]
    ClientError(&'static str),

    #[error(transparent)]
    Io(#[from] io::Error),

    #[error(transparent)]
    RpcError(#[from] RpcError),

    #[error("transport transaction error: {0}")]
    TransactionError(#[from] TransactionError),

    #[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
    SimulationError {
        err: TransactionError,
        logs: Vec<String>,
        units_consumed: u64,
        return_data: Option<TransactionReturnData>,
    },
}

impl BanksClientError {
    pub fn unwrap(&self) -> TransactionError {
        match self {
            BanksClientError::TransactionError(err)
            | BanksClientError::SimulationError { err, .. } => err.clone(),
            _ => panic!("unexpected transport error"),
        }
    }
}

impl From<BanksClientError> for io::Error {
    fn from(err: BanksClientError) -> Self {
        match err {
            BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
            BanksClientError::Io(err) => err,
            BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
            BanksClientError::TransactionError(err) => {
                Self::new(io::ErrorKind::Other, err.to_string())
            }
            BanksClientError::SimulationError { err, .. } => {
                Self::new(io::ErrorKind::Other, err.to_string())
            }
        }
    }
}

impl From<BanksClientError> for TransportError {
    fn from(err: BanksClientError) -> Self {
        match err {
            BanksClientError::ClientError(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::Io(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::RpcError(err) => {
                Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
            }
            BanksClientError::TransactionError(err) => Self::TransactionError(err),
            BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
        }
    }
}
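The removed `error.rs` above flattens every non-IO failure into `io::ErrorKind::Other` when converting to `io::Error`. A simplified, std-only sketch of that conversion pattern (the real enum also wraps tarpc's `RpcError` and Solana's `TransactionError`, omitted here to stay dependency-free):

```rust
use std::io;

// Simplified stand-in for the removed BanksClientError.
#[derive(Debug)]
enum ClientError {
    Message(&'static str),
    Io(io::Error),
}

impl From<ClientError> for io::Error {
    fn from(err: ClientError) -> Self {
        match err {
            // Non-IO variants are flattened to ErrorKind::Other, mirroring error.rs.
            ClientError::Message(msg) => io::Error::new(io::ErrorKind::Other, msg),
            ClientError::Io(err) => err,
        }
    }
}

fn main() {
    let err: io::Error = ClientError::Message("client error: oops").into();
    assert_eq!(err.kind(), io::ErrorKind::Other);
    println!("{}", err);
}
```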
@@ -5,36 +5,37 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.

pub use {
    crate::error::BanksClientError,
    solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus},
use borsh::BorshDeserialize;
use futures::{future::join_all, Future, FutureExt};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_program::{
    clock::Clock,
    clock::Slot,
    fee_calculator::FeeCalculator,
    hash::Hash,
    program_pack::Pack,
    pubkey::Pubkey,
    rent::Rent,
    sysvar::{self, Sysvar},
};
use {
    borsh::BorshDeserialize,
    futures::{future::join_all, Future, FutureExt, TryFutureExt},
    solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
    solana_program::{
        clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
        rent::Rent, sysvar::Sysvar,
    },
    solana_sdk::{
        account::{from_account, Account},
        commitment_config::CommitmentLevel,
        message::Message,
        signature::Signature,
        transaction::{self, Transaction},
    },
    tarpc::{
        client::{self, NewClient, RequestDispatch},
        context::{self, Context},
        serde_transport::tcp,
        ClientMessage, Response, Transport,
    },
    tokio::{net::ToSocketAddrs, time::Duration},
    tokio_serde::formats::Bincode,
use solana_sdk::{
    account::{from_account, Account},
    commitment_config::CommitmentLevel,
    signature::Signature,
    transaction::{self, Transaction},
    transport,
};

mod error;
use std::io::{self, Error, ErrorKind};
use tarpc::{
    client::{self, channel::RequestDispatch, NewClient},
    context::{self, Context},
    rpc::{ClientMessage, Response},
    serde_transport::tcp,
    Transport,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;

// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -60,55 +61,42 @@ impl BanksClient {
        &mut self,
        ctx: Context,
        transaction: Transaction,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
        self.inner
            .send_transaction_with_context(ctx, transaction)
            .map_err(Into::into)
    ) -> impl Future<Output = io::Result<()>> + '_ {
        self.inner.send_transaction_with_context(ctx, transaction)
    }

    #[deprecated(
        since = "1.9.0",
        note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead"
    )]
    pub fn get_fees_with_commitment_and_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
        #[allow(deprecated)]
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
        self.inner
            .get_fees_with_commitment_and_context(ctx, commitment)
            .map_err(Into::into)
    }

    pub fn get_transaction_status_with_context(
        &mut self,
        ctx: Context,
        signature: Signature,
    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
        self.inner
            .get_transaction_status_with_context(ctx, signature)
            .map_err(Into::into)
    }

    pub fn get_slot_with_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
        self.inner
            .get_slot_with_context(ctx, commitment)
            .map_err(Into::into)
    ) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.inner.get_slot_with_context(ctx, commitment)
    }

    pub fn get_block_height_with_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
        self.inner
            .get_block_height_with_context(ctx, commitment)
            .map_err(Into::into)
    ) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.inner.get_block_height_with_context(ctx, commitment)
    }

    pub fn process_transaction_with_commitment_and_context(
@@ -116,26 +104,9 @@ impl BanksClient {
        ctx: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
        self.inner
            .process_transaction_with_commitment_and_context(ctx, transaction, commitment)
            .map_err(Into::into)
    }

    pub fn process_transaction_with_preflight_and_commitment_and_context(
        &mut self,
        ctx: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
    {
        self.inner
            .process_transaction_with_preflight_and_commitment_and_context(
                ctx,
                transaction,
                commitment,
            )
            .map_err(Into::into)
    }

    pub fn get_account_with_commitment_and_context(
@@ -143,10 +114,9 @@ impl BanksClient {
        ctx: Context,
        address: Pubkey,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.inner
            .get_account_with_commitment_and_context(ctx, address, commitment)
            .map_err(Into::into)
    }

    /// Send a transaction and return immediately. The server will resend the
@@ -155,49 +125,49 @@ impl BanksClient {
    pub fn send_transaction(
        &mut self,
        transaction: Transaction,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<()>> + '_ {
        self.send_transaction_with_context(context::current(), transaction)
    }

    /// Return the cluster clock
    pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
        self.get_account(sysvar::clock::id()).map(|result| {
            let clock_sysvar = result?
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
            from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
                io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
            })
        })
    }

    /// Return the fee parameters associated with a recent, rooted blockhash. The cluster
    /// will use the transaction's blockhash to look up these same fee parameters and
    /// use them to calculate the transaction fee.
    #[deprecated(
        since = "1.9.0",
        note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead"
    )]
    pub fn get_fees(
        &mut self,
    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
        #[allow(deprecated)]
    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
        self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
    }

    /// Return the cluster Sysvar
    pub fn get_sysvar<T: Sysvar>(
        &mut self,
    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
    pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(T::id()).map(|result| {
            let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?;
            from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError(
                "Failed to deserialize sysvar",
            ))
            let sysvar = result?
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
            from_account::<T, _>(&sysvar)
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
        })
    }

    /// Return the cluster rent
    pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ {
    pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
        self.get_sysvar::<Rent>()
    }

    /// Return a recent, rooted blockhash from the server. The cluster will only accept
    /// transactions with a blockhash that has not yet expired. Use the `get_fees`
    /// method to get both a blockhash and the blockhash's last valid slot.
    #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
    pub fn get_recent_blockhash(
        &mut self,
    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
        #[allow(deprecated)]
    pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
        self.get_fees().map(|result| Ok(result?.1))
    }

@@ -207,72 +177,23 @@ impl BanksClient {
        &mut self,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
    ) -> impl Future<Output = transport::Result<()>> + '_ {
        let mut ctx = context::current();
        ctx.deadline += Duration::from_secs(50);
        self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
            .map(|result| match result? {
                None => Err(BanksClientError::ClientError(
                    "invalid blockhash or fee-payer",
                )),
                None => {
                    Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
                }
                Some(transaction_result) => Ok(transaction_result?),
            })
    }

    /// Send a transaction and return any preflight (sanitization or simulation) errors, or return
    /// after the transaction has been rejected or reached the given level of commitment.
    pub fn process_transaction_with_preflight_and_commitment(
        &mut self,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
        let mut ctx = context::current();
        ctx.deadline += Duration::from_secs(50);
        self.process_transaction_with_preflight_and_commitment_and_context(
            ctx,
            transaction,
            commitment,
        )
        .map(|result| match result? {
            BanksTransactionResultWithSimulation {
                result: None,
                simulation_details: _,
            } => Err(BanksClientError::ClientError(
                "invalid blockhash or fee-payer",
            )),
            BanksTransactionResultWithSimulation {
                result: Some(Err(err)),
                simulation_details: Some(simulation_details),
            } => Err(BanksClientError::SimulationError {
                err,
                logs: simulation_details.logs,
                units_consumed: simulation_details.units_consumed,
                return_data: simulation_details.return_data,
            }),
            BanksTransactionResultWithSimulation {
                result: Some(result),
                simulation_details: _,
            } => result.map_err(Into::into),
        })
    }

    /// Send a transaction and return any preflight (sanitization or simulation) errors, or return
    /// after the transaction has been finalized or rejected.
    pub fn process_transaction_with_preflight(
        &mut self,
        transaction: Transaction,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
        self.process_transaction_with_preflight_and_commitment(
            transaction,
            CommitmentLevel::default(),
        )
    }

    /// Send a transaction and return until the transaction has been finalized or rejected.
    pub fn process_transaction(
        &mut self,
        transaction: Transaction,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
    ) -> impl Future<Output = transport::Result<()>> + '_ {
        self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
    }

@@ -280,7 +201,7 @@ impl BanksClient {
        &mut self,
        transactions: Vec<Transaction>,
        commitment: CommitmentLevel,
    ) -> Result<(), BanksClientError> {
    ) -> transport::Result<()> {
        let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
        let futures = clients
            .iter_mut()
@@ -296,21 +217,19 @@ impl BanksClient {
    pub fn process_transactions(
        &mut self,
        transactions: Vec<Transaction>,
    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
    ) -> impl Future<Output = transport::Result<()>> + '_ {
        self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
    }

    /// Return the most recent rooted slot. All transactions at or below this slot
    /// are said to be finalized. The cluster will not fork to a higher slot.
    pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
    pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_slot_with_context(context::current(), CommitmentLevel::default())
    }

    /// Return the most recent rooted block height. All transactions at or below this height
    /// are said to be finalized. The cluster will not fork to a higher block height.
    pub fn get_root_block_height(
        &mut self,
    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
    pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
        self.get_block_height_with_context(context::current(), CommitmentLevel::default())
    }

@@ -320,7 +239,7 @@ impl BanksClient {
        &mut self,
        address: Pubkey,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.get_account_with_commitment_and_context(context::current(), address, commitment)
    }

@@ -329,7 +248,7 @@ impl BanksClient {
    pub fn get_account(
        &mut self,
        address: Pubkey,
    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
        self.get_account_with_commitment(address, CommitmentLevel::default())
    }

@@ -338,11 +257,12 @@ impl BanksClient {
    pub fn get_packed_account_data<T: Pack>(
        &mut self,
        address: Pubkey,
    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(address).map(|result| {
            let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
            let account =
                result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
            T::unpack_from_slice(&account.data)
                .map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
                .map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
        })
    }

@@ -351,10 +271,11 @@ impl BanksClient {
    pub fn get_account_data_with_borsh<T: BorshDeserialize>(
        &mut self,
        address: Pubkey,
    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<T>> + '_ {
        self.get_account(address).map(|result| {
            let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
            T::try_from_slice(&account.data).map_err(Into::into)
            let account =
                result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
            T::try_from_slice(&account.data)
        })
    }

@@ -364,17 +285,14 @@ impl BanksClient {
        &mut self,
        address: Pubkey,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<u64>> + '_ {
        self.get_account_with_commitment_and_context(context::current(), address, commitment)
            .map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
    }

    /// Return the balance in lamports of an account at the given address at the time
    /// of the most recent root slot.
    pub fn get_balance(
        &mut self,
        address: Pubkey,
    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
    pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
        self.get_balance_with_commitment(address, CommitmentLevel::default())
    }

@@ -386,7 +304,7 @@ impl BanksClient {
    pub fn get_transaction_status(
        &mut self,
        signature: Signature,
    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
        self.get_transaction_status_with_context(context::current(), signature)
    }

@@ -394,7 +312,7 @@ impl BanksClient {
    pub async fn get_transaction_statuses(
        &mut self,
        signatures: Vec<Signature>,
    ) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> {
    ) -> io::Result<Vec<Option<TransactionStatus>>> {
        // tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
        let mut clients_and_signatures: Vec<_> = signatures
            .into_iter()
@@ -410,78 +328,36 @@ impl BanksClient {
        // Convert Vec<Result<_, _>> to Result<Vec<_>>
        statuses.into_iter().collect()
    }

    pub fn get_latest_blockhash(
        &mut self,
    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
        self.get_latest_blockhash_with_commitment(CommitmentLevel::default())
            .map(|result| {
                result?
                    .map(|x| x.0)
                    .ok_or(BanksClientError::ClientError("valid blockhash not found"))
                    .map_err(Into::into)
            })
    }

    pub fn get_latest_blockhash_with_commitment(
        &mut self,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
        self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment)
    }

    pub fn get_latest_blockhash_with_commitment_and_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
        self.inner
            .get_latest_blockhash_with_commitment_and_context(ctx, commitment)
            .map_err(Into::into)
    }

    pub fn get_fee_for_message_with_commitment_and_context(
        &mut self,
        ctx: Context,
        commitment: CommitmentLevel,
        message: Message,
    ) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ {
        self.inner
            .get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
            .map_err(Into::into)
    }
}

pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError>
pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
where
    C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
{
    Ok(BanksClient {
        inner: TarpcClient::new(client::Config::default(), transport).spawn(),
        inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
    })
}

pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> {
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
    let transport = tcp::connect(addr, Bincode::default).await?;
    Ok(BanksClient {
        inner: TarpcClient::new(client::Config::default(), transport).spawn(),
        inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
    })
}

#[cfg(test)]
mod tests {
    use {
        super::*,
        solana_banks_server::banks_server::start_local_server,
        solana_runtime::{
            bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
            genesis_utils::create_genesis_config,
        },
        solana_sdk::{message::Message, signature::Signer, system_instruction},
        std::sync::{Arc, RwLock},
        tarpc::transport,
        tokio::{runtime::Runtime, time::sleep},
    use super::*;
    use solana_banks_server::banks_server::start_local_server;
    use solana_runtime::{
        bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
        genesis_utils::create_genesis_config,
    };
    use solana_sdk::{message::Message, signature::Signer, system_instruction};
    use std::sync::{Arc, RwLock};
    use tarpc::transport;
    use tokio::{runtime::Runtime, time::sleep};

    #[test]
    fn test_banks_client_new() {
@@ -490,13 +366,13 @@ mod tests {
    }

    #[test]
    fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
    fn test_banks_server_transfer_via_server() -> io::Result<()> {
        // This test shows the preferred way to interact with BanksServer.
        // It creates a runtime explicitly (no globals via tokio macros) and calls
        // `runtime.block_on()` just once, to run all the async code.

        let genesis = create_genesis_config(10);
        let bank = Bank::new_for_tests(&genesis.genesis_config);
        let bank = Bank::new(&genesis.genesis_config);
        let slot = bank.slot();
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
@@ -509,12 +385,10 @@ mod tests {
        let message = Message::new(&[instruction], Some(&mint_pubkey));

        Runtime::new()?.block_on(async {
            let client_transport =
                start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
                    .await;
            let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
            let mut banks_client = start_client(client_transport).await?;

            let recent_blockhash = banks_client.get_latest_blockhash().await?;
            let recent_blockhash = banks_client.get_recent_blockhash().await?;
            let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
            banks_client.process_transaction(transaction).await.unwrap();
            assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
@@ -523,13 +397,13 @@ mod tests {
    }

    #[test]
    fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
    fn test_banks_server_transfer_via_client() -> io::Result<()> {
        // The caller may not want to hold the connection open until the transaction
        // is processed (or blockhash expires). In this test, we verify the
        // server-side functionality is available to the client.

        let genesis = create_genesis_config(10);
        let bank = Bank::new_for_tests(&genesis.genesis_config);
        let bank = Bank::new(&genesis.genesis_config);
        let slot = bank.slot();
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
@@ -542,14 +416,9 @@ mod tests {
        let message = Message::new(&[instruction], Some(mint_pubkey));

        Runtime::new()?.block_on(async {
            let client_transport =
                start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
                    .await;
            let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
            let mut banks_client = start_client(client_transport).await?;
            let (recent_blockhash, last_valid_block_height) = banks_client
                .get_latest_blockhash_with_commitment(CommitmentLevel::default())
                .await?
                .unwrap();
            let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
            let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
            let signature = transaction.signatures[0];
            banks_client.send_transaction(transaction).await?;
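`get_transaction_statuses` above merges its per-signature results with the `Vec<Result<_, _>>` to `Result<Vec<_>>` collect idiom its comment calls out. A self-contained illustration of just that idiom, standard library only:

```rust
// collect() flips Vec<Result<T, E>> into Result<Vec<T>, E>,
// short-circuiting on the first Err it encounters.
fn main() {
    let ok: Vec<Result<u32, String>> = vec![Ok(1), Ok(2)];
    let merged: Result<Vec<u32>, String> = ok.into_iter().collect();
    assert_eq!(merged, Ok(vec![1, 2]));

    let bad: Vec<Result<u32, String>> = vec![Ok(1), Err("boom".into())];
    let merged: Result<Vec<u32>, String> = bad.into_iter().collect();
    assert_eq!(merged, Err("boom".into()));
}
```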
@@ -1,18 +1,22 @@
[package]
name = "solana-banks-interface"
version = "1.11.0"
version = "1.7.10"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-interface"
edition = "2021"
edition = "2018"

[dependencies]
serde = { version = "1.0.136", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
tarpc = { version = "0.24.1", features = ["full"] }

[dev-dependencies]
tokio = { version = "1", features = ["full"] }

[lib]
crate-type = ["lib"]
@@ -1,19 +1,13 @@
#![allow(deprecated)]

use {
    serde::{Deserialize, Serialize},
    solana_sdk::{
        account::Account,
        clock::Slot,
        commitment_config::CommitmentLevel,
        fee_calculator::FeeCalculator,
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::Signature,
        transaction::{self, Transaction, TransactionError},
        transaction_context::TransactionReturnData,
    },
use serde::{Deserialize, Serialize};
use solana_sdk::{
    account::Account,
    clock::Slot,
    commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator,
    hash::Hash,
    pubkey::Pubkey,
    signature::Signature,
    transaction::{self, Transaction, TransactionError},
};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -31,27 +25,9 @@ pub struct TransactionStatus {
    pub confirmation_status: Option<TransactionConfirmationStatus>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
    pub logs: Vec<String>,
    pub units_consumed: u64,
    pub return_data: Option<TransactionReturnData>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
    pub result: Option<transaction::Result<()>>,
    pub simulation_details: Option<TransactionSimulationDetails>,
}

#[tarpc::service]
pub trait Banks {
    async fn send_transaction_with_context(transaction: Transaction);
    #[deprecated(
        since = "1.9.0",
        note = "Please use `get_fee_for_message_with_commitment_and_context` instead"
    )]
    async fn get_fees_with_commitment_and_context(
        commitment: CommitmentLevel,
    ) -> (FeeCalculator, Hash, Slot);
@@ -59,10 +35,6 @@ pub trait Banks {
        -> Option<TransactionStatus>;
    async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
    async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
    async fn process_transaction_with_preflight_and_commitment_and_context(
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> BanksTransactionResultWithSimulation;
    async fn process_transaction_with_commitment_and_context(
        transaction: Transaction,
        commitment: CommitmentLevel,
@@ -71,22 +43,12 @@ pub trait Banks {
        address: Pubkey,
        commitment: CommitmentLevel,
    ) -> Option<Account>;
    async fn get_latest_blockhash_with_context() -> Hash;
    async fn get_latest_blockhash_with_commitment_and_context(
        commitment: CommitmentLevel,
    ) -> Option<(Hash, u64)>;
    async fn get_fee_for_message_with_commitment_and_context(
        commitment: CommitmentLevel,
        message: Message,
    ) -> Option<u64>;
}

#[cfg(test)]
mod tests {
    use {
        super::*,
        tarpc::{client, transport},
    };
    use super::*;
    use tarpc::{client, transport};

    #[test]
    fn test_banks_client_new() {
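The `#[serde(rename_all = "camelCase")]` attribute on `TransactionSimulationDetails` above controls the wire names of its fields. A minimal sketch of the effect, assuming `serde` (with the `derive` feature) and `serde_json` are available as dependencies:

```rust
use serde::{Deserialize, Serialize};

// Trimmed-down version of TransactionSimulationDetails: snake_case in Rust,
// camelCase on the wire.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SimulationDetails {
    units_consumed: u64,
}

fn main() {
    let json = serde_json::to_string(&SimulationDetails { units_consumed: 42 }).unwrap();
    assert_eq!(json, r#"{"unitsConsumed":42}"#);
}
```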
@@ -1,23 +1,24 @@
[package]
name = "solana-banks-server"
version = "1.11.0"
version = "1.7.10"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-server"
edition = "2021"
edition = "2018"

[dependencies]
bincode = "1.3.3"
crossbeam-channel = "0.5"
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.10" }
solana-runtime = { path = "../runtime", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
solana-metrics = { path = "../metrics", version = "=1.7.10" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"
@@ -1,57 +1,48 @@
use {
    bincode::{deserialize, serialize},
    crossbeam_channel::{unbounded, Receiver, Sender},
    futures::{future, prelude::stream::StreamExt},
    solana_banks_interface::{
        Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
        TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
    },
    solana_runtime::{
        bank::{Bank, TransactionSimulationResult},
        bank_forks::BankForks,
        commitment::BlockCommitmentCache,
    },
    solana_sdk::{
        account::Account,
        clock::Slot,
        commitment_config::CommitmentLevel,
        feature_set::FeatureSet,
        fee_calculator::FeeCalculator,
        hash::Hash,
        message::{Message, SanitizedMessage},
        pubkey::Pubkey,
        signature::Signature,
        transaction::{self, SanitizedTransaction, Transaction},
    },
    solana_send_transaction_service::{
        send_transaction_service::{SendTransactionService, TransactionInfo, DEFAULT_TPU_USE_QUIC},
        tpu_info::NullTpuInfo,
    },
    std::{
        convert::TryFrom,
        io,
        net::{Ipv4Addr, SocketAddr},
        sync::{Arc, RwLock},
        thread::Builder,
        time::Duration,
    },
    tarpc::{
        context::Context,
        serde_transport::tcp,
        server::{self, incoming::Incoming, Channel},
        transport::{self, channel::UnboundedChannel},
        ClientMessage, Response,
    },
    tokio::time::sleep,
    tokio_serde::formats::Bincode,
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
    future,
    prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{
    Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
    account::Account,
    clock::Slot,
    commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator,
    hash::Hash,
    pubkey::Pubkey,
    signature::Signature,
    transaction::{self, Transaction},
};
use std::{
    io,
    net::{Ipv4Addr, SocketAddr},
    sync::{
        mpsc::{channel, Receiver, Sender},
        Arc, RwLock,
    },
    thread::Builder,
    time::Duration,
};
use tarpc::{
    context::Context,
    rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
    serde_transport::tcp,
    server::{self, Channel, Handler},
    transport,
};
use tokio::time::sleep;
use tokio_serde::formats::Bincode;

#[derive(Clone)]
struct BanksServer {
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    transaction_sender: Sender<TransactionInfo>,
    poll_signature_status_sleep_duration: Duration,
}

impl BanksServer {
@@ -63,13 +54,11 @@ impl BanksServer {
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        transaction_sender: Sender<TransactionInfo>,
        poll_signature_status_sleep_duration: Duration,
    ) -> Self {
        Self {
            bank_forks,
            block_commitment_cache,
            transaction_sender,
            poll_signature_status_sleep_duration,
        }
    }

@@ -84,7 +73,7 @@ impl BanksServer {
            .map(|info| deserialize(&info.wire_transaction).unwrap())
            .collect();
        let bank = bank_forks.read().unwrap().working_bank();
        let _ = bank.try_process_transactions(transactions.iter());
        let _ = bank.process_transactions(&transactions);
    }
}

@@ -92,9 +81,8 @@ impl BanksServer {
    fn new_loopback(
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        poll_signature_status_sleep_duration: Duration,
    ) -> Self {
        let (transaction_sender, transaction_receiver) = unbounded();
        let (transaction_sender, transaction_receiver) = channel();
        let bank = bank_forks.read().unwrap().working_bank();
        let slot = bank.slot();
        {
@@ -107,12 +95,7 @@ impl BanksServer {
            .name("solana-bank-forks-client".to_string())
            .spawn(move || Self::run(server_bank_forks, transaction_receiver))
            .unwrap();
        Self::new(
            bank_forks,
            block_commitment_cache,
            transaction_sender,
            poll_signature_status_sleep_duration,
        )
        Self::new(bank_forks, block_commitment_cache, transaction_sender)
    }

    fn slot(&self, commitment: CommitmentLevel) -> Slot {
@@ -137,7 +120,7 @@ impl BanksServer {
            .bank(commitment)
            .get_signature_status_with_blockhash(signature, blockhash);
        while status.is_none() {
            sleep(self.poll_signature_status_sleep_duration).await;
            sleep(Duration::from_millis(200)).await;
            let bank = self.bank(commitment);
            if bank.block_height() > last_valid_block_height {
                break;
@@ -150,11 +133,11 @@ impl BanksServer {

fn verify_transaction(
    transaction: &Transaction,
    feature_set: &Arc<FeatureSet>,
    libsecp256k1_0_5_upgrade_enabled: bool,
) -> transaction::Result<()> {
    if let Err(err) = transaction.verify() {
        Err(err)
    } else if let Err(err) = transaction.verify_precompiles(feature_set) {
    } else if let Err(err) = transaction.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) {
        Err(err)
    } else {
        Ok(())
@@ -177,8 +160,6 @@ impl Banks for BanksServer {
            signature,
            serialize(&transaction).unwrap(),
            last_valid_block_height,
            None,
            None,
        );
        self.transaction_sender.send(info).unwrap();
    }
@@ -189,16 +170,11 @@ impl Banks for BanksServer {
        commitment: CommitmentLevel,
    ) -> (FeeCalculator, Hash, u64) {
        let bank = self.bank(commitment);
        let blockhash = bank.last_blockhash();
        let lamports_per_signature = bank.get_lamports_per_signature();
        let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
        let last_valid_block_height = bank
            .get_blockhash_last_valid_block_height(&blockhash)
            .unwrap();
        (
            FeeCalculator::new(lamports_per_signature),
            blockhash,
            last_valid_block_height,
        )
        (fee_calculator, blockhash, last_valid_block_height)
    }

    async fn get_transaction_status_with_context(
@@ -245,62 +221,25 @@ impl Banks for BanksServer {
        self.bank(commitment).block_height()
    }

    async fn process_transaction_with_preflight_and_commitment_and_context(
        self,
        ctx: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> BanksTransactionResultWithSimulation {
        let sanitized_transaction =
            match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
                Err(err) => {
                    return BanksTransactionResultWithSimulation {
                        result: Some(Err(err)),
                        simulation_details: None,
                    };
                }
                Ok(tx) => tx,
            };
        if let TransactionSimulationResult {
            result: Err(err),
            logs,
            post_simulation_accounts: _,
            units_consumed,
            return_data,
        } = self
            .bank(commitment)
            .simulate_transaction_unchecked(sanitized_transaction)
        {
            return BanksTransactionResultWithSimulation {
                result: Some(Err(err)),
                simulation_details: Some(TransactionSimulationDetails {
                    logs,
                    units_consumed,
                    return_data,
                }),
            };
        }
        BanksTransactionResultWithSimulation {
            result: self
                .process_transaction_with_commitment_and_context(ctx, transaction, commitment)
                .await,
            simulation_details: None,
        }
    }

    async fn process_transaction_with_commitment_and_context(
        self,
        _: Context,
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> Option<transaction::Result<()>> {
        if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
        if let Err(err) = verify_transaction(
            &transaction,
            self.bank(commitment).libsecp256k1_0_5_upgrade_enabled(),
        ) {
            return Some(Err(err));
        }

        let blockhash = &transaction.message.recent_blockhash;
        let last_valid_block_height = self
            .bank(commitment)
            .bank_forks
            .read()
            .unwrap()
            .root_bank()
            .get_blockhash_last_valid_block_height(blockhash)
            .unwrap();
        let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
@@ -308,8 +247,6 @@ impl Banks for BanksServer {
            signature,
            serialize(&transaction).unwrap(),
            last_valid_block_height,
            None,
            None,
        );
        self.transaction_sender.send(info).unwrap();
        self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
@@ -325,47 +262,17 @@ impl Banks for BanksServer {
        let bank = self.bank(commitment);
        bank.get_account(&address).map(Account::from)
    }

    async fn get_latest_blockhash_with_context(self, _: Context) -> Hash {
        let bank = self.bank(CommitmentLevel::default());
        bank.last_blockhash()
    }

    async fn get_latest_blockhash_with_commitment_and_context(
        self,
        _: Context,
        commitment: CommitmentLevel,
    ) -> Option<(Hash, u64)> {
        let bank = self.bank(commitment);
        let blockhash = bank.last_blockhash();
        let last_valid_block_height = bank.get_blockhash_last_valid_block_height(&blockhash)?;
        Some((blockhash, last_valid_block_height))
    }

    async fn get_fee_for_message_with_commitment_and_context(
        self,
        _: Context,
        commitment: CommitmentLevel,
        message: Message,
    ) -> Option<u64> {
        let bank = self.bank(commitment);
        let sanitized_message = SanitizedMessage::try_from(message).ok()?;
        bank.get_fee_for_message(&sanitized_message)
    }
}

pub async fn start_local_server(
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    poll_signature_status_sleep_duration: Duration,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
    let banks_server = BanksServer::new_loopback(
        bank_forks,
        block_commitment_cache,
        poll_signature_status_sleep_duration,
    );
    let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
    let (client_transport, server_transport) = transport::channel::unbounded();
    let server = server::BaseChannel::with_defaults(server_transport).execute(banks_server.serve());
    let server = server::new(server::Config::default())
        .incoming(stream::once(future::ready(server_transport)))
        .respond_with(banks_server.serve());
    tokio::spawn(server);
    client_transport
}
@@ -392,25 +299,13 @@ pub async fn start_tcp_server(
        // serve is generated by the service attribute. It takes as input any type implementing
        // the generated Banks trait.
        .map(move |chan| {
            let (sender, receiver) = unbounded();
            let (sender, receiver) = channel();

            SendTransactionService::new::<NullTpuInfo>(
                tpu_addr,
                &bank_forks,
                None,
                receiver,
                5_000,
                0,
                DEFAULT_TPU_USE_QUIC,
            );
            SendTransactionService::new(tpu_addr, &bank_forks, receiver);

            let server = BanksServer::new(
                bank_forks.clone(),
                block_commitment_cache.clone(),
                sender,
                Duration::from_millis(200),
            );
            chan.execute(server.serve())
            let server =
                BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
            chan.respond_with(server.serve()).execute()
        })
        // Max 10 channels.
        .buffer_unordered(10)
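`poll_signature_status` above loops: check for a status, give up once the chain passes the transaction's last valid block height, otherwise sleep and retry (one side of the diff hardcodes a 200ms sleep, the other threads through `poll_signature_status_sleep_duration`). A synchronous, std-only sketch of that loop; `get_status` and `block_height` are hypothetical stand-ins for the bank queries:

```rust
use std::{thread, time::Duration};

fn poll_status(
    mut get_status: impl FnMut() -> Option<Result<(), String>>,
    mut block_height: impl FnMut() -> u64,
    last_valid_block_height: u64,
    sleep_duration: Duration,
) -> Option<Result<(), String>> {
    loop {
        if let Some(status) = get_status() {
            return Some(status); // the transaction landed (Ok or Err)
        }
        if block_height() > last_valid_block_height {
            return None; // blockhash expired; the transaction can no longer land
        }
        thread::sleep(sleep_duration);
    }
}

fn main() {
    let mut polls = 0;
    let status = poll_status(
        move || {
            polls += 1;
            (polls >= 3).then(|| Ok(())) // status appears on the third poll
        },
        || 10,
        100,
        Duration::from_millis(1),
    );
    assert_eq!(status, Some(Ok(())));
}
```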
@@ -1,3 +1,7 @@
#![allow(clippy::integer_arithmetic)]
pub mod banks_server;
pub mod rpc_banks_service;
pub mod send_transaction_service;

#[macro_use]
extern crate solana_metrics;
@@ -1,23 +1,21 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.

use {
    crate::banks_server::start_tcp_server,
    futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
    solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
    std::{
        net::SocketAddr,
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc, RwLock,
        },
        thread::{self, Builder, JoinHandle},
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
    net::SocketAddr,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, RwLock,
    },
    tokio::{
        runtime::Runtime,
        time::{self, Duration},
    },
    tokio_stream::wrappers::IntervalStream,
    thread::{self, Builder, JoinHandle},
};
use tokio::{
    runtime::Runtime,
    time::{self, Duration},
};
use tokio_stream::wrappers::IntervalStream;

pub struct RpcBanksService {
    thread_hdl: JoinHandle<()>,
@@ -103,11 +101,12 @@ impl RpcBanksService {

#[cfg(test)]
mod tests {
    use {super::*, solana_runtime::bank::Bank};
    use super::*;
    use solana_runtime::bank::Bank;

    #[test]
    fn test_rpc_banks_server_exit() {
        let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default_for_tests())));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default())));
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let exit = Arc::new(AtomicBool::new(false));
        let addr = "127.0.0.1:0".parse().unwrap();
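The service above drives its work from a timer (`IntervalStream`) and exits when an `AtomicBool` flips. A hedged sketch of that tick-until-exit shape, assuming `tokio = { version = "1", features = ["full"] }`; the real service additionally `select!`s the server future against the interval stream:

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use tokio::time::{interval, sleep, Duration};

#[tokio::main]
async fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let exit_setter = exit.clone();
    tokio::spawn(async move {
        sleep(Duration::from_millis(20)).await;
        exit_setter.store(true, Ordering::Relaxed); // external shutdown signal
    });

    let mut ticker = interval(Duration::from_millis(5));
    while !exit.load(Ordering::Relaxed) {
        ticker.tick().await; // one unit of service work per tick
    }
}
```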
banks-server/src/send_transaction_service.rs (new file, 347 lines)
@@ -0,0 +1,347 @@
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::signature::Signature;
use std::{
    collections::HashMap,
    net::{SocketAddr, UdpSocket},
    sync::{
        mpsc::{Receiver, RecvTimeoutError},
        Arc, RwLock,
    },
    thread::{self, Builder, JoinHandle},
    time::{Duration, Instant},
};

/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day

pub struct SendTransactionService {
    thread: JoinHandle<()>,
}

pub struct TransactionInfo {
    pub signature: Signature,
    pub wire_transaction: Vec<u8>,
    pub last_valid_block_height: u64,
}

impl TransactionInfo {
    pub fn new(
        signature: Signature,
        wire_transaction: Vec<u8>,
        last_valid_block_height: u64,
    ) -> Self {
        Self {
            signature,
            wire_transaction,
            last_valid_block_height,
        }
    }
}

#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
    rooted: u64,
    expired: u64,
    retried: u64,
    failed: u64,
    retained: u64,
}

impl SendTransactionService {
    pub fn new(
        tpu_address: SocketAddr,
        bank_forks: &Arc<RwLock<BankForks>>,
        receiver: Receiver<TransactionInfo>,
    ) -> Self {
        let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address);
        Self { thread }
    }

    fn retry_thread(
        receiver: Receiver<TransactionInfo>,
        bank_forks: Arc<RwLock<BankForks>>,
        tpu_address: SocketAddr,
    ) -> JoinHandle<()> {
        let mut last_status_check = Instant::now();
        let mut transactions = HashMap::new();
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        Builder::new()
            .name("send-tx-svc".to_string())
            .spawn(move || loop {
                match receiver.recv_timeout(Duration::from_secs(1)) {
                    Err(RecvTimeoutError::Disconnected) => break,
                    Err(RecvTimeoutError::Timeout) => {}
                    Ok(transaction_info) => {
                        Self::send_transaction(
                            &send_socket,
                            &tpu_address,
                            &transaction_info.wire_transaction,
                        );
                        if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
                            transactions.insert(transaction_info.signature, transaction_info);
                        } else {
                            datapoint_warn!("send_transaction_service-queue-overflow");
                        }
                    }
                }

                if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
                    if !transactions.is_empty() {
                        datapoint_info!(
                            "send_transaction_service-queue-size",
                            ("len", transactions.len(), i64)
                        );
                        let bank_forks = bank_forks.read().unwrap();
                        let root_bank = bank_forks.root_bank();
                        let working_bank = bank_forks.working_bank();

                        let _result = Self::process_transactions(
                            &working_bank,
                            &root_bank,
                            &send_socket,
                            &tpu_address,
                            &mut transactions,
                        );
                    }
                    last_status_check = Instant::now();
                }
            })
            .unwrap()
    }

    fn process_transactions(
        working_bank: &Arc<Bank>,
        root_bank: &Arc<Bank>,
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        transactions: &mut HashMap<Signature, TransactionInfo>,
    ) -> ProcessTransactionsResult {
        let mut result = ProcessTransactionsResult::default();

        transactions.retain(|signature, transaction_info| {
            if root_bank.has_signature(signature) {
                info!("Transaction is rooted: {}", signature);
                result.rooted += 1;
                inc_new_counter_info!("send_transaction_service-rooted", 1);
                false
            } else if transaction_info.last_valid_block_height < root_bank.block_height() {
                info!("Dropping expired transaction: {}", signature);
                result.expired += 1;
                inc_new_counter_info!("send_transaction_service-expired", 1);
                false
            } else {
                match working_bank.get_signature_status_slot(signature) {
                    None => {
                        // Transaction is unknown to the working bank, it might have been
                        // dropped or landed in another fork. Re-send it
                        info!("Retrying transaction: {}", signature);
                        result.retried += 1;
                        inc_new_counter_info!("send_transaction_service-retry", 1);
                        Self::send_transaction(
                            send_socket,
                            tpu_address,
                            &transaction_info.wire_transaction,
                        );
                        true
                    }
                    Some((_slot, status)) => {
                        if status.is_err() {
                            info!("Dropping failed transaction: {}", signature);
                            result.failed += 1;
                            inc_new_counter_info!("send_transaction_service-failed", 1);
                            false
                        } else {
                            result.retained += 1;
                            true
                        }
                    }
                }
            }
        });

        result
    }

    fn send_transaction(
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        wire_transaction: &[u8],
    ) {
        if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
            warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
        }
    }

    pub fn join(self) -> thread::Result<()> {
        self.thread.join()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::{
        genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
        system_transaction,
    };
    use std::sync::mpsc::channel;

    #[test]
    fn service_exit() {
        let tpu_address = "127.0.0.1:0".parse().unwrap();
        let bank = Bank::default();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let (sender, receiver) = channel();

        let send_transaction_service =
            SendTransactionService::new(tpu_address, &bank_forks, receiver);

        drop(sender);
        send_transaction_service.join().unwrap();
    }

    #[test]
    fn process_transactions() {
        let (genesis_config, mint_keypair) = create_genesis_config(4);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let tpu_address = "127.0.0.1:0".parse().unwrap();

        let root_bank = Arc::new(Bank::new_from_parent(
            &bank_forks.read().unwrap().working_bank(),
            &Pubkey::default(),
            1,
        ));
        let rooted_signature = root_bank
            .transfer(1, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));

        let non_rooted_signature = working_bank
            .transfer(2, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let failed_signature = {
            let blockhash = working_bank.last_blockhash();
            let transaction =
                system_transaction::transfer(&mint_keypair, &Pubkey::default(), 1, blockhash);
            let signature = transaction.signatures[0];
            working_bank.process_transaction(&transaction).unwrap_err();
            signature
        };

        let mut transactions = HashMap::new();

        info!("Expired transactions are dropped..");
        transactions.insert(
            Signature::default(),
            TransactionInfo::new(Signature::default(), vec![], root_bank.slot() - 1),
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                expired: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Rooted transactions are dropped...");
        transactions.insert(
            rooted_signature,
            TransactionInfo::new(rooted_signature, vec![], working_bank.slot()),
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                rooted: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Failed transactions are dropped...");
        transactions.insert(
            failed_signature,
            TransactionInfo::new(failed_signature, vec![], working_bank.slot()),
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                failed: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Non-rooted transactions are kept...");
        transactions.insert(
            non_rooted_signature,
            TransactionInfo::new(non_rooted_signature, vec![], working_bank.slot()),
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retained: 1,
                ..ProcessTransactionsResult::default()
            }
        );
        transactions.clear();

        info!("Unknown transactions are retried...");
        transactions.insert(
            Signature::default(),
            TransactionInfo::new(Signature::default(), vec![], working_bank.slot()),
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retried: 1,
                ..ProcessTransactionsResult::default()
            }
        );
    }
}
4 bench-exchange/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
/target/
/config/
/config-local/
/farf/
40 bench-exchange/Cargo.toml Normal file
@@ -0,0 +1,40 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.7.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false

[dependencies]
clap = "2.33.1"
itertools = "0.9.0"
log = "0.4.11"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.10" }
solana-core = { path = "../core", version = "=1.7.10" }
solana-genesis = { path = "../genesis", version = "=1.7.10" }
solana-client = { path = "../client", version = "=1.7.10" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.10" }
solana-faucet = { path = "../faucet", version = "=1.7.10" }
solana-gossip = { path = "../gossip", version = "=1.7.10" }
solana-logger = { path = "../logger", version = "=1.7.10" }
solana-metrics = { path = "../metrics", version = "=1.7.10" }
solana-net-utils = { path = "../net-utils", version = "=1.7.10" }
solana-runtime = { path = "../runtime", version = "=1.7.10" }
solana-sdk = { path = "../sdk", version = "=1.7.10" }
solana-streamer = { path = "../streamer", version = "=1.7.10" }
solana-version = { path = "../version", version = "=1.7.10" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.7.10" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
479 bench-exchange/README.md Normal file
@@ -0,0 +1,479 @@
# token-exchange
Solana Token Exchange Bench

If you can't wait, jump to [Running the exchange](#Running-the-exchange) to
learn how to start and interact with the exchange.

### Table of Contents
[Overview](#Overview)<br>
[Premise](#Premise)<br>
[Exchange startup](#Exchange-startup)<br>
[Order Requests](#Order-Requests)<br>
[Order cancellations](#Order-cancellations)<br>
[Trade swaps](#Trade-swaps)<br>
[Exchange program operations](#Exchange-program-operations)<br>
[Quotes and OHLCV](#Quotes-and-OHLCV)<br>
[Investor strategies](#Investor-strategies)<br>
[Running the exchange](#Running-the-exchange)<br>

## Overview

An exchange is a marketplace where one asset can be traded for another. This
demo shows one way to host an exchange on the Solana blockchain by emulating
a currency exchange.

The assets are virtual tokens held by investors, who may post order requests to
the exchange. A Matcher monitors the exchange and posts swap requests for
matching orders. All the transactions can execute concurrently.

## Premise

- Exchange
  - An exchange is a marketplace where one asset can be traded for another.
    The exchange in this demo is the on-chain program that implements the
    tokens and the policies for trading those tokens.
- Token
  - A virtual asset that can be owned and traded, and that holds virtual
    intrinsic value relative to other assets. There are four types of tokens
    in this demo: A, B, C, and D. Each one may be traded for another.
- Token account
  - An account owned by the exchange that holds a quantity of one type of token.
- Account request
  - A request to create a token account.
- Token request
  - A request to deposit tokens of a particular type into a token account.
- Asset pair
  - A struct with fields Base and Quote, representing the two assets that make up a
    trading pair, which themselves are Tokens. The Base or 'primary' asset is the
    numerator and the Quote is the denominator for pricing purposes.
- Order side
  - Describes which side of the market an investor wants to place a trade on. Options
    are "Bid" or "Ask", where a bid represents an offer to purchase the Base asset of
    the AssetPair for a sum of the Quote asset, and an ask is an offer to sell the
    Base asset for the Quote asset.
- Price ratio
  - An expression of the relative prices of two tokens. Calculated with the Base
    asset as the numerator and the Quote asset as the denominator. Ratios are
    represented as fixed-point numbers; see the sketch after this list. The
    fixed-point scaler is defined in
    [exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
- Order request
  - A Solana transaction sent by a trader to the exchange to submit an order.
    Order requests are made up of the token pair, the order side (bid or ask),
    the quantity of the primary token, the price ratio, and the two token accounts
    to be credited/deducted. An example trade request looks like "T AB 5 2",
    which reads "Exchange 5 A tokens to B tokens at a price ratio of 1:2". A
    fulfilled trade would result in 5 A tokens deducted from and 10 B tokens
    credited to the trade initiator's token accounts. Successful order requests
    result in an order.
- Order
  - The result of a successful order request. Orders are stored in
    accounts owned by the submitter of the order request. They can only be
    canceled by their owner but can be used by anyone in a trade swap. They
    contain the same information as the order request.
- Price spread
  - The difference in price between two matching orders. The spread is the
    profit of the Matcher initiating the swap request.
- Match requirements
  - Policies that result in a successful trade swap.
- Match request
  - A request to fill two complementary orders (bid/ask), which, if successful,
    results in a trade being created.
- Trade
  - A successful trade is created from two matching orders that meet
    swap requirements, which are submitted in a Match Request by a Matcher and
    executed by the exchange. A trade may not wholly satisfy one or both of the
    orders, in which case the orders are adjusted appropriately. Upon execution,
    tokens are distributed to the traders' accounts and any overlap or
    "negative spread" between orders is deposited into the Matcher's profit
    account. All successful trades are recorded in the data of a new Solana
    account for posterity.
- Investor
  - Individual investors who hold a number of tokens and wish to trade them on
    the exchange. Investors operate as Solana thin clients who own a set of
    accounts containing tokens and/or order requests. Investors post
    transactions to the exchange in order to request tokens and post or cancel
    order requests.
- Matcher
  - An agent who facilitates trading between investors. Matchers operate as
    Solana thin clients who monitor all the orders, looking for a trade
    match. Once found, the Matcher issues a swap request to the exchange.
    Matchers are the engine of the exchange and are rewarded for their efforts by
    accumulating the price spreads of the swaps they initiate. Matchers also
    provide current bid/ask price and OHLCV (Open, High, Low, Close, Volume)
    information on demand via a public network port.
- Transaction fees
  - Solana transaction fees are paid by the transaction submitters, who are
    the Investors and Matchers.
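
The fixed-point price convention above is easy to get wrong, so here is a
minimal, self-contained sketch of the arithmetic. The `SCALER` value below is
assumed for illustration; the real constant lives in `exchange_state.rs` and
may differ.

```rust
// Assumed scaler; see exchange_state.rs for the program's actual value.
const SCALER: u64 = 1_000;

/// Secondary tokens owed for `tokens` primary tokens at scaled `price`
/// (the primary side is fixed at SCALER).
fn secondary_for_primary(tokens: u64, price: u64) -> u64 {
    tokens * price / SCALER
}

fn main() {
    // "T AB 5 2": a 1:2 ratio is price = 2 * SCALER, so 5 A tokens
    // trade for 10 B tokens, matching the Premise example.
    assert_eq!(secondary_for_primary(5, 2 * SCALER), 10);
}
```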

## Exchange startup

The exchange is up and running when it reaches a state where it can take
investors' trades and Matchers' match requests. To achieve this state, the
following must occur in order:

- Start the Solana blockchain
- Start the thin client
- The Matcher subscribes to change notifications for all the accounts owned by
  the exchange program id. The subscription is managed via Solana's JSON RPC
  interface.
- The Matcher starts responding to queries for bid/ask price and OHLCV

The Matcher responding successfully to price and OHLCV requests is the signal to
the investors that trades submitted after that point will be analyzed. <!--This
is not ideal; instead, investors should be able to submit trades at any time,
and the Matcher could come and go without missing a trade. One way to achieve
this is for the Matcher to read the current state of all accounts, looking for
all open orders.-->

Investors will initially query the exchange to discover their current balance
for each type of token. If an investor does not already have an account for
each type of token, they will submit account requests. Matchers will likewise
request accounts to hold the tokens they earn by initiating trade swaps.

```rust
/// Supported token types
pub enum Token {
    A,
    B,
    C,
    D,
}

/// Supported token pairs
pub enum TokenPair {
    AB,
    AC,
    AD,
    BC,
    BD,
    CD,
}

pub enum ExchangeInstruction {
    /// New token account
    /// key 0 - Signer
    /// key 1 - New token account
    AccountRequest,
}

/// Token accounts are populated with this structure
pub struct TokenAccountInfo {
    /// Investor who owns this account
    pub owner: Pubkey,
    /// Current number of tokens this account holds
    pub tokens: Tokens,
}
```

For this demo, investors or Matchers can request more tokens from the exchange at
any time by submitting token requests. Outside of a demo, an exchange of this
type would instead provide a way to convert a third-party asset into tokens.

To request tokens, investors submit transfer requests:

```rust
pub enum ExchangeInstruction {
    /// Transfer tokens between two accounts
    /// key 0 - Account to transfer tokens to
    /// key 1 - Account to transfer tokens from. This can be the exchange program itself;
    ///         the exchange has a limitless number of tokens it can transfer.
    TransferRequest(Token, u64),
}
```

## Order Requests

When an investor decides to exchange a token of one type for another, they
submit a transaction to the Solana blockchain containing an order request,
which, if successful, is turned into an order. Orders do not expire but are
cancellable. <!-- Orders should have a timestamp to enable trade
expiration --> When an order is created, tokens are deducted from a token
account and the order acts as an escrow. The tokens are held until the
order is fulfilled or canceled. If the direction is `To`, the number of
`tokens` is deducted from the primary account; if `From`, `tokens`
multiplied by `price` are deducted from the secondary account. Orders are
no longer valid when the number of `tokens` goes to zero, at which point they
can no longer be used. <!-- Could support refilling orders, so order
accounts are refilled rather than accumulating -->
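
A minimal sketch of that escrow rule (not the on-chain program itself), reusing
the assumed `SCALER` convention from the Premise section:

```rust
const SCALER: u64 = 1_000; // assumed scaler, as above

enum Direction {
    To,
    From,
}

/// Returns (amount escrowed from the primary account,
///          amount escrowed from the secondary account).
fn escrow_amounts(direction: &Direction, tokens: u64, price: u64) -> (u64, u64) {
    match direction {
        // `To` escrows the primary tokens themselves.
        Direction::To => (tokens, 0),
        // `From` escrows `tokens * price` worth of the secondary token.
        Direction::From => (0, tokens * price / SCALER),
    }
}

fn main() {
    // At a 1:2 ratio, a 5-token order escrows 5 primary (To)
    // or 10 secondary (From) tokens.
    assert_eq!(escrow_amounts(&Direction::To, 5, 2 * SCALER), (5, 0));
    assert_eq!(escrow_amounts(&Direction::From, 5, 2 * SCALER), (0, 10));
}
```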

```rust
/// Direction of the exchange between two tokens in a pair
pub enum Direction {
    /// Trade first token type (primary) in the pair 'To' the second
    To,
    /// Trade first token type in the pair 'From' the second (secondary)
    From,
}

pub struct OrderRequestInfo {
    /// Direction of trade
    pub direction: Direction,

    /// Token pair to trade
    pub pair: TokenPair,

    /// Number of tokens to exchange; refers to the primary or the secondary depending on the direction
    pub tokens: u64,

    /// The price ratio of the primary price over the secondary price. The primary price is fixed
    /// and equal to the variable `SCALER`.
    pub price: u64,

    /// Token account to deposit tokens into on a successful swap
    pub dst_account: Pubkey,
}

pub enum ExchangeInstruction {
    /// Order request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - Token account associated with this trade
    TradeRequest(TradeRequestInfo),
}

/// Trade accounts are populated with this structure
pub struct TradeOrderInfo {
    /// Owner of the order
    pub owner: Pubkey,
    /// Direction of the exchange
    pub direction: Direction,
    /// Token pair indicating two tokens to exchange, first is primary
    pub pair: TokenPair,
    /// Number of tokens to exchange; primary or secondary depending on direction
    pub tokens: u64,
    /// Scaled price of the secondary token given the primary is equal to the scale value.
    /// If scale is 1 and price is 2, the ratio is 1:2, i.e. 1 primary token for 2 secondary tokens.
    pub price: u64,
    /// Account the tokens were sourced from. The trade account holds the tokens in escrow
    /// until they are used in one or more swaps or the trade is canceled.
    pub src_account: Pubkey,
    /// Account the tokens will be deposited into on a successful trade
    pub dst_account: Pubkey,
}
```

## Order cancellations

An investor may cancel a trade at any time, but only trades they own. If the
cancellation is successful, any tokens held in escrow are returned to the
account from which they came.

```rust
pub enum ExchangeInstruction {
    /// Order cancellation
    /// key 0 - Signer
    /// key 1 - Order to cancel
    TradeCancellation,
}
```

## Trade swaps

The Matcher monitors the accounts assigned to the exchange program and
builds a trade-order table. The order table is used to identify
matching orders which could be fulfilled. When a match is found, the
Matcher should issue a swap request. Swap requests may not satisfy the entirety
of either order, but the exchange will greedily fulfill them. Any leftover tokens
in either account will keep the order valid for further swap requests in
the future.

Matching orders are defined by the following swap requirements (sketched in
code below):

- Opposite polarity (one `To` and one `From`)
- Operate on the same token pair
- The price ratio of the `From` order is greater than or equal to that of the `To` order
- There are sufficient tokens to perform the trade
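
A minimal sketch of the matching predicate and the Matcher's cut. The field
names and `SCALER` value are illustrative, not the program's actual types:

```rust
const SCALER: u64 = 1_000; // assumed scaler, as above

struct Order {
    tokens: u64, // primary tokens remaining
    price: u64,  // scaled price ratio
}

/// Two same-pair, opposite-polarity orders match when the `From`
/// price covers the `To` price and both have tokens left.
fn matches(to: &Order, from: &Order) -> bool {
    from.price >= to.price && to.tokens > 0 && from.tokens > 0
}

/// Secondary tokens the Matcher keeps: the price spread times the fill size.
fn matcher_profit(to: &Order, from: &Order) -> u64 {
    let size = to.tokens.min(from.tokens);
    size * (from.price - to.price) / SCALER
}

fn main() {
    // Row 1 of the first table below, `1 T AB 2 4` vs `2 F AB 2 8`
    // (whole prices, scaled here): a spread of 4 on 2 tokens is 8 B profit.
    let to = Order { tokens: 2, price: 4 * SCALER };
    let from = Order { tokens: 2, price: 8 * SCALER };
    assert!(matches(&to, &from));
    assert_eq!(matcher_profit(&to, &from), 8);
}
```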

Orders can be written in the following format:

`investor direction pair quantity price-ratio`

For example:

- `1 T AB 2 1`
  - Investor 1 wishes to exchange 2 A tokens to B tokens at a ratio of 1 A to
    1 B
- `2 F AB 6 1.2`
  - Investor 2 wishes to exchange A tokens from 6 B tokens at a ratio of 1 A
    from 1.2 B

An order table could look something like the following. Notice how the columns
are sorted low to high and high to low, respectively. Prices are exaggerated
and whole for clarity.

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 2 4  | 2 F AB 2 8 |
| 2 | 1 T AB 1 4  | 2 F AB 2 8 |
| 3 | 1 T AB 6 6  | 2 F AB 2 7 |
| 4 | 1 T AB 2 8  | 2 F AB 3 6 |
| 5 | 1 T AB 2 10 | 2 F AB 1 5 |

As part of a successful swap request, the exchange will credit tokens to the
Matcher's account equal to the difference in the price ratios of the two orders.
These tokens are considered the Matcher's profit for initiating the trade.

The Matcher would initiate the following swap on the order table above:

- Row 1, To: Investor 1 trades 2 A tokens to 8 B tokens
- Row 1, From: Investor 2 trades 2 A tokens from 8 B tokens
- Matcher takes 8 B tokens as profit

Both row 1 trades are fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 1 4  | 2 F AB 2 8 |
| 2 | 1 T AB 6 6  | 2 F AB 2 7 |
| 3 | 1 T AB 2 8  | 2 F AB 3 6 |
| 4 | 1 T AB 2 10 | 2 F AB 1 5 |

The Matcher would initiate the following swap:

- Row 1, To: Investor 1 trades 1 A token to 4 B tokens
- Row 1, From: Investor 2 trades 1 A token from 4 B tokens
- Matcher takes 4 B tokens as profit

Row 1 From is not fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 6 6  | 2 F AB 1 8 |
| 2 | 1 T AB 2 8  | 2 F AB 2 7 |
| 3 | 1 T AB 2 10 | 2 F AB 3 6 |
| 4 |             | 2 F AB 1 5 |

The Matcher would initiate the following swap:

- Row 1, To: Investor 1 trades 1 A token to 6 B tokens
- Row 1, From: Investor 2 trades 1 A token from 6 B tokens
- Matcher takes 2 B tokens as profit

Row 1 From is now fully realized, and the table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 5 6  | 2 F AB 2 7 |
| 2 | 1 T AB 2 8  | 2 F AB 3 5 |
| 3 | 1 T AB 2 10 | 2 F AB 1 5 |

The Matcher would initiate the following, last swap:

- Row 1, To: Investor 1 trades 2 A tokens to 12 B tokens
- Row 1, From: Investor 2 trades 2 A tokens from 12 B tokens
- Matcher takes 2 B tokens as profit

The table becomes:

|Row| To          | From       |
|---|-------------|------------|
| 1 | 1 T AB 3 6  | 2 F AB 3 5 |
| 2 | 1 T AB 2 8  | 2 F AB 1 5 |
| 3 | 1 T AB 2 10 |            |

At this point the lowest To price is larger than the largest From price, so
no more swaps would be initiated until new orders come in.

```rust
pub enum ExchangeInstruction {
    /// Trade swap request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - `To` order
    /// key 3 - `From` order
    /// key 4 - Token account associated with the `To` trade
    /// key 5 - Token account associated with the `From` trade
    /// key 6 - Token account in which to deposit the Matcher's profit from the swap
    SwapRequest,
}

/// Swap accounts are populated with this structure
pub struct TradeSwapInfo {
    /// Pair swapped
    pub pair: TokenPair,
    /// `To` order
    pub to_trade_order: Pubkey,
    /// `From` order
    pub from_trade_order: Pubkey,
    /// Number of primary tokens exchanged
    pub primary_tokens: u64,
    /// Price the primary tokens were exchanged for
    pub primary_price: u64,
    /// Number of secondary tokens exchanged
    pub secondary_tokens: u64,
    /// Price the secondary tokens were exchanged for
    pub secondary_price: u64,
}
```

## Exchange program operations

Putting all the commands together from above, the following operations will be
supported by the on-chain exchange program:

```rust
pub enum ExchangeInstruction {
    /// New token account
    /// key 0 - Signer
    /// key 1 - New token account
    AccountRequest,

    /// Transfer tokens between two accounts
    /// key 0 - Account to transfer tokens to
    /// key 1 - Account to transfer tokens from. This can be the exchange program itself;
    ///         the exchange has a limitless number of tokens it can transfer.
    TransferRequest(Token, u64),

    /// Order request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - Token account associated with this trade
    TradeRequest(TradeRequestInfo),

    /// Order cancellation
    /// key 0 - Signer
    /// key 1 - Order to cancel
    TradeCancellation,

    /// Trade swap request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - `To` order
    /// key 3 - `From` order
    /// key 4 - Token account associated with the `To` trade
    /// key 5 - Token account associated with the `From` trade
    /// key 6 - Token account in which to deposit the Matcher's profit from the swap
    SwapRequest,
}
```

## Quotes and OHLCV

The Matcher will provide current bid/ask price quotes based on trade activity,
and will also provide OHLCV data over some time window. The details of how the
bid/ask price quotes are calculated are yet to be decided.

## Investor strategies

To make a compelling demo, the investors need to exhibit interesting trading
behavior. Something as simple as a randomly twiddled baseline (sketched below)
would be a minimal starting point.
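
One way to read "randomly twiddled baseline": each investor quotes around a
base price that drifts by a small random step per tick. This sketch is
illustrative only; the bench does not currently implement it. It uses `rand`,
which is already a dependency of this crate:

```rust
use rand::Rng;

/// Base price drifting by -1, 0, or +1 per tick, floored at 1.
fn twiddled_baseline(base: u64, ticks: usize) -> Vec<u64> {
    let mut rng = rand::thread_rng();
    let mut price = base as i64;
    (0..ticks)
        .map(|_| {
            price = (price + rng.gen_range(-1, 2)).max(1); // rand 0.7 API
            price as u64
        })
        .collect()
}

fn main() {
    println!("{:?}", twiddled_baseline(1_000, 10));
}
```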

## Running the exchange

The exchange bench posts trades and swap matches as fast as it can.

You might want to bump the duration up to 60 seconds and the batch size to
1000 for better numbers. You can modify those in
client_demo/src/demo.rs::test_exchange_local_cluster.

The following command runs the bench:

```bash
$ RUST_LOG=solana_bench_exchange=info cargo test --release -- --nocapture test_exchange_local_cluster
```

To also see the cluster messages:

```bash
$ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
```
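
The standalone binary accepts the flags defined in `src/cli.rs` (shown later
in this diff); a plausible invocation against a local cluster looks like the
following, with all flag values illustrative:

```bash
$ cargo run --release --bin solana-bench-exchange -- \
    --entrypoint 127.0.0.1:8001 \
    --faucet 127.0.0.1:9900 \
    --duration 60 \
    --batch-size 1000 \
    --chunk-size 500 \
    --threads 4
```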
1028 bench-exchange/src/bench.rs Normal file
File diff suppressed because it is too large
221 bench-exchange/src/cli.rs Normal file
@@ -0,0 +1,221 @@
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;

pub struct Config {
    pub entrypoint_addr: SocketAddr,
    pub faucet_addr: SocketAddr,
    pub identity: Keypair,
    pub threads: usize,
    pub num_nodes: usize,
    pub duration: Duration,
    pub transfer_delay: u64,
    pub fund_amount: u64,
    pub batch_size: usize,
    pub chunk_size: usize,
    pub account_groups: usize,
    pub client_ids_and_stake_file: String,
    pub write_to_client_file: bool,
    pub read_from_client_file: bool,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
            faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
            identity: Keypair::new(),
            num_nodes: 1,
            threads: 4,
            duration: Duration::new(u64::max_value(), 0),
            transfer_delay: 0,
            fund_amount: 100_000,
            batch_size: 100,
            chunk_size: 100,
            account_groups: 100,
            client_ids_and_stake_file: String::new(),
            write_to_client_file: false,
            read_from_client_file: false,
        }
    }
}

pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
    App::new(crate_name!())
        .about(crate_description!())
        .version(version)
        .arg(
            Arg::with_name("entrypoint")
                .short("n")
                .long("entrypoint")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(false)
                .default_value("127.0.0.1:8001")
                .help("Cluster entry point; defaults to 127.0.0.1:8001"),
        )
        .arg(
            Arg::with_name("faucet")
                .short("d")
                .long("faucet")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(false)
                .default_value("127.0.0.1:9900")
                .help("Location of the faucet; defaults to 127.0.0.1:9900"),
        )
        .arg(
            Arg::with_name("identity")
                .short("i")
                .long("identity")
                .value_name("PATH")
                .takes_value(true)
                .help("File containing a client identity (keypair)"),
        )
        .arg(
            Arg::with_name("threads")
                .long("threads")
                .value_name("<threads>")
                .takes_value(true)
                .required(false)
                .default_value("1")
                .help("Number of threads submitting transactions"),
        )
        .arg(
            Arg::with_name("num-nodes")
                .long("num-nodes")
                .value_name("NUM")
                .takes_value(true)
                .required(false)
                .default_value("1")
                .help("Wait for NUM nodes to converge"),
        )
        .arg(
            Arg::with_name("duration")
                .long("duration")
                .value_name("SECS")
                .takes_value(true)
                .default_value("60")
                .help("Seconds to run benchmark, then exit; default is forever"),
        )
        .arg(
            Arg::with_name("transfer-delay")
                .long("transfer-delay")
                .value_name("<delay>")
                .takes_value(true)
                .required(false)
                .default_value("0")
                .help("Delay between each chunk"),
        )
        .arg(
            Arg::with_name("fund-amount")
                .long("fund-amount")
                .value_name("<fund>")
                .takes_value(true)
                .required(false)
                .default_value("100000")
                .help("Number of lamports to fund to each signer"),
        )
        .arg(
            Arg::with_name("batch-size")
                .long("batch-size")
                .value_name("<batch>")
                .takes_value(true)
                .required(false)
                .default_value("1000")
                .help("Number of transactions before the signer rolls over"),
        )
        .arg(
            Arg::with_name("chunk-size")
                .long("chunk-size")
                .value_name("<chunk>")
                .takes_value(true)
                .required(false)
                .default_value("500")
                .help("Number of transactions to generate and send at a time"),
        )
        .arg(
            Arg::with_name("account-groups")
                .long("account-groups")
                .value_name("<groups>")
                .takes_value(true)
                .required(false)
                .default_value("10")
                .help("Number of account groups to cycle for each batch"),
        )
        .arg(
            Arg::with_name("write-client-keys")
                .long("write-client-keys")
                .value_name("FILENAME")
                .takes_value(true)
                .help("Generate client keys and stakes and write the list to a YAML file"),
        )
        .arg(
            Arg::with_name("read-client-keys")
                .long("read-client-keys")
                .value_name("FILENAME")
                .takes_value(true)
                .help("Read client keys and stakes from the YAML file"),
        )
}

#[allow(clippy::field_reassign_with_default)]
pub fn extract_args(matches: &ArgMatches) -> Config {
    let mut args = Config::default();

    args.entrypoint_addr = solana_net_utils::parse_host_port(
        matches.value_of("entrypoint").unwrap(),
    )
    .unwrap_or_else(|e| {
        eprintln!("failed to parse entrypoint address: {}", e);
        exit(1)
    });

    args.faucet_addr = solana_net_utils::parse_host_port(matches.value_of("faucet").unwrap())
        .unwrap_or_else(|e| {
            eprintln!("failed to parse faucet address: {}", e);
            exit(1)
        });

    if matches.is_present("identity") {
        args.identity = read_keypair_file(matches.value_of("identity").unwrap())
            .expect("can't read client identity");
    } else {
        args.identity = {
            let seed = [42_u8; 32];
            let mut rnd = GenKeys::new(seed);
            rnd.gen_keypair()
        };
    }
    args.threads = value_t!(matches.value_of("threads"), usize).expect("Failed to parse threads");
    args.num_nodes =
        value_t!(matches.value_of("num-nodes"), usize).expect("Failed to parse num-nodes");
    let duration = value_t!(matches.value_of("duration"), u64).expect("Failed to parse duration");
    args.duration = Duration::from_secs(duration);
    args.transfer_delay =
        value_t!(matches.value_of("transfer-delay"), u64).expect("Failed to parse transfer-delay");
    args.fund_amount =
        value_t!(matches.value_of("fund-amount"), u64).expect("Failed to parse fund-amount");
    args.batch_size =
        value_t!(matches.value_of("batch-size"), usize).expect("Failed to parse batch-size");
    args.chunk_size =
        value_t!(matches.value_of("chunk-size"), usize).expect("Failed to parse chunk-size");
    args.account_groups = value_t!(matches.value_of("account-groups"), usize)
        .expect("Failed to parse account-groups");

    if let Some(s) = matches.value_of("write-client-keys") {
        args.write_to_client_file = true;
        args.client_ids_and_stake_file = s.to_string();
    }

    if let Some(s) = matches.value_of("read-client-keys") {
        assert!(!args.write_to_client_file);
        args.read_from_client_file = true;
        args.client_ids_and_stake_file = s.to_string();
    }
    args
}
3 bench-exchange/src/lib.rs Normal file
@@ -0,0 +1,3 @@
pub mod bench;
pub mod cli;
mod order_book;
85 bench-exchange/src/main.rs Normal file
@@ -0,0 +1,85 @@
#![allow(clippy::integer_arithmetic)]
pub mod bench;
mod cli;
pub mod order_book;

use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;
use solana_streamer::socket::SocketAddrSpace;

fn main() {
    solana_logger::setup();
    solana_metrics::set_panic_hook("bench-exchange");

    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
        entrypoint_addr,
        faucet_addr,
        identity,
        threads,
        num_nodes,
        duration,
        transfer_delay,
        fund_amount,
        batch_size,
        chunk_size,
        account_groups,
        client_ids_and_stake_file,
        write_to_client_file,
        read_from_client_file,
        ..
    } = cli_config;

    let config = Config {
        identity,
        threads,
        duration,
        transfer_delay,
        fund_amount,
        batch_size,
        chunk_size,
        account_groups,
        client_ids_and_stake_file,
        read_from_client_file,
    };

    if write_to_client_file {
        create_client_accounts_file(
            &config.client_ids_and_stake_file,
            config.batch_size,
            config.account_groups,
            config.fund_amount,
        );
    } else {
        info!("Connecting to the cluster");
        let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
            .unwrap_or_else(|_| {
                panic!("Failed to discover nodes");
            });

        let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);

        info!("{} nodes found", num_clients);
        if num_clients < num_nodes {
            panic!("Error: Insufficient nodes discovered");
        }

        if !read_from_client_file {
            info!("Funding keypair: {}", config.identity.pubkey());

            let accounts_in_groups = batch_size * account_groups;
            const NUM_SIGNERS: u64 = 2;
            airdrop_lamports(
                &client,
                &faucet_addr,
                &config.identity,
                fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
            );
        }
        do_bench_exchange(vec![client], config);
    }
}
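
A quick sanity check on the airdrop sizing above, using the `Config::default()`
values from src/cli.rs (fund_amount = 100_000, batch_size = 100,
account_groups = 100); the numbers here are just those defaults worked through:

fn main() {
    const NUM_SIGNERS: u64 = 2;
    let (fund_amount, batch_size, account_groups) = (100_000u64, 100usize, 100usize);
    let accounts_in_groups = batch_size * account_groups; // 10_000
    let airdrop = fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS;
    assert_eq!(airdrop, 2_000_200_000); // about 2 SOL worth of lamports
}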
134 bench-exchange/src/order_book.rs Normal file
@@ -0,0 +1,134 @@
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {
    pub pubkey: Pubkey,
    pub info: OrderInfo,
}

impl Ord for ToOrder {
    fn cmp(&self, other: &Self) -> Ordering {
        other.info.price.cmp(&self.info.price)
    }
}
impl PartialOrd for ToOrder {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FromOrder {
    pub pubkey: Pubkey,
    pub info: OrderInfo,
}

impl Ord for FromOrder {
    fn cmp(&self, other: &Self) -> Ordering {
        self.info.price.cmp(&other.info.price)
    }
}
impl PartialOrd for FromOrder {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

#[derive(Default)]
pub struct OrderBook {
    // TODO scale to x token types
    to_ab: BinaryHeap<ToOrder>,
    from_ab: BinaryHeap<FromOrder>,
}
impl fmt::Display for OrderBook {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(
            f,
            "+-Order Book--------------------------+-------------------------------------+"
        )?;
        for (i, it) in self
            .to_ab
            .iter()
            .zip_longest(self.from_ab.iter())
            .enumerate()
        {
            match it {
                Both(to, from) => writeln!(
                    f,
                    "| T AB {:8} for {:8}/{:8} | F AB {:8} for {:8}/{:8} |{}",
                    to.info.tokens,
                    SCALER,
                    to.info.price,
                    from.info.tokens,
                    SCALER,
                    from.info.price,
                    i
                )?,
                Left(to) => writeln!(
                    f,
                    "| T AB {:8} for {:8}/{:8} |                                     |{}",
                    to.info.tokens, SCALER, to.info.price, i
                )?,
                Right(from) => writeln!(
                    f,
                    "|                                     | F AB {:8} for {:8}/{:8} |{}",
                    from.info.tokens, SCALER, from.info.price, i
                )?,
            }
        }
        write!(
            f,
            "+-------------------------------------+-------------------------------------+"
        )?;
        Ok(())
    }
}

impl OrderBook {
    // TODO
    // pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
    //     Ok(())
    // }
    pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
        check_trade(info.side, info.tokens, info.price)?;
        match info.side {
            OrderSide::Ask => {
                self.to_ab.push(ToOrder { pubkey, info });
            }
            OrderSide::Bid => {
                self.from_ab.push(FromOrder { pubkey, info });
            }
        }
        Ok(())
    }
    pub fn pop(&mut self) -> Option<(ToOrder, FromOrder)> {
        Self::pop_pair(&mut self.to_ab, &mut self.from_ab)
    }
    pub fn get_num_outstanding(&self) -> (usize, usize) {
        (self.to_ab.len(), self.from_ab.len())
    }

    fn pop_pair(
        to_ab: &mut BinaryHeap<ToOrder>,
        from_ab: &mut BinaryHeap<FromOrder>,
    ) -> Option<(ToOrder, FromOrder)> {
        let to = to_ab.peek()?;
        let from = from_ab.peek()?;
        if from.info.price < to.info.price {
            debug!("Trade not viable");
            return None;
        }
        let to = to_ab.pop()?;
        let from = from_ab.pop()?;
        Some((to, from))
    }
}
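
The Ord implementations above do real work: ToOrder reverses the comparison so
that Rust's max-heap BinaryHeap pops the lowest-priced To order first, while
FromOrder keeps the natural order so the highest-priced From order pops first;
pop_pair then only has to peek at the two tops. A self-contained sketch of
that trick:

use std::{cmp::Ordering, collections::BinaryHeap};

#[derive(Eq, PartialEq)]
struct MinPrice(u64);

impl Ord for MinPrice {
    fn cmp(&self, other: &Self) -> Ordering {
        other.0.cmp(&self.0) // reversed: a smaller price compares greater
    }
}

impl PartialOrd for MinPrice {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    for p in [8u64, 4, 6].iter() {
        heap.push(MinPrice(*p));
    }
    assert_eq!(heap.pop().unwrap().0, 4); // lowest price pops first
}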
124 bench-exchange/tests/bench_exchange.rs Normal file
@@ -0,0 +1,124 @@
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::validator::ValidatorConfig;
use solana_exchange_program::{
    exchange_processor::process_instruction, id, solana_exchange_program,
};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_local_cluster::{
    local_cluster::{ClusterConfig, LocalCluster},
    validator_configs::make_identical_validator_configs,
};
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::{
    genesis_config::create_genesis_config,
    signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::{process::exit, sync::mpsc::channel, time::Duration};

#[test]
#[ignore]
fn test_exchange_local_cluster() {
    solana_logger::setup();

    const NUM_NODES: usize = 1;

    let config = Config {
        identity: Keypair::new(),
        duration: Duration::from_secs(1),
        fund_amount: 100_000,
        threads: 1,
        transfer_delay: 20, // 15
        batch_size: 100,    // 1000
        chunk_size: 10,     // 200
        account_groups: 1,  // 10
        ..Config::default()
    };
    let Config {
        fund_amount,
        batch_size,
        account_groups,
        ..
    } = config;
    let accounts_in_groups = batch_size * account_groups;

    let cluster = LocalCluster::new(
        &mut ClusterConfig {
            node_stakes: vec![100_000; NUM_NODES],
            cluster_lamports: 100_000_000_000_000,
            validator_configs: make_identical_validator_configs(
                &ValidatorConfig::default(),
                NUM_NODES,
            ),
            native_instruction_processors: [solana_exchange_program!()].to_vec(),
            ..ClusterConfig::default()
        },
        SocketAddrSpace::Unspecified,
    );

    let faucet_keypair = Keypair::new();
    cluster.transfer(
        &cluster.funding_keypair,
        &faucet_keypair.pubkey(),
        2_000_000_000_000,
    );

    let (addr_sender, addr_receiver) = channel();
    run_local_faucet_with_port(faucet_keypair, addr_sender, Some(1_000_000_000_000), 0);
    let faucet_addr = addr_receiver
        .recv_timeout(Duration::from_secs(2))
        .expect("run_local_faucet")
        .expect("faucet_addr");

    info!("Connecting to the cluster");
    let nodes = discover_cluster(
        &cluster.entry_point_info.gossip,
        NUM_NODES,
        SocketAddrSpace::Unspecified,
    )
    .unwrap_or_else(|err| {
        error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
        exit(1);
    });

    let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);

    info!("clients: {}", num_clients);
    assert!(num_clients >= NUM_NODES);

    const NUM_SIGNERS: u64 = 2;
    airdrop_lamports(
        &client,
        &faucet_addr,
        &config.identity,
        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
    );

    do_bench_exchange(vec![client], config);
}

#[test]
fn test_exchange_bank_client() {
    solana_logger::setup();
    let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
    let mut bank = Bank::new(&genesis_config);
    bank.add_builtin("exchange_program", id(), process_instruction);
    let clients = vec![BankClient::new(bank)];

    do_bench_exchange(
        clients,
        Config {
            identity,
            duration: Duration::from_secs(1),
            fund_amount: 100_000,
            threads: 1,
            transfer_delay: 20, // 0;
            batch_size: 100,    // 1500;
            chunk_size: 10,     // 1500;
            account_groups: 1,  // 50;
            ..Config::default()
        },
    );
}
@@ -1,19 +1,20 @@
 [package]
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
-edition = "2021"
+edition = "2018"
 name = "solana-bench-streamer"
-version = "1.11.0"
+version = "1.7.10"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
-crossbeam-channel = "0.5"
-clap = { version = "3.1.5", features = ["cargo"] }
-solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
-solana-streamer = { path = "../streamer", version = "=1.11.0" }
-solana-version = { path = "../version", version = "=1.11.0" }
+clap = "2.33.1"
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.10" }
+solana-streamer = { path = "../streamer", version = "=1.7.10" }
+solana-logger = { path = "../logger", version = "=1.7.10" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.10" }
+solana-version = { path = "../version", version = "=1.7.10" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,38 +1,32 @@
 #![allow(clippy::integer_arithmetic)]
-use {
-    clap::{crate_description, crate_name, Arg, Command},
-    crossbeam_channel::unbounded,
-    solana_streamer::{
-        packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
-        streamer::{receiver, PacketBatchReceiver},
-    },
-    std::{
-        cmp::max,
-        net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
-        sync::{
-            atomic::{AtomicBool, AtomicUsize, Ordering},
-            Arc,
-        },
-        thread::{sleep, spawn, JoinHandle, Result},
-        time::{Duration, SystemTime},
-    },
-};
+use clap::{crate_description, crate_name, App, Arg};
+use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
+use solana_streamer::streamer::{receiver, PacketReceiver};
+use std::cmp::max;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::Arc;
+use std::thread::sleep;
+use std::thread::{spawn, JoinHandle, Result};
+use std::time::Duration;
+use std::time::SystemTime;
 
 fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     let send = UdpSocket::bind("0.0.0.0:0").unwrap();
-    let mut packet_batch = PacketBatch::default();
-    packet_batch.packets.resize(10, Packet::default());
-    for w in packet_batch.packets.iter_mut() {
+    let mut msgs = Packets::default();
+    msgs.packets.resize(10, Packet::default());
+    for w in msgs.packets.iter_mut() {
         w.meta.size = PACKET_DATA_SIZE;
         w.meta.set_addr(addr);
     }
-    let packet_batch = Arc::new(packet_batch);
+    let msgs = Arc::new(msgs);
     spawn(move || loop {
         if exit.load(Ordering::Relaxed) {
             return;
         }
         let mut num = 0;
-        for p in &packet_batch.packets {
+        for p in &msgs.packets {
             let a = p.meta.addr();
             assert!(p.meta.size <= PACKET_DATA_SIZE);
             send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +36,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     })
 }
 
-fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
+fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
     spawn(move || loop {
         if exit.load(Ordering::Relaxed) {
             return;
         }
         let timer = Duration::new(1, 0);
-        if let Ok(packet_batch) = r.recv_timeout(timer) {
-            rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
+        if let Ok(msgs) = r.recv_timeout(timer) {
+            rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
         }
     })
 }
@@ -57,32 +51,23 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) ->
 fn main() -> Result<()> {
     let mut num_sockets = 1usize;
 
-    let matches = Command::new(crate_name!())
+    let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(solana_version::version!())
         .arg(
-            Arg::new("num-recv-sockets")
+            Arg::with_name("num-recv-sockets")
                 .long("num-recv-sockets")
                 .value_name("NUM")
                 .takes_value(true)
                 .help("Use NUM receive sockets"),
         )
-        .arg(
-            Arg::new("num-producers")
-                .long("num-producers")
-                .value_name("NUM")
-                .takes_value(true)
-                .help("Use this many producer threads."),
-        )
         .get_matches();
 
     if let Some(n) = matches.value_of("num-recv-sockets") {
         num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
     }
 
-    let num_producers: u64 = matches.value_of_t("num_producers").unwrap_or(4);
-
-    let port = 0;
+    let mut port = 0;
     let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
     let mut addr = SocketAddr::new(ip_addr, 0);
 
@@ -90,18 +75,15 @@
 
     let mut read_channels = Vec::new();
     let mut read_threads = Vec::new();
-    let recycler = PacketBatchRecycler::default();
-    let (_port, read_sockets) = solana_net_utils::multi_bind_in_range(
-        ip_addr,
-        (port, port + num_sockets as u16),
-        num_sockets,
-    )
-    .unwrap();
-    for read in read_sockets {
+    let recycler = PacketsRecycler::default();
+    for _ in 0..num_sockets {
+        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
         read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
 
         addr = read.local_addr().unwrap();
-        let (s_reader, r_reader) = unbounded();
+        port = addr.port();
+
+        let (s_reader, r_reader) = channel();
         read_channels.push(r_reader);
         read_threads.push(receiver(
             Arc::new(read),
@@ -114,10 +96,9 @@
         ));
     }
 
-    let producer_threads: Vec<_> = (0..num_producers)
-        .into_iter()
-        .map(|_| producer(&addr, exit.clone()))
-        .collect();
+    let t_producer1 = producer(&addr, exit.clone());
+    let t_producer2 = producer(&addr, exit.clone());
+    let t_producer3 = producer(&addr, exit.clone());
 
     let rvs = Arc::new(AtomicUsize::new(0));
     let sink_threads: Vec<_> = read_channels
@@ -137,9 +118,9 @@
     for t_reader in read_threads {
         t_reader.join()?;
     }
-    for t_producer in producer_threads {
-        t_producer.join()?;
-    }
+    t_producer1.join()?;
+    t_producer2.join()?;
+    t_producer3.join()?;
     for t_sink in sink_threads {
         t_sink.join()?;
     }
@@ -1,42 +1,38 @@
 [package]
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
-edition = "2021"
+edition = "2018"
 name = "solana-bench-tps"
-version = "1.11.0"
+version = "1.7.10"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
 bincode = "1.3.1"
 clap = "2.33.1"
-crossbeam-channel = "0.5"
-log = "0.4.14"
-rayon = "1.5.1"
-serde_json = "1.0.79"
-serde_yaml = "0.8.23"
-solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
-solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
-solana-client = { path = "../client", version = "=1.11.0" }
-solana-core = { path = "../core", version = "=1.11.0" }
-solana-faucet = { path = "../faucet", version = "=1.11.0" }
-solana-genesis = { path = "../genesis", version = "=1.11.0" }
-solana-gossip = { path = "../gossip", version = "=1.11.0" }
-solana-logger = { path = "../logger", version = "=1.11.0" }
-solana-measure = { path = "../measure", version = "=1.11.0" }
-solana-metrics = { path = "../metrics", version = "=1.11.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
-solana-rpc = { path = "../rpc", version = "=1.11.0" }
-solana-runtime = { path = "../runtime", version = "=1.11.0" }
-solana-sdk = { path = "../sdk", version = "=1.11.0" }
-solana-streamer = { path = "../streamer", version = "=1.11.0" }
-solana-version = { path = "../version", version = "=1.11.0" }
-thiserror = "1.0"
+log = "0.4.11"
+rayon = "1.5.0"
+serde_json = "1.0.56"
+serde_yaml = "0.8.13"
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.10" }
+solana-core = { path = "../core", version = "=1.7.10" }
+solana-genesis = { path = "../genesis", version = "=1.7.10" }
+solana-client = { path = "../client", version = "=1.7.10" }
+solana-faucet = { path = "../faucet", version = "=1.7.10" }
+solana-gossip = { path = "../gossip", version = "=1.7.10" }
+solana-logger = { path = "../logger", version = "=1.7.10" }
+solana-metrics = { path = "../metrics", version = "=1.7.10" }
+solana-measure = { path = "../measure", version = "=1.7.10" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.10" }
+solana-runtime = { path = "../runtime", version = "=1.7.10" }
+solana-sdk = { path = "../sdk", version = "=1.7.10" }
+solana-streamer = { path = "../streamer", version = "=1.7.10" }
+solana-version = { path = "../version", version = "=1.7.10" }
 
 [dev-dependencies]
-serial_test = "0.6.0"
-solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
-solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
+serial_test = "0.4.0"
+solana-local-cluster = { path = "../local-cluster", version = "=1.7.10" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,37 +1,34 @@
-use {
-    crate::{
-        bench_tps_client::*,
-        cli::Config,
-        perf_utils::{sample_txs, SampleStats},
-    },
-    log::*,
-    rayon::prelude::*,
-    solana_core::gen_keys::GenKeys,
-    solana_measure::measure::Measure,
-    solana_metrics::{self, datapoint_info},
-    solana_sdk::{
-        clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
-        commitment_config::CommitmentConfig,
-        hash::Hash,
-        instruction::{AccountMeta, Instruction},
-        message::Message,
-        native_token::Sol,
-        pubkey::Pubkey,
-        signature::{Keypair, Signer},
-        system_instruction, system_transaction,
-        timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
-        transaction::Transaction,
-    },
-    std::{
-        collections::{HashSet, VecDeque},
-        process::exit,
-        sync::{
-            atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
-            Arc, Mutex, RwLock,
-        },
-        thread::{sleep, Builder, JoinHandle},
-        time::{Duration, Instant},
-    },
-};
+use crate::cli::Config;
+use log::*;
+use rayon::prelude::*;
+use solana_client::perf_utils::{sample_txs, SampleStats};
+use solana_core::gen_keys::GenKeys;
+use solana_faucet::faucet::request_airdrop_transaction;
+use solana_measure::measure::Measure;
+use solana_metrics::{self, datapoint_info};
+use solana_sdk::{
+    client::Client,
+    clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
+    commitment_config::CommitmentConfig,
+    fee_calculator::FeeCalculator,
+    hash::Hash,
+    message::Message,
+    pubkey::Pubkey,
+    signature::{Keypair, Signer},
+    system_instruction, system_transaction,
+    timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
+    transaction::Transaction,
+};
+use std::{
+    collections::{HashSet, VecDeque},
+    net::SocketAddr,
+    process::exit,
+    sync::{
+        atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
+        Arc, Mutex, RwLock,
+    },
+    thread::{sleep, Builder, JoinHandle},
+    time::{Duration, Instant},
+};
 
 // The point at which transactions become "too old", in seconds.
@@ -39,14 +36,23 @@ const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) a
 
 pub const MAX_SPENDS_PER_TX: u64 = 4;
 
+#[derive(Debug)]
+pub enum BenchTpsError {
+    AirdropFailure,
+}
+
+pub type Result<T> = std::result::Result<T, BenchTpsError>;
+
 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
 
-fn get_latest_blockhash<T: BenchTpsClient>(client: &T) -> Hash {
+fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
     loop {
-        match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) {
-            Ok((blockhash, _)) => return blockhash,
+        match client.get_recent_blockhash_with_commitment(CommitmentConfig::processed()) {
+            Ok((blockhash, fee_calculator, _last_valid_slot)) => {
+                return (blockhash, fee_calculator)
+            }
             Err(err) => {
-                info!("Couldn't get last blockhash: {:?}", err);
+                info!("Couldn't get recent blockhash: {:?}", err);
                 sleep(Duration::from_secs(1));
             }
         };
@@ -55,7 +61,7 @@ fn get_latest_blockhash<T: BenchTpsClient>(client: &T) -> Hash {
 
 fn wait_for_target_slots_per_epoch<T>(target_slots_per_epoch: u64, client: &Arc<T>)
 where
-    T: 'static + BenchTpsClient + Send + Sync,
+    T: 'static + Client + Send + Sync,
 {
     if target_slots_per_epoch != 0 {
         info!(
@@ -85,7 +91,7 @@ fn create_sampler_thread<T>(
     maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
 ) -> JoinHandle<()>
 where
-    T: 'static + BenchTpsClient + Send + Sync,
+    T: 'static + Client + Send + Sync,
 {
     info!("Sampling TPS every {} second...", sample_period);
     let exit_signal = exit_signal.clone();
@@ -104,7 +110,7 @@ fn generate_chunked_transfers(
     shared_txs: &SharedTransactions,
     shared_tx_active_thread_count: Arc<AtomicIsize>,
     source_keypair_chunks: Vec<Vec<&Keypair>>,
-    dest_keypair_chunks: &mut [VecDeque<&Keypair>],
+    dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
     threads: usize,
     duration: Duration,
     sustained: bool,
@@ -163,7 +169,7 @@ fn create_sender_threads<T>(
     shared_tx_active_thread_count: &Arc<AtomicIsize>,
 ) -> Vec<JoinHandle<()>>
 where
-    T: 'static + BenchTpsClient + Send + Sync,
+    T: 'static + Client + Send + Sync,
 {
     (0..threads)
         .map(|_| {
@@ -191,7 +197,7 @@ where
 
 pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
 where
-    T: 'static + BenchTpsClient + Send + Sync,
+    T: 'static + Client + Send + Sync,
 {
     let Config {
         id,
@@ -233,19 +239,19 @@ where
 
     let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
 
-    let blockhash = Arc::new(RwLock::new(get_latest_blockhash(client.as_ref())));
+    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
     let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
     let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
 
     let blockhash_thread = {
         let exit_signal = exit_signal.clone();
-        let blockhash = blockhash.clone();
+        let recent_blockhash = recent_blockhash.clone();
         let client = client.clone();
        let id = id.pubkey();
         Builder::new()
             .name("solana-blockhash-poller".to_string())
             .spawn(move || {
-                poll_blockhash(&exit_signal, &blockhash, &client, &id);
+                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
             })
             .unwrap()
     };
@@ -265,7 +271,7 @@ where
     let start = Instant::now();
 
     generate_chunked_transfers(
-        blockhash,
+        recent_blockhash,
         &shared_txs,
         shared_tx_active_thread_count,
         source_keypair_chunks,
@@ -385,23 +391,7 @@ fn generate_txs(
     }
 }
 
-fn get_new_latest_blockhash<T: BenchTpsClient>(client: &Arc<T>, blockhash: &Hash) -> Option<Hash> {
-    let start = Instant::now();
-    while start.elapsed().as_secs() < 5 {
-        if let Ok(new_blockhash) = client.get_latest_blockhash() {
-            if new_blockhash != *blockhash {
-                return Some(new_blockhash);
-            }
-        }
-        debug!("Got same blockhash ({:?}), will retry...", blockhash);
-
-        // Retry ~twice during a slot
-        sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2));
-    }
-    None
-}
-
-fn poll_blockhash<T: BenchTpsClient>(
+fn poll_blockhash<T: Client>(
     exit_signal: &Arc<AtomicBool>,
     blockhash: &Arc<RwLock<Hash>>,
     client: &Arc<T>,
@@ -412,7 +402,7 @@ fn poll_blockhash<T: BenchTpsClient>(
     loop {
         let blockhash_updated = {
             let old_blockhash = *blockhash.read().unwrap();
-            if let Some(new_blockhash) = get_new_latest_blockhash(client, &old_blockhash) {
+            if let Ok((new_blockhash, _fee)) = client.get_new_blockhash(&old_blockhash) {
                 *blockhash.write().unwrap() = new_blockhash;
                 blockhash_last_updated = Instant::now();
                 true
@@ -443,7 +433,7 @@ fn poll_blockhash<T: BenchTpsClient>(
     }
 }
 
-fn do_tx_transfers<T: BenchTpsClient>(
+fn do_tx_transfers<T: Client>(
     exit_signal: &Arc<AtomicBool>,
     shared_txs: &SharedTransactions,
     shared_tx_thread_count: &Arc<AtomicIsize>,
@@ -461,11 +451,14 @@
         };
         if let Some(txs0) = txs {
             shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
-            info!("Transferring 1 unit {} times...", txs0.len());
+            info!(
+                "Transferring 1 unit {} times... to {}",
+                txs0.len(),
+                client.as_ref().tpu_addr(),
);
|
||||
let tx_len = txs0.len();
|
||||
let transfer_start = Instant::now();
|
||||
let mut old_transactions = false;
|
||||
let mut transactions = Vec::<_>::new();
|
||||
for tx in txs0 {
|
||||
let now = timestamp();
|
||||
// Transactions that are too old will be rejected by the cluster Don't bother
|
||||
@ -474,13 +467,10 @@ fn do_tx_transfers<T: BenchTpsClient>(
|
||||
old_transactions = true;
|
||||
continue;
|
||||
}
|
||||
transactions.push(tx.0);
|
||||
client
|
||||
.async_send_transaction(tx.0)
|
||||
.expect("async_send_transaction in do_tx_transfers");
|
||||
}
|
||||
|
||||
if let Err(error) = client.send_batch(transactions) {
|
||||
warn!("send_batch_sync in do_tx_transfers failed: {}", error);
|
||||
}
|
||||
|
||||
if old_transactions {
|
||||
let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
|
||||
shared_txs_wl.clear();
|
||||
@ -504,11 +494,7 @@ fn do_tx_transfers<T: BenchTpsClient>(
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_funding_transfer<T: BenchTpsClient>(
|
||||
client: &Arc<T>,
|
||||
tx: &Transaction,
|
||||
amount: u64,
|
||||
) -> bool {
|
||||
fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
|
||||
for a in &tx.message().account_keys[1..] {
|
||||
match client.get_balance_with_commitment(a, CommitmentConfig::processed()) {
|
||||
Ok(balance) => return balance >= amount,
|
||||
@ -519,7 +505,7 @@ fn verify_funding_transfer<T: BenchTpsClient>(
|
||||
}
|
||||
|
||||
trait FundingTransactions<'a> {
|
||||
fn fund<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
fn fund<T: 'static + Client + Send + Sync>(
|
||||
&mut self,
|
||||
client: &Arc<T>,
|
||||
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
|
||||
@ -527,16 +513,12 @@ trait FundingTransactions<'a> {
|
||||
);
|
||||
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
|
||||
fn sign(&mut self, blockhash: Hash);
|
||||
fn send<T: BenchTpsClient>(&self, client: &Arc<T>);
|
||||
fn verify<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
&mut self,
|
||||
client: &Arc<T>,
|
||||
to_lamports: u64,
|
||||
);
|
||||
fn send<T: Client>(&self, client: &Arc<T>);
|
||||
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
|
||||
}
|
||||
|
||||
impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
fn fund<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
fn fund<T: 'static + Client + Send + Sync>(
|
||||
&mut self,
|
||||
client: &Arc<T>,
|
||||
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
|
||||
@ -558,7 +540,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
self.len(),
|
||||
);
|
||||
|
||||
let blockhash = get_latest_blockhash(client.as_ref());
|
||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
|
||||
|
||||
// re-sign retained to_fund_txes with updated blockhash
|
||||
self.sign(blockhash);
|
||||
@ -605,20 +587,16 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
|
||||
}
|
||||
|
||||
fn send<T: BenchTpsClient>(&self, client: &Arc<T>) {
|
||||
fn send<T: Client>(&self, client: &Arc<T>) {
|
||||
let mut send_txs = Measure::start("send_txs");
|
||||
self.iter().for_each(|(_, tx)| {
|
||||
client.send_transaction(tx.clone()).expect("transfer");
|
||||
client.async_send_transaction(tx.clone()).expect("transfer");
|
||||
});
|
||||
send_txs.stop();
|
||||
debug!("send {} txs: {}us", self.len(), send_txs.as_us());
|
||||
}
|
||||
|
||||
fn verify<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
&mut self,
|
||||
client: &Arc<T>,
|
||||
to_lamports: u64,
|
||||
) {
|
||||
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
|
||||
let starting_txs = self.len();
|
||||
let verified_txs = Arc::new(AtomicUsize::new(0));
|
||||
let too_many_failures = Arc::new(AtomicBool::new(false));
|
||||
@ -693,7 +671,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
|
||||
/// on every iteration. This allows us to replay the transfers because the source is either empty,
|
||||
/// or full
|
||||
pub fn fund_keys<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
pub fn fund_keys<T: 'static + Client + Send + Sync>(
|
||||
client: Arc<T>,
|
||||
source: &Keypair,
|
||||
dests: &[Keypair],
|
||||
@ -735,6 +713,75 @@ pub fn fund_keys<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn airdrop_lamports<T: Client>(
|
||||
client: &T,
|
||||
faucet_addr: &SocketAddr,
|
||||
id: &Keypair,
|
||||
desired_balance: u64,
|
||||
) -> Result<()> {
|
||||
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
|
||||
metrics_submit_lamport_balance(starting_balance);
|
||||
info!("starting balance {}", starting_balance);
|
||||
|
||||
if starting_balance < desired_balance {
|
||||
let airdrop_amount = desired_balance - starting_balance;
|
||||
info!(
|
||||
"Airdropping {:?} lamports from {} for {}",
|
||||
airdrop_amount,
|
||||
faucet_addr,
|
||||
id.pubkey(),
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
Ok(transaction) => {
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
tries += 1;
|
||||
let signature = client.async_send_transaction(transaction.clone()).unwrap();
|
||||
let result = client.poll_for_signature_confirmation(&signature, 1);
|
||||
|
||||
if result.is_ok() {
|
||||
break;
|
||||
}
|
||||
if tries >= 5 {
|
||||
panic!(
|
||||
"Error requesting airdrop: to addr: {:?} amount: {} {:?}",
|
||||
faucet_addr, airdrop_amount, result
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
panic!(
|
||||
"Error requesting airdrop: {:?} to addr: {:?} amount: {}",
|
||||
err, faucet_addr, airdrop_amount
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
let current_balance = client
|
||||
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::processed())
|
||||
.unwrap_or_else(|e| {
|
||||
info!("airdrop error {}", e);
|
||||
starting_balance
|
||||
});
|
||||
info!("current balance {}...", current_balance);
|
||||
|
||||
metrics_submit_lamport_balance(current_balance);
|
||||
if current_balance - starting_balance != airdrop_amount {
|
||||
info!(
|
||||
"Airdrop failed! {} {} {}",
|
||||
id.pubkey(),
|
||||
current_balance,
|
||||
starting_balance
|
||||
);
|
||||
return Err(BenchTpsError::AirdropFailure);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn compute_and_report_stats(
|
||||
maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
|
||||
sample_period: u64,
|
||||
@ -818,33 +865,15 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
|
||||
(rnd.gen_n_keypairs(total_keys), extra)
|
||||
}
|
||||
|
||||
pub fn generate_and_fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
client: Arc<T>,
|
||||
faucet_addr: Option<SocketAddr>,
|
||||
funding_key: &Keypair,
|
||||
keypair_count: usize,
|
||||
lamports_per_account: u64,
|
||||
) -> Result<Vec<Keypair>> {
|
||||
let rent = client.get_minimum_balance_for_rent_exemption(0)?;
|
||||
let lamports_per_account = lamports_per_account + rent;
|
||||
|
||||
info!("Creating {} keypairs...", keypair_count);
|
||||
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
|
||||
fund_keypairs(client, funding_key, &keypairs, extra, lamports_per_account)?;
|
||||
|
||||
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
|
||||
keypairs.truncate(keypair_count);
|
||||
|
||||
Ok(keypairs)
|
||||
}
|
||||
|
||||
pub fn fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
client: Arc<T>,
|
||||
funding_key: &Keypair,
|
||||
keypairs: &[Keypair],
|
||||
extra: u64,
|
||||
lamports_per_account: u64,
|
||||
) -> Result<()> {
|
||||
let rent = client.get_minimum_balance_for_rent_exemption(0)?;
|
||||
info!("Get lamports...");
|
||||
|
||||
// Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
|
||||
@ -852,7 +881,7 @@ pub fn fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);
|
||||
|
||||
// Sample the last keypair, to check if funding was already completed
|
||||
let last_key = keypairs[keypairs.len() - 1].pubkey();
|
||||
let last_key = keypairs[keypair_count - 1].pubkey();
|
||||
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
|
||||
|
||||
// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
|
||||
@ -861,16 +890,8 @@ pub fn fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
// pay for the transaction fees in a new run.
|
||||
let enough_lamports = 8 * lamports_per_account / 10;
|
||||
if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
|
||||
let single_sig_message = Message::new_with_blockhash(
|
||||
&[Instruction::new_with_bytes(
|
||||
Pubkey::new_unique(),
|
||||
&[],
|
||||
vec![AccountMeta::new(Pubkey::new_unique(), true)],
|
||||
)],
|
||||
None,
|
||||
&client.get_latest_blockhash().unwrap(),
|
||||
);
|
||||
let max_fee = client.get_fee_for_message(&single_sig_message).unwrap();
|
||||
let fee_rate_governor = client.get_fee_rate_governor().unwrap();
|
||||
let max_fee = fee_rate_governor.max_lamports_per_signature;
|
||||
let extra_fees = extra * max_fee;
|
||||
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
|
||||
let total = lamports_per_account * total_keypairs + extra_fees;
|
||||
@ -881,52 +902,39 @@ pub fn fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
|
||||
funding_key_balance, max_fee, lamports_per_account, extra, total
|
||||
);
|
||||
|
||||
if funding_key_balance < total + rent {
|
||||
error!(
|
||||
"funder has {}, needed {}",
|
||||
Sol(funding_key_balance),
|
||||
Sol(total)
|
||||
);
|
||||
let latest_blockhash = get_latest_blockhash(client.as_ref());
|
||||
if client
|
||||
.request_airdrop_with_blockhash(
|
||||
&funding_key.pubkey(),
|
||||
total + rent - funding_key_balance,
|
||||
&latest_blockhash,
|
||||
)
|
||||
.is_err()
|
||||
{
|
||||
return Err(BenchTpsError::AirdropFailure);
|
||||
}
|
||||
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
|
||||
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
|
||||
}
|
||||
|
||||
fund_keys(
|
||||
client,
|
||||
funding_key,
|
||||
keypairs,
|
||||
&keypairs,
|
||||
total,
|
||||
max_fee,
|
||||
lamports_per_account,
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
|
||||
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
|
||||
keypairs.truncate(keypair_count);
|
||||
|
||||
Ok(keypairs)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {
|
||||
super::*,
|
||||
solana_runtime::{bank::Bank, bank_client::BankClient},
|
||||
solana_sdk::{
|
||||
fee_calculator::FeeRateGovernor, genesis_config::create_genesis_config,
|
||||
native_token::sol_to_lamports,
|
||||
},
|
||||
};
|
||||
use super::*;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_client::BankClient;
|
||||
use solana_sdk::client::SyncClient;
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::genesis_config::create_genesis_config;
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_bank_client() {
|
||||
let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let (genesis_config, id) = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
|
||||
let config = Config {
|
||||
@ -938,49 +946,48 @@ mod tests {
|
||||
|
||||
let keypair_count = config.tx_count * config.keypair_multiplier;
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20).unwrap();
|
||||
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
|
||||
.unwrap();
|
||||
|
||||
do_bench_tps(client, config, keypairs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_fund_keys() {
|
||||
let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let (genesis_config, id) = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap();
|
||||
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap();
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(
|
||||
client
|
||||
.get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::processed())
|
||||
.unwrap(),
|
||||
lamports + rent
|
||||
lamports
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_fund_keys_with_fees() {
|
||||
let (mut genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
|
||||
let (mut genesis_config, id) = create_genesis_config(10_000);
|
||||
let fee_rate_governor = FeeRateGovernor::new(11, 0);
|
||||
genesis_config.fee_rate_governor = fee_rate_governor;
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap();
|
||||
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap();
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports + rent);
|
||||
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
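Aside: the fund_keys doc comment above describes a tree-shaped fan-out, where every funded key can fund up to MAX_SPENDS_PER_TX new keys per iteration. The sketch below is not part of the diff; it is a standalone illustration of that arithmetic, with `funding_rounds` as a hypothetical helper name.

// Illustrative sketch of the MAX_SPENDS_PER_TX fan-out used by fund_keys.
const MAX_SPENDS_PER_TX: u64 = 4;

/// Rounds needed to fund `dest_count` keys starting from one source key,
/// when each funded key fans out to MAX_SPENDS_PER_TX destinations per round.
fn funding_rounds(dest_count: u64) -> u32 {
    let mut funded = 1u64; // the single source key
    let mut rounds = 0u32;
    while funded < dest_count {
        funded = funded.saturating_mul(MAX_SPENDS_PER_TX);
        rounds += 1;
    }
    rounds
}

fn main() {
    // A million keypairs are reachable in ceil(log4(1e6)) = 10 rounds.
    assert_eq!(funding_rounds(1_000_000), 10);
}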
@@ -1,87 +0,0 @@
use {
    solana_client::{client_error::ClientError, tpu_client::TpuSenderError},
    solana_sdk::{
        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
        pubkey::Pubkey, signature::Signature, transaction::Transaction, transport::TransportError,
    },
    thiserror::Error,
};

#[derive(Error, Debug)]
pub enum BenchTpsError {
    #[error("Airdrop failure")]
    AirdropFailure,
    #[error("IO error: {0:?}")]
    IoError(#[from] std::io::Error),
    #[error("Client error: {0:?}")]
    ClientError(#[from] ClientError),
    #[error("TpuClient error: {0:?}")]
    TpuSenderError(#[from] TpuSenderError),
    #[error("Transport error: {0:?}")]
    TransportError(#[from] TransportError),
    #[error("Custom error: {0}")]
    Custom(String),
}

pub(crate) type Result<T> = std::result::Result<T, BenchTpsError>;

pub trait BenchTpsClient {
    /// Send a signed transaction without confirmation
    fn send_transaction(&self, transaction: Transaction) -> Result<Signature>;

    /// Send a batch of signed transactions without confirmation.
    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()>;

    /// Get latest blockhash
    fn get_latest_blockhash(&self) -> Result<Hash>;

    /// Get latest blockhash and its last valid block height, using explicit commitment
    fn get_latest_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<(Hash, u64)>;

    /// Get transaction count
    fn get_transaction_count(&self) -> Result<u64>;

    /// Get transaction count, using explicit commitment
    fn get_transaction_count_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<u64>;

    /// Get epoch info
    fn get_epoch_info(&self) -> Result<EpochInfo>;

    /// Get account balance
    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64>;

    /// Get account balance, using explicit commitment
    fn get_balance_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> Result<u64>;

    /// Calculate the fee for a `Message`
    fn get_fee_for_message(&self, message: &Message) -> Result<u64>;

    /// Get the rent-exempt minimum for an account
    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64>;

    /// Return the address of the client
    fn addr(&self) -> String;

    /// Request, submit, and confirm an airdrop transaction
    fn request_airdrop_with_blockhash(
        &self,
        pubkey: &Pubkey,
        lamports: u64,
        recent_blockhash: &Hash,
    ) -> Result<Signature>;
}

mod bank_client;
mod rpc_client;
mod thin_client;
mod tpu_client;

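Aside: the point of the BenchTpsClient trait above is that bench code can stay client-agnostic. A minimal sketch of such a generic helper follows; `wait_for_balance` is a hypothetical name, not part of the diff, and it only uses methods declared on the trait, so it would work with any of the four impls that follow (BankClient, RpcClient, ThinClient, TpuClient).

use {
    crate::bench_tps_client::{BenchTpsClient, Result},
    solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey},
    std::{thread::sleep, time::Duration},
};

/// Poll until `pubkey` holds at least `min_lamports`.
pub fn wait_for_balance<T: BenchTpsClient>(
    client: &T,
    pubkey: &Pubkey,
    min_lamports: u64,
) -> Result<u64> {
    loop {
        let balance =
            client.get_balance_with_commitment(pubkey, CommitmentConfig::processed())?;
        if balance >= min_lamports {
            return Ok(balance);
        }
        // Same one-second back-off cadence the airdrop path uses.
        sleep(Duration::from_secs(1));
    }
}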
@@ -1,85 +0,0 @@
use {
    crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
    solana_runtime::bank_client::BankClient,
    solana_sdk::{
        client::{AsyncClient, SyncClient},
        commitment_config::CommitmentConfig,
        epoch_info::EpochInfo,
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::Signature,
        transaction::Transaction,
    },
};

impl BenchTpsClient for BankClient {
    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
        AsyncClient::async_send_transaction(self, transaction).map_err(|err| err.into())
    }
    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
        AsyncClient::async_send_batch(self, transactions).map_err(|err| err.into())
    }
    fn get_latest_blockhash(&self) -> Result<Hash> {
        SyncClient::get_latest_blockhash(self).map_err(|err| err.into())
    }

    fn get_latest_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<(Hash, u64)> {
        SyncClient::get_latest_blockhash_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_transaction_count(&self) -> Result<u64> {
        SyncClient::get_transaction_count(self).map_err(|err| err.into())
    }

    fn get_transaction_count_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        SyncClient::get_transaction_count_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_epoch_info(&self) -> Result<EpochInfo> {
        SyncClient::get_epoch_info(self).map_err(|err| err.into())
    }

    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
        SyncClient::get_balance(self, pubkey).map_err(|err| err.into())
    }

    fn get_balance_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        SyncClient::get_balance_with_commitment(self, pubkey, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
        SyncClient::get_fee_for_message(self, message).map_err(|err| err.into())
    }

    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
        SyncClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
    }

    fn addr(&self) -> String {
        "Local BankClient".to_string()
    }

    fn request_airdrop_with_blockhash(
        &self,
        _pubkey: &Pubkey,
        _lamports: u64,
        _recent_blockhash: &Hash,
    ) -> Result<Signature> {
        // BankClient doesn't support airdrops
        Err(BenchTpsError::AirdropFailure)
    }
}

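Aside: the BankClient impl above is what lets the unit tests in bench.rs run the generic bench code against an in-process Bank, with no cluster or network at all. A minimal sketch of that usage, mirroring the shape of those tests:

use {
    crate::bench_tps_client::BenchTpsClient,
    solana_runtime::{bank::Bank, bank_client::BankClient},
    solana_sdk::{
        genesis_config::create_genesis_config, native_token::sol_to_lamports, signature::Signer,
    },
};

fn main() {
    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(10.0));
    let bank = Bank::new_for_tests(&genesis_config);
    let client = BankClient::new(bank);

    // Any BenchTpsClient method now runs against the local bank.
    let balance = client.get_balance(&mint_keypair.pubkey()).unwrap();
    assert!(balance > 0);
}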
@@ -1,83 +0,0 @@
use {
    crate::bench_tps_client::{BenchTpsClient, Result},
    solana_client::rpc_client::RpcClient,
    solana_sdk::{
        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
        pubkey::Pubkey, signature::Signature, transaction::Transaction,
    },
};

impl BenchTpsClient for RpcClient {
    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
        RpcClient::send_transaction(self, &transaction).map_err(|err| err.into())
    }
    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
        for transaction in transactions {
            BenchTpsClient::send_transaction(self, transaction)?;
        }
        Ok(())
    }
    fn get_latest_blockhash(&self) -> Result<Hash> {
        RpcClient::get_latest_blockhash(self).map_err(|err| err.into())
    }

    fn get_latest_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<(Hash, u64)> {
        RpcClient::get_latest_blockhash_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_transaction_count(&self) -> Result<u64> {
        RpcClient::get_transaction_count(self).map_err(|err| err.into())
    }

    fn get_transaction_count_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        RpcClient::get_transaction_count_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_epoch_info(&self) -> Result<EpochInfo> {
        RpcClient::get_epoch_info(self).map_err(|err| err.into())
    }

    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
        RpcClient::get_balance(self, pubkey).map_err(|err| err.into())
    }

    fn get_balance_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        RpcClient::get_balance_with_commitment(self, pubkey, commitment_config)
            .map(|res| res.value)
            .map_err(|err| err.into())
    }

    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
        RpcClient::get_fee_for_message(self, message).map_err(|err| err.into())
    }

    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
        RpcClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
    }

    fn addr(&self) -> String {
        self.url()
    }

    fn request_airdrop_with_blockhash(
        &self,
        pubkey: &Pubkey,
        lamports: u64,
        recent_blockhash: &Hash,
    ) -> Result<Signature> {
        RpcClient::request_airdrop_with_blockhash(self, pubkey, lamports, recent_blockhash)
            .map_err(|err| err.into())
    }
}

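Aside: note the extra `.map(|res| res.value)` in get_balance_with_commitment above. Commitment-aware RpcClient calls return a response wrapper that carries the slot the value was observed at, and the trait wants only the bare value. The stand-in types below are a simplified illustration of that shape, not the real solana_client definitions:

struct RpcResponseContext {
    slot: u64,
}

struct Response<T> {
    context: RpcResponseContext,
    value: T,
}

fn unwrap_balance(res: Response<u64>) -> u64 {
    // The bench code only needs the value; the context slot is dropped.
    println!("balance observed at slot {}", res.context.slot);
    res.value
}

fn main() {
    let res = Response {
        context: RpcResponseContext { slot: 42 },
        value: 1_000,
    };
    assert_eq!(unwrap_balance(res), 1_000);
}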
@@ -1,86 +0,0 @@
use {
    crate::bench_tps_client::{BenchTpsClient, Result},
    solana_client::thin_client::ThinClient,
    solana_sdk::{
        client::{AsyncClient, Client, SyncClient},
        commitment_config::CommitmentConfig,
        epoch_info::EpochInfo,
        hash::Hash,
        message::Message,
        pubkey::Pubkey,
        signature::Signature,
        transaction::Transaction,
    },
};

impl BenchTpsClient for ThinClient {
    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
        AsyncClient::async_send_transaction(self, transaction).map_err(|err| err.into())
    }
    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
        AsyncClient::async_send_batch(self, transactions).map_err(|err| err.into())
    }
    fn get_latest_blockhash(&self) -> Result<Hash> {
        SyncClient::get_latest_blockhash(self).map_err(|err| err.into())
    }

    fn get_latest_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<(Hash, u64)> {
        SyncClient::get_latest_blockhash_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_transaction_count(&self) -> Result<u64> {
        SyncClient::get_transaction_count(self).map_err(|err| err.into())
    }

    fn get_transaction_count_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        SyncClient::get_transaction_count_with_commitment(self, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_epoch_info(&self) -> Result<EpochInfo> {
        SyncClient::get_epoch_info(self).map_err(|err| err.into())
    }

    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
        SyncClient::get_balance(self, pubkey).map_err(|err| err.into())
    }

    fn get_balance_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        SyncClient::get_balance_with_commitment(self, pubkey, commitment_config)
            .map_err(|err| err.into())
    }

    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
        SyncClient::get_fee_for_message(self, message).map_err(|err| err.into())
    }

    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
        SyncClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
    }

    fn addr(&self) -> String {
        Client::tpu_addr(self)
    }

    fn request_airdrop_with_blockhash(
        &self,
        pubkey: &Pubkey,
        lamports: u64,
        recent_blockhash: &Hash,
    ) -> Result<Signature> {
        self.rpc_client()
            .request_airdrop_with_blockhash(pubkey, lamports, recent_blockhash)
            .map_err(|err| err.into())
    }
}

@@ -1,99 +0,0 @@
use {
    crate::bench_tps_client::{BenchTpsClient, Result},
    solana_client::tpu_client::TpuClient,
    solana_sdk::{
        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
        pubkey::Pubkey, signature::Signature, transaction::Transaction,
    },
};

impl BenchTpsClient for TpuClient {
    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
        let signature = transaction.signatures[0];
        self.try_send_transaction(&transaction)?;
        Ok(signature)
    }
    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
        for transaction in transactions {
            BenchTpsClient::send_transaction(self, transaction)?;
        }
        Ok(())
    }
    fn get_latest_blockhash(&self) -> Result<Hash> {
        self.rpc_client()
            .get_latest_blockhash()
            .map_err(|err| err.into())
    }

    fn get_latest_blockhash_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<(Hash, u64)> {
        self.rpc_client()
            .get_latest_blockhash_with_commitment(commitment_config)
            .map_err(|err| err.into())
    }

    fn get_transaction_count(&self) -> Result<u64> {
        self.rpc_client()
            .get_transaction_count()
            .map_err(|err| err.into())
    }

    fn get_transaction_count_with_commitment(
        &self,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        self.rpc_client()
            .get_transaction_count_with_commitment(commitment_config)
            .map_err(|err| err.into())
    }

    fn get_epoch_info(&self) -> Result<EpochInfo> {
        self.rpc_client().get_epoch_info().map_err(|err| err.into())
    }

    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
        self.rpc_client()
            .get_balance(pubkey)
            .map_err(|err| err.into())
    }

    fn get_balance_with_commitment(
        &self,
        pubkey: &Pubkey,
        commitment_config: CommitmentConfig,
    ) -> Result<u64> {
        self.rpc_client()
            .get_balance_with_commitment(pubkey, commitment_config)
            .map(|res| res.value)
            .map_err(|err| err.into())
    }

    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
        self.rpc_client()
            .get_fee_for_message(message)
            .map_err(|err| err.into())
    }

    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
        self.rpc_client()
            .get_minimum_balance_for_rent_exemption(data_len)
            .map_err(|err| err.into())
    }

    fn addr(&self) -> String {
        self.rpc_client().url()
    }

    fn request_airdrop_with_blockhash(
        &self,
        pubkey: &Pubkey,
        lamports: u64,
        recent_blockhash: &Hash,
    ) -> Result<Signature> {
        self.rpc_client()
            .request_airdrop_with_blockhash(pubkey, lamports, recent_blockhash)
            .map_err(|err| err.into())
    }
}

@@ -1,39 +1,18 @@
use {
    clap::{crate_description, crate_name, App, Arg, ArgMatches},
    solana_clap_utils::input_validators::{is_url, is_url_or_moniker},
    solana_cli_config::{ConfigInput, CONFIG_FILE},
    solana_sdk::{
        fee_calculator::FeeRateGovernor,
        pubkey::Pubkey,
        signature::{read_keypair_file, Keypair},
    },
    std::{net::SocketAddr, process::exit, time::Duration},
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::{
    pubkey::Pubkey,
    signature::{read_keypair_file, Keypair},
};
use std::{net::SocketAddr, process::exit, time::Duration};

const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;

pub enum ExternalClientType {
    // Submits transactions to an Rpc node using an RpcClient
    RpcClient,
    // Submits transactions directly to leaders using a ThinClient, broadcasting to multiple
    // leaders when num_nodes > 1
    ThinClient,
    // Submits transactions directly to leaders using a TpuClient, broadcasting to upcoming leaders
    // via TpuClient default configuration
    TpuClient,
}

impl Default for ExternalClientType {
    fn default() -> Self {
        Self::ThinClient
    }
}

/// Holds the configuration for a single run of the benchmark
pub struct Config {
    pub entrypoint_addr: SocketAddr,
    pub json_rpc_url: String,
    pub websocket_url: String,
    pub faucet_addr: SocketAddr,
    pub id: Keypair,
    pub threads: usize,
    pub num_nodes: usize,
@@ -50,16 +29,13 @@ pub struct Config {
    pub num_lamports_per_account: u64,
    pub target_slots_per_epoch: u64,
    pub target_node: Option<Pubkey>,
    pub external_client_type: ExternalClientType,
    pub use_quic: bool,
}

impl Default for Config {
    fn default() -> Config {
        Config {
            entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
            json_rpc_url: ConfigInput::default().json_rpc_url,
            websocket_url: ConfigInput::default().websocket_url,
            faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
            id: Keypair::new(),
            threads: 4,
            num_nodes: 1,
@@ -76,8 +52,6 @@ impl Default for Config {
            num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
            target_slots_per_epoch: 0,
            target_node: None,
            external_client_type: ExternalClientType::default(),
            use_quic: false,
        }
    }
}
@@ -86,42 +60,6 @@ impl Default for Config {
pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
    App::new(crate_name!()).about(crate_description!())
        .version(version)
        .arg({
            let arg = Arg::with_name("config_file")
                .short("C")
                .long("config")
                .value_name("FILEPATH")
                .takes_value(true)
                .global(true)
                .help("Configuration file to use");
            if let Some(ref config_file) = *CONFIG_FILE {
                arg.default_value(config_file)
            } else {
                arg
            }
        })
        .arg(
            Arg::with_name("json_rpc_url")
                .short("u")
                .long("url")
                .value_name("URL_OR_MONIKER")
                .takes_value(true)
                .global(true)
                .validator(is_url_or_moniker)
                .help(
                    "URL for Solana's JSON RPC or moniker (or their first letter): \
                    [mainnet-beta, testnet, devnet, localhost]",
                ),
        )
        .arg(
            Arg::with_name("websocket_url")
                .long("ws")
                .value_name("URL")
                .takes_value(true)
                .global(true)
                .validator(is_url)
                .help("WebSocket URL for the solana cluster"),
        )
        .arg(
            Arg::with_name("entrypoint")
                .short("n")
@@ -136,8 +74,7 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                .long("faucet")
                .value_name("HOST:PORT")
                .takes_value(true)
                .hidden(true)
                .help("Deprecated. BenchTps no longer queries the faucet directly"),
                .help("Location of the faucet; defaults to entrypoint:FAUCET_PORT"),
        )
        .arg(
            Arg::with_name("identity")
@@ -252,27 +189,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                    "Wait until epochs are this many slots long.",
                ),
        )
        .arg(
            Arg::with_name("rpc_client")
                .long("use-rpc-client")
                .conflicts_with("tpu_client")
                .takes_value(false)
                .help("Submit transactions with a RpcClient")
        )
        .arg(
            Arg::with_name("tpu_client")
                .long("use-tpu-client")
                .conflicts_with("rpc_client")
                .takes_value(false)
                .help("Submit transactions with a TpuClient")
        )
        .arg(
            Arg::with_name("tpu_use_quic")
                .long("tpu-use-quic")
                .takes_value(false)
                .help("Submit transactions via QUIC; only affects ThinClient (default) \
                    or TpuClient sends"),
        )
}

/// Parses a clap `ArgMatches` structure into a `Config`
@@ -283,45 +199,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
pub fn extract_args(matches: &ArgMatches) -> Config {
    let mut args = Config::default();

    let config = if let Some(config_file) = matches.value_of("config_file") {
        solana_cli_config::Config::load(config_file).unwrap_or_default()
    } else {
        solana_cli_config::Config::default()
    };
    let (_, json_rpc_url) = ConfigInput::compute_json_rpc_url_setting(
        matches.value_of("json_rpc_url").unwrap_or(""),
        &config.json_rpc_url,
    );
    args.json_rpc_url = json_rpc_url;

    let (_, websocket_url) = ConfigInput::compute_websocket_url_setting(
        matches.value_of("websocket_url").unwrap_or(""),
        &config.websocket_url,
        matches.value_of("json_rpc_url").unwrap_or(""),
        &config.json_rpc_url,
    );
    args.websocket_url = websocket_url;

    let (_, id_path) = ConfigInput::compute_keypair_path_setting(
        matches.value_of("identity").unwrap_or(""),
        &config.keypair_path,
    );
    if let Ok(id) = read_keypair_file(id_path) {
        args.id = id;
    } else if matches.is_present("identity") {
        panic!("could not parse identity path");
    }

    if matches.is_present("tpu_client") {
        args.external_client_type = ExternalClientType::TpuClient;
    } else if matches.is_present("rpc_client") {
        args.external_client_type = ExternalClientType::RpcClient;
    }

    if matches.is_present("tpu_use_quic") {
        args.use_quic = true;
    }

    if let Some(addr) = matches.value_of("entrypoint") {
        args.entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
            eprintln!("failed to parse entrypoint address: {}", e);
@@ -329,6 +206,18 @@ pub fn extract_args(matches: &ArgMatches) -> Config {
        });
    }

    if let Some(addr) = matches.value_of("faucet") {
        args.faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
            eprintln!("failed to parse faucet address: {}", e);
            exit(1)
        });
    }

    if matches.is_present("identity") {
        args.id = read_keypair_file(matches.value_of("identity").unwrap())
            .expect("can't read client identity");
    }

    if let Some(t) = matches.value_of("threads") {
        args.threads = t.to_string().parse().expect("can't parse threads");
    }

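Aside: extract_args above repeatedly applies the same parse-or-bail pattern for string flags. The sketch below reduces that pattern to plain std so it runs standalone; `parse_flag` is a hypothetical helper, not a function from cli.rs.

use std::process::exit;

/// Parse an optional CLI value, falling back to a default, exiting on garbage.
fn parse_flag(value: Option<&str>, default: usize, name: &str) -> usize {
    match value {
        None => default,
        Some(v) => v.parse().unwrap_or_else(|e| {
            eprintln!("failed to parse {}: {}", name, e);
            exit(1)
        }),
    }
}

fn main() {
    assert_eq!(parse_flag(None, 4, "threads"), 4);
    assert_eq!(parse_flag(Some("16"), 4, "threads"), 16);
}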
@@ -1,72 +0,0 @@
use {
    crate::{
        bench::{fund_keypairs, generate_and_fund_keypairs},
        bench_tps_client::BenchTpsClient,
    },
    log::*,
    solana_genesis::Base64Account,
    solana_sdk::signature::{Keypair, Signer},
    std::{collections::HashMap, fs::File, path::Path, process::exit, sync::Arc},
};

pub fn get_keypairs<T>(
    client: Arc<T>,
    id: &Keypair,
    keypair_count: usize,
    num_lamports_per_account: u64,
    client_ids_and_stake_file: &str,
    read_from_client_file: bool,
) -> Vec<Keypair>
where
    T: 'static + BenchTpsClient + Send + Sync,
{
    if read_from_client_file {
        let path = Path::new(client_ids_and_stake_file);
        let file = File::open(path).unwrap();

        info!("Reading {}", client_ids_and_stake_file);
        let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
        let mut keypairs = vec![];
        let mut last_balance = 0;

        accounts
            .into_iter()
            .for_each(|(keypair, primordial_account)| {
                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
                keypairs.push(Keypair::from_bytes(&bytes).unwrap());
                last_balance = primordial_account.balance;
            });

        if keypairs.len() < keypair_count {
            eprintln!(
                "Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
                keypair_count,
                client_ids_and_stake_file,
                keypairs.len(),
            );
            exit(1);
        }
        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
        // This prevents the amount of storage needed for bench-tps accounts from creeping up
        // across multiple runs.
        keypairs.sort_by_key(|x| x.pubkey().to_string());
        fund_keypairs(
            client,
            id,
            &keypairs,
            keypairs.len().saturating_sub(keypair_count) as u64,
            last_balance,
        )
        .unwrap_or_else(|e| {
            eprintln!("Error could not fund keys: {:?}", e);
            exit(1);
        });
        keypairs
    } else {
        generate_and_fund_keypairs(client, id, keypair_count, num_lamports_per_account)
            .unwrap_or_else(|e| {
                eprintln!("Error could not fund keys: {:?}", e);
                exit(1);
            })
    }
}

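Aside: get_keypairs above rebuilds each Keypair from a JSON array of bytes stored in the client accounts file. A standalone sketch of that round-trip, using only calls that appear in the diff (serde_json plus Keypair::from_bytes):

use solana_sdk::signature::{Keypair, Signer};

fn main() {
    let keypair = Keypair::new();

    // Serialize the way the accounts file stores keys: a JSON array of 64 bytes...
    let json = serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap();

    // ...and decode it back the way get_keypairs does.
    let bytes: Vec<u8> = serde_json::from_str(&json).unwrap();
    let restored = Keypair::from_bytes(&bytes).unwrap();

    assert_eq!(keypair.pubkey(), restored.pubkey());
}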
@@ -1,6 +1,3 @@
#![allow(clippy::integer_arithmetic)]
pub mod bench;
pub mod bench_tps_client;
pub mod cli;
pub mod keypairs;
mod perf_utils;

@@ -1,39 +1,28 @@
#![allow(clippy::integer_arithmetic)]
use {
    log::*,
    solana_bench_tps::{
        bench::{do_bench_tps, generate_keypairs},
        cli::{self, ExternalClientType},
        keypairs::get_keypairs,
    },
    solana_client::{
        connection_cache,
        rpc_client::RpcClient,
        tpu_client::{TpuClient, TpuClientConfig},
    },
    solana_genesis::Base64Account,
    solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
    solana_sdk::{
        commitment_config::CommitmentConfig, fee_calculator::FeeRateGovernor, system_program,
    },
    solana_streamer::socket::SocketAddrSpace,
    std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
};
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_genesis::Base64Account;
use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use solana_streamer::socket::SocketAddrSpace;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};

/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;

fn main() {
    solana_logger::setup_with_default("solana=info");
    solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
    solana_metrics::set_panic_hook("bench-tps");

    let matches = cli::build_args(solana_version::version!()).get_matches();
    let cli_config = cli::extract_args(&matches);

    let cli::Config {
        entrypoint_addr,
        json_rpc_url,
        websocket_url,
        faucet_addr,
        id,
        num_nodes,
        tx_count,
@@ -45,8 +34,6 @@ fn main() {
        multi_client,
        num_lamports_per_account,
        target_node,
        external_client_type,
        use_quic,
        ..
    } = &cli_config;

@@ -82,93 +69,83 @@ fn main() {
    }

    info!("Connecting to the cluster");
    let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
        .unwrap_or_else(|err| {
            eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
            exit(1);
        });

    match external_client_type {
        ExternalClientType::RpcClient => {
            let client = Arc::new(RpcClient::new_with_commitment(
                json_rpc_url.to_string(),
                CommitmentConfig::confirmed(),
            ));
            let keypairs = get_keypairs(
                client.clone(),
                id,
                keypair_count,
                *num_lamports_per_account,
                client_ids_and_stake_file,
                *read_from_client_file,
    let client = if *multi_client {
        let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
        if nodes.len() < num_clients {
            eprintln!(
                "Error: Insufficient nodes discovered. Expecting {} or more",
                num_nodes
            );
            do_bench_tps(client, cli_config, keypairs);
            exit(1);
        }
        ExternalClientType::ThinClient => {
            let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
                .unwrap_or_else(|err| {
                    eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
                    exit(1);
                });
            if *use_quic {
                connection_cache::set_use_quic(true);
        Arc::new(client)
    } else if let Some(target_node) = target_node {
        info!("Searching for target_node: {:?}", target_node);
        let mut target_client = None;
        for node in nodes {
            if node.id == *target_node {
                target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
                break;
            }
            let client = if *multi_client {
                let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
                if nodes.len() < num_clients {
                    eprintln!(
                        "Error: Insufficient nodes discovered. Expecting {} or more",
                        num_nodes
                    );
                    exit(1);
                }
                Arc::new(client)
            } else if let Some(target_node) = target_node {
                info!("Searching for target_node: {:?}", target_node);
                let mut target_client = None;
                for node in nodes {
                    if node.id == *target_node {
                        target_client =
                            Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
                        break;
                    }
                }
                target_client.unwrap_or_else(|| {
                    eprintln!("Target node {} not found", target_node);
                    exit(1);
                })
            } else {
                Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
            };
            let keypairs = get_keypairs(
                client.clone(),
                id,
                keypair_count,
                *num_lamports_per_account,
                client_ids_and_stake_file,
                *read_from_client_file,
            );
            do_bench_tps(client, cli_config, keypairs);
        }
        ExternalClientType::TpuClient => {
            let rpc_client = Arc::new(RpcClient::new_with_commitment(
                json_rpc_url.to_string(),
                CommitmentConfig::confirmed(),
            ));
            if *use_quic {
                connection_cache::set_use_quic(true);
            }
            let client = Arc::new(
                TpuClient::new(rpc_client, websocket_url, TpuClientConfig::default())
                    .unwrap_or_else(|err| {
                        eprintln!("Could not create TpuClient {:?}", err);
                        exit(1);
                    }),
            );
            let keypairs = get_keypairs(
                client.clone(),
                id,
        target_client.unwrap_or_else(|| {
            eprintln!("Target node {} not found", target_node);
            exit(1);
        })
    } else {
        Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
    };

    let keypairs = if *read_from_client_file {
        let path = Path::new(&client_ids_and_stake_file);
        let file = File::open(path).unwrap();

        info!("Reading {}", client_ids_and_stake_file);
        let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
        let mut keypairs = vec![];
        let mut last_balance = 0;

        accounts
            .into_iter()
            .for_each(|(keypair, primordial_account)| {
                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
                keypairs.push(Keypair::from_bytes(&bytes).unwrap());
                last_balance = primordial_account.balance;
            });

        if keypairs.len() < keypair_count {
            eprintln!(
                "Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
                keypair_count,
                *num_lamports_per_account,
                client_ids_and_stake_file,
                *read_from_client_file,
                keypairs.len(),
            );
            do_bench_tps(client, cli_config, keypairs);
            exit(1);
        }
    }
        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
        // This prevents the amount of storage needed for bench-tps accounts from creeping up
        // across multiple runs.
        keypairs.sort_by_key(|x| x.pubkey().to_string());
        keypairs
    } else {
        generate_and_fund_keypairs(
            client.clone(),
            Some(*faucet_addr),
            id,
            keypair_count,
            *num_lamports_per_account,
        )
        .unwrap_or_else(|e| {
            eprintln!("Error could not fund keys: {:?}", e);
            exit(1);
        })
    };

    do_bench_tps(client, cli_config, keypairs);
}

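Aside: the two versions of main() interleaved above differ mostly in how the client is chosen. The sketch below condenses the dispatch into one place, using only the behavior stated in the ExternalClientType enum comments from cli.rs; it is an illustration, not code from the diff.

enum ExternalClientType {
    RpcClient,
    ThinClient,
    TpuClient,
}

fn describe(client_type: &ExternalClientType) -> &'static str {
    match client_type {
        // Submits transactions to an Rpc node using an RpcClient
        ExternalClientType::RpcClient => "send via an RPC node",
        // Submits transactions directly to leaders using a ThinClient
        ExternalClientType::ThinClient => "send directly to leader TPUs",
        // Broadcasts to upcoming leaders via TpuClient default configuration
        ExternalClientType::TpuClient => "send to upcoming leader TPUs",
    }
}

fn main() {
    // ThinClient is the default in cli.rs.
    println!("{}", describe(&ExternalClientType::ThinClient));
}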
@@ -1,64 +1,35 @@
#![allow(clippy::integer_arithmetic)]
use {
    crossbeam_channel::unbounded,
    serial_test::serial,
    solana_bench_tps::{
        bench::{do_bench_tps, generate_and_fund_keypairs},
        cli::Config,
    },
    solana_client::{
        rpc_client::RpcClient,
        thin_client::create_client,
        tpu_client::{TpuClient, TpuClientConfig},
    },
    solana_core::validator::ValidatorConfig,
    solana_faucet::faucet::{run_local_faucet, run_local_faucet_with_port},
    solana_local_cluster::{
        local_cluster::{ClusterConfig, LocalCluster},
        validator_configs::make_identical_validator_configs,
    },
    solana_rpc::rpc::JsonRpcConfig,
    solana_sdk::{
        commitment_config::CommitmentConfig,
        signature::{Keypair, Signer},
    },
    solana_streamer::socket::SocketAddrSpace,
    solana_test_validator::TestValidator,
    std::{sync::Arc, time::Duration},
use serial_test::serial;
use solana_bench_tps::{
    bench::{do_bench_tps, generate_and_fund_keypairs},
    cli::Config,
};
use solana_client::thin_client::create_client;
use solana_core::validator::ValidatorConfig;
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_gossip::cluster_info::VALIDATOR_PORT_RANGE;
use solana_local_cluster::{
    local_cluster::{ClusterConfig, LocalCluster},
    validator_configs::make_identical_validator_configs,
};
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use std::{
    sync::{mpsc::channel, Arc},
    time::Duration,
};

fn test_bench_tps_local_cluster(config: Config) {
    let native_instruction_processors = vec![];

    solana_logger::setup();

    let faucet_keypair = Keypair::new();
    let faucet_pubkey = faucet_keypair.pubkey();
    let (addr_sender, addr_receiver) = unbounded();
    run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
    let faucet_addr = addr_receiver
        .recv_timeout(Duration::from_secs(2))
        .expect("run_local_faucet")
        .expect("faucet_addr");

    const NUM_NODES: usize = 1;
    let mut validator_config = ValidatorConfig::default_for_test();
    validator_config.rpc_config = JsonRpcConfig {
        faucet_addr: Some(faucet_addr),
        ..JsonRpcConfig::default_for_test()
    };
    let cluster = LocalCluster::new(
        &mut ClusterConfig {
            node_stakes: vec![999_990; NUM_NODES],
            cluster_lamports: 200_000_000,
            validator_configs: make_identical_validator_configs(
                &ValidatorConfig {
                    rpc_config: JsonRpcConfig {
                        faucet_addr: Some(faucet_addr),
                        ..JsonRpcConfig::default_for_test()
                    },
                    ..ValidatorConfig::default_for_test()
                },
                &ValidatorConfig::default(),
                NUM_NODES,
            ),
            native_instruction_processors,
@@ -67,55 +38,31 @@ fn test_bench_tps_local_cluster(config: Config) {
        SocketAddrSpace::Unspecified,
    );

    cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000);
    let faucet_keypair = Keypair::new();
    cluster.transfer(
        &cluster.funding_keypair,
        &faucet_keypair.pubkey(),
        100_000_000,
    );

    let client = Arc::new(create_client(
        cluster.entry_point_info.rpc,
        cluster.entry_point_info.tpu,
        (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
        VALIDATOR_PORT_RANGE,
    ));

    let (addr_sender, addr_receiver) = channel();
    run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
    let faucet_addr = addr_receiver
        .recv_timeout(Duration::from_secs(2))
        .expect("run_local_faucet")
        .expect("faucet_addr");

    let lamports_per_account = 100;

    let keypair_count = config.tx_count * config.keypair_multiplier;
    let keypairs = generate_and_fund_keypairs(
        client.clone(),
        &config.id,
        keypair_count,
        lamports_per_account,
    )
    .unwrap();

    let _total = do_bench_tps(client, config, keypairs);

    #[cfg(not(debug_assertions))]
    assert!(_total > 100);
}

fn test_bench_tps_test_validator(config: Config) {
    solana_logger::setup();

    let mint_keypair = Keypair::new();
    let mint_pubkey = mint_keypair.pubkey();

    let faucet_addr = run_local_faucet(mint_keypair, None);

    let test_validator =
        TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);

    let rpc_client = Arc::new(RpcClient::new_with_commitment(
        test_validator.rpc_url(),
        CommitmentConfig::processed(),
    ));
    let websocket_url = test_validator.rpc_pubsub_url();

    let client =
        Arc::new(TpuClient::new(rpc_client, &websocket_url, TpuClientConfig::default()).unwrap());

    let lamports_per_account = 100;

    let keypair_count = config.tx_count * config.keypair_multiplier;
    let keypairs = generate_and_fund_keypairs(
        client.clone(),
        Some(faucet_addr),
        &config.id,
        keypair_count,
        lamports_per_account,
@@ -137,13 +84,3 @@ fn test_bench_tps_local_cluster_solana() {
        ..Config::default()
    });
}

#[test]
#[serial]
fn test_bench_tps_tpu_client() {
    test_bench_tps_test_validator(Config {
        tx_count: 100,
        duration: Duration::from_secs(10),
        ..Config::default()
    });
}

@ -1,32 +0,0 @@
[package]
name = "solana-bloom"
version = "1.11.0"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2021"

[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
serde = { version = "1.0.136", features = ["rc"] }
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }

[lib]
crate-type = ["lib"]
name = "solana_bloom"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[build-dependencies]
rustc_version = "0.4"
@ -1 +0,0 @@
../frozen-abi/build.rs
@ -1,5 +0,0 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;

#[macro_use]
extern crate solana_frozen_abi_macro;
@ -1,32 +0,0 @@
[package]
name = "solana-bucket-map"
version = "1.11.0"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
readme = "../README.md"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
license = "Apache-2.0"
edition = "2021"

[dependencies]
log = { version = "0.4.11" }
memmap2 = "0.5.3"
modular-bitfield = "0.11.2"
rand = "0.7.0"
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tempfile = "3.3.0"

[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.11.0" }

[lib]
crate-type = ["lib"]
name = "solana_bucket_map"

[[bench]]
name = "bucket_map"
@ -1,77 +0,0 @@
#![feature(test)]

macro_rules! DEFINE_NxM_BENCH {
    ($i:ident, $n:literal, $m:literal) => {
        mod $i {
            use super::*;

            #[bench]
            fn bench_insert_baseline_hashmap(bencher: &mut Bencher) {
                do_bench_insert_baseline_hashmap(bencher, $n, $m);
            }

            #[bench]
            fn bench_insert_bucket_map(bencher: &mut Bencher) {
                do_bench_insert_bucket_map(bencher, $n, $m);
            }
        }
    };
}

extern crate test;
use {
    rayon::prelude::*,
    solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig},
    solana_sdk::pubkey::Pubkey,
    std::{collections::hash_map::HashMap, sync::RwLock},
    test::Bencher,
};

type IndexValue = u64;

DEFINE_NxM_BENCH!(dim_01x02, 1, 2);
DEFINE_NxM_BENCH!(dim_02x04, 2, 4);
DEFINE_NxM_BENCH!(dim_04x08, 4, 8);
DEFINE_NxM_BENCH!(dim_08x16, 8, 16);
DEFINE_NxM_BENCH!(dim_16x32, 16, 32);
DEFINE_NxM_BENCH!(dim_32x64, 32, 64);

/// Benchmark insert with HashMap as baseline for N threads inserting M keys each
fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
    let index = RwLock::new(HashMap::new());
    (0..n).into_iter().into_par_iter().for_each(|i| {
        let key = Pubkey::new_unique();
        index
            .write()
            .unwrap()
            .insert(key, vec![(i, IndexValue::default())]);
    });
    bencher.iter(|| {
        (0..n).into_iter().into_par_iter().for_each(|_| {
            for j in 0..m {
                let key = Pubkey::new_unique();
                index
                    .write()
                    .unwrap()
                    .insert(key, vec![(j, IndexValue::default())]);
            }
        })
    });
}

/// Benchmark insert with BucketMap with N buckets for N threads inserting M keys each
fn do_bench_insert_bucket_map(bencher: &mut Bencher, n: usize, m: usize) {
    let index = BucketMap::new(BucketMapConfig::new(n));
    (0..n).into_iter().into_par_iter().for_each(|i| {
        let key = Pubkey::new_unique();
        index.update(&key, |_| Some((vec![(i, IndexValue::default())], 0)));
    });
    bencher.iter(|| {
        (0..n).into_iter().into_par_iter().for_each(|_| {
            for j in 0..m {
                let key = Pubkey::new_unique();
                index.update(&key, |_| Some((vec![(j, IndexValue::default())], 0)));
            }
        })
    });
}
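For reference, each DEFINE_NxM_BENCH! invocation above expands to a module holding two #[bench] wrappers over the shared helpers; e.g. DEFINE_NxM_BENCH!(dim_01x02, 1, 2) is equivalent to:

    mod dim_01x02 {
        use super::*;

        #[bench]
        fn bench_insert_baseline_hashmap(bencher: &mut Bencher) {
            do_bench_insert_baseline_hashmap(bencher, 1, 2);
        }

        #[bench]
        fn bench_insert_bucket_map(bencher: &mut Bencher) {
            do_bench_insert_bucket_map(bencher, 1, 2);
        }
    }
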
@ -1,501 +0,0 @@
use {
    crate::{
        bucket_item::BucketItem,
        bucket_map::BucketMapError,
        bucket_stats::BucketMapStats,
        bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2},
        index_entry::IndexEntry,
        MaxSearch, RefCount,
    },
    rand::{thread_rng, Rng},
    solana_measure::measure::Measure,
    solana_sdk::pubkey::Pubkey,
    std::{
        collections::hash_map::DefaultHasher,
        hash::{Hash, Hasher},
        marker::PhantomData,
        ops::RangeBounds,
        path::PathBuf,
        sync::{
            atomic::{AtomicU64, AtomicUsize, Ordering},
            Arc, Mutex,
        },
    },
};

#[derive(Default)]
pub struct ReallocatedItems {
    // Some if the index was reallocated
    // u64 is the random value associated with the new index
    pub index: Option<(u64, BucketStorage)>,
    // Some for a data bucket reallocation
    // u64 is the data bucket index
    pub data: Option<(u64, BucketStorage)>,
}

#[derive(Default)]
pub struct Reallocated {
    /// > 0 if reallocations are encoded
    pub active_reallocations: AtomicUsize,

    /// actual reallocated bucket
    /// mutex because bucket grow code runs with a read lock
    pub items: Mutex<ReallocatedItems>,
}

impl Reallocated {
    /// specify that a reallocation has occurred
    pub fn add_reallocation(&self) {
        assert_eq!(
            0,
            self.active_reallocations.fetch_add(1, Ordering::Relaxed),
            "Only 1 reallocation can occur at a time"
        );
    }
    /// Return true IFF a reallocation has occurred.
    /// Calling this takes conceptual ownership of the reallocation encoded in the struct.
    pub fn get_reallocated(&self) -> bool {
        self.active_reallocations
            .compare_exchange(1, 0, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}

// >= 2 instances of BucketStorage per 'bucket' in the bucket map. 1 for index, >= 1 for data
pub struct Bucket<T> {
    drives: Arc<Vec<PathBuf>>,
    //index
    pub index: BucketStorage,
    //random offset for the index
    random: u64,
    //storage buckets to store SlotSlice up to a power of 2 in len
    pub data: Vec<BucketStorage>,
    _phantom: PhantomData<T>,
    stats: Arc<BucketMapStats>,

    pub reallocated: Reallocated,
}

impl<T: Clone + Copy> Bucket<T> {
    pub fn new(
        drives: Arc<Vec<PathBuf>>,
        max_search: MaxSearch,
        stats: Arc<BucketMapStats>,
        count: Arc<AtomicU64>,
    ) -> Self {
        let index = BucketStorage::new(
            Arc::clone(&drives),
            1,
            std::mem::size_of::<IndexEntry>() as u64,
            max_search,
            Arc::clone(&stats.index),
            count,
        );
        Self {
            random: thread_rng().gen(),
            drives,
            index,
            data: vec![],
            _phantom: PhantomData::default(),
            stats,
            reallocated: Reallocated::default(),
        }
    }

    pub fn keys(&self) -> Vec<Pubkey> {
        let mut rv = vec![];
        for i in 0..self.index.capacity() {
            if self.index.is_free(i) {
                continue;
            }
            let ix: &IndexEntry = self.index.get(i);
            rv.push(ix.key);
        }
        rv
    }

    pub fn items_in_range<R>(&self, range: &Option<&R>) -> Vec<BucketItem<T>>
    where
        R: RangeBounds<Pubkey>,
    {
        let mut result = Vec::with_capacity(self.index.count.load(Ordering::Relaxed) as usize);
        for i in 0..self.index.capacity() {
            let ii = i % self.index.capacity();
            if self.index.is_free(ii) {
                continue;
            }
            let ix: &IndexEntry = self.index.get(ii);
            let key = ix.key;
            if range.map(|r| r.contains(&key)).unwrap_or(true) {
                let val = ix.read_value(self);
                result.push(BucketItem {
                    pubkey: key,
                    ref_count: ix.ref_count(),
                    slot_list: val.map(|(v, _ref_count)| v.to_vec()).unwrap_or_default(),
                });
            }
        }
        result
    }

    pub fn find_entry(&self, key: &Pubkey) -> Option<(&IndexEntry, u64)> {
        Self::bucket_find_entry(&self.index, key, self.random)
    }

    fn find_entry_mut(&self, key: &Pubkey) -> Option<(&mut IndexEntry, u64)> {
        Self::bucket_find_entry_mut(&self.index, key, self.random)
    }

    fn bucket_find_entry_mut<'a>(
        index: &'a BucketStorage,
        key: &Pubkey,
        random: u64,
    ) -> Option<(&'a mut IndexEntry, u64)> {
        let ix = Self::bucket_index_ix(index, key, random);
        for i in ix..ix + index.max_search() {
            let ii = i % index.capacity();
            if index.is_free(ii) {
                continue;
            }
            let elem: &mut IndexEntry = index.get_mut(ii);
            if elem.key == *key {
                return Some((elem, ii));
            }
        }
        None
    }

    fn bucket_find_entry<'a>(
        index: &'a BucketStorage,
        key: &Pubkey,
        random: u64,
    ) -> Option<(&'a IndexEntry, u64)> {
        let ix = Self::bucket_index_ix(index, key, random);
        for i in ix..ix + index.max_search() {
            let ii = i % index.capacity();
            if index.is_free(ii) {
                continue;
            }
            let elem: &IndexEntry = index.get(ii);
            if elem.key == *key {
                return Some((elem, ii));
            }
        }
        None
    }

    fn bucket_create_key(
        index: &mut BucketStorage,
        key: &Pubkey,
        elem_uid: Uid,
        random: u64,
        is_resizing: bool,
    ) -> Result<u64, BucketMapError> {
        let ix = Self::bucket_index_ix(index, key, random);
        for i in ix..ix + index.max_search() {
            let ii = i as u64 % index.capacity();
            if !index.is_free(ii) {
                continue;
            }
            index.allocate(ii, elem_uid, is_resizing).unwrap();
            let elem: &mut IndexEntry = index.get_mut(ii);
            // These fields will be overwritten after allocation by callers.
            // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here.
            elem.init(key);
            //debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
            return Ok(ii);
        }
        Err(BucketMapError::IndexNoSpace(index.capacity_pow2))
    }

    pub fn addref(&mut self, key: &Pubkey) -> Option<RefCount> {
        let (elem, _) = self.find_entry_mut(key)?;
        elem.ref_count += 1;
        Some(elem.ref_count)
    }

    pub fn unref(&mut self, key: &Pubkey) -> Option<RefCount> {
        let (elem, _) = self.find_entry_mut(key)?;
        elem.ref_count -= 1;
        Some(elem.ref_count)
    }

    fn create_key(&mut self, key: &Pubkey) -> Result<u64, BucketMapError> {
        Self::bucket_create_key(
            &mut self.index,
            key,
            IndexEntry::key_uid(key),
            self.random,
            false,
        )
    }

    pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> {
        //debug!("READ_VALUE: {:?}", key);
        let (elem, _) = self.find_entry(key)?;
        elem.read_value(self)
    }

    pub fn try_write(
        &mut self,
        key: &Pubkey,
        data: &[T],
        ref_count: u64,
    ) -> Result<(), BucketMapError> {
        let best_fit_bucket = IndexEntry::data_bucket_from_num_slots(data.len() as u64);
        if self.data.get(best_fit_bucket as usize).is_none() {
            // fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated
            return Err(BucketMapError::DataNoSpace((best_fit_bucket, 0)));
        }
        let index_entry = self.find_entry_mut(key);
        let (elem, elem_ix) = match index_entry {
            None => {
                let ii = self.create_key(key)?;
                let elem: &mut IndexEntry = self.index.get_mut(ii);
                (elem, ii)
            }
            Some(res) => res,
        };
        elem.ref_count = ref_count;
        let elem_uid = self.index.uid_unchecked(elem_ix);
        let bucket_ix = elem.data_bucket_ix();
        let current_bucket = &self.data[bucket_ix as usize];
        let num_slots = data.len() as u64;
        if best_fit_bucket == bucket_ix && elem.num_slots > 0 {
            // in place update
            let elem_loc = elem.data_loc(current_bucket);
            let slice: &mut [T] = current_bucket.get_mut_cell_slice(elem_loc, data.len() as u64);
            assert_eq!(current_bucket.uid(elem_loc), Some(elem_uid));
            elem.num_slots = num_slots;
            slice.copy_from_slice(data);
            Ok(())
        } else {
            // need to move the allocation to a best fit spot
            let best_bucket = &self.data[best_fit_bucket as usize];
            let cap_power = best_bucket.capacity_pow2;
            let cap = best_bucket.capacity();
            let pos = thread_rng().gen_range(0, cap);
            for i in pos..pos + self.index.max_search() {
                let ix = i % cap;
                if best_bucket.is_free(ix) {
                    let elem_loc = elem.data_loc(current_bucket);
                    let old_slots = elem.num_slots;
                    elem.set_storage_offset(ix);
                    elem.set_storage_capacity_when_created_pow2(best_bucket.capacity_pow2);
                    elem.num_slots = num_slots;
                    if old_slots > 0 {
                        let current_bucket = &mut self.data[bucket_ix as usize];
                        current_bucket.free(elem_loc, elem_uid);
                    }
                    //debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
                    if num_slots > 0 {
                        let best_bucket = &mut self.data[best_fit_bucket as usize];
                        best_bucket.allocate(ix, elem_uid, false).unwrap();
                        let slice = best_bucket.get_mut_cell_slice(ix, num_slots);
                        slice.copy_from_slice(data);
                    }
                    return Ok(());
                }
            }
            Err(BucketMapError::DataNoSpace((best_fit_bucket, cap_power)))
        }
    }

    pub fn delete_key(&mut self, key: &Pubkey) {
        if let Some((elem, elem_ix)) = self.find_entry(key) {
            let elem_uid = self.index.uid_unchecked(elem_ix);
            if elem.num_slots > 0 {
                let ix = elem.data_bucket_ix() as usize;
                let data_bucket = &self.data[ix];
                let loc = elem.data_loc(data_bucket);
                let data_bucket = &mut self.data[ix];
                //debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
                data_bucket.free(loc, elem_uid);
            }
            //debug!("INDEX FREE {:?} {}", key, elem_uid);
            self.index.free(elem_ix, elem_uid);
        }
    }

    pub fn grow_index(&self, current_capacity_pow2: u8) {
        if self.index.capacity_pow2 == current_capacity_pow2 {
            let mut m = Measure::start("grow_index");
            //debug!("GROW_INDEX: {}", current_capacity_pow2);
            let increment = 1;
            for i in increment.. {
                //increasing the capacity by ^4 reduces the
                //likelihood of a re-index collision of 2^(max_search)^2
                //1 in 2^32
                let mut index = BucketStorage::new_with_capacity(
                    Arc::clone(&self.drives),
                    1,
                    std::mem::size_of::<IndexEntry>() as u64,
                    // *2 causes rapid growth of index buckets
                    self.index.capacity_pow2 + i, // * 2,
                    self.index.max_search,
                    Arc::clone(&self.stats.index),
                    Arc::clone(&self.index.count),
                );
                let random = thread_rng().gen();
                let mut valid = true;
                for ix in 0..self.index.capacity() {
                    let uid = self.index.uid(ix);
                    if let Some(uid) = uid {
                        let elem: &IndexEntry = self.index.get(ix);
                        let new_ix =
                            Self::bucket_create_key(&mut index, &elem.key, uid, random, true);
                        if new_ix.is_err() {
                            valid = false;
                            break;
                        }
                        let new_ix = new_ix.unwrap();
                        let new_elem: &mut IndexEntry = index.get_mut(new_ix);
                        *new_elem = *elem;
                        /*
                        let dbg_elem: IndexEntry = *new_elem;
                        assert_eq!(
                            Self::bucket_find_entry(&index, &elem.key, random).unwrap(),
                            (&dbg_elem, new_ix)
                        );
                        */
                    }
                }
                if valid {
                    let sz = index.capacity();
                    {
                        let mut max = self.stats.index.max_size.lock().unwrap();
                        *max = std::cmp::max(*max, sz);
                    }
                    let mut items = self.reallocated.items.lock().unwrap();
                    items.index = Some((random, index));
                    self.reallocated.add_reallocation();
                    break;
                }
            }
            m.stop();
            self.stats.index.resizes.fetch_add(1, Ordering::Relaxed);
            self.stats
                .index
                .resize_us
                .fetch_add(m.as_us(), Ordering::Relaxed);
        }
    }

    pub fn apply_grow_index(&mut self, random: u64, index: BucketStorage) {
        self.random = random;
        self.index = index;
    }

    fn elem_size() -> u64 {
        std::mem::size_of::<T>() as u64
    }

    pub fn apply_grow_data(&mut self, ix: usize, bucket: BucketStorage) {
        if self.data.get(ix).is_none() {
            for i in self.data.len()..ix {
                // insert empty data buckets
                self.data.push(BucketStorage::new(
                    Arc::clone(&self.drives),
                    1 << i,
                    Self::elem_size(),
                    self.index.max_search,
                    Arc::clone(&self.stats.data),
                    Arc::default(),
                ))
            }
            self.data.push(bucket);
        } else {
            self.data[ix] = bucket;
        }
    }

    /// grow a data bucket
    /// The application of the new bucket is deferred until the next write lock.
    pub fn grow_data(&self, data_index: u64, current_capacity_pow2: u8) {
        let new_bucket = BucketStorage::new_resized(
            &self.drives,
            self.index.max_search,
            self.data.get(data_index as usize),
            std::cmp::max(current_capacity_pow2 + 1, DEFAULT_CAPACITY_POW2),
            1 << data_index,
            Self::elem_size(),
            &self.stats.data,
        );
        self.reallocated.add_reallocation();
        let mut items = self.reallocated.items.lock().unwrap();
        items.data = Some((data_index, new_bucket));
    }

    fn bucket_index_ix(index: &BucketStorage, key: &Pubkey, random: u64) -> u64 {
        let uid = IndexEntry::key_uid(key);
        let mut s = DefaultHasher::new();
        uid.hash(&mut s);
        //the locally generated random will make it hard for an attacker
        //to deterministically cause all the pubkeys to land in the same
        //location in any bucket on all validators
        random.hash(&mut s);
        let ix = s.finish();
        ix % index.capacity()
        //debug!( "INDEX_IX: {:?} uid:{} loc: {} cap:{}", key, uid, location, index.capacity() );
    }

    /// grow the appropriate piece. Note this takes an immutable ref.
    /// The actual grow is set into self.reallocated and applied later on a write lock
    pub fn grow(&self, err: BucketMapError) {
        match err {
            BucketMapError::DataNoSpace((data_index, current_capacity_pow2)) => {
                //debug!("GROWING SPACE {:?}", (data_index, current_capacity_pow2));
                self.grow_data(data_index, current_capacity_pow2);
            }
            BucketMapError::IndexNoSpace(current_capacity_pow2) => {
                //debug!("GROWING INDEX {}", sz);
                self.grow_index(current_capacity_pow2);
            }
        }
    }

    /// if a bucket was resized previously with a read lock, then apply that resize now
    pub fn handle_delayed_grows(&mut self) {
        if self.reallocated.get_reallocated() {
            // swap out the bucket that was resized previously with a read lock
            let mut items = ReallocatedItems::default();
            std::mem::swap(&mut items, &mut self.reallocated.items.lock().unwrap());

            if let Some((random, bucket)) = items.index.take() {
                self.apply_grow_index(random, bucket);
            } else {
                // data bucket
                let (i, new_bucket) = items.data.take().unwrap();
                self.apply_grow_data(i as usize, new_bucket);
            }
        }
    }

    pub fn insert(&mut self, key: &Pubkey, value: (&[T], RefCount)) {
        let (new, refct) = value;
        loop {
            let rv = self.try_write(key, new, refct);
            match rv {
                Ok(_) => return,
                Err(err) => {
                    self.grow(err);
                    self.handle_delayed_grows();
                }
            }
        }
    }

    pub fn update<F>(&mut self, key: &Pubkey, mut updatefn: F)
    where
        F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
    {
        let current = self.read_value(key);
        let new = updatefn(current);
        if new.is_none() {
            self.delete_key(key);
            return;
        }
        let (new, refct) = new.unwrap();
        self.insert(key, (&new, refct));
    }
}

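The best_fit_bucket computation in try_write above comes from IndexEntry::data_bucket_from_num_slots (defined in index_entry.rs later in this diff): data bucket i holds slot slices of up to 2^i elements, so the target bucket is the minimum index with 2^index >= num_slots, and an empty slice maps to bucket 0. A few illustrative values of that mapping, written as assertions:

    // Illustrative only: expected outputs of IndexEntry::data_bucket_from_num_slots.
    assert_eq!(IndexEntry::data_bucket_from_num_slots(0), 0); // empty slice: bucket 0
    assert_eq!(IndexEntry::data_bucket_from_num_slots(1), 0); // 2^0 >= 1
    assert_eq!(IndexEntry::data_bucket_from_num_slots(2), 1); // 2^1 >= 2
    assert_eq!(IndexEntry::data_bucket_from_num_slots(3), 2); // 2^2 >= 3
    assert_eq!(IndexEntry::data_bucket_from_num_slots(4), 2); // 2^2 >= 4
    assert_eq!(IndexEntry::data_bucket_from_num_slots(5), 3); // 2^3 >= 5
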
@ -1,142 +0,0 @@
use {
    crate::{
        bucket::Bucket, bucket_item::BucketItem, bucket_map::BucketMapError,
        bucket_stats::BucketMapStats, MaxSearch, RefCount,
    },
    solana_sdk::pubkey::Pubkey,
    std::{
        ops::RangeBounds,
        path::PathBuf,
        sync::{
            atomic::{AtomicU64, Ordering},
            Arc, RwLock, RwLockWriteGuard,
        },
    },
};

type LockedBucket<T> = RwLock<Option<Bucket<T>>>;

pub struct BucketApi<T: Clone + Copy> {
    drives: Arc<Vec<PathBuf>>,
    max_search: MaxSearch,
    pub stats: Arc<BucketMapStats>,

    bucket: LockedBucket<T>,
    count: Arc<AtomicU64>,
}

impl<T: Clone + Copy> BucketApi<T> {
    pub fn new(
        drives: Arc<Vec<PathBuf>>,
        max_search: MaxSearch,
        stats: Arc<BucketMapStats>,
    ) -> Self {
        Self {
            drives,
            max_search,
            stats,
            bucket: RwLock::default(),
            count: Arc::default(),
        }
    }

    /// Get the items for this bucket
    pub fn items_in_range<R>(&self, range: &Option<&R>) -> Vec<BucketItem<T>>
    where
        R: RangeBounds<Pubkey>,
    {
        self.bucket
            .read()
            .unwrap()
            .as_ref()
            .map(|bucket| bucket.items_in_range(range))
            .unwrap_or_default()
    }

    /// Get the Pubkeys
    pub fn keys(&self) -> Vec<Pubkey> {
        self.bucket
            .read()
            .unwrap()
            .as_ref()
            .map_or_else(Vec::default, |bucket| bucket.keys())
    }

    /// Get the values for Pubkey `key`
    pub fn read_value(&self, key: &Pubkey) -> Option<(Vec<T>, RefCount)> {
        self.bucket.read().unwrap().as_ref().and_then(|bucket| {
            bucket
                .read_value(key)
                .map(|(value, ref_count)| (value.to_vec(), ref_count))
        })
    }

    pub fn bucket_len(&self) -> u64 {
        self.count.load(Ordering::Relaxed)
    }

    pub fn delete_key(&self, key: &Pubkey) {
        let mut bucket = self.get_write_bucket();
        if let Some(bucket) = bucket.as_mut() {
            bucket.delete_key(key)
        }
    }

    fn get_write_bucket(&self) -> RwLockWriteGuard<Option<Bucket<T>>> {
        let mut bucket = self.bucket.write().unwrap();
        if bucket.is_none() {
            *bucket = Some(Bucket::new(
                Arc::clone(&self.drives),
                self.max_search,
                Arc::clone(&self.stats),
                Arc::clone(&self.count),
            ));
        } else {
            let write = bucket.as_mut().unwrap();
            write.handle_delayed_grows();
        }
        bucket
    }

    pub fn addref(&self, key: &Pubkey) -> Option<RefCount> {
        self.get_write_bucket()
            .as_mut()
            .and_then(|bucket| bucket.addref(key))
    }

    pub fn unref(&self, key: &Pubkey) -> Option<RefCount> {
        self.get_write_bucket()
            .as_mut()
            .and_then(|bucket| bucket.unref(key))
    }

    pub fn insert(&self, pubkey: &Pubkey, value: (&[T], RefCount)) {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().insert(pubkey, value)
    }

    pub fn grow(&self, err: BucketMapError) {
        // grows are special - they get a read lock and modify 'reallocated'
        // the grown changes are applied the next time there is a write lock taken
        if let Some(bucket) = self.bucket.read().unwrap().as_ref() {
            bucket.grow(err)
        }
    }

    pub fn update<F>(&self, key: &Pubkey, updatefn: F)
    where
        F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
    {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().update(key, updatefn)
    }

    pub fn try_write(
        &self,
        pubkey: &Pubkey,
        value: (&[T], RefCount),
    ) -> Result<(), BucketMapError> {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().try_write(pubkey, value.0, value.1)
    }
}

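As the grow() comment above notes, a failed try_write surfaces a BucketMapError that the caller feeds back into grow(); the resize itself is applied lazily by handle_delayed_grows() inside the next get_write_bucket() call. A minimal caller-side sketch of that protocol, written as if inside this crate and mirroring the bucket_map_test_insert2 test later in this diff:

    // Sketch only: drive try_write/grow by hand instead of relying on the
    // retrying insert().
    use {crate::{bucket_api::BucketApi, RefCount}, solana_sdk::pubkey::Pubkey};

    fn insert_with_manual_grow<T: Clone + Copy>(
        api: &BucketApi<T>,
        key: &Pubkey,
        value: (&[T], RefCount),
    ) {
        loop {
            match api.try_write(key, value) {
                Ok(()) => break,
                // grow() takes only a read lock and stashes the new storage in
                // 'reallocated'; the next write lock swaps it in, so the retry
                // can succeed.
                Err(err) => api.grow(err),
            }
        }
    }
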
@ -1,8 +0,0 @@
use {crate::RefCount, solana_sdk::pubkey::Pubkey};

#[derive(Debug, Default, Clone)]
pub struct BucketItem<T> {
    pub pubkey: Pubkey,
    pub ref_count: RefCount,
    pub slot_list: Vec<T>,
}

@ -1,522 +0,0 @@
//! BucketMap is a mostly contention-free concurrent map backed by MmapMut

use {
    crate::{bucket_api::BucketApi, bucket_stats::BucketMapStats, MaxSearch, RefCount},
    solana_sdk::pubkey::Pubkey,
    std::{convert::TryInto, fmt::Debug, fs, path::PathBuf, sync::Arc},
    tempfile::TempDir,
};

#[derive(Debug, Default, Clone)]
pub struct BucketMapConfig {
    pub max_buckets: usize,
    pub drives: Option<Vec<PathBuf>>,
    pub max_search: Option<MaxSearch>,
}

impl BucketMapConfig {
    /// Create a new BucketMapConfig
    /// NOTE: BucketMap requires that max_buckets is a power of two
    pub fn new(max_buckets: usize) -> BucketMapConfig {
        BucketMapConfig {
            max_buckets,
            ..BucketMapConfig::default()
        }
    }
}

pub struct BucketMap<T: Clone + Copy + Debug> {
    buckets: Vec<Arc<BucketApi<T>>>,
    drives: Arc<Vec<PathBuf>>,
    max_buckets_pow2: u8,
    pub stats: Arc<BucketMapStats>,
    pub temp_dir: Option<TempDir>,
}

impl<T: Clone + Copy + Debug> Drop for BucketMap<T> {
    fn drop(&mut self) {
        if self.temp_dir.is_none() {
            BucketMap::<T>::erase_previous_drives(&self.drives);
        }
    }
}

impl<T: Clone + Copy + Debug> std::fmt::Debug for BucketMap<T> {
    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}

#[derive(Debug)]
pub enum BucketMapError {
    /// (bucket_index, current_capacity_pow2)
    DataNoSpace((u64, u8)),
    /// current_capacity_pow2
    IndexNoSpace(u8),
}

impl<T: Clone + Copy + Debug> BucketMap<T> {
    pub fn new(config: BucketMapConfig) -> Self {
        assert_ne!(
            config.max_buckets, 0,
            "Max number of buckets must be non-zero"
        );
        assert!(
            config.max_buckets.is_power_of_two(),
            "Max number of buckets must be a power of two"
        );
        // this should be <= 1 << DEFAULT_CAPACITY or we end up searching the same items over and over - probably not a big deal since it is so small anyway
        const MAX_SEARCH: MaxSearch = 32;
        let max_search = config.max_search.unwrap_or(MAX_SEARCH);

        if let Some(drives) = config.drives.as_ref() {
            Self::erase_previous_drives(drives);
        }
        let mut temp_dir = None;
        let drives = config.drives.unwrap_or_else(|| {
            temp_dir = Some(TempDir::new().unwrap());
            vec![temp_dir.as_ref().unwrap().path().to_path_buf()]
        });
        let drives = Arc::new(drives);

        let stats = Arc::default();
        let buckets = (0..config.max_buckets)
            .into_iter()
            .map(|_| {
                Arc::new(BucketApi::new(
                    Arc::clone(&drives),
                    max_search,
                    Arc::clone(&stats),
                ))
            })
            .collect();

        // A simple log2 function that is correct if x is a power of two
        let log2 = |x: usize| usize::BITS - x.leading_zeros() - 1;

        Self {
            buckets,
            drives,
            max_buckets_pow2: log2(config.max_buckets) as u8,
            stats,
            temp_dir,
        }
    }

    fn erase_previous_drives(drives: &[PathBuf]) {
        drives.iter().for_each(|folder| {
            let _ = fs::remove_dir_all(&folder);
            let _ = fs::create_dir_all(&folder);
        })
    }

    pub fn num_buckets(&self) -> usize {
        self.buckets.len()
    }

    /// Get the values for Pubkey `key`
    pub fn read_value(&self, key: &Pubkey) -> Option<(Vec<T>, RefCount)> {
        self.get_bucket(key).read_value(key)
    }

    /// Delete the Pubkey `key`
    pub fn delete_key(&self, key: &Pubkey) {
        self.get_bucket(key).delete_key(key);
    }

    /// Update Pubkey `key`'s value with 'value'
    pub fn insert(&self, key: &Pubkey, value: (&[T], RefCount)) {
        self.get_bucket(key).insert(key, value)
    }

    /// Update Pubkey `key`'s value with 'value'
    pub fn try_insert(&self, key: &Pubkey, value: (&[T], RefCount)) -> Result<(), BucketMapError> {
        self.get_bucket(key).try_write(key, value)
    }

    /// Update Pubkey `key`'s value with function `updatefn`
    pub fn update<F>(&self, key: &Pubkey, updatefn: F)
    where
        F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
    {
        self.get_bucket(key).update(key, updatefn)
    }

    pub fn get_bucket(&self, key: &Pubkey) -> &Arc<BucketApi<T>> {
        self.get_bucket_from_index(self.bucket_ix(key))
    }

    pub fn get_bucket_from_index(&self, ix: usize) -> &Arc<BucketApi<T>> {
        &self.buckets[ix]
    }

    /// Get the bucket index for Pubkey `key`
    pub fn bucket_ix(&self, key: &Pubkey) -> usize {
        if self.max_buckets_pow2 > 0 {
            let location = read_be_u64(key.as_ref());
            (location >> (u64::BITS - self.max_buckets_pow2 as u32)) as usize
        } else {
            0
        }
    }

    /// Increment the refcount for Pubkey `key`
    pub fn addref(&self, key: &Pubkey) -> Option<RefCount> {
        let ix = self.bucket_ix(key);
        let bucket = &self.buckets[ix];
        bucket.addref(key)
    }

    /// Decrement the refcount for Pubkey `key`
    pub fn unref(&self, key: &Pubkey) -> Option<RefCount> {
        let ix = self.bucket_ix(key);
        let bucket = &self.buckets[ix];
        bucket.unref(key)
    }
}

/// Look at the first 8 bytes of the input and reinterpret them as a u64
fn read_be_u64(input: &[u8]) -> u64 {
    assert!(input.len() >= std::mem::size_of::<u64>());
    u64::from_be_bytes(input[0..std::mem::size_of::<u64>()].try_into().unwrap())
}

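bucket_ix above selects a bucket from the top max_buckets_pow2 bits of the big-endian u64 formed from the key's first 8 bytes. A worked sketch of that arithmetic: with max_buckets = 4 (so max_buckets_pow2 = 2), a key whose first byte is 0xC0 lands in bucket 3:

    // Illustrative only: how the top bits pick the bucket when max_buckets_pow2 == 2.
    let location: u64 = 0xC000_0000_0000_0000; // read_be_u64 of a key starting with 0xC0
    let max_buckets_pow2: u32 = 2;
    let ix = (location >> (u64::BITS - max_buckets_pow2)) as usize;
    assert_eq!(ix, 3); // 0b11, the top two bits of 0xC0
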
#[cfg(test)]
mod tests {
    use {
        super::*,
        rand::{thread_rng, Rng},
        std::{collections::HashMap, sync::RwLock},
    };

    #[test]
    fn bucket_map_test_insert() {
        let key = Pubkey::new_unique();
        let config = BucketMapConfig::new(1 << 1);
        let index = BucketMap::new(config);
        index.update(&key, |_| Some((vec![0], 0)));
        assert_eq!(index.read_value(&key), Some((vec![0], 0)));
    }

    #[test]
    fn bucket_map_test_insert2() {
        for pass in 0..3 {
            let key = Pubkey::new_unique();
            let config = BucketMapConfig::new(1 << 1);
            let index = BucketMap::new(config);
            let bucket = index.get_bucket(&key);
            if pass == 0 {
                index.insert(&key, (&[0], 0));
            } else {
                let result = index.try_insert(&key, (&[0], 0));
                assert!(result.is_err());
                assert_eq!(index.read_value(&key), None);
                if pass == 2 {
                    // another call to try insert again - should still return an error
                    let result = index.try_insert(&key, (&[0], 0));
                    assert!(result.is_err());
                    assert_eq!(index.read_value(&key), None);
                }
                bucket.grow(result.unwrap_err());
                let result = index.try_insert(&key, (&[0], 0));
                assert!(result.is_ok());
            }
            assert_eq!(index.read_value(&key), Some((vec![0], 0)));
        }
    }

    #[test]
    fn bucket_map_test_update2() {
        let key = Pubkey::new_unique();
        let config = BucketMapConfig::new(1 << 1);
        let index = BucketMap::new(config);
        index.insert(&key, (&[0], 0));
        assert_eq!(index.read_value(&key), Some((vec![0], 0)));
        index.insert(&key, (&[1], 0));
        assert_eq!(index.read_value(&key), Some((vec![1], 0)));
    }

    #[test]
    fn bucket_map_test_update() {
        let key = Pubkey::new_unique();
        let config = BucketMapConfig::new(1 << 1);
        let index = BucketMap::new(config);
        index.update(&key, |_| Some((vec![0], 0)));
        assert_eq!(index.read_value(&key), Some((vec![0], 0)));
        index.update(&key, |_| Some((vec![1], 0)));
        assert_eq!(index.read_value(&key), Some((vec![1], 0)));
    }

    #[test]
    fn bucket_map_test_update_to_0_len() {
        solana_logger::setup();
        let key = Pubkey::new_unique();
        let config = BucketMapConfig::new(1 << 1);
        let index = BucketMap::new(config);
        index.update(&key, |_| Some((vec![0], 1)));
        assert_eq!(index.read_value(&key), Some((vec![0], 1)));
        // sets len to 0, updates in place
        index.update(&key, |_| Some((vec![], 1)));
        assert_eq!(index.read_value(&key), Some((vec![], 1)));
        // sets len to 0, doesn't update in place - finds a new place, which causes us to no longer have an allocation in data
        index.update(&key, |_| Some((vec![], 2)));
        assert_eq!(index.read_value(&key), Some((vec![], 2)));
        // sets len to 1, doesn't update in place - finds a new place
        index.update(&key, |_| Some((vec![1], 2)));
        assert_eq!(index.read_value(&key), Some((vec![1], 2)));
    }

    #[test]
    fn bucket_map_test_delete() {
        let config = BucketMapConfig::new(1 << 1);
        let index = BucketMap::new(config);
        for i in 0..10 {
            let key = Pubkey::new_unique();
            assert_eq!(index.read_value(&key), None);

            index.update(&key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(&key), Some((vec![i], 0)));

            index.delete_key(&key);
            assert_eq!(index.read_value(&key), None);

            index.update(&key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(&key), Some((vec![i], 0)));
            index.delete_key(&key);
        }
    }

    #[test]
    fn bucket_map_test_delete_2() {
        let config = BucketMapConfig::new(1 << 2);
        let index = BucketMap::new(config);
        for i in 0..100 {
            let key = Pubkey::new_unique();
            assert_eq!(index.read_value(&key), None);

            index.update(&key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(&key), Some((vec![i], 0)));

            index.delete_key(&key);
            assert_eq!(index.read_value(&key), None);

            index.update(&key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(&key), Some((vec![i], 0)));
            index.delete_key(&key);
        }
    }

    #[test]
    fn bucket_map_test_n_drives() {
        let config = BucketMapConfig::new(1 << 2);
        let index = BucketMap::new(config);
        for i in 0..100 {
            let key = Pubkey::new_unique();
            index.update(&key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(&key), Some((vec![i], 0)));
        }
    }
    #[test]
    fn bucket_map_test_grow_read() {
        let config = BucketMapConfig::new(1 << 2);
        let index = BucketMap::new(config);
        let keys: Vec<Pubkey> = (0..100).into_iter().map(|_| Pubkey::new_unique()).collect();
        for k in 0..keys.len() {
            let key = &keys[k];
            let i = read_be_u64(key.as_ref());
            index.update(key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(key), Some((vec![i], 0)));
            for (ix, key) in keys.iter().enumerate() {
                let i = read_be_u64(key.as_ref());
                //debug!("READ: {:?} {}", key, i);
                let expected = if ix <= k { Some((vec![i], 0)) } else { None };
                assert_eq!(index.read_value(key), expected);
            }
        }
    }

    #[test]
    fn bucket_map_test_n_delete() {
        let config = BucketMapConfig::new(1 << 2);
        let index = BucketMap::new(config);
        let keys: Vec<Pubkey> = (0..20).into_iter().map(|_| Pubkey::new_unique()).collect();
        for key in keys.iter() {
            let i = read_be_u64(key.as_ref());
            index.update(key, |_| Some((vec![i], 0)));
            assert_eq!(index.read_value(key), Some((vec![i], 0)));
        }
        for key in keys.iter() {
            let i = read_be_u64(key.as_ref());
            //debug!("READ: {:?} {}", key, i);
            assert_eq!(index.read_value(key), Some((vec![i], 0)));
        }
        for k in 0..keys.len() {
            let key = &keys[k];
            index.delete_key(key);
            assert_eq!(index.read_value(key), None);
            for key in keys.iter().skip(k + 1) {
                let i = read_be_u64(key.as_ref());
                assert_eq!(index.read_value(key), Some((vec![i], 0)));
            }
        }
    }

    #[test]
    fn hashmap_compare() {
        use std::sync::Mutex;
        solana_logger::setup();
        let maps = (0..2)
            .into_iter()
            .map(|max_buckets_pow2| {
                let config = BucketMapConfig::new(1 << max_buckets_pow2);
                BucketMap::new(config)
            })
            .collect::<Vec<_>>();
        let hash_map = RwLock::new(HashMap::<Pubkey, (Vec<(usize, usize)>, RefCount)>::new());
        let max_slot_list_len = 3;
        let all_keys = Mutex::new(vec![]);

        let gen_rand_value = || {
            let count = thread_rng().gen_range(0, max_slot_list_len);
            let v = (0..count)
                .into_iter()
                .map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
                .collect::<Vec<_>>();
            let rc = thread_rng().gen::<RefCount>();
            (v, rc)
        };

        let get_key = || {
            let mut keys = all_keys.lock().unwrap();
            if keys.is_empty() {
                return None;
            }
            let len = keys.len();
            Some(keys.remove(thread_rng().gen_range(0, len)))
        };
        let return_key = |key| {
            let mut keys = all_keys.lock().unwrap();
            keys.push(key);
        };

        let verify = || {
            let mut maps = maps
                .iter()
                .map(|map| {
                    let mut r = vec![];
                    for bin in 0..map.num_buckets() {
                        r.append(
                            &mut map.buckets[bin]
                                .items_in_range(&None::<&std::ops::RangeInclusive<Pubkey>>),
                        );
                    }
                    r
                })
                .collect::<Vec<_>>();
            let hm = hash_map.read().unwrap();
            for (k, v) in hm.iter() {
                for map in maps.iter_mut() {
                    for i in 0..map.len() {
                        if k == &map[i].pubkey {
                            assert_eq!(map[i].slot_list, v.0);
                            assert_eq!(map[i].ref_count, v.1);
                            map.remove(i);
                            break;
                        }
                    }
                }
            }
            for map in maps.iter() {
                assert!(map.is_empty());
            }
        };
        let mut initial = 100; // put this many items in to start

        // do random operations: insert, update, delete, add/unref in random order
        // verify consistency between hashmap and all bucket maps
        for i in 0..10000 {
            if initial > 0 {
                initial -= 1;
            }
            if initial > 0 || thread_rng().gen_range(0, 5) == 0 {
                // insert
                let k = solana_sdk::pubkey::new_rand();
                let v = gen_rand_value();
                hash_map.write().unwrap().insert(k, v.clone());
                let insert = thread_rng().gen_range(0, 2) == 0;
                maps.iter().for_each(|map| {
                    if insert {
                        map.insert(&k, (&v.0, v.1))
                    } else {
                        map.update(&k, |current| {
                            assert!(current.is_none());
                            Some(v.clone())
                        })
                    }
                });
                return_key(k);
            }
            if thread_rng().gen_range(0, 10) == 0 {
                // update
                if let Some(k) = get_key() {
                    let hm = hash_map.read().unwrap();
                    let (v, rc) = gen_rand_value();
                    let v_old = hm.get(&k);
                    let insert = thread_rng().gen_range(0, 2) == 0;
                    maps.iter().for_each(|map| {
                        if insert {
                            map.insert(&k, (&v, rc))
                        } else {
                            map.update(&k, |current| {
                                assert_eq!(current, v_old.map(|(v, rc)| (&v[..], *rc)), "{}", k);
                                Some((v.clone(), rc))
                            })
                        }
                    });
                    drop(hm);
                    hash_map.write().unwrap().insert(k, (v, rc));
                    return_key(k);
                }
            }
            if thread_rng().gen_range(0, 20) == 0 {
                // delete
                if let Some(k) = get_key() {
                    let mut hm = hash_map.write().unwrap();
                    hm.remove(&k);
                    maps.iter().for_each(|map| {
                        map.delete_key(&k);
                    });
                }
            }
            if thread_rng().gen_range(0, 10) == 0 {
                // add/unref
                if let Some(k) = get_key() {
                    let mut inc = thread_rng().gen_range(0, 2) == 0;
                    let mut hm = hash_map.write().unwrap();
                    let (v, mut rc) = hm.get(&k).map(|(v, rc)| (v.to_vec(), *rc)).unwrap();
                    if !inc && rc == 0 {
                        // can't decrement rc=0
                        inc = true;
                    }
                    rc = if inc { rc + 1 } else { rc - 1 };
                    hm.insert(k, (v.to_vec(), rc));
                    maps.iter().for_each(|map| {
                        if thread_rng().gen_range(0, 2) == 0 {
                            map.update(&k, |current| Some((current.unwrap().0.to_vec(), rc)))
                        } else if inc {
                            map.addref(&k);
                        } else {
                            map.unref(&k);
                        }
                    });

                    return_key(k);
                }
            }
            if i % 1000 == 0 {
                verify();
            }
        }
        verify();
    }
}

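Taken together, the tests above exercise the whole public surface of the map. A minimal end-to-end usage sketch of BucketMap, assuming only this crate and solana_sdk:

    use solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig};
    use solana_sdk::pubkey::Pubkey;

    fn main() {
        // 4 buckets; backing files go to a TempDir since no drives are configured.
        let map: BucketMap<u64> = BucketMap::new(BucketMapConfig::new(4));
        let key = Pubkey::new_unique();
        map.update(&key, |_current| Some((vec![42], 1))); // storage grows on demand
        assert_eq!(map.read_value(&key), Some((vec![42], 1)));
        map.delete_key(&key);
        assert_eq!(map.read_value(&key), None);
    }
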
@ -1,17 +0,0 @@
use std::sync::{atomic::AtomicU64, Arc, Mutex};

#[derive(Debug, Default)]
pub struct BucketStats {
    pub resizes: AtomicU64,
    pub max_size: Mutex<u64>,
    pub resize_us: AtomicU64,
    pub new_file_us: AtomicU64,
    pub flush_file_us: AtomicU64,
    pub mmap_us: AtomicU64,
}

#[derive(Debug, Default)]
pub struct BucketMapStats {
    pub index: Arc<BucketStats>,
    pub data: Arc<BucketStats>,
}

@ -1,424 +0,0 @@
use {
    crate::{bucket_stats::BucketStats, MaxSearch},
    memmap2::MmapMut,
    rand::{thread_rng, Rng},
    solana_measure::measure::Measure,
    std::{
        fs::{remove_file, OpenOptions},
        io::{Seek, SeekFrom, Write},
        path::PathBuf,
        sync::{
            atomic::{AtomicU64, Ordering},
            Arc,
        },
    },
};

/*
capacity_pow2  capacity
 1             2
 2             4
 3             8
 4             16
 5             32
 6             64
 7             128
 8             256
 9             512
10             1,024
11             2,048
12             4,096
13             8,192
14             16,384
23             8,388,608
24             16,777,216
*/
pub const DEFAULT_CAPACITY_POW2: u8 = 5;

/// A Header UID of 0 indicates that the header is unlocked
const UID_UNLOCKED: Uid = 0;

pub(crate) type Uid = u64;

#[repr(C)]
struct Header {
    lock: u64,
}

impl Header {
    /// try to lock this entry with 'uid'
    /// return true if it could be locked
    fn try_lock(&mut self, uid: Uid) -> bool {
        if self.lock == UID_UNLOCKED {
            self.lock = uid;
            true
        } else {
            false
        }
    }
    /// mark this entry as unlocked
    fn unlock(&mut self, expected: Uid) {
        assert_eq!(expected, self.lock);
        self.lock = UID_UNLOCKED;
    }
    /// uid that has locked this entry or None if unlocked
    fn uid(&self) -> Option<Uid> {
        if self.lock == UID_UNLOCKED {
            None
        } else {
            Some(self.lock)
        }
    }
    /// true if this entry is unlocked
    fn is_unlocked(&self) -> bool {
        self.lock == UID_UNLOCKED
    }
}

pub struct BucketStorage {
    path: PathBuf,
    mmap: MmapMut,
    pub cell_size: u64,
    pub capacity_pow2: u8,
    pub count: Arc<AtomicU64>,
    pub stats: Arc<BucketStats>,
    pub max_search: MaxSearch,
}

#[derive(Debug)]
pub enum BucketStorageError {
    AlreadyAllocated,
}

impl Drop for BucketStorage {
    fn drop(&mut self) {
        let _ = remove_file(&self.path);
    }
}

impl BucketStorage {
    pub fn new_with_capacity(
        drives: Arc<Vec<PathBuf>>,
        num_elems: u64,
        elem_size: u64,
        capacity_pow2: u8,
        max_search: MaxSearch,
        stats: Arc<BucketStats>,
        count: Arc<AtomicU64>,
    ) -> Self {
        let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
        let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &stats);
        Self {
            path,
            mmap,
            cell_size,
            count,
            capacity_pow2,
            stats,
            max_search,
        }
    }

    pub fn max_search(&self) -> u64 {
        self.max_search as u64
    }

    pub fn new(
        drives: Arc<Vec<PathBuf>>,
        num_elems: u64,
        elem_size: u64,
        max_search: MaxSearch,
        stats: Arc<BucketStats>,
        count: Arc<AtomicU64>,
    ) -> Self {
        Self::new_with_capacity(
            drives,
            num_elems,
            elem_size,
            DEFAULT_CAPACITY_POW2,
            max_search,
            stats,
            count,
        )
    }

    /// return ref to header of item 'ix' in mmapped file
    fn header_ptr(&self, ix: u64) -> &Header {
        let ix = (ix * self.cell_size) as usize;
        let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
        unsafe {
            let hdr = hdr_slice.as_ptr() as *const Header;
            hdr.as_ref().unwrap()
        }
    }

    /// return mutable ref to header of item 'ix' in mmapped file
    fn header_mut_ptr(&mut self, ix: u64) -> &mut Header {
        let ix = (ix * self.cell_size) as usize;
        let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::<Header>()];
        unsafe {
            let hdr = hdr_slice.as_mut_ptr() as *mut Header;
            hdr.as_mut().unwrap()
        }
    }

    /// return uid allocated at index 'ix' or None if vacant
    pub fn uid(&self, ix: u64) -> Option<Uid> {
        assert!(ix < self.capacity(), "bad index size");
        self.header_ptr(ix).uid()
    }

    /// true if the entry at index 'ix' is free (as opposed to being allocated)
    pub fn is_free(&self, ix: u64) -> bool {
        // note that the terminology in the implementation is locked or unlocked.
        // but our api is allocate/free
        self.header_ptr(ix).is_unlocked()
    }

    /// caller knows id is not empty
    pub fn uid_unchecked(&self, ix: u64) -> Uid {
        self.uid(ix).unwrap()
    }

    /// 'is_resizing' true if caller is resizing the index (so don't increment count)
    /// 'is_resizing' false if caller is adding an item to the index (so increment count)
    pub fn allocate(
        &mut self,
        ix: u64,
        uid: Uid,
        is_resizing: bool,
    ) -> Result<(), BucketStorageError> {
        assert!(ix < self.capacity(), "allocate: bad index size");
        assert!(UID_UNLOCKED != uid, "allocate: bad uid");
        let mut e = Err(BucketStorageError::AlreadyAllocated);
        //debug!("ALLOC {} {}", ix, uid);
        if self.header_mut_ptr(ix).try_lock(uid) {
            e = Ok(());
            if !is_resizing {
                self.count.fetch_add(1, Ordering::Relaxed);
            }
        }
        e
    }

    pub fn free(&mut self, ix: u64, uid: Uid) {
        assert!(ix < self.capacity(), "bad index size");
        assert!(UID_UNLOCKED != uid, "free: bad uid");
        self.header_mut_ptr(ix).unlock(uid);
        self.count.fetch_sub(1, Ordering::Relaxed);
    }

    pub fn get<T: Sized>(&self, ix: u64) -> &T {
        assert!(ix < self.capacity(), "bad index size");
        let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
        let end = start + std::mem::size_of::<T>();
        let item_slice: &[u8] = &self.mmap[start..end];
        unsafe {
            let item = item_slice.as_ptr() as *const T;
            &*item
        }
    }

    pub fn get_empty_cell_slice<T: Sized>(&self) -> &[T] {
        let len = 0;
        let item_slice: &[u8] = &self.mmap[0..0];
        unsafe {
            let item = item_slice.as_ptr() as *const T;
            std::slice::from_raw_parts(item, len as usize)
        }
    }

    pub fn get_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &[T] {
        assert!(ix < self.capacity(), "bad index size");
        let ix = self.cell_size * ix;
        let start = ix as usize + std::mem::size_of::<Header>();
        let end = start + std::mem::size_of::<T>() * len as usize;
        //debug!("GET slice {} {}", start, end);
        let item_slice: &[u8] = &self.mmap[start..end];
        unsafe {
            let item = item_slice.as_ptr() as *const T;
            std::slice::from_raw_parts(item, len as usize)
        }
    }

    #[allow(clippy::mut_from_ref)]
    pub fn get_mut<T: Sized>(&self, ix: u64) -> &mut T {
        assert!(ix < self.capacity(), "bad index size");
        let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
        let end = start + std::mem::size_of::<T>();
        let item_slice: &[u8] = &self.mmap[start..end];
        unsafe {
            let item = item_slice.as_ptr() as *mut T;
            &mut *item
        }
    }

    #[allow(clippy::mut_from_ref)]
    pub fn get_mut_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &mut [T] {
        assert!(ix < self.capacity(), "bad index size");
        let ix = self.cell_size * ix;
        let start = ix as usize + std::mem::size_of::<Header>();
        let end = start + std::mem::size_of::<T>() * len as usize;
        //debug!("GET mut slice {} {}", start, end);
        let item_slice: &[u8] = &self.mmap[start..end];
        unsafe {
            let item = item_slice.as_ptr() as *mut T;
            std::slice::from_raw_parts_mut(item, len as usize)
        }
    }

    fn new_map(
        drives: &[PathBuf],
        cell_size: usize,
        capacity_pow2: u8,
        stats: &BucketStats,
    ) -> (MmapMut, PathBuf) {
        let mut measure_new_file = Measure::start("measure_new_file");
        let capacity = 1u64 << capacity_pow2;
        let r = thread_rng().gen_range(0, drives.len());
        let drive = &drives[r];
        let pos = format!("{}", thread_rng().gen_range(0, u128::MAX),);
        let file = drive.join(pos);
        let mut data = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(file.clone())
            .map_err(|e| {
                panic!(
                    "Unable to create data file {} in current dir({:?}): {:?}",
                    file.display(),
                    std::env::current_dir(),
                    e
                );
            })
            .unwrap();

        // Theoretical performance optimization: write a zero to the end of
        // the file so that we won't have to resize it later, which may be
        // expensive.
        //debug!("GROWING file {}", capacity * cell_size as u64);
        data.seek(SeekFrom::Start(capacity * cell_size as u64 - 1))
            .unwrap();
        data.write_all(&[0]).unwrap();
        data.seek(SeekFrom::Start(0)).unwrap();
        measure_new_file.stop();
        let mut measure_flush = Measure::start("measure_flush");
        data.flush().unwrap(); // can we skip this?
        measure_flush.stop();
        let mut measure_mmap = Measure::start("measure_mmap");
        let res = (unsafe { MmapMut::map_mut(&data).unwrap() }, file);
        measure_mmap.stop();
        stats
            .new_file_us
            .fetch_add(measure_new_file.as_us(), Ordering::Relaxed);
        stats
            .flush_file_us
            .fetch_add(measure_flush.as_us(), Ordering::Relaxed);
        stats
            .mmap_us
            .fetch_add(measure_mmap.as_us(), Ordering::Relaxed);
        res
    }

    /// copy contents from 'old_bucket' to 'self'
    fn copy_contents(&mut self, old_bucket: &Self) {
        let mut m = Measure::start("grow");
        let old_cap = old_bucket.capacity();
        let old_map = &old_bucket.mmap;

        let increment = self.capacity_pow2 - old_bucket.capacity_pow2;
        let index_grow = 1 << increment;
        (0..old_cap as usize).into_iter().for_each(|i| {
            let old_ix = i * old_bucket.cell_size as usize;
            let new_ix = old_ix * index_grow;
            let dst_slice: &[u8] = &self.mmap[new_ix..new_ix + old_bucket.cell_size as usize];
            let src_slice: &[u8] = &old_map[old_ix..old_ix + old_bucket.cell_size as usize];

            unsafe {
                let dst = dst_slice.as_ptr() as *mut u8;
                let src = src_slice.as_ptr() as *const u8;
                std::ptr::copy_nonoverlapping(src, dst, old_bucket.cell_size as usize);
            };
        });
        m.stop();
        self.stats.resizes.fetch_add(1, Ordering::Relaxed);
        self.stats.resize_us.fetch_add(m.as_us(), Ordering::Relaxed);
    }

    /// allocate a new bucket, copying data from 'bucket'
    pub fn new_resized(
        drives: &Arc<Vec<PathBuf>>,
        max_search: MaxSearch,
        bucket: Option<&Self>,
        capacity_pow_2: u8,
        num_elems: u64,
        elem_size: u64,
        stats: &Arc<BucketStats>,
    ) -> Self {
        let mut new_bucket = Self::new_with_capacity(
            Arc::clone(drives),
            num_elems,
            elem_size,
            capacity_pow_2,
            max_search,
            Arc::clone(stats),
            bucket
                .map(|bucket| Arc::clone(&bucket.count))
                .unwrap_or_default(),
        );
        if let Some(bucket) = bucket {
            new_bucket.copy_contents(bucket);
        }
        let sz = new_bucket.capacity();
        {
            let mut max = new_bucket.stats.max_size.lock().unwrap();
            *max = std::cmp::max(*max, sz);
        }
        new_bucket
    }

    /// Return the number of cells this bucket can hold
    pub fn capacity(&self) -> u64 {
        1 << self.capacity_pow2
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_bucket_storage() {
        let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
        let paths: Vec<PathBuf> = [tmpdir1]
            .iter()
            .filter(|x| std::fs::create_dir_all(x).is_ok())
            .cloned()
            .collect();
        assert!(!paths.is_empty());

        let mut storage =
            BucketStorage::new(Arc::new(paths), 1, 1, 1, Arc::default(), Arc::default());
        let ix = 0;
        let uid = Uid::MAX;
        assert!(storage.is_free(ix));
        assert!(storage.allocate(ix, uid, false).is_ok());
        assert!(storage.allocate(ix, uid, false).is_err());
        assert!(!storage.is_free(ix));
        assert_eq!(storage.uid(ix), Some(uid));
        assert_eq!(storage.uid_unchecked(ix), uid);
        storage.free(ix, uid);
        assert!(storage.is_free(ix));
        assert_eq!(storage.uid(ix), None);
        let uid = 1;
        assert!(storage.is_free(ix));
        assert!(storage.allocate(ix, uid, false).is_ok());
        assert!(storage.allocate(ix, uid, false).is_err());
        assert!(!storage.is_free(ix));
        assert_eq!(storage.uid(ix), Some(uid));
        assert_eq!(storage.uid_unchecked(ix), uid);
        storage.free(ix, uid);
        assert!(storage.is_free(ix));
        assert_eq!(storage.uid(ix), None);
    }
}

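The accessors above all share the same layout arithmetic: each cell is a Header followed by num_elems elements, so the payload of cell ix starts at byte ix * cell_size + size_of::<Header>(). A worked sketch of that math, assuming the 8-byte Header above and hypothetical 24-byte elements:

    // Illustrative only: byte layout used by get()/get_cell_slice().
    let elem_size: u64 = 24; // hypothetical element size
    let num_elems: u64 = 1;
    let header = std::mem::size_of::<u64>() as u64; // Header is a single u64 lock
    let cell_size = elem_size * num_elems + header; // 32 bytes per cell
    let ix = 5;
    let data_start = ix * cell_size + header; // where cell 5's payload begins
    assert_eq!(data_start, 168);
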
@ -1,186 +0,0 @@
#![allow(dead_code)]

use {
    crate::{
        bucket::Bucket,
        bucket_storage::{BucketStorage, Uid},
        RefCount,
    },
    modular_bitfield::prelude::*,
    solana_sdk::{clock::Slot, pubkey::Pubkey},
    std::{
        collections::hash_map::DefaultHasher,
        fmt::Debug,
        hash::{Hash, Hasher},
    },
};

#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
// one instance of this per item in the index
// stored in the index bucket
pub struct IndexEntry {
    pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
    pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
    storage_cap_and_offset: PackedStorage,
    // if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
    pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
}

/// Pack the storage offset and capacity-when-created-pow2 fields into a single u64
#[bitfield(bits = 64)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
struct PackedStorage {
    capacity_when_created_pow2: B8,
    offset: B56,
}

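A hand-rolled sketch of the same 8/56-bit split, assuming modular_bitfield's LSB-first layout (fields are packed from the least significant bit in declaration order); the function name is ours, not the crate's:

// capacity_when_created_pow2 occupies bits 0..8, offset occupies bits 8..64
fn pack(capacity_when_created_pow2: u8, offset: u64) -> u64 {
    assert!(offset < (1u64 << 56), "offset must fit into 7 bytes");
    (capacity_when_created_pow2 as u64) | (offset << 8)
}

fn main() {
    let packed = pack(4, 3);
    assert_eq!(packed & 0xff, 4); // capacity_when_created_pow2
    assert_eq!(packed >> 8, 3);   // offset
}
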
impl IndexEntry {
    pub fn init(&mut self, pubkey: &Pubkey) {
        self.key = *pubkey;
        self.ref_count = 0;
        self.storage_cap_and_offset = PackedStorage::default();
        self.num_slots = 0;
    }

    pub fn set_storage_capacity_when_created_pow2(
        &mut self,
        storage_capacity_when_created_pow2: u8,
    ) {
        self.storage_cap_and_offset
            .set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
    }

    pub fn set_storage_offset(&mut self, storage_offset: u64) {
        self.storage_cap_and_offset
            .set_offset_checked(storage_offset)
            .expect("New storage offset must fit into 7 bytes!")
    }

    /// Return the smallest bucket index that fits the slot slice.
    /// Since bucket size is 2^index, the return value is the
    /// min index such that 2^index >= num_slots, i.e.
    /// index = ceiling(log2(num_slots)).
    /// Special case: when the slot slice is empty, return the 0th index.
    pub fn data_bucket_from_num_slots(num_slots: Slot) -> u64 {
        // Compute the ceiling of log2 for integer
        if num_slots == 0 {
            0
        } else {
            (Slot::BITS - (num_slots - 1).leading_zeros()) as u64
        }
    }

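The mapping in the doc comment, restated standalone with a few worked values (the function name is ours; `Slot` is `u64` here):

fn ceil_log2(num_slots: u64) -> u64 {
    if num_slots == 0 {
        0
    } else {
        (u64::BITS - (num_slots - 1).leading_zeros()) as u64
    }
}

fn main() {
    assert_eq!(ceil_log2(0), 0); // empty slot slice -> bucket 0
    assert_eq!(ceil_log2(1), 0); // 2^0 >= 1
    assert_eq!(ceil_log2(2), 1); // 2^1 >= 2
    assert_eq!(ceil_log2(5), 3); // 2^3 = 8 >= 5
    assert_eq!(ceil_log2(8), 3); // exact powers of two stay put
}
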
    pub fn data_bucket_ix(&self) -> u64 {
        Self::data_bucket_from_num_slots(self.num_slots)
    }

    pub fn ref_count(&self) -> RefCount {
        self.ref_count
    }

    fn storage_capacity_when_created_pow2(&self) -> u8 {
        self.storage_cap_and_offset.capacity_when_created_pow2()
    }

    fn storage_offset(&self) -> u64 {
        self.storage_cap_and_offset.offset()
    }

    // This function maps the original data location into an index in the current bucket storage.
    // This is coupled with how we resize bucket storages.
    pub fn data_loc(&self, storage: &BucketStorage) -> u64 {
        self.storage_offset() << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2())
    }

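Concrete (hypothetical) numbers make the shift clearer: an entry recorded at offset 3 when its storage held 2^4 cells, read back after the storage has grown to 2^6 cells, now lives at cell 3 << (6 - 4) = 12, matching the cell spreading performed by `copy_contents`:

fn main() {
    let storage_offset: u64 = 3;           // recorded when capacity was 2^4
    let capacity_when_created_pow2: u8 = 4;
    let current_capacity_pow2: u8 = 6;     // storage has doubled twice since
    let loc = storage_offset << (current_capacity_pow2 - capacity_when_created_pow2);
    assert_eq!(loc, 12);
}
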
    pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
        let data_bucket_ix = self.data_bucket_ix();
        let data_bucket = &bucket.data[data_bucket_ix as usize];
        let slice = if self.num_slots > 0 {
            let loc = self.data_loc(data_bucket);
            let uid = Self::key_uid(&self.key);
            assert_eq!(Some(uid), bucket.data[data_bucket_ix as usize].uid(loc));
            bucket.data[data_bucket_ix as usize].get_cell_slice(loc, self.num_slots)
        } else {
            // num_slots is 0. This means we don't have an actual allocation.
            // can we trust that the data_bucket is even safe?
            bucket.data[data_bucket_ix as usize].get_empty_cell_slice()
        };
        Some((slice, self.ref_count))
    }

    pub fn key_uid(key: &Pubkey) -> Uid {
        let mut s = DefaultHasher::new();
        key.hash(&mut s);
        // keep the uid nonzero: 0 marks a free cell in BucketStorage
        s.finish().max(1u64)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    impl IndexEntry {
        pub fn new(key: Pubkey) -> Self {
            IndexEntry {
                key,
                ref_count: 0,
                storage_cap_and_offset: PackedStorage::default(),
                num_slots: 0,
            }
        }
    }

    /// verify that accessors for storage_offset and capacity_when_created are
    /// correct and independent
    #[test]
    fn test_api() {
        for offset in [0, 1, u32::MAX as u64] {
            let mut index = IndexEntry::new(solana_sdk::pubkey::new_rand());
            if offset != 0 {
                index.set_storage_offset(offset);
            }
            assert_eq!(index.storage_offset(), offset);
            assert_eq!(index.storage_capacity_when_created_pow2(), 0);
            for pow in [1, 255, 0] {
                index.set_storage_capacity_when_created_pow2(pow);
                assert_eq!(index.storage_offset(), offset);
                assert_eq!(index.storage_capacity_when_created_pow2(), pow);
            }
        }
    }

    #[test]
    fn test_size() {
        assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
        assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
    }

    #[test]
    #[should_panic(expected = "New storage offset must fit into 7 bytes!")]
    fn test_set_storage_offset_value_too_large() {
        let too_big = 1 << 56;
        let mut index = IndexEntry::new(Pubkey::new_unique());
        index.set_storage_offset(too_big);
    }

    #[test]
    fn test_data_bucket_from_num_slots() {
        for n in 0..512 {
            assert_eq!(
                IndexEntry::data_bucket_from_num_slots(n),
                (n as f64).log2().ceil() as u64
            );
        }
        assert_eq!(IndexEntry::data_bucket_from_num_slots(u32::MAX as u64), 32);
        assert_eq!(
            IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 1),
            32
        );
        assert_eq!(
            IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 2),
            33
        );
    }
}
@@ -1,11 +0,0 @@
#![allow(clippy::integer_arithmetic)]
mod bucket;
pub mod bucket_api;
mod bucket_item;
pub mod bucket_map;
mod bucket_stats;
mod bucket_storage;
mod index_entry;

pub type MaxSearch = u8;
pub type RefCount = u64;
@@ -1,48 +0,0 @@
use {
    rayon::prelude::*,
    solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig},
    solana_measure::measure::Measure,
    solana_sdk::pubkey::Pubkey,
    std::path::PathBuf,
};
#[test]
#[ignore]
fn bucket_map_test_mt() {
    let threads = 4096;
    let items = 4096;
    let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
    let tmpdir2 = PathBuf::from("/mnt/data/0").join("bucket_map_test_mt");
    let paths: Vec<PathBuf> = [tmpdir1, tmpdir2]
        .iter()
        .filter(|x| std::fs::create_dir_all(x).is_ok())
        .cloned()
        .collect();
    assert!(!paths.is_empty());
    let index = BucketMap::new(BucketMapConfig {
        max_buckets: 1 << 12,
        drives: Some(paths.clone()),
        ..BucketMapConfig::default()
    });
    (0..threads).into_iter().into_par_iter().for_each(|_| {
        let key = Pubkey::new_unique();
        index.update(&key, |_| Some((vec![0u64], 0)));
    });
    let mut timer = Measure::start("bucket_map_test_mt");
    (0..threads).into_iter().into_par_iter().for_each(|_| {
        for _ in 0..items {
            let key = Pubkey::new_unique();
            let ix: u64 = index.bucket_ix(&key) as u64;
            index.update(&key, |_| Some((vec![ix], 0)));
            assert_eq!(index.read_value(&key), Some((vec![ix], 0)));
        }
    });
    timer.stop();
    println!("time: {}ns per item", timer.as_ns() / (threads * items));
    let mut total = 0;
    for tmpdir in paths.iter() {
        let folder_size = fs_extra::dir::get_size(tmpdir).unwrap();
        total += folder_size;
        std::fs::remove_dir_all(tmpdir).unwrap();
    }
    println!("overhead: {}bytes per item", total / (threads * items));
}
@@ -9,8 +9,5 @@ for a in "$@"; do
  fi
done

set -ex
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
  "$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
fi
set -x
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"

@@ -1,365 +0,0 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#

set -e
cd "$(dirname "$0")"/..

output_file=${1:-/dev/stderr}

if [[ -n $CI_PULL_REQUEST ]]; then
  IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
  if [[ ${#affected_files[*]} -eq 0 ]]; then
    echo "Unable to determine the files affected by this PR"
    exit 1
  fi
else
  affected_files=()
fi

annotate() {
  if [[ -n $BUILDKITE ]]; then
    buildkite-agent annotate "$@"
  fi
}

# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be
# affected, return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
#     affects .rs$    -- any file or directory ending in .rs
#     affects .rs     -- also matches foo.rs.bar
#     affects ^snap/  -- anything under the snap/ subdirectory
#     affects snap/   -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
#     affects !^docs/ -- anything *not* under the docs/ subdirectory
#
affects() {
  if [[ -z $CI_PULL_REQUEST ]]; then
    # affected_files metadata is not currently available for non-PR builds,
    # so assume the worst (affected)
    return 0
  fi
  # Assume everything needs to be tested when any Dockerfile changes
  for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
    if [[ ${pattern:0:1} = "!" ]]; then
      for file in "${affected_files[@]}"; do
        if [[ ! $file =~ ${pattern:1} ]]; then
          return 0 # affected
        fi
      done
    else
      for file in "${affected_files[@]}"; do
        if [[ $file =~ $pattern ]]; then
          return 0 # affected
        fi
      done
    fi
  done

  return 1 # not affected
}

# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
  if [[ -z $CI_PULL_REQUEST ]]; then
    # affected_files metadata is not currently available for non-PR builds,
    # so assume the worst (affected)
    return 0
  fi

  for file in "${affected_files[@]}"; do
    declare matched=false
    for pattern in "$@"; do
      if [[ $file =~ $pattern ]]; then
        matched=true
      fi
    done
    if ! $matched; then
      return 0 # affected
    fi
  done

  return 1 # not affected
}

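A quick illustration of the difference from `affects()`, with a hypothetical file list:

# Hypothetical example: with affected_files=(docs/intro.md src/lib.rs)
#   affects_other_than ^docs/ .md$   # returns 0 (affected): src/lib.rs matches neither pattern
#   affects_other_than ^docs/ .rs$   # returns 1 (not affected): every file matches some pattern
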
start_pipeline() {
  echo "# $*" > "$output_file"
  echo "steps:" >> "$output_file"
}

command_step() {
  cat >> "$output_file" <<EOF
  - name: "$1"
    command: "$2"
    timeout_in_minutes: $3
    artifact_paths: "log-*.txt"
EOF
}


trigger_secondary_step() {
  cat >> "$output_file" <<"EOF"
  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
    build:
      message: "${BUILDKITE_MESSAGE}"
      commit: "${BUILDKITE_COMMIT}"
      branch: "${BUILDKITE_BRANCH}"
      env:
        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}

wait_step() {
  echo "  - wait" >> "$output_file"
}

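For orientation, substituting arguments into the `command_step` heredoc above, a call like the sanity step used later in this script appends YAML of this shape (sketch):

# command_step sanity "ci/test-sanity.sh" 5   appends to $output_file:
#   - name: "sanity"
#     command: "ci/test-sanity.sh"
#     timeout_in_minutes: 5
#     artifact_paths: "log-*.txt"
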
all_test_steps() {
  command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
  wait_step

  # Coverage...
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-coverage.sh \
             ^scripts/coverage.sh \
      ; then
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
    wait_step
  else
    annotate --style info --context test-coverage \
      "Coverage skipped as no .rs files were modified"
  fi

  # Coverage in disk...
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-coverage.sh \
             ^scripts/coverage-in-disk.sh \
      ; then
    command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
    wait_step
  else
    annotate --style info --context test-coverage \
      "Coverage skipped as no .rs files were modified"
  fi
  # Full test suite
  command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
  wait_step

  # BPF test suite
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-bpf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "ci/test-stable-bpf.sh"
    name: "stable-bpf"
    timeout_in_minutes: 20
    artifact_paths: "bpf-dumps.tar.bz2"
    agents:
      - "queue=default"
EOF
  else
    annotate --style info \
      "Stable-BPF skipped as no relevant files were modified"
  fi

  # Perf test suite
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-perf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 20
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
EOF
  else
    annotate --style info \
      "Stable-perf skipped as no relevant files were modified"
  fi

  # Downstream backwards compatibility
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-perf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
             ^scripts/build-downstream-projects.sh \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "scripts/build-downstream-projects.sh"
    name: "downstream-projects"
    timeout_in_minutes: 30
EOF
  else
    annotate --style info \
      "downstream-projects skipped as no relevant files were modified"
  fi

  # Downstream Anchor projects backwards compatibility
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-perf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
             ^scripts/build-downstream-anchor-projects.sh \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "scripts/build-downstream-anchor-projects.sh"
    name: "downstream-anchor-projects"
    timeout_in_minutes: 10
EOF
  else
    annotate --style info \
      "downstream-anchor-projects skipped as no relevant files were modified"
  fi

  # Wasm support
  if affects \
             ^ci/test-wasm.sh \
             ^ci/test-stable.sh \
             ^sdk/ \
      ; then
    command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
  else
    annotate --style info \
      "wasm skipped as no relevant files were modified"
  fi

  # Benches...
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-coverage.sh \
             ^ci/test-bench.sh \
      ; then
    command_step bench "ci/test-bench.sh" 30
  else
    annotate --style info --context test-bench \
      "Bench skipped as no .rs files were modified"
  fi

  command_step "local-cluster" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
    40

  command_step "local-cluster-flakey" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
    10

  command_step "local-cluster-slow" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
    40
}

pull_or_push_steps() {
  command_step sanity "ci/test-sanity.sh" 5
  wait_step

  # Check for any .sh file changes
  if affects .sh$; then
    command_step shellcheck "ci/shellcheck.sh" 5
    wait_step
  fi

  # Run the full test suite by default, skipping only if modifications are local
  # to some particular areas of the tree
  if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
    all_test_steps
  fi

  # web3.js, explorer and docs changes run on Travis or Github actions...
}


if [[ -n $BUILDKITE_TAG ]]; then
  start_pipeline "Tag pipeline for $BUILDKITE_TAG"

  annotate --style info --context release-tag \
    "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"

  # Jump directly to the secondary build to publish release artifacts quickly
  trigger_secondary_step
  exit 0
fi


if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  echo "+++ Affected files in this PR"
  for file in "${affected_files[@]}"; do
    echo "- $file"
  done

  start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"

  # Add helpful link back to the corresponding Github Pull Request
  annotate --style info --context pr-backlink \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"

  if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
    command_step dependabot "ci/dependabot-pr.sh" 5
    wait_step
  fi
  pull_or_push_steps
  exit 0
fi

start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0
@@ -102,8 +102,6 @@ command_step() {
    command: "$2"
    timeout_in_minutes: $3
    artifact_paths: "log-*.txt"
    agents:
      - "queue=solana"
EOF
}

@@ -139,7 +137,7 @@ all_test_steps() {
             ^ci/test-coverage.sh \
             ^scripts/coverage.sh \
      ; then
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
    command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
    wait_step
  else
    annotate --style info --context test-coverage \
@@ -147,19 +145,7 @@ all_test_steps() {
  fi

  # Full test suite
  command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70

  # Docs tests
  if affects \
             .rs$ \
             ^ci/rust-version.sh \
             ^ci/test-docs.sh \
      ; then
    command_step doctest "ci/test-docs.sh" 15
  else
    annotate --style info --context test-docs \
      "Docs skipped as no .rs files were modified"
  fi
  command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
  wait_step

  # BPF test suite
@@ -182,7 +168,7 @@ all_test_steps() {
    timeout_in_minutes: 20
    artifact_paths: "bpf-dumps.tar.bz2"
    agents:
      - "queue=solana"
      - "queue=default"
EOF
  else
    annotate --style info \
@@ -235,53 +221,11 @@ EOF
  - command: "scripts/build-downstream-projects.sh"
    name: "downstream-projects"
    timeout_in_minutes: 30
    agents:
      - "queue=solana"
EOF
  else
    annotate --style info \
      "downstream-projects skipped as no relevant files were modified"
  fi

  # Downstream Anchor projects backwards compatibility
  if affects \
             .rs$ \
             Cargo.lock$ \
             Cargo.toml$ \
             ^ci/rust-version.sh \
             ^ci/test-stable-perf.sh \
             ^ci/test-stable.sh \
             ^ci/test-local-cluster.sh \
             ^core/build.rs \
             ^fetch-perf-libs.sh \
             ^programs/ \
             ^sdk/ \
             ^scripts/build-downstream-anchor-projects.sh \
      ; then
    cat >> "$output_file" <<"EOF"
  - command: "scripts/build-downstream-anchor-projects.sh"
    name: "downstream-anchor-projects"
    timeout_in_minutes: 10
    agents:
      - "queue=solana"
EOF
  else
    annotate --style info \
      "downstream-anchor-projects skipped as no relevant files were modified"
  fi

  # Wasm support
  if affects \
             ^ci/test-wasm.sh \
             ^ci/test-stable.sh \
             ^sdk/ \
      ; then
    command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
  else
    annotate --style info \
      "wasm skipped as no relevant files were modified"
  fi

  # Benches...
  if affects \
             .rs$ \
@@ -299,15 +243,7 @@ EOF

  command_step "local-cluster" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
    40

  command_step "local-cluster-flakey" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
    10

  command_step "local-cluster-slow" \
    ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
    40
    45
}

pull_or_push_steps() {
@@ -326,7 +262,7 @@ pull_or_push_steps() {
    all_test_steps
  fi

  # web3.js, explorer and docs changes run on Travis or Github actions...
  # web3.js, explorer and docs changes run on Travis...
}

@@ -19,8 +19,3 @@ steps:
    timeout_in_minutes: 240
    name: "publish crate"
    branches: "!master"
  - command: "ci/publish-tarball.sh"
    agents:
      - "queue=release-build-aarch64-apple-darwin"
    timeout_in_minutes: 60
    name: "publish tarball (aarch64-apple-darwin)"

@@ -23,7 +23,7 @@ echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates"
name="dependabot-buildkite"
api_base="https://api.github.com/repos/solana-labs/solana/pulls"
pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+')
branch=$(curl -s "$api_base/$pr_num" | python3 -c 'import json,sys;print(json.load(sys.stdin)["head"]["ref"])')
branch=$(curl -s "$api_base/$pr_num" | python -c 'import json,sys;print json.load(sys.stdin)["head"]["ref"]')

git add :**/Cargo.lock
EMAIL="dependabot-buildkite@noreply.solana.com" \
@@ -8,6 +8,11 @@ src_root="$(readlink -f "${here}/..")"
cd "${src_root}"

cargo_audit_ignores=(
  # failure is officially deprecated/unmaintained
  #
  # Blocked on multiple upstream crates removing their `failure` dependency.
  --ignore RUSTSEC-2020-0036

  # `net2` crate has been deprecated; use `socket2` instead
  #
  # Blocked on https://github.com/paritytech/jsonrpc/issues/575
@@ -25,15 +30,27 @@ cargo_audit_ignores=(

  # generic-array: arr! macro erases lifetimes
  #
  # Blocked on new spl dependencies on solana-program v1.9
  # due to curve25519-dalek dependency
  # Blocked on libsecp256k1 releasing with upgraded dependencies
  # https://github.com/paritytech/libsecp256k1/issues/66
  --ignore RUSTSEC-2020-0146

  # chrono: Potential segfault in `localtime_r` invocations
  # hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
  #
  # Blocked due to no safe upgrade
  # https://github.com/chronotope/chrono/issues/499
  --ignore RUSTSEC-2020-0159
  # Blocked on jsonrpc removing dependency on unmaintained `websocket`
  # https://github.com/paritytech/jsonrpc/issues/605
  --ignore RUSTSEC-2021-0078

  # hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
  #
  # Blocked on jsonrpc removing dependency on unmaintained `websocket`
  # https://github.com/paritytech/jsonrpc/issues/605
  --ignore RUSTSEC-2021-0079

  # tar: Links in archive can create arbitrary directories
  #
  # Blocked on `tar` releasing safe upgrade
  # https://github.com/alexcrichton/tar-rs/issues/238
  --ignore RUSTSEC-2021-0080

)
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.60.0
FROM solanalabs/rust:1.52.1
ARG date

RUN set -x \
@@ -3,14 +3,8 @@ set -ex

cd "$(dirname "$0")"

platform=()
if [[ $(uname -m) = arm64 ]]; then
  # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
  platform+=(--platform linux/amd64)
fi

nightlyDate=${1:-$(date +%Y-%m-%d)}
docker build "${platform[@]}" -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" .
docker build -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" .

maybeEcho=
if [[ -z $CI ]]; then
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.60.0
FROM rust:1.52.1

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -11,34 +11,28 @@ RUN set -x \
    && apt-get install apt-transport-https \
    && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
    && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
    && curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
    && apt update \
    && apt install -y \
    buildkite-agent \
    clang \
    clang-7 \
    cmake \
    lcov \
    libudev-dev \
    libclang-common-7-dev \
    mscgen \
    nodejs \
    net-tools \
    rsync \
    sudo \
    golang \
    unzip \
    \
    && apt remove -y libcurl4-openssl-dev \
    && rm -rf /var/lib/apt/lists/* \
    && node --version \
    && npm --version \
    && rustup component add rustfmt \
    && rustup component add clippy \
    && rustup target add wasm32-unknown-unknown \
    && cargo install cargo-audit \
    && cargo install svgbob_cli \
    && cargo install mdbook \
    && cargo install mdbook-linkcheck \
    && cargo install svgbob_cli \
    && cargo install wasm-pack \
    && rustc --version \
    && cargo --version \
    && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
@@ -3,14 +3,7 @@ set -ex

cd "$(dirname "$0")"


platform=()
if [[ $(uname -m) = arm64 ]]; then
  # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
  platform+=(--platform linux/amd64)
fi

docker build "${platform[@]}" -t solanalabs/rust .
docker build -t solanalabs/rust .

read -r rustc version _ < <(docker run solanalabs/rust rustc --version)
[[ $rustc = rustc ]]
16 ci/env.sh
@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
  export CI_BRANCH=$BUILDKITE_BRANCH
  export CI_BUILD_ID=$BUILDKITE_BUILD_ID
  if [[ $BUILDKITE_COMMIT = HEAD ]]; then
    BUILDKITE_COMMIT="$(git rev-parse HEAD)"
  fi
  export CI_COMMIT=$BUILDKITE_COMMIT
  export CI_JOB_ID=$BUILDKITE_JOB_ID
  # The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
  export CI_BASE_BRANCH=$BUILDKITE_BRANCH
  export CI_PULL_REQUEST=
fi

case "$(uname -s)" in
Linux)
  export CI_OS_NAME=linux
  ;;
Darwin)
  export CI_OS_NAME=osx
  ;;
*)
  ;;
esac

export CI_OS_NAME=linux
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
  # The solana-secondary pipeline should use the slug of the pipeline that
  # triggered it
@@ -70,7 +70,7 @@ for Cargo_toml in $Cargo_tomls; do
  rm -rf crate-test
  "$cargo" stable init crate-test
  cd crate-test/
  echo "${crate_name} = \"=${expectedCrateVersion}\"" >> Cargo.toml
  echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
  echo "[workspace]" >> Cargo.toml
  "$cargo" stable check
) && really_uploaded=1