Compare commits

166 Commits

Commit SHA1s:
7ca7f8604d e2b5f2dd9c 3652bd57a9 5077d6bfb3 f0ee3e9deb babad39846 c15aa4a968 3124a88284
e76a2065e3 45f8e453a9 20f9c12855 4218414c87 60c91d386f e477501687 20463e141e e699462ed3
8b345f3258 56436a6271 805ea6f469 1db1d173fc 11476038cd a669ef3abb dbbdfa1dbb 768c6b4bef
8bcc04c275 2fd822887f e2c8aa0847 9b049402c9 d0e1779893 929ffc5a4e 1f63fb06f1 b49aa125c9
55836d133e 277e402d55 0ab8312b23 bc4c5c5a97 1a9aa78129 798a6db915 0a4a3fd37e 66242eab41
7f0d4f0656 acba8d6026 1ff9555099 72a13e2a72 74cdfc2213 7b8e5a9f47 80525ac862 c14f98c6fc
c6edfc3944 b95c493d66 5871462241 53bb826375 c769bcc418 f06a4c7861 0cae099d12 4bc3653906
3e7050983a 9f1bb75445 139bb32dba 158f6f3725 e33f9ea6b5 473037db86 b0e14ea83c 782a549613
c805f7dc4e 782829152e da6f09afb8 004b1b9c3f 2f8d0f88d6 177d241160 5323622842 c852923347
5dc4410d58 da4642d634 a264be1791 9aff121949 a7f4d1487a 11e43e1654 82be47bc18 6498e4fbf6
9978971bd9 e28ac2c377 ef296aa7db 43e7107f65 752fa29390 7bb7b42356 2a7fc744f9 90e3da0389
1a62bcee42 b83a4cae90 05ef21cd3b dfa27b04d7 880b04906e 1fe0b1e516 f9fd4bd24c c55a11d160
92118de0e1 0d9802a2cd f6beede01b ff48ea20de dd9cb18d65 71932aed0a 24dc6680e1 61d9d40e48
e9b40db319 316356861d e07c00710a bc47c80610 14baa511f0 e773faeb24 42847516a2 47e9a1ae4f
549a154394 dca00d1bde 45ce1b4f96 a9232c0633 3da254c745 9ba3ee9683 b0addba2a9 bb59525ff8
acd25124d4 d718ab2491 1860aacd1f d4bbb7f516 d1c0f4b4f1 b72b837ba2 fde85c96c0 121418dad2
f44f94fe23 55a4481022 e859ad37a8 1a28c7fc12 c706a07764 59568e5776 33ca8fa72a 4bb66a81fb
468c14b14f 03e505897a 5205eb382e b07b6e56fa bcc890e705 07d14f6f07 03b213e296 1bfce24c9f
94b2565969 2896fdb603 50970bc8f9 10df45b173 d3b8129593 f7fb5aebac 9311a6e356 8c706892df
7f2b11756c f324547600 36e8977f1d b88db2689e 1584ec220c fb366a7236 b903158543 9dad9c6333
a6658b9d75 a97feedcc1 8021bce41f d8fa19336c 191483cf9f 1eb8314d42
.buildkite/env/secrets.ejson (vendored, 14 changed lines)
@@ -1,12 +1,12 @@
{
  "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
  "environment": {
    "CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
    "CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
    "GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
    "GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
    "INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
    "INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
    "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
    "CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
    "CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
    "GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
    "GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
    "INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
    "INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
    "INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
  }
}
@@ -1,18 +0,0 @@
root: ./docs/src

structure:
  readme: introduction.md
  summary: SUMMARY.md

redirects:
  wallet: ./wallet-guide/README.md
  wallet/app-wallets: ./wallet-guide/apps.md
  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
  wallet/cli-wallets: ./wallet-guide/cli.md
  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
  wallet/support: ./wallet-guide/support.md
.gitignore (vendored, 4 changed lines)
@@ -23,3 +23,7 @@ log-*/
/.idea/
/solana.iml
/.vscode/

# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
.travis.yml (97 changed lines)
@@ -1,46 +1,71 @@
os:
  - osx
  - windows

language: rust
rust:
  - stable

install:
  - source ci/rust-version.sh

script:
  - source ci/env.sh
  - ci/publish-tarball.sh


branches:
  only:
    - master
    - /^v\d+\.\d+/

if: type IN (api, cron) OR tag IS present

notifications:
  slack:
    on_success: change
    secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=

deploy:
  - provider: s3
    access_key_id: $AWS_ACCESS_KEY_ID
    secret_access_key: $AWS_SECRET_ACCESS_KEY
    bucket: release.solana.com
    region: us-west-1
    skip_cleanup: true
    acl: public_read
    local_dir: travis-s3-upload
    on:
      all_branches: true
  - provider: releases
    api_key: $GITHUB_TOKEN
    skip_cleanup: true
    file_glob: true
    file: travis-release-upload/*
    on:
      tags: true
os: linux
dist: bionic
language: minimal

jobs:
  include:
    - &release-artifacts
      if: type = push
      name: "macOS release artifacts"
      os: osx
      language: rust
      rust:
        - stable
      install:
        - source ci/rust-version.sh
      script:
        - source ci/env.sh
        - ci/publish-tarball.sh
      deploy:
        - provider: s3
          access_key_id: $AWS_ACCESS_KEY_ID
          secret_access_key: $AWS_SECRET_ACCESS_KEY
          bucket: release.solana.com
          region: us-west-1
          skip_cleanup: true
          acl: public_read
          local_dir: travis-s3-upload
          on:
            all_branches: true
        - provider: releases
          token: $GITHUB_TOKEN
          skip_cleanup: true
          file_glob: true
          file: travis-release-upload/*
          on:
            tags: true
    - <<: *release-artifacts
      name: "Windows release artifacts"
      os: windows

    # docs pull request or commit
    - name: "docs"
      if: type IN (push, pull_request) OR tag IS present
      language: node_js
      node_js:
        - "node"

      services:
        - docker

      cache:
        directories:
          - ~/.npm

      before_install:
        - .travis/affects.sh docs/ .travis || travis_terminate 0
        - cd docs/
        - source .travis/before_install.sh

      script:
        - source .travis/script.sh
.travis/affects.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
#
# Check if files in the commit range match one or more prefixes
#

# Always run the job if we are on a tagged release
if [[ -n "$TRAVIS_TAG" ]]; then
  exit 0
fi

(
  set -x
  git diff --name-only "$TRAVIS_COMMIT_RANGE"
)

for file in $(git diff --name-only "$TRAVIS_COMMIT_RANGE"); do
  for prefix in "$@"; do
    if [[ $file =~ ^"$prefix" ]]; then
      exit 0
    fi
  done
done

echo "No modifications to $*"
exit 1
Cargo.lock (generated, 2542 changed lines)
File diff suppressed because it is too large

Cargo.toml
@@ -25,6 +25,7 @@ members = [
    "log-analyzer",
    "merkle-tree",
    "stake-o-matic",
    "storage-bigtable",
    "streamer",
    "measure",
    "metrics",
@@ -52,6 +53,7 @@ members = [
    "sys-tuner",
    "tokens",
    "transaction-status",
    "account-decoder",
    "upload-perf",
    "net-utils",
    "version",
@@ -63,6 +65,4 @@ members = [

exclude = [
    "programs/bpf",
    "programs/move_loader",
    "programs/librapay",
]
account-decoder/Cargo.toml (new file, 29 lines)
@@ -0,0 +1,29 @@
[package]
name = "solana-account-decoder"
version = "1.2.23"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[dependencies]
bincode = "1.2.1"
base64 = "0.12.3"
bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-config-program = { path = "../programs/config", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.54"
thiserror = "1.0"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
account-decoder/src/lib.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;

pub mod parse_account_data;
pub mod parse_config;
pub mod parse_nonce;
pub mod parse_stake;
pub mod parse_sysvar;
pub mod parse_token;
pub mod parse_vote;
pub mod validator_info;

use crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey};
use std::str::FromStr;

pub type StringAmount = String;

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct UiAccount {
    pub lamports: u64,
    pub data: UiAccountData,
    pub owner: String,
    pub executable: bool,
    pub rent_epoch: Epoch,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
    Binary(String),
    Json(ParsedAccount),
    Binary64(String),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
    Binary, // SLOW! Avoid this encoding
    JsonParsed,
    Binary64,
}

impl UiAccount {
    pub fn encode(
        pubkey: &Pubkey,
        account: Account,
        encoding: UiAccountEncoding,
        additional_data: Option<AccountAdditionalData>,
        data_slice_config: Option<UiDataSliceConfig>,
    ) -> Self {
        let data = match encoding {
            UiAccountEncoding::Binary => UiAccountData::Binary(
                bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
            ),
            UiAccountEncoding::Binary64 => UiAccountData::Binary64(base64::encode(slice_data(
                &account.data,
                data_slice_config,
            ))),
            UiAccountEncoding::JsonParsed => {
                if let Ok(parsed_data) =
                    parse_account_data(pubkey, &account.owner, &account.data, additional_data)
                {
                    UiAccountData::Json(parsed_data)
                } else {
                    UiAccountData::Binary64(base64::encode(&account.data))
                }
            }
        };
        UiAccount {
            lamports: account.lamports,
            data,
            owner: account.owner.to_string(),
            executable: account.executable,
            rent_epoch: account.rent_epoch,
        }
    }

    pub fn decode(&self) -> Option<Account> {
        let data = match &self.data {
            UiAccountData::Json(_) => None,
            UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
            UiAccountData::Binary64(blob) => base64::decode(blob).ok(),
        }?;
        Some(Account {
            lamports: self.lamports,
            data,
            owner: Pubkey::from_str(&self.owner).ok()?,
            executable: self.executable,
            rent_epoch: self.rent_epoch,
        })
    }
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiFeeCalculator {
    pub lamports_per_signature: StringAmount,
}

impl From<FeeCalculator> for UiFeeCalculator {
    fn from(fee_calculator: FeeCalculator) -> Self {
        Self {
            lamports_per_signature: fee_calculator.lamports_per_signature.to_string(),
        }
    }
}

impl Default for UiFeeCalculator {
    fn default() -> Self {
        Self {
            lamports_per_signature: "0".to_string(),
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
    pub offset: usize,
    pub length: usize,
}

fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8] {
    if let Some(UiDataSliceConfig { offset, length }) = data_slice_config {
        if offset >= data.len() {
            &[]
        } else if length > data.len() - offset {
            &data[offset..]
        } else {
            &data[offset..offset + length]
        }
    } else {
        data
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_slice_data() {
        let data = vec![1, 2, 3, 4, 5];
        let slice_config = Some(UiDataSliceConfig {
            offset: 0,
            length: 5,
        });
        assert_eq!(slice_data(&data, slice_config), &data[..]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 0,
            length: 10,
        });
        assert_eq!(slice_data(&data, slice_config), &data[..]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 1,
            length: 2,
        });
        assert_eq!(slice_data(&data, slice_config), &data[1..3]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 10,
            length: 2,
        });
        assert_eq!(slice_data(&data, slice_config), &[] as &[u8]);
    }
}
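A minimal round-trip sketch of the `UiAccount` API added above. It is not part of the diff; it assumes the new package is importable as `solana_account_decoder` and that `solana_sdk::account::Account` derives `Clone`/`PartialEq` in this release line.

```rust
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_sdk::{account::Account, pubkey::Pubkey};

fn main() {
    let pubkey = Pubkey::new_rand();
    let account = Account {
        lamports: 10,
        data: vec![1, 2, 3, 4, 5],
        owner: Pubkey::default(),
        executable: false,
        rent_epoch: 0,
    };

    // Binary64 (base64) sidesteps the bs58 path that the enum comment flags as slow.
    let ui = UiAccount::encode(&pubkey, account.clone(), UiAccountEncoding::Binary64, None, None);

    // decode() reverses Binary/Binary64 data; JsonParsed data yields None instead.
    assert_eq!(ui.decode(), Some(account));
}
```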
account-decoder/src/parse_account_data.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
use crate::{
|
||||
parse_config::parse_config,
|
||||
parse_nonce::parse_nonce,
|
||||
parse_stake::parse_stake,
|
||||
parse_sysvar::parse_sysvar,
|
||||
parse_token::{parse_token, spl_token_id_v1_0},
|
||||
parse_vote::parse_vote,
|
||||
};
|
||||
use inflector::Inflector;
|
||||
use serde_json::Value;
|
||||
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
|
||||
use std::collections::HashMap;
|
||||
use thiserror::Error;
|
||||
|
||||
lazy_static! {
|
||||
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
|
||||
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
|
||||
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
|
||||
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
|
||||
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
|
||||
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
|
||||
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
|
||||
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
|
||||
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
|
||||
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
|
||||
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
|
||||
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
|
||||
m
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ParseAccountError {
|
||||
#[error("{0:?} account not parsable")]
|
||||
AccountNotParsable(ParsableAccount),
|
||||
|
||||
#[error("Program not parsable")]
|
||||
ProgramNotParsable,
|
||||
|
||||
#[error("Additional data required to parse: {0}")]
|
||||
AdditionalDataMissing(String),
|
||||
|
||||
#[error("Instruction error")]
|
||||
InstructionError(#[from] InstructionError),
|
||||
|
||||
#[error("Serde json error")]
|
||||
SerdeJsonError(#[from] serde_json::error::Error),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ParsedAccount {
|
||||
pub program: String,
|
||||
pub parsed: Value,
|
||||
pub space: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum ParsableAccount {
|
||||
Config,
|
||||
Nonce,
|
||||
SplToken,
|
||||
Stake,
|
||||
Sysvar,
|
||||
Vote,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AccountAdditionalData {
|
||||
pub spl_token_decimals: Option<u8>,
|
||||
}
|
||||
|
||||
pub fn parse_account_data(
|
||||
pubkey: &Pubkey,
|
||||
program_id: &Pubkey,
|
||||
data: &[u8],
|
||||
additional_data: Option<AccountAdditionalData>,
|
||||
) -> Result<ParsedAccount, ParseAccountError> {
|
||||
let program_name = PARSABLE_PROGRAM_IDS
|
||||
.get(program_id)
|
||||
.ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
|
||||
let additional_data = additional_data.unwrap_or_default();
|
||||
let parsed_json = match program_name {
|
||||
ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
|
||||
ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
|
||||
ParsableAccount::SplToken => {
|
||||
serde_json::to_value(parse_token(data, additional_data.spl_token_decimals)?)?
|
||||
}
|
||||
ParsableAccount::Stake => serde_json::to_value(parse_stake(data)?)?,
|
||||
ParsableAccount::Sysvar => serde_json::to_value(parse_sysvar(data, pubkey)?)?,
|
||||
ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
|
||||
};
|
||||
Ok(ParsedAccount {
|
||||
program: format!("{:?}", program_name).to_kebab_case(),
|
||||
parsed: parsed_json,
|
||||
space: data.len() as u64,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
};
|
||||
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_parse_account_data() {
|
||||
let account_pubkey = Pubkey::new_rand();
|
||||
let other_program = Pubkey::new_rand();
|
||||
let data = vec![0; 4];
|
||||
assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());
|
||||
|
||||
let vote_state = VoteState::default();
|
||||
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state));
|
||||
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
|
||||
let parsed = parse_account_data(
|
||||
&account_pubkey,
|
||||
&solana_vote_program::id(),
|
||||
&vote_account_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(parsed.program, "vote".to_string());
|
||||
assert_eq!(parsed.space, VoteState::size_of() as u64);
|
||||
|
||||
let nonce_data = Versions::new_current(State::Initialized(Data::default()));
|
||||
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
|
||||
let parsed = parse_account_data(
|
||||
&account_pubkey,
|
||||
&system_program::id(),
|
||||
&nonce_account_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(parsed.program, "nonce".to_string());
|
||||
assert_eq!(parsed.space, State::size() as u64);
|
||||
}
|
||||
}
|
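A hedged usage sketch for `parse_account_data` as defined above. It is not part of the diff; the `solana_account_decoder` crate path and the caller's `serde_json` dependency are assumptions. It shows how `AccountAdditionalData` carries the mint decimals that the spl-token parser needs.

```rust
use solana_account_decoder::parse_account_data::{parse_account_data, AccountAdditionalData};
use solana_sdk::pubkey::Pubkey;

/// Parse an spl-token account, supplying the mint decimals out of band.
fn parse_spl_token_account(
    account_pubkey: &Pubkey,
    owner_program_id: &Pubkey,
    data: &[u8],
    mint_decimals: u8,
) -> Option<serde_json::Value> {
    let additional_data = AccountAdditionalData {
        spl_token_decimals: Some(mint_decimals),
    };
    // Owners outside PARSABLE_PROGRAM_IDS come back as Err(ProgramNotParsable).
    parse_account_data(account_pubkey, owner_program_id, data, Some(additional_data))
        .ok()
        .map(|parsed_account| parsed_account.parsed)
}
```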
account-decoder/src/parse_config.rs (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
validator_info,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use serde_json::Value;
|
||||
use solana_config_program::{get_config_data, ConfigKeys};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::config::Config as StakeConfig;
|
||||
|
||||
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
|
||||
let parsed_account = if pubkey == &solana_stake_program::config::id() {
|
||||
get_config_data(data)
|
||||
.ok()
|
||||
.and_then(|data| deserialize::<StakeConfig>(data).ok())
|
||||
.map(|config| ConfigAccountType::StakeConfig(config.into()))
|
||||
} else {
|
||||
deserialize::<ConfigKeys>(data).ok().and_then(|key_list| {
|
||||
if !key_list.keys.is_empty() && key_list.keys[0].0 == validator_info::id() {
|
||||
parse_config_data::<String>(data, key_list.keys).and_then(|validator_info| {
|
||||
Some(ConfigAccountType::ValidatorInfo(UiConfig {
|
||||
keys: validator_info.keys,
|
||||
config_data: serde_json::from_str(&validator_info.config_data).ok()?,
|
||||
}))
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
};
|
||||
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::Config,
|
||||
))
|
||||
}
|
||||
|
||||
fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConfig<T>>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
|
||||
let keys = keys
|
||||
.iter()
|
||||
.map(|key| UiConfigKey {
|
||||
pubkey: key.0.to_string(),
|
||||
signer: key.1,
|
||||
})
|
||||
.collect();
|
||||
Some(UiConfig { keys, config_data })
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum ConfigAccountType {
|
||||
StakeConfig(UiStakeConfig),
|
||||
ValidatorInfo(UiConfig<Value>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiConfigKey {
|
||||
pub pubkey: String,
|
||||
pub signer: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeConfig {
|
||||
pub warmup_cooldown_rate: f64,
|
||||
pub slash_penalty: u8,
|
||||
}
|
||||
|
||||
impl From<StakeConfig> for UiStakeConfig {
|
||||
fn from(config: StakeConfig) -> Self {
|
||||
Self {
|
||||
warmup_cooldown_rate: config.warmup_cooldown_rate,
|
||||
slash_penalty: config.slash_penalty,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiConfig<T> {
|
||||
pub keys: Vec<UiConfigKey>,
|
||||
pub config_data: T,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::validator_info::ValidatorInfo;
|
||||
use serde_json::json;
|
||||
use solana_config_program::create_config_account;
|
||||
|
||||
#[test]
|
||||
fn test_parse_config() {
|
||||
let stake_config = StakeConfig {
|
||||
warmup_cooldown_rate: 0.25,
|
||||
slash_penalty: 50,
|
||||
};
|
||||
let stake_config_account = create_config_account(vec![], &stake_config, 10);
|
||||
assert_eq!(
|
||||
parse_config(
|
||||
&stake_config_account.data,
|
||||
&solana_stake_program::config::id()
|
||||
)
|
||||
.unwrap(),
|
||||
ConfigAccountType::StakeConfig(UiStakeConfig {
|
||||
warmup_cooldown_rate: 0.25,
|
||||
slash_penalty: 50,
|
||||
}),
|
||||
);
|
||||
|
||||
let validator_info = ValidatorInfo {
|
||||
info: serde_json::to_string(&json!({
|
||||
"name": "Solana",
|
||||
}))
|
||||
.unwrap(),
|
||||
};
|
||||
let info_pubkey = Pubkey::new_rand();
|
||||
let validator_info_config_account = create_config_account(
|
||||
vec![(validator_info::id(), false), (info_pubkey, true)],
|
||||
&validator_info,
|
||||
10,
|
||||
);
|
||||
assert_eq!(
|
||||
parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(),
|
||||
ConfigAccountType::ValidatorInfo(UiConfig {
|
||||
keys: vec![
|
||||
UiConfigKey {
|
||||
pubkey: validator_info::id().to_string(),
|
||||
signer: false,
|
||||
},
|
||||
UiConfigKey {
|
||||
pubkey: info_pubkey.to_string(),
|
||||
signer: true,
|
||||
}
|
||||
],
|
||||
config_data: serde_json::from_str(r#"{"name":"Solana"}"#).unwrap(),
|
||||
}),
|
||||
);
|
||||
|
||||
let bad_data = vec![0; 4];
|
||||
assert!(parse_config(&bad_data, &info_pubkey).is_err());
|
||||
}
|
||||
}
|
account-decoder/src/parse_nonce.rs (new file, 67 lines)
@@ -0,0 +1,67 @@
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
    instruction::InstructionError,
    nonce::{state::Versions, State},
};

pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
    let nonce_state: Versions = bincode::deserialize(data)
        .map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
    let nonce_state = nonce_state.convert_to_current();
    match nonce_state {
        State::Uninitialized => Ok(UiNonceState::Uninitialized),
        State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
            authority: data.authority.to_string(),
            blockhash: data.blockhash.to_string(),
            fee_calculator: data.fee_calculator.into(),
        })),
    }
}

/// A duplicate representation of NonceState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum UiNonceState {
    Uninitialized,
    Initialized(UiNonceData),
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiNonceData {
    pub authority: String,
    pub blockhash: String,
    pub fee_calculator: UiFeeCalculator,
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::{
        hash::Hash,
        nonce::{
            state::{Data, Versions},
            State,
        },
        pubkey::Pubkey,
    };

    #[test]
    fn test_parse_nonce() {
        let nonce_data = Versions::new_current(State::Initialized(Data::default()));
        let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
        assert_eq!(
            parse_nonce(&nonce_account_data).unwrap(),
            UiNonceState::Initialized(UiNonceData {
                authority: Pubkey::default().to_string(),
                blockhash: Hash::default().to_string(),
                fee_calculator: UiFeeCalculator {
                    lamports_per_signature: 0.to_string(),
                },
            }),
        );

        let bad_data = vec![0; 4];
        assert!(parse_nonce(&bad_data).is_err());
    }
}
account-decoder/src/parse_stake.rs (new file, 236 lines)
@@ -0,0 +1,236 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use solana_sdk::clock::{Epoch, UnixTimestamp};
|
||||
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
|
||||
|
||||
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
||||
let stake_state: StakeState = deserialize(data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Stake))?;
|
||||
let parsed_account = match stake_state {
|
||||
StakeState::Uninitialized => StakeAccountType::Uninitialized,
|
||||
StakeState::Initialized(meta) => StakeAccountType::Initialized(UiStakeAccount {
|
||||
meta: meta.into(),
|
||||
stake: None,
|
||||
}),
|
||||
StakeState::Stake(meta, stake) => StakeAccountType::Delegated(UiStakeAccount {
|
||||
meta: meta.into(),
|
||||
stake: Some(stake.into()),
|
||||
}),
|
||||
StakeState::RewardsPool => StakeAccountType::RewardsPool,
|
||||
};
|
||||
Ok(parsed_account)
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum StakeAccountType {
|
||||
Uninitialized,
|
||||
Initialized(UiStakeAccount),
|
||||
Delegated(UiStakeAccount),
|
||||
RewardsPool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeAccount {
|
||||
pub meta: UiMeta,
|
||||
pub stake: Option<UiStake>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiMeta {
|
||||
pub rent_exempt_reserve: StringAmount,
|
||||
pub authorized: UiAuthorized,
|
||||
pub lockup: UiLockup,
|
||||
}
|
||||
|
||||
impl From<Meta> for UiMeta {
|
||||
fn from(meta: Meta) -> Self {
|
||||
Self {
|
||||
rent_exempt_reserve: meta.rent_exempt_reserve.to_string(),
|
||||
authorized: meta.authorized.into(),
|
||||
lockup: meta.lockup.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiLockup {
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
pub epoch: Epoch,
|
||||
pub custodian: String,
|
||||
}
|
||||
|
||||
impl From<Lockup> for UiLockup {
|
||||
fn from(lockup: Lockup) -> Self {
|
||||
Self {
|
||||
unix_timestamp: lockup.unix_timestamp,
|
||||
epoch: lockup.epoch,
|
||||
custodian: lockup.custodian.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiAuthorized {
|
||||
pub staker: String,
|
||||
pub withdrawer: String,
|
||||
}
|
||||
|
||||
impl From<Authorized> for UiAuthorized {
|
||||
fn from(authorized: Authorized) -> Self {
|
||||
Self {
|
||||
staker: authorized.staker.to_string(),
|
||||
withdrawer: authorized.withdrawer.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStake {
|
||||
pub delegation: UiDelegation,
|
||||
pub credits_observed: u64,
|
||||
}
|
||||
|
||||
impl From<Stake> for UiStake {
|
||||
fn from(stake: Stake) -> Self {
|
||||
Self {
|
||||
delegation: stake.delegation.into(),
|
||||
credits_observed: stake.credits_observed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiDelegation {
|
||||
pub voter: String,
|
||||
pub stake: StringAmount,
|
||||
pub activation_epoch: StringAmount,
|
||||
pub deactivation_epoch: StringAmount,
|
||||
pub warmup_cooldown_rate: f64,
|
||||
}
|
||||
|
||||
impl From<Delegation> for UiDelegation {
|
||||
fn from(delegation: Delegation) -> Self {
|
||||
Self {
|
||||
voter: delegation.voter_pubkey.to_string(),
|
||||
stake: delegation.stake.to_string(),
|
||||
activation_epoch: delegation.activation_epoch.to_string(),
|
||||
deactivation_epoch: delegation.deactivation_epoch.to_string(),
|
||||
warmup_cooldown_rate: delegation.warmup_cooldown_rate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
|
||||
#[test]
|
||||
fn test_parse_stake() {
|
||||
let stake_state = StakeState::Uninitialized;
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Uninitialized
|
||||
);
|
||||
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let custodian = Pubkey::new_rand();
|
||||
let authorized = Authorized::auto(&pubkey);
|
||||
let lockup = Lockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian,
|
||||
};
|
||||
let meta = Meta {
|
||||
rent_exempt_reserve: 42,
|
||||
authorized,
|
||||
lockup,
|
||||
};
|
||||
|
||||
let stake_state = StakeState::Initialized(meta);
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Initialized(UiStakeAccount {
|
||||
meta: UiMeta {
|
||||
rent_exempt_reserve: 42.to_string(),
|
||||
authorized: UiAuthorized {
|
||||
staker: pubkey.to_string(),
|
||||
withdrawer: pubkey.to_string(),
|
||||
},
|
||||
lockup: UiLockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian: custodian.to_string(),
|
||||
}
|
||||
},
|
||||
stake: None,
|
||||
})
|
||||
);
|
||||
|
||||
let voter_pubkey = Pubkey::new_rand();
|
||||
let stake = Stake {
|
||||
delegation: Delegation {
|
||||
voter_pubkey,
|
||||
stake: 20,
|
||||
activation_epoch: 2,
|
||||
deactivation_epoch: std::u64::MAX,
|
||||
warmup_cooldown_rate: 0.25,
|
||||
},
|
||||
credits_observed: 10,
|
||||
};
|
||||
|
||||
let stake_state = StakeState::Stake(meta, stake);
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::Delegated(UiStakeAccount {
|
||||
meta: UiMeta {
|
||||
rent_exempt_reserve: 42.to_string(),
|
||||
authorized: UiAuthorized {
|
||||
staker: pubkey.to_string(),
|
||||
withdrawer: pubkey.to_string(),
|
||||
},
|
||||
lockup: UiLockup {
|
||||
unix_timestamp: 0,
|
||||
epoch: 1,
|
||||
custodian: custodian.to_string(),
|
||||
}
|
||||
},
|
||||
stake: Some(UiStake {
|
||||
delegation: UiDelegation {
|
||||
voter: voter_pubkey.to_string(),
|
||||
stake: 20.to_string(),
|
||||
activation_epoch: 2.to_string(),
|
||||
deactivation_epoch: std::u64::MAX.to_string(),
|
||||
warmup_cooldown_rate: 0.25,
|
||||
},
|
||||
credits_observed: 10,
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
let stake_state = StakeState::RewardsPool;
|
||||
let stake_data = serialize(&stake_state).unwrap();
|
||||
assert_eq!(
|
||||
parse_stake(&stake_data).unwrap(),
|
||||
StakeAccountType::RewardsPool
|
||||
);
|
||||
|
||||
let bad_data = vec![1, 2, 3, 4];
|
||||
assert!(parse_stake(&bad_data).is_err());
|
||||
}
|
||||
}
|
account-decoder/src/parse_sysvar.rs (new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, UiFeeCalculator,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use bv::BitVec;
|
||||
use solana_sdk::{
|
||||
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
||||
epoch_schedule::EpochSchedule,
|
||||
pubkey::Pubkey,
|
||||
rent::Rent,
|
||||
slot_hashes::SlotHashes,
|
||||
slot_history::{self, SlotHistory},
|
||||
stake_history::{StakeHistory, StakeHistoryEntry},
|
||||
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
|
||||
};
|
||||
|
||||
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
|
||||
let parsed_account = {
|
||||
if pubkey == &sysvar::clock::id() {
|
||||
deserialize::<Clock>(data)
|
||||
.ok()
|
||||
.map(|clock| SysvarAccountType::Clock(clock.into()))
|
||||
} else if pubkey == &sysvar::epoch_schedule::id() {
|
||||
deserialize(data).ok().map(SysvarAccountType::EpochSchedule)
|
||||
} else if pubkey == &sysvar::fees::id() {
|
||||
deserialize::<Fees>(data)
|
||||
.ok()
|
||||
.map(|fees| SysvarAccountType::Fees(fees.into()))
|
||||
} else if pubkey == &sysvar::recent_blockhashes::id() {
|
||||
deserialize::<RecentBlockhashes>(data)
|
||||
.ok()
|
||||
.map(|recent_blockhashes| {
|
||||
let recent_blockhashes = recent_blockhashes
|
||||
.iter()
|
||||
.map(|entry| UiRecentBlockhashesEntry {
|
||||
blockhash: entry.blockhash.to_string(),
|
||||
fee_calculator: entry.fee_calculator.clone().into(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::RecentBlockhashes(recent_blockhashes)
|
||||
})
|
||||
} else if pubkey == &sysvar::rent::id() {
|
||||
deserialize::<Rent>(data)
|
||||
.ok()
|
||||
.map(|rent| SysvarAccountType::Rent(rent.into()))
|
||||
} else if pubkey == &sysvar::rewards::id() {
|
||||
deserialize::<Rewards>(data)
|
||||
.ok()
|
||||
.map(|rewards| SysvarAccountType::Rewards(rewards.into()))
|
||||
} else if pubkey == &sysvar::slot_hashes::id() {
|
||||
deserialize::<SlotHashes>(data).ok().map(|slot_hashes| {
|
||||
let slot_hashes = slot_hashes
|
||||
.iter()
|
||||
.map(|slot_hash| UiSlotHashEntry {
|
||||
slot: slot_hash.0,
|
||||
hash: slot_hash.1.to_string(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::SlotHashes(slot_hashes)
|
||||
})
|
||||
} else if pubkey == &sysvar::slot_history::id() {
|
||||
deserialize::<SlotHistory>(data).ok().map(|slot_history| {
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
next_slot: slot_history.next_slot,
|
||||
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
|
||||
})
|
||||
})
|
||||
} else if pubkey == &sysvar::stake_history::id() {
|
||||
deserialize::<StakeHistory>(data).ok().map(|stake_history| {
|
||||
let stake_history = stake_history
|
||||
.iter()
|
||||
.map(|entry| UiStakeHistoryEntry {
|
||||
epoch: entry.0,
|
||||
stake_history: entry.1.clone(),
|
||||
})
|
||||
.collect();
|
||||
SysvarAccountType::StakeHistory(stake_history)
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::Sysvar,
|
||||
))
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum SysvarAccountType {
|
||||
Clock(UiClock),
|
||||
EpochSchedule(EpochSchedule),
|
||||
Fees(UiFees),
|
||||
RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
|
||||
Rent(UiRent),
|
||||
Rewards(UiRewards),
|
||||
SlotHashes(Vec<UiSlotHashEntry>),
|
||||
SlotHistory(UiSlotHistory),
|
||||
StakeHistory(Vec<UiStakeHistoryEntry>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiClock {
|
||||
pub slot: Slot,
|
||||
pub epoch: Epoch,
|
||||
pub leader_schedule_epoch: Epoch,
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
}
|
||||
|
||||
impl From<Clock> for UiClock {
|
||||
fn from(clock: Clock) -> Self {
|
||||
Self {
|
||||
slot: clock.slot,
|
||||
epoch: clock.epoch,
|
||||
leader_schedule_epoch: clock.leader_schedule_epoch,
|
||||
unix_timestamp: clock.unix_timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiFees {
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
impl From<Fees> for UiFees {
|
||||
fn from(fees: Fees) -> Self {
|
||||
Self {
|
||||
fee_calculator: fees.fee_calculator.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRent {
|
||||
pub lamports_per_byte_year: StringAmount,
|
||||
pub exemption_threshold: f64,
|
||||
pub burn_percent: u8,
|
||||
}
|
||||
|
||||
impl From<Rent> for UiRent {
|
||||
fn from(rent: Rent) -> Self {
|
||||
Self {
|
||||
lamports_per_byte_year: rent.lamports_per_byte_year.to_string(),
|
||||
exemption_threshold: rent.exemption_threshold,
|
||||
burn_percent: rent.burn_percent,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRewards {
|
||||
pub validator_point_value: f64,
|
||||
}
|
||||
|
||||
impl From<Rewards> for UiRewards {
|
||||
fn from(rewards: Rewards) -> Self {
|
||||
Self {
|
||||
validator_point_value: rewards.validator_point_value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiRecentBlockhashesEntry {
|
||||
pub blockhash: String,
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiSlotHashEntry {
|
||||
pub slot: Slot,
|
||||
pub hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiSlotHistory {
|
||||
pub next_slot: Slot,
|
||||
pub bits: String,
|
||||
}
|
||||
|
||||
struct SlotHistoryBits(BitVec<u64>);
|
||||
|
||||
impl std::fmt::Debug for SlotHistoryBits {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
for i in 0..slot_history::MAX_ENTRIES {
|
||||
if self.0.get(i) {
|
||||
write!(f, "1")?;
|
||||
} else {
|
||||
write!(f, "0")?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiStakeHistoryEntry {
|
||||
pub epoch: Epoch,
|
||||
pub stake_history: StakeHistoryEntry,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
sysvar::{recent_blockhashes::IterItem, Sysvar},
|
||||
};
|
||||
use std::iter::FromIterator;
|
||||
|
||||
#[test]
|
||||
fn test_parse_sysvars() {
|
||||
let clock_sysvar = Clock::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
|
||||
SysvarAccountType::Clock(UiClock::default()),
|
||||
);
|
||||
|
||||
let epoch_schedule = EpochSchedule {
|
||||
slots_per_epoch: 12,
|
||||
leader_schedule_slot_offset: 0,
|
||||
warmup: false,
|
||||
first_normal_epoch: 1,
|
||||
first_normal_slot: 12,
|
||||
};
|
||||
let epoch_schedule_sysvar = epoch_schedule.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
|
||||
SysvarAccountType::EpochSchedule(epoch_schedule),
|
||||
);
|
||||
|
||||
let fees_sysvar = Fees::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
|
||||
SysvarAccountType::Fees(UiFees::default()),
|
||||
);
|
||||
|
||||
let hash = Hash::new(&[1; 32]);
|
||||
let fee_calculator = FeeCalculator {
|
||||
lamports_per_signature: 10,
|
||||
};
|
||||
let recent_blockhashes =
|
||||
RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
|
||||
let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(
|
||||
&recent_blockhashes_sysvar.data,
|
||||
&sysvar::recent_blockhashes::id()
|
||||
)
|
||||
.unwrap(),
|
||||
SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
|
||||
blockhash: hash.to_string(),
|
||||
fee_calculator: fee_calculator.into(),
|
||||
}]),
|
||||
);
|
||||
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 10,
|
||||
exemption_threshold: 2.0,
|
||||
burn_percent: 5,
|
||||
};
|
||||
let rent_sysvar = rent.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
|
||||
SysvarAccountType::Rent(rent.into()),
|
||||
);
|
||||
|
||||
let rewards_sysvar = Rewards::default().create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
|
||||
SysvarAccountType::Rewards(UiRewards::default()),
|
||||
);
|
||||
|
||||
let mut slot_hashes = SlotHashes::default();
|
||||
slot_hashes.add(1, hash);
|
||||
let slot_hashes_sysvar = slot_hashes.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
|
||||
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
|
||||
slot: 1,
|
||||
hash: hash.to_string(),
|
||||
}]),
|
||||
);
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(42);
|
||||
let slot_history_sysvar = slot_history.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
next_slot: slot_history.next_slot,
|
||||
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
|
||||
}),
|
||||
);
|
||||
|
||||
let mut stake_history = StakeHistory::default();
|
||||
let stake_history_entry = StakeHistoryEntry {
|
||||
effective: 10,
|
||||
activating: 2,
|
||||
deactivating: 3,
|
||||
};
|
||||
stake_history.add(1, stake_history_entry.clone());
|
||||
let stake_history_sysvar = stake_history.create_account(1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
|
||||
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
|
||||
epoch: 1,
|
||||
stake_history: stake_history_entry,
|
||||
}]),
|
||||
);
|
||||
|
||||
let bad_pubkey = Pubkey::new_rand();
|
||||
assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
|
||||
|
||||
let bad_data = vec![0; 4];
|
||||
assert!(parse_sysvar(&bad_data, &sysvar::stake_history::id()).is_err());
|
||||
}
|
||||
}
|
account-decoder/src/parse_token.rs (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use spl_token_v1_0::{
|
||||
option::COption,
|
||||
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
|
||||
state::{unpack, Account, Mint, Multisig},
|
||||
};
|
||||
use std::{mem::size_of, str::FromStr};
|
||||
|
||||
// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_id_v1_0() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
|
||||
}
|
||||
|
||||
// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_v1_0_native_mint() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
|
||||
}
|
||||
|
||||
pub fn parse_token(
|
||||
data: &[u8],
|
||||
mint_decimals: Option<u8>,
|
||||
) -> Result<TokenAccountType, ParseAccountError> {
|
||||
let mut data = data.to_vec();
|
||||
if data.len() == size_of::<Account>() {
|
||||
let account: Account = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
let decimals = mint_decimals.ok_or_else(|| {
|
||||
ParseAccountError::AdditionalDataMissing(
|
||||
"no mint_decimals provided to parse spl-token account".to_string(),
|
||||
)
|
||||
})?;
|
||||
Ok(TokenAccountType::Account(UiTokenAccount {
|
||||
mint: account.mint.to_string(),
|
||||
owner: account.owner.to_string(),
|
||||
token_amount: token_amount_to_ui_amount(account.amount, decimals),
|
||||
delegate: match account.delegate {
|
||||
COption::Some(pubkey) => Some(pubkey.to_string()),
|
||||
COption::None => None,
|
||||
},
|
||||
is_initialized: account.is_initialized,
|
||||
is_native: account.is_native,
|
||||
delegated_amount: if account.delegate.is_none() {
|
||||
None
|
||||
} else {
|
||||
Some(token_amount_to_ui_amount(
|
||||
account.delegated_amount,
|
||||
decimals,
|
||||
))
|
||||
},
|
||||
}))
|
||||
} else if data.len() == size_of::<Mint>() {
|
||||
let mint: Mint = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
Ok(TokenAccountType::Mint(UiMint {
|
||||
owner: match mint.owner {
|
||||
COption::Some(pubkey) => Some(pubkey.to_string()),
|
||||
COption::None => None,
|
||||
},
|
||||
decimals: mint.decimals,
|
||||
is_initialized: mint.is_initialized,
|
||||
}))
|
||||
} else if data.len() == size_of::<Multisig>() {
|
||||
let multisig: Multisig = *unpack(&mut data)
|
||||
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
|
||||
Ok(TokenAccountType::Multisig(UiMultisig {
|
||||
num_required_signers: multisig.m,
|
||||
num_valid_signers: multisig.n,
|
||||
is_initialized: multisig.is_initialized,
|
||||
signers: multisig
|
||||
.signers
|
||||
.iter()
|
||||
.filter_map(|pubkey| {
|
||||
if pubkey != &SplTokenPubkey::default() {
|
||||
Some(pubkey.to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
}))
|
||||
} else {
|
||||
Err(ParseAccountError::AccountNotParsable(
|
||||
ParsableAccount::SplToken,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
pub enum TokenAccountType {
|
||||
Account(UiTokenAccount),
|
||||
Mint(UiMint),
|
||||
Multisig(UiMultisig),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiTokenAccount {
|
||||
pub mint: String,
|
||||
pub owner: String,
|
||||
pub token_amount: UiTokenAmount,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegate: Option<String>,
|
||||
pub is_initialized: bool,
|
||||
pub is_native: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_amount: Option<UiTokenAmount>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiTokenAmount {
|
||||
pub ui_amount: f64,
|
||||
pub decimals: u8,
|
||||
pub amount: StringAmount,
|
||||
}
|
||||
|
||||
pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
|
||||
// Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
|
||||
let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
|
||||
UiTokenAmount {
|
||||
        ui_amount: amount_decimals,
        decimals,
        amount: amount.to_string(),
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMint {
    pub owner: Option<String>,
    pub decimals: u8,
    pub is_initialized: bool,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMultisig {
    pub num_required_signers: u8,
    pub num_valid_signers: u8,
    pub is_initialized: bool,
    pub signers: Vec<String>,
}

pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
    if data.len() == size_of::<Account>() {
        Some(Pubkey::new(&data[0..32]))
    } else {
        None
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use spl_token_v1_0::state::unpack_unchecked;

    #[test]
    fn test_parse_token() {
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;
        account.owner = owner_pubkey;
        account.amount = 42;
        account.is_initialized = true;
        assert!(parse_token(&account_data, None).is_err());
        assert_eq!(
            parse_token(&account_data, Some(2)).unwrap(),
            TokenAccountType::Account(UiTokenAccount {
                mint: mint_pubkey.to_string(),
                owner: owner_pubkey.to_string(),
                token_amount: UiTokenAmount {
                    ui_amount: 0.42,
                    decimals: 2,
                    amount: "42".to_string()
                },
                delegate: None,
                is_initialized: true,
                is_native: false,
                delegated_amount: None,
            }),
        );

        let mut mint_data = [0; size_of::<Mint>()];
        let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
        mint.owner = COption::Some(owner_pubkey);
        mint.decimals = 3;
        mint.is_initialized = true;
        assert_eq!(
            parse_token(&mint_data, None).unwrap(),
            TokenAccountType::Mint(UiMint {
                owner: Some(owner_pubkey.to_string()),
                decimals: 3,
                is_initialized: true,
            }),
        );

        let signer1 = SplTokenPubkey::new(&[1; 32]);
        let signer2 = SplTokenPubkey::new(&[2; 32]);
        let signer3 = SplTokenPubkey::new(&[3; 32]);
        let mut multisig_data = [0; size_of::<Multisig>()];
        let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
        let mut signers = [SplTokenPubkey::default(); 11];
        signers[0] = signer1;
        signers[1] = signer2;
        signers[2] = signer3;
        multisig.m = 2;
        multisig.n = 3;
        multisig.is_initialized = true;
        multisig.signers = signers;
        assert_eq!(
            parse_token(&multisig_data, None).unwrap(),
            TokenAccountType::Multisig(UiMultisig {
                num_required_signers: 2,
                num_valid_signers: 3,
                is_initialized: true,
                signers: vec![
                    signer1.to_string(),
                    signer2.to_string(),
                    signer3.to_string()
                ],
            }),
        );

        let bad_data = vec![0; 4];
        assert!(parse_token(&bad_data, None).is_err());
    }

    #[test]
    fn test_get_token_account_mint() {
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;

        let expected_mint_pubkey = Pubkey::new(&[2; 32]);
        assert_eq!(
            get_token_account_mint(&account_data),
            Some(expected_mint_pubkey)
        );
    }
}
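Note (not part of the change set): the tests above show parse_token returning a TokenAccountType wrapping the Ui* structs. A minimal sketch of how a caller might consume that output, assuming only the API shown above; the helper name and println formatting are illustrative.

// Illustrative sketch only; assumes the parse_token API and Ui* types shown above.
fn print_token_balance(account_data: &[u8], decimals: u8) {
    match parse_token(account_data, Some(decimals)) {
        Ok(TokenAccountType::Account(ui_account)) => println!(
            "{} tokens ({} raw, mint {})",
            ui_account.token_amount.ui_amount, ui_account.token_amount.amount, ui_account.mint
        ),
        Ok(_) => println!("account is a mint or multisig, not a token account"),
        Err(err) => eprintln!("failed to parse token account: {:?}", err),
    }
}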
144  account-decoder/src/parse_vote.rs  (Normal file)
@@ -0,0 +1,144 @@
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
    clock::{Epoch, Slot},
    pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
    let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
    let epoch_credits = vote_state
        .epoch_credits()
        .iter()
        .map(|(epoch, credits, previous_credits)| UiEpochCredits {
            epoch: *epoch,
            credits: credits.to_string(),
            previous_credits: previous_credits.to_string(),
        })
        .collect();
    let votes = vote_state
        .votes
        .iter()
        .map(|lockout| UiLockout {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        })
        .collect();
    let authorized_voters = vote_state
        .authorized_voters()
        .iter()
        .map(|(epoch, authorized_voter)| UiAuthorizedVoters {
            epoch: *epoch,
            authorized_voter: authorized_voter.to_string(),
        })
        .collect();
    let prior_voters = vote_state
        .prior_voters()
        .buf()
        .iter()
        .filter(|(pubkey, _, _)| pubkey != &Pubkey::default())
        .map(
            |(authorized_pubkey, epoch_of_last_authorized_switch, target_epoch)| UiPriorVoters {
                authorized_pubkey: authorized_pubkey.to_string(),
                epoch_of_last_authorized_switch: *epoch_of_last_authorized_switch,
                target_epoch: *target_epoch,
            },
        )
        .collect();
    Ok(VoteAccountType::Vote(UiVoteState {
        node_pubkey: vote_state.node_pubkey.to_string(),
        authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
        commission: vote_state.commission,
        votes,
        root_slot: vote_state.root_slot,
        authorized_voters,
        prior_voters,
        epoch_credits,
        last_timestamp: vote_state.last_timestamp,
    }))
}

/// A wrapper enum for consistency across programs
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum VoteAccountType {
    Vote(UiVoteState),
}

/// A duplicate representation of VoteState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiVoteState {
    node_pubkey: String,
    authorized_withdrawer: String,
    commission: u8,
    votes: Vec<UiLockout>,
    root_slot: Option<Slot>,
    authorized_voters: Vec<UiAuthorizedVoters>,
    prior_voters: Vec<UiPriorVoters>,
    epoch_credits: Vec<UiEpochCredits>,
    last_timestamp: BlockTimestamp,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiLockout {
    slot: Slot,
    confirmation_count: u32,
}

impl From<&Lockout> for UiLockout {
    fn from(lockout: &Lockout) -> Self {
        Self {
            slot: lockout.slot,
            confirmation_count: lockout.confirmation_count,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiAuthorizedVoters {
    epoch: Epoch,
    authorized_voter: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiPriorVoters {
    authorized_pubkey: String,
    epoch_of_last_authorized_switch: Epoch,
    target_epoch: Epoch,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
    epoch: Epoch,
    credits: StringAmount,
    previous_credits: StringAmount,
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_vote_program::vote_state::VoteStateVersions;

    #[test]
    fn test_parse_vote() {
        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let mut expected_vote_state = UiVoteState::default();
        expected_vote_state.node_pubkey = Pubkey::default().to_string();
        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
        assert_eq!(
            parse_vote(&vote_account_data).unwrap(),
            VoteAccountType::Vote(expected_vote_state)
        );

        let bad_data = vec![0; 4];
        assert!(parse_vote(&bad_data).is_err());
    }
}
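Note (not part of the change set): UiVoteState is explicitly built for pretty JSON output. A minimal sketch of turning a vote account's data into JSON, assuming serde_json is available and that ParseAccountError implements std::error::Error; both are assumptions, not confirmed by this diff.

// Illustrative sketch only; `account_data` would be the raw data of a vote account.
fn vote_account_to_json(account_data: &[u8]) -> Result<String, Box<dyn std::error::Error>> {
    let parsed = parse_vote(account_data)?; // VoteAccountType::Vote(UiVoteState { .. })
    Ok(serde_json::to_string_pretty(&parsed)?)
}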
18  account-decoder/src/validator_info.rs  (Normal file)
@@ -0,0 +1,18 @@
use solana_config_program::ConfigState;

pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;

solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");

#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
    pub info: String,
}

impl ConfigState for ValidatorInfo {
    fn max_space() -> u64 {
        MAX_VALIDATOR_INFO
    }
}
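Note (not part of the change set): max_space() bounds how large a serialized ValidatorInfo may be. A sketch of a size check under that bound; the use of bincode::serialized_size here is an assumption for illustration, not something this file prescribes.

// Illustrative sketch only; assumes bincode and the ValidatorInfo type above.
fn info_fits_in_account(info: &ValidatorInfo) -> bool {
    bincode::serialized_size(info)
        .map(|len| len <= ValidatorInfo::max_space())
        .unwrap_or(false)
}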
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,16 +13,16 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-streamer = { path = "../streamer", version = "1.2.8" }
solana-perf = { path = "../perf", version = "1.2.8" }
solana-ledger = { path = "../ledger", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-streamer = { path = "../streamer", version = "1.2.23" }
solana-perf = { path = "../perf", version = "1.2.23" }
solana-ledger = { path = "../ledger", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.8" }
solana-genesis = { path = "../genesis", version = "1.2.8" }
solana-client = { path = "../client", version = "1.2.8" }
solana-faucet = { path = "../faucet", version = "1.2.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-metrics = { path = "../metrics", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-core = { path = "../core", version = "1.2.23" }
solana-genesis = { path = "../genesis", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-metrics = { path = "../metrics", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.2.8" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-streamer = { path = "../streamer", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-streamer = { path = "../streamer", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,28 +14,23 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.8" }
solana-genesis = { path = "../genesis", version = "1.2.8" }
solana-client = { path = "../client", version = "1.2.8" }
solana-faucet = { path = "../faucet", version = "1.2.8" }
solana-librapay = { path = "../programs/librapay", version = "1.2.8", optional = true }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-metrics = { path = "../metrics", version = "1.2.8" }
solana-measure = { path = "../measure", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.8", optional = true }
solana-version = { path = "../version", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-core = { path = "../core", version = "1.2.23" }
solana-genesis = { path = "../genesis", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-metrics = { path = "../metrics", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.2.8" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
solana-local-cluster = { path = "../local-cluster", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -4,8 +4,6 @@ use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
#[cfg(feature = "move")]
use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
@@ -37,9 +35,6 @@ use std::{
const MAX_TX_QUEUE_AGE: u64 =
    MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;

#[cfg(feature = "move")]
use solana_librapay::librapay_transaction;

pub const MAX_SPENDS_PER_TX: u64 = 4;

#[derive(Debug)]
@@ -51,8 +46,6 @@ pub type Result<T> = std::result::Result<T, BenchTpsError>;

pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;

type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);

fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
    loop {
        match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
@@ -122,7 +115,6 @@ fn generate_chunked_transfers(
    threads: usize,
    duration: Duration,
    sustained: bool,
    libra_args: Option<LibraKeys>,
) {
    // generate and send transactions for the specified duration
    let start = Instant::now();
@@ -137,7 +129,6 @@ fn generate_chunked_transfers(
            &dest_keypair_chunks[chunk_index],
            threads,
            reclaim_lamports_back_to_source_account,
            &libra_args,
        );

        // In sustained mode, overlap the transfers with generation. This has higher average
@@ -205,12 +196,7 @@ where
        .collect()
}

pub fn do_bench_tps<T>(
    client: Arc<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    libra_args: Option<LibraKeys>,
) -> u64
pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
where
    T: 'static + Client + Send + Sync,
{
@@ -294,7 +280,6 @@ where
        threads,
        duration,
        sustained,
        libra_args,
    );

    // Stop the sampling threads so it will collect the stats
@@ -340,52 +325,6 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
    );
}

#[cfg(feature = "move")]
fn generate_move_txs(
    source: &[&Keypair],
    dest: &VecDeque<&Keypair>,
    reclaim: bool,
    move_keypairs: &[Keypair],
    libra_pay_program_id: &Pubkey,
    libra_mint_id: &Pubkey,
    blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
    let count = move_keypairs.len() / 2;
    let source_move = &move_keypairs[..count];
    let dest_move = &move_keypairs[count..];
    let pairs: Vec<_> = if !reclaim {
        source_move
            .iter()
            .zip(dest_move.iter())
            .zip(source.iter())
            .collect()
    } else {
        dest_move
            .iter()
            .zip(source_move.iter())
            .zip(dest.iter())
            .collect()
    };

    pairs
        .par_iter()
        .map(|((from, to), payer)| {
            (
                librapay_transaction::transfer(
                    libra_pay_program_id,
                    libra_mint_id,
                    &payer,
                    &from,
                    &to.pubkey(),
                    1,
                    *blockhash,
                ),
                timestamp(),
            )
        })
        .collect()
}

fn generate_system_txs(
    source: &[&Keypair],
    dest: &VecDeque<&Keypair>,
@@ -416,7 +355,6 @@ fn generate_txs(
    dest: &VecDeque<&Keypair>,
    threads: usize,
    reclaim: bool,
    libra_args: &Option<LibraKeys>,
) {
    let blockhash = *blockhash.read().unwrap();
    let tx_count = source.len();
@@ -426,33 +364,7 @@ fn generate_txs(
    );
    let signing_start = Instant::now();

    let transactions = if let Some((
        _libra_genesis_keypair,
        _libra_pay_program_id,
        _libra_mint_program_id,
        _libra_keys,
    )) = libra_args
    {
        #[cfg(not(feature = "move"))]
        {
            return;
        }

        #[cfg(feature = "move")]
        {
            generate_move_txs(
                source,
                dest,
                reclaim,
                &_libra_keys,
                _libra_pay_program_id,
                &_libra_genesis_keypair.pubkey(),
                &blockhash,
            )
        }
    } else {
        generate_system_txs(source, dest, reclaim, &blockhash)
    };
    let transactions = generate_system_txs(source, dest, reclaim, &blockhash);

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@@ -954,181 +866,13 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
    (rnd.gen_n_keypairs(total_keys), extra)
}

#[cfg(feature = "move")]
fn fund_move_keys<T: Client>(
    client: &T,
    funding_key: &Keypair,
    keypairs: &[Keypair],
    total: u64,
    libra_pay_program_id: &Pubkey,
    libra_mint_program_id: &Pubkey,
    libra_genesis_key: &Keypair,
) {
    let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);

    info!("creating the libra funding account..");
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
    client
        .send_and_confirm_message(&[funding_key, &libra_funding_key], tx.message)
        .unwrap();

    info!("minting to funding keypair");
    let tx = librapay_transaction::mint_tokens(
        &libra_mint_program_id,
        funding_key,
        libra_genesis_key,
        &libra_funding_key.pubkey(),
        total,
        blockhash,
    );
    client
        .send_and_confirm_message(&[funding_key, libra_genesis_key], tx.message)
        .unwrap();

    info!("creating {} move accounts...", keypairs.len());
    let total_len = keypairs.len();
    let create_len = 5;
    let mut funding_time = Measure::start("funding_time");
    for (i, keys) in keypairs.chunks(create_len).enumerate() {
        if client
            .get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
            .unwrap_or(0)
            > 0
        {
            // already created these accounts.
            break;
        }

        let keypairs: Vec<_> = keys.iter().map(|k| k).collect();
        let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
        let ser_size = bincode::serialized_size(&tx).unwrap();
        let mut keys = vec![funding_key];
        keys.extend(&keypairs);
        client.send_and_confirm_message(&keys, tx.message).unwrap();

        if i % 10 == 0 {
            info!(
                "created {} accounts of {} (size {})",
                i,
                total_len / create_len,
                ser_size,
            );
        }
    }

    const NUM_FUNDING_KEYS: usize = 10;
    let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
    let pubkey_amounts: Vec<_> = funding_keys
        .iter()
        .map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
        .collect();
    let instructions = system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts);
    let message = Message::new(&instructions, Some(&funding_key.pubkey()));
    let tx = Transaction::new(&[funding_key], message, blockhash);
    client
        .send_and_confirm_message(&[funding_key], tx.message)
        .unwrap();
    let mut balance = 0;
    for _ in 0..20 {
        if let Ok(balance_) = client
            .get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
        {
            if balance_ > 0 {
                balance = balance_;
                break;
            }
        }
        sleep(Duration::from_millis(100));
    }
    assert!(balance > 0);
    info!(
        "funded multiple funding accounts with {:?} lanports",
        balance
    );

    let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
    for (i, key) in libra_funding_keys.iter().enumerate() {
        let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
        client
            .send_and_confirm_message(&[&funding_keys[i], &key], tx.message)
            .unwrap();

        let tx = librapay_transaction::transfer(
            libra_pay_program_id,
            &libra_genesis_key.pubkey(),
            &funding_keys[i],
            &libra_funding_key,
            &key.pubkey(),
            total / NUM_FUNDING_KEYS as u64,
            blockhash,
        );
        client
            .send_and_confirm_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .unwrap();

        info!("funded libra funding key {}", i);
    }

    let keypair_count = keypairs.len();
    let amount = total / (keypair_count as u64);
    for (i, keys) in keypairs[..keypair_count]
        .chunks(NUM_FUNDING_KEYS)
        .enumerate()
    {
        for (j, key) in keys.iter().enumerate() {
            let tx = librapay_transaction::transfer(
                libra_pay_program_id,
                &libra_genesis_key.pubkey(),
                &funding_keys[j],
                &libra_funding_keys[j],
                &key.pubkey(),
                amount,
                blockhash,
            );

            let _sig = client
                .async_send_transaction(tx.clone())
                .expect("create_account in generate_and_fund_keypairs");
        }

        for (j, key) in keys.iter().enumerate() {
            let mut times = 0;
            loop {
                let balance =
                    librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
                if balance >= amount {
                    break;
                } else if times > 20 {
                    info!("timed out.. {} key: {} balance: {}", i, j, balance);
                    break;
                } else {
                    times += 1;
                    sleep(Duration::from_millis(100));
                }
            }
        }

        info!(
            "funded group {} of {}",
            i + 1,
            keypairs.len() / NUM_FUNDING_KEYS
        );
        blockhash = get_recent_blockhash(client).0;
    }

    funding_time.stop();
    info!("done funding keys, took {} ms", funding_time.as_ms());
}

pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
    client: Arc<T>,
    faucet_addr: Option<SocketAddr>,
    funding_key: &Keypair,
    keypair_count: usize,
    lamports_per_account: u64,
    use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
) -> Result<Vec<Keypair>> {
    info!("Creating {} keypairs...", keypair_count);
    let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
    info!("Get lamports...");
@@ -1141,12 +885,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
    let last_key = keypairs[keypair_count - 1].pubkey();
    let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);

    #[cfg(feature = "move")]
    let mut move_keypairs_ret = None;

    #[cfg(not(feature = "move"))]
    let move_keypairs_ret = None;

    // Repeated runs will eat up keypair balances from transaction fees. In order to quickly
    // start another bench-tps run without re-funding all of the keypairs, check if the
    // keypairs still have at least 80% of the expected funds. That should be enough to
@@ -1157,10 +895,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
    let max_fee = fee_rate_governor.max_lamports_per_signature;
    let extra_fees = extra * max_fee;
    let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
    let mut total = lamports_per_account * total_keypairs + extra_fees;
    if use_move {
        total *= 3;
    }
    let total = lamports_per_account * total_keypairs + extra_fees;

    let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
    info!(
@@ -1172,40 +907,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
        airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
    }

    #[cfg(feature = "move")]
    {
        if use_move {
            let libra_genesis_keypair =
                create_genesis(&funding_key, client.as_ref(), 10_000_000);
            let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
            let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());

            // Generate another set of keypairs for move accounts.
            // Still fund the solana ones which will be used for fees.
            let seed = [0u8; 32];
            let mut rnd = GenKeys::new(seed);
            let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
            fund_move_keys(
                client.as_ref(),
                funding_key,
                &move_keypairs,
                total / 3,
                &libra_pay_program_id,
                &libra_mint_program_id,
                &libra_genesis_keypair,
            );
            move_keypairs_ret = Some((
                libra_genesis_keypair,
                libra_pay_program_id,
                libra_mint_program_id,
                move_keypairs,
            ));

            // Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
            total /= 3;
        }
    }

    fund_keys(
        client,
        funding_key,
@@ -1219,7 +920,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
    // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
    keypairs.truncate(keypair_count);

    Ok((keypairs, move_keypairs_ret))
    Ok(keypairs)
}

#[cfg(test)]
@@ -1243,11 +944,11 @@ mod tests {
        config.duration = Duration::from_secs(5);

        let keypair_count = config.tx_count * config.keypair_multiplier;
        let (keypairs, _move_keypairs) =
            generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
        let keypairs =
            generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
                .unwrap();

        do_bench_tps(client, config, keypairs, None);
        do_bench_tps(client, config, keypairs);
    }

    #[test]
@@ -1258,9 +959,8 @@ mod tests {
        let keypair_count = 20;
        let lamports = 20;

        let (keypairs, _move_keypairs) =
            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
                .unwrap();
        let keypairs =
            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();

        for kp in &keypairs {
            assert_eq!(
@@ -1282,9 +982,8 @@ mod tests {
        let keypair_count = 20;
        let lamports = 20;

        let (keypairs, _move_keypairs) =
            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
                .unwrap();
        let keypairs =
            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();

        for kp in &keypairs {
            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
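Note (not part of the change set): after the removal of the Move/libra code paths above, the bench-tps call flow collapses to funding keypairs and running the bench. A minimal sketch based only on the new signatures shown in this diff; the helper name and the use of config.num_lamports_per_account as the funding amount are illustrative assumptions.

// Illustrative sketch only; client construction and error handling are placeholders.
fn run_bench<T: 'static + Client + Send + Sync>(client: std::sync::Arc<T>, config: Config) -> u64 {
    let keypair_count = config.tx_count * config.keypair_multiplier;
    let keypairs = generate_and_fund_keypairs(
        client.clone(),
        None, // no faucet: fund directly from the configured funding key
        &config.id,
        keypair_count,
        config.num_lamports_per_account,
    )
    .expect("could not fund keys");
    do_bench_tps(client, config, keypairs)
}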
@@ -23,7 +23,6 @@ pub struct Config {
    pub read_from_client_file: bool,
    pub target_lamports_per_signature: u64,
    pub multi_client: bool,
    pub use_move: bool,
    pub num_lamports_per_account: u64,
    pub target_slots_per_epoch: u64,
}
@@ -46,7 +45,6 @@ impl Default for Config {
            read_from_client_file: false,
            target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
            multi_client: true,
            use_move: false,
            num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
            target_slots_per_epoch: 0,
        }
@@ -109,11 +107,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("use-move")
                .long("use-move")
                .help("Use Move language transactions to perform transfers."),
        )
        .arg(
            Arg::with_name("no-multi-client")
                .long("no-multi-client")
@@ -263,7 +256,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
        args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
    }

    args.use_move = matches.is_present("use-move");
    args.multi_client = !matches.is_present("no-multi-client");

    if let Some(v) = matches.value_of("num_lamports_per_account") {
@@ -29,7 +29,6 @@ fn main() {
        write_to_client_file,
        read_from_client_file,
        target_lamports_per_signature,
        use_move,
        multi_client,
        num_lamports_per_account,
        ..
@@ -86,7 +85,7 @@ fn main() {
        Arc::new(get_client(&nodes))
    };

    let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
    let keypairs = if *read_from_client_file {
        let path = Path::new(&client_ids_and_stake_file);
        let file = File::open(path).unwrap();

@@ -115,8 +114,8 @@ fn main() {
        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
        // This prevents the amount of storage needed for bench-tps accounts from creeping up
        // across multiple runs.
        keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
        (keypairs, None)
        keypairs.sort_by_key(|x| x.pubkey().to_string());
        keypairs
    } else {
        generate_and_fund_keypairs(
            client.clone(),
@@ -124,7 +123,6 @@ fn main() {
            &id,
            keypair_count,
            *num_lamports_per_account,
            *use_move,
        )
        .unwrap_or_else(|e| {
            eprintln!("Error could not fund keys: {:?}", e);
@@ -132,5 +130,5 @@ fn main() {
        })
    };

    do_bench_tps(client, cli_config, keypairs, move_keypairs);
    do_bench_tps(client, cli_config, keypairs);
}
@@ -6,17 +6,11 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_faucet::faucet::run_local_faucet;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, Signer};
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
    #[cfg(feature = "move")]
    let native_instruction_processors = vec![solana_move_loader_program()];

    #[cfg(not(feature = "move"))]
    let native_instruction_processors = vec![];

    solana_logger::setup();
@@ -48,17 +42,16 @@ fn test_bench_tps_local_cluster(config: Config) {
    let lamports_per_account = 100;

    let keypair_count = config.tx_count * config.keypair_multiplier;
    let (keypairs, move_keypairs) = generate_and_fund_keypairs(
    let keypairs = generate_and_fund_keypairs(
        client.clone(),
        Some(faucet_addr),
        &config.id,
        keypair_count,
        lamports_per_account,
        config.use_move,
    )
    .unwrap();

    let _total = do_bench_tps(client, config, keypairs, move_keypairs);
    let _total = do_bench_tps(client, config, keypairs);

    #[cfg(not(debug_assertions))]
    assert!(_total > 100);
@@ -73,14 +66,3 @@ fn test_bench_tps_local_cluster_solana() {

    test_bench_tps_local_cluster(config);
}

#[test]
#[serial]
fn test_bench_tps_local_cluster_move() {
    let mut config = Config::default();
    config.tx_count = 100;
    config.duration = Duration::from_secs(10);
    config.use_move = true;

    test_bench_tps_local_cluster(config);
}
@@ -211,12 +211,7 @@ pull_or_push_steps() {
    all_test_steps
  fi

  # doc/ changes:
  if affects ^docs/; then
    command_step docs ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image docs/build.sh" 5
  fi

  # web3.js and explorer changes run on Travis...
  # web3.js, explorer and docs changes run on Travis...
}

@@ -5,9 +5,6 @@ steps:
  - command: "ci/publish-tarball.sh"
    timeout_in_minutes: 60
    name: "publish tarball"
  - command: "ci/publish-docs.sh"
    timeout_in_minutes: 15
    name: "publish docs"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
@@ -19,6 +16,3 @@ steps:
    timeout_in_minutes: 240
    name: "publish crate"
    branches: "!master"
#  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
#    name: "move"
#    timeout_in_minutes: 20
@@ -12,7 +12,7 @@ if [[ -n $CI ]]; then
    export CI_BUILD_ID=$TRAVIS_BUILD_ID
    export CI_COMMIT=$TRAVIS_COMMIT
    export CI_JOB_ID=$TRAVIS_JOB_ID
    if $TRAVIS_PULL_REQUEST; then
    if [[ $TRAVIS_PULL_REQUEST != false ]]; then
      export CI_PULL_REQUEST=true
    else
      export CI_PULL_REQUEST=
@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

echo --- build docs
(
  set -x
  . ci/rust-version.sh stable
  ci/docker-run.sh "$rust_stable_docker_image" docs/build.sh
)

echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then
  (
    # make a local commit for the svgs and generated/updated markdown
    set -x
    git add -f docs/src
    if ! git diff-index --quiet HEAD; then
      git config user.email maintainers@solana.com
      git config user.name "$(basename "$0")"
      git commit -m "gitbook-cage update $(date -Is)"
      git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
      # pop off the local commit
      git reset --hard HEAD~
    fi
  )
else
  echo CI_BRANCH not set
fi

exit 0
@@ -45,7 +45,7 @@ linux)
  TARGET=x86_64-unknown-linux-gnu
  ;;
windows)
  TARGET=x86_64-pc-windows-gnu
  TARGET=x86_64-pc-windows-msvc
  ;;
*)
  echo CI_OS_NAME unset
@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

timeout 15 ./run.sh &
timeout 120 ./run.sh &
pid=$!

attempts=20
@@ -27,5 +27,5 @@ Alternatively, you can source it from within a script:
    local PATCH=0
    local SPECIAL=""

    semverParseInto "1.2.8" MAJOR MINOR PATCH SPECIAL
    semverParseInto "1.2.23" MAJOR MINOR PATCH SPECIAL
    semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
@@ -34,7 +34,6 @@ _ cargo +"$rust_stable" clippy --workspace -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/order-crates-for-publishing.py
_ docs/build.sh

{
  cd programs/bpf

@@ -1 +0,0 @@
test-stable.sh
@@ -47,7 +47,6 @@ echo "Executing $testName"
case $testName in
test-stable)
  _ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
  ;;
test-stable-perf)
  ci/affects-files.sh \
@@ -93,27 +92,6 @@ test-stable-perf)
  _ cargo +"$rust_stable" build --bins ${V:+--verbose}
  _ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
  ;;
test-move)
  ci/affects-files.sh \
    Cargo.lock$ \
    Cargo.toml$ \
    ^ci/rust-version.sh \
    ^ci/test-stable.sh \
    ^ci/test-move.sh \
    ^programs/move_loader \
    ^programs/librapay \
    ^logger/ \
    ^runtime/ \
    ^sdk/ \
  || {
    annotate --style info \
      "Skipped $testName as no relevant files were modified"
    exit 0
  }
  _ cargo +"$rust_stable" test --manifest-path programs/move_loader/Cargo.toml ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --manifest-path programs/librapay/Cargo.toml ${V:+--verbose} -- --nocapture
  exit 0
  ;;
test-local-cluster)
  _ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
  _ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.2.8"
version = "1.2.23"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.8"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,28 +27,29 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.110"
serde_derive = "1.0.103"
serde_json = "1.0.53"
solana-budget-program = { path = "../programs/budget", version = "1.2.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
solana-cli-config = { path = "../cli-config", version = "1.2.8" }
solana-client = { path = "../client", version = "1.2.8" }
solana-config-program = { path = "../programs/config", version = "1.2.8" }
solana-faucet = { path = "../faucet", version = "1.2.8" }
solana-logger = { path = "../logger", version = "1.2.8" }
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.8" }
solana-runtime = { path = "../runtime", version = "1.2.8" }
solana-sdk = { path = "../sdk", version = "1.2.8" }
solana-stake-program = { path = "../programs/stake", version = "1.2.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.8" }
solana-version = { path = "../version", version = "1.2.8" }
solana-vote-program = { path = "../programs/vote", version = "1.2.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.2.8" }
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-cli-config = { path = "../cli-config", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-config-program = { path = "../programs/config", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
solana-vote-signer = { path = "../vote-signer", version = "1.2.23" }
thiserror = "1.0.19"
url = "2.1.1"

[dev-dependencies]
solana-core = { path = "../core", version = "1.2.8" }
solana-budget-program = { path = "../programs/budget", version = "1.2.8" }
solana-core = { path = "../core", version = "1.2.23" }
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
tempfile = "3.1.0"

[[bin]]
101  cli/src/cli.rs
@@ -15,6 +15,7 @@ use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_budget_program::budget_instruction::{self, BudgetError};
use solana_clap_utils::{
    commitment::{commitment_arg_with_default, COMMITMENT_ARG},
@@ -28,7 +29,7 @@ use solana_client::{
    client_error::{ClientError, ClientErrorKind, Result as ClientResult},
    rpc_client::RpcClient,
    rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
    rpc_response::{RpcAccount, RpcKeyedAccount},
    rpc_response::{Response, RpcKeyedAccount},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
@@ -57,7 +58,7 @@ use solana_stake_program::{
    stake_instruction::LockupArgs,
    stake_state::{Lockup, StakeAuthorize},
};
use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
    error,
@@ -245,8 +246,8 @@ pub enum CliCommand {
    },
    TransactionHistory {
        address: Pubkey,
        end_slot: Option<Slot>,  // None == latest slot
        slot_limit: Option<u64>, // None == search full history
        before: Option<Signature>,
        limit: usize,
    },
    // Nonce commands
    AuthorizeNonceAccount {
@@ -381,6 +382,7 @@ pub enum CliCommand {
    },
    // Vote Commands
    CreateVoteAccount {
        vote_account: SignerIndex,
        seed: Option<String>,
        identity_account: SignerIndex,
        authorized_voter: Option<Pubkey>,
@@ -406,10 +408,12 @@ pub enum CliCommand {
    VoteUpdateValidator {
        vote_account_pubkey: Pubkey,
        new_identity_account: SignerIndex,
        withdraw_authority: SignerIndex,
    },
    VoteUpdateCommission {
        vote_account_pubkey: Pubkey,
        commission: u8,
        withdraw_authority: SignerIndex,
    },
    // Wallet Commands
    Address,
@@ -1173,7 +1177,7 @@ fn process_confirm(
    if let Some(transaction_status) = status {
        if config.verbose {
            match rpc_client
                .get_confirmed_transaction(signature, TransactionEncoding::Binary)
                .get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
            {
                Ok(confirmed_transaction) => {
                    println!(
@@ -1226,7 +1230,13 @@ fn process_show_account(
    let cli_account = CliAccount {
        keyed_account: RpcKeyedAccount {
            pubkey: account_pubkey.to_string(),
            account: RpcAccount::encode(account),
            account: UiAccount::encode(
                account_pubkey,
                account,
                UiAccountEncoding::Binary64,
                None,
                None,
            ),
        },
        use_lamports_unit,
    };
@@ -1304,23 +1314,16 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
        transactions_signatures = transactions_signatures
            .into_iter()
            .filter(|(_transaction, signature)| {
                if let Some(signature) = signature {
                    if let Ok(status) = rpc_client.get_signature_status(&signature) {
                        if rpc_client
                            .get_num_blocks_since_signature_confirmation(&signature)
                            .unwrap_or(0)
                            > 1
                        {
                            return false;
                        } else {
                            return match status {
                                None => true,
                                Some(result) => result.is_err(),
                            };
                signature
                    .and_then(|signature| rpc_client.get_signature_statuses(&[signature]).ok())
                    .map(|Response { context: _, value }| match &value[0] {
                        None => true,
                        Some(transaction_status) => {
                            !(transaction_status.confirmations.is_none()
                                || transaction_status.confirmations.unwrap() > 1)
                        }
                    }
                }
                true
            })
                    .unwrap_or(true)
            })
            .collect();

@@ -1375,7 +1378,7 @@ fn process_deploy(
    create_account_tx.try_sign(&[config.signers[0], &program_id], blockhash)?;
    messages.push(&create_account_tx.message);
    let signers = [config.signers[0], &program_id];
    let mut write_transactions = vec![];
    let mut write_messages = vec![];
    for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
        let instruction = loader_instruction::write(
            &program_id.pubkey(),
@@ -1384,19 +1387,17 @@ fn process_deploy(
            chunk.to_vec(),
        );
        let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
        let mut tx = Transaction::new_unsigned(message);
        tx.try_sign(&signers, blockhash)?;
        write_transactions.push(tx);
        write_messages.push(message);
    }
    for transaction in write_transactions.iter() {
        messages.push(&transaction.message);
    let mut write_message_refs = vec![];
    for message in write_messages.iter() {
        write_message_refs.push(message);
    }
    messages.append(&mut write_message_refs);

    let instruction = loader_instruction::finalize(&program_id.pubkey(), &bpf_loader::id());
    let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
    let mut finalize_tx = Transaction::new_unsigned(message);
    finalize_tx.try_sign(&signers, blockhash)?;
    messages.push(&finalize_tx.message);
    let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
    messages.push(&finalize_message);

    check_account_for_multiple_fees(
        rpc_client,
@@ -1411,11 +1412,24 @@ fn process_deploy(
        CliError::DynamicProgramError("Program account allocation failed".to_string())
    })?;

    let (blockhash, _) = rpc_client.get_recent_blockhash()?;

    let mut write_transactions = vec![];
    for message in write_messages.into_iter() {
        let mut tx = Transaction::new_unsigned(message);
        tx.try_sign(&signers, blockhash)?;
        write_transactions.push(tx);
    }

    trace!("Writing program data");
    send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
        |_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
    )?;

    let (blockhash, _) = rpc_client.get_recent_blockhash()?;
    let mut finalize_tx = Transaction::new_unsigned(finalize_message);
    finalize_tx.try_sign(&signers, blockhash)?;

    trace!("Finalizing program account");
    rpc_client
        .send_and_confirm_transaction_with_spinner_and_config(
@@ -1833,9 +1847,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        } => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
        CliCommand::TransactionHistory {
            address,
            end_slot,
            slot_limit,
        } => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
            before,
            limit,
        } => process_transaction_history(&rpc_client, config, address, *before, *limit),

        // Nonce Commands

@@ -2127,6 +2141,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {

        // Create vote account
        CliCommand::CreateVoteAccount {
            vote_account,
            seed,
            identity_account,
            authorized_voter,
@@ -2135,6 +2150,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        } => process_create_vote_account(
            &rpc_client,
            config,
            *vote_account,
            seed,
            *identity_account,
            authorized_voter,
@@ -2179,16 +2195,25 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        CliCommand::VoteUpdateValidator {
            vote_account_pubkey,
            new_identity_account,
            withdraw_authority,
        } => process_vote_update_validator(
            &rpc_client,
            config,
            &vote_account_pubkey,
            *new_identity_account,
            *withdraw_authority,
        ),
        CliCommand::VoteUpdateCommission {
            vote_account_pubkey,
            commission,
        } => process_vote_update_commission(&rpc_client, config, &vote_account_pubkey, *commission),
            withdraw_authority,
        } => process_vote_update_commission(
            &rpc_client,
            config,
            &vote_account_pubkey,
            *commission,
            *withdraw_authority,
        ),

        // Wallet Commands

@@ -3416,6 +3441,7 @@ mod tests {
        let bob_pubkey = bob_keypair.pubkey();
        let identity_keypair = Keypair::new();
        config.command = CliCommand::CreateVoteAccount {
            vote_account: 1,
            seed: None,
            identity_account: 2,
            authorized_voter: Some(bob_pubkey),
@@ -3441,6 +3467,7 @@ mod tests {
        config.command = CliCommand::VoteUpdateValidator {
            vote_account_pubkey: bob_pubkey,
            new_identity_account: 2,
            withdraw_authority: 1,
        };
        let result = process_command(&config);
        assert!(result.is_ok());
@@ -3658,6 +3685,7 @@ mod tests {
        let bob_keypair = Keypair::new();
        let identity_keypair = Keypair::new();
        config.command = CliCommand::CreateVoteAccount {
            vote_account: 1,
            seed: None,
            identity_account: 2,
            authorized_voter: Some(bob_pubkey),
@@ -3677,6 +3705,7 @@ mod tests {
        config.command = CliCommand::VoteUpdateValidator {
            vote_account_pubkey: bob_pubkey,
            new_identity_account: 1,
            withdraw_authority: 1,
        };
        assert!(process_command(&config).is_err());

@@ -16,7 +16,6 @@ use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
|
||||
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -27,6 +26,7 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
signature::Signature,
|
||||
system_instruction, system_program,
|
||||
sysvar::{
|
||||
self,
|
||||
@@ -256,9 +256,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
)
.subcommand(
SubCommand::with_name("transaction-history")
.about("Show historical transactions affecting the given address, \
ordered based on the slot in which they were confirmed in \
from lowest to highest slot")
.about("Show historical transactions affecting the given address \
from newest to oldest")
.arg(
pubkey!(Arg::with_name("address")
.index(1)
@@ -266,26 +265,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.required(true),
"Account address"),
)
.arg(
Arg::with_name("end_slot")
.takes_value(false)
.value_name("SLOT")
.index(2)
.validator(is_slot)
.help(
"Slot to start from [default: latest slot at maximum commitment]"
),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("NUMBER OF SLOTS")
.value_name("LIMIT")
.validator(is_slot)
.help(
"Limit the search to this many slots"
),
),
.default_value("1000")
.help("Maximum number of transaction signatures to return"),
)
.arg(
Arg::with_name("before")
.long("before")
.value_name("TRANSACTION_SIGNATURE")
.takes_value(true)
.help("Start with the first signature older than this one"),
)
)
}
}
@@ -453,14 +448,22 @@ pub fn parse_transaction_history(
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64).ok();

let before = match matches.value_of("before") {
Some(signature) => Some(
signature
.parse()
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
),
None => None,
};
let limit = value_t_or_exit!(matches, "limit", usize);

Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
before,
limit,
},
signers: vec![],
})
@@ -1276,41 +1279,36 @@ pub fn process_show_validators(

pub fn process_transaction_history(
rpc_client: &RpcClient,
config: &CliConfig,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: Option<u64>,
before: Option<Signature>,
limit: usize,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
end_slot
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
address,
before,
Some(limit),
)?;

let transactions_found = format!("{} transactions found", results.len());

for result in results {
if config.verbose {
println!(
"{} [slot={} status={}] {}",
result.signature,
result.slot,
match result.err {
None => "Confirmed".to_string(),
Some(err) => format!("Failed: {:?}", err),
},
result.memo.unwrap_or_else(|| "".to_string()),
);
} else {
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
println!("{}", result.signature);
}
};
let mut start_slot = match slot_limit {
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
None => rpc_client.minimum_ledger_slot()?,
};

println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);

let mut transaction_count = 0;
while start_slot < end_slot {
let signatures = rpc_client.get_confirmed_signatures_for_address(
address,
start_slot,
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
)?;
for signature in &signatures {
println!("{}", signature);
}
transaction_count += signatures.len();
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
}
Ok(format!("{} transactions found", transaction_count))
Ok(transactions_found)
}

#[cfg(test)]

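For reference, a minimal usage sketch (not part of the diff above) of how the reworked transaction-history path drives the new RpcClient::get_confirmed_signatures_for_address2_with_config call; the endpoint URL, address, and limit below are placeholder assumptions, not values from this changeset.

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint and address; substitute real values.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let address = Pubkey::from_str("Vote111111111111111111111111111111111111111").unwrap();

    // Page from the newest signature backwards, up to the CLI's default of 1000 results.
    let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
        &address,
        None,       // `before`: None starts from the most recent signature
        Some(1000), // `limit`
    )?;
    for result in results {
        println!("{} [slot={}]", result.signature, result.slot);
    }
    Ok(())
}
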
@@ -5,7 +5,7 @@ use solana_sdk::{
|
||||
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_transaction_status::RpcTransactionStatusMeta;
|
||||
use solana_transaction_status::UiTransactionStatusMeta;
|
||||
use std::{fmt, io};
|
||||
|
||||
// Pretty print a "name value"
|
||||
@@ -68,7 +68,7 @@ pub fn println_signers(
|
||||
pub fn write_transaction<W: io::Write>(
|
||||
w: &mut W,
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
transaction_status: &Option<UiTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) -> io::Result<()> {
|
||||
let message = &transaction.message;
|
||||
@@ -191,7 +191,7 @@ pub fn write_transaction<W: io::Write>(
|
||||
|
||||
pub fn println_transaction(
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
transaction_status: &Option<UiTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) {
|
||||
let mut w = Vec::new();
|
||||
|
@@ -106,9 +106,10 @@ mod tests {
|
||||
use crate::{nonce::nonce_arg, offline::blockhash_query::BlockhashQuery};
|
||||
use clap::App;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_client::{
|
||||
rpc_request::RpcRequest,
|
||||
rpc_response::{Response, RpcAccount, RpcFeeCalculator, RpcResponseContext},
|
||||
rpc_response::{Response, RpcFeeCalculator, RpcResponseContext},
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account, fee_calculator::FeeCalculator, hash::hash, nonce, system_program,
|
||||
@@ -344,7 +345,13 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
|
||||
let rpc_nonce_account = RpcAccount::encode(nonce_account);
|
||||
let rpc_nonce_account = UiAccount::encode(
|
||||
&nonce_pubkey,
|
||||
nonce_account,
|
||||
UiAccountEncoding::Binary64,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
let get_account_response = json!(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: json!(Some(rpc_nonce_account)),
|
||||
|
@@ -6,9 +6,10 @@ use crate::{
|
||||
use bincode::deserialize;
|
||||
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use reqwest::blocking::Client;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
use solana_account_decoder::validator_info::{
|
||||
self, ValidatorInfo, MAX_LONG_FIELD_LENGTH, MAX_SHORT_FIELD_LENGTH,
|
||||
};
|
||||
use solana_clap_utils::{
|
||||
input_parsers::pubkey_of,
|
||||
input_validators::{is_pubkey, is_url},
|
||||
@@ -27,23 +28,6 @@ use solana_sdk::{
|
||||
};
|
||||
use std::{error, sync::Arc};
|
||||
|
||||
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
|
||||
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
|
||||
pub const MAX_VALIDATOR_INFO: u64 = 576;
|
||||
|
||||
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
|
||||
|
||||
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
|
||||
pub struct ValidatorInfo {
|
||||
info: String,
|
||||
}
|
||||
|
||||
impl ConfigState for ValidatorInfo {
|
||||
fn max_space() -> u64 {
|
||||
MAX_VALIDATOR_INFO
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a validator details are longer than the max length.
|
||||
pub fn check_details_length(string: String) -> Result<(), String> {
|
||||
if string.len() > MAX_LONG_FIELD_LENGTH {
|
||||
@@ -289,7 +273,7 @@ pub fn process_set_validator_info(
|
||||
.iter()
|
||||
.filter(|(_, account)| {
|
||||
let key_list: ConfigKeys = deserialize(&account.data).map_err(|_| false).unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.find(|(pubkey, account)| {
|
||||
let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
|
||||
@@ -328,7 +312,10 @@ pub fn process_set_validator_info(
|
||||
};
|
||||
|
||||
let build_message = |lamports| {
|
||||
let keys = vec![(id(), false), (config.signers[0].pubkey(), true)];
|
||||
let keys = vec![
|
||||
(validator_info::id(), false),
|
||||
(config.signers[0].pubkey(), true),
|
||||
];
|
||||
if balance == 0 {
|
||||
println!(
|
||||
"Publishing info for Validator {:?}",
|
||||
@@ -400,7 +387,7 @@ pub fn process_get_validator_info(
|
||||
let key_list: ConfigKeys = deserialize(&validator_info_account.data)
|
||||
.map_err(|_| false)
|
||||
.unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
@@ -502,7 +489,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_validator_info() {
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let keys = vec![(id(), false), (pubkey, true)];
|
||||
let keys = vec![(validator_info::id(), false), (pubkey, true)];
|
||||
let config = ConfigKeys { keys };
|
||||
|
||||
let mut info = Map::new();
|
||||
|
@@ -253,7 +253,7 @@ pub fn parse_create_vote_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let (identity_account, identity_pubkey) =
|
||||
signer_of(matches, "identity_account", wallet_manager)?;
|
||||
@@ -271,6 +271,7 @@ pub fn parse_create_vote_account(
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
|
||||
seed,
|
||||
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
|
||||
authorized_voter,
|
||||
@@ -320,7 +321,8 @@ pub fn parse_vote_update_validator(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (new_identity_account, new_identity_pubkey) =
|
||||
signer_of(matches, "new_identity_account", wallet_manager)?;
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
@@ -334,6 +336,7 @@ pub fn parse_vote_update_validator(
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@@ -346,7 +349,8 @@ pub fn parse_vote_update_commission(
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let commission = value_t_or_exit!(matches, "commission", u8);
|
||||
|
||||
let payer_provided = None;
|
||||
@@ -361,6 +365,7 @@ pub fn parse_vote_update_commission(
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@@ -420,13 +425,14 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account: SignerIndex,
|
||||
seed: &Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
) -> ProcessResult {
|
||||
let vote_account = config.signers[1];
|
||||
let vote_account = config.signers[vote_account];
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
let vote_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
@@ -551,8 +557,9 @@ pub fn process_vote_update_validator(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let new_identity_account = config.signers[new_identity_account];
|
||||
let new_identity_pubkey = new_identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
@@ -584,8 +591,9 @@ pub fn process_vote_update_commission(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_commission(
|
||||
vote_account_pubkey,
|
||||
@@ -817,6 +825,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -845,6 +854,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -877,6 +887,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(authed),
|
||||
@@ -907,6 +918,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -934,6 +946,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -956,6 +969,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey: pubkey,
|
||||
commission: 42,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
|
@@ -57,6 +57,7 @@ fn test_stake_delegation_force() {
|
||||
let vote_keypair = Keypair::new();
|
||||
config.signers = vec![&default_signer, &vote_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
|
@@ -49,6 +49,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
let vote_account_pubkey = vote_account_keypair.pubkey();
|
||||
config.signers = vec![&default_signer, &vote_account_keypair];
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 0,
|
||||
authorized_voter: None,
|
||||
@@ -120,6 +121,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.2.8"
|
||||
version = "1.2.23"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,10 +19,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.8" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.8" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.8" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@@ -31,7 +32,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.1.0"
|
||||
jsonrpc-http-server = "14.1.0"
|
||||
solana-logger = { path = "../logger", version = "1.2.8" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
pub mod rpc_sender;

@@ -2,8 +2,12 @@ use crate::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
http_sender::HttpSender,
|
||||
mock_sender::{MockSender, Mocks},
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_config::RpcAccountInfoConfig,
|
||||
rpc_config::{
|
||||
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
|
||||
RpcSendTransactionConfig, RpcTokenAccountsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
@@ -11,6 +15,12 @@ use bincode::serialize;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_account_decoder::{
|
||||
parse_token::UiTokenAmount,
|
||||
UiAccount,
|
||||
UiAccountData::{Binary, Binary64},
|
||||
UiAccountEncoding,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{
|
||||
@@ -28,7 +38,7 @@ use solana_sdk::{
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
@@ -238,13 +248,13 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, TransactionEncoding::Json)
|
||||
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
|
||||
}
|
||||
@@ -282,10 +292,36 @@ impl RpcClient {
Ok(signatures)
}

pub fn get_confirmed_signatures_for_address2(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(address, None, None)
}

pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
before: Option<Signature>,
limit: Option<usize>,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: before.map(|signature| signature.to_string()),
limit,
};

let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress2,
json!([address.to_string(), config]),
)?;

Ok(result)
}

pub fn get_confirmed_transaction(
&self,
signature: &Signature,
encoding: TransactionEncoding,
encoding: UiTransactionEncoding,
) -> ClientResult<ConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
@@ -437,9 +473,14 @@ impl RpcClient {
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<Account>> {
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Binary64),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
let response = self.sender.send(
|
||||
RpcRequest::GetAccountInfo,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
json!([pubkey.to_string(), config]),
|
||||
);
|
||||
|
||||
response
|
||||
@@ -451,10 +492,19 @@ impl RpcClient {
|
||||
}
|
||||
let Response {
|
||||
context,
|
||||
value: rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<RpcAccount>>>(result_json)?;
|
||||
value: mut rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
|
||||
if let Some(ref mut account) = rpc_account {
|
||||
if let Binary(_) = &account.data {
|
||||
let tmp = Binary64(String::new());
|
||||
match std::mem::replace(&mut account.data, tmp) {
|
||||
Binary(new_data) => account.data = Binary64(new_data),
|
||||
_ => panic!("should have gotten binary here."),
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("Response account {:?} {:?}", pubkey, rpc_account);
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode().ok());
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account,
|
||||
@@ -510,17 +560,7 @@ impl RpcClient {
|
||||
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let accounts: Vec<RpcKeyedAccount> =
|
||||
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
RpcRequest::GetProgramAccounts,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((pubkey, account.decode().unwrap()));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
|
||||
}
|
||||
|
||||
/// Request the transaction count.
|
||||
@@ -667,6 +707,118 @@ impl RpcClient {
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountBalance,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<RpcKeyedAccount>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate_with_commitment(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<RpcKeyedAccount>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([delegate.to_string(), token_account_filter, config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<RpcKeyedAccount>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner_with_commitment(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<RpcKeyedAccount>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([owner.to_string(), token_account_filter, config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_supply_with_commitment(
|
||||
&self,
|
||||
mint: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenSupply,
|
||||
json!([mint.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout_and_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
@@ -1008,6 +1160,31 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_keyed_accounts(
|
||||
accounts: Vec<RpcKeyedAccount>,
|
||||
request: RpcRequest,
|
||||
) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((
|
||||
pubkey,
|
||||
account.decode().ok_or_else(|| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Account from rpc".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?,
|
||||
));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
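A brief usage sketch (not part of the diff) for the new SPL token helpers added to RpcClient above; the endpoint, owner, and mint pubkeys are placeholder assumptions.

use solana_client::{rpc_client::RpcClient, rpc_request::TokenAccountsFilter};
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint and keys; substitute real values.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let owner = Pubkey::from_str("11111111111111111111111111111111").unwrap();
    let mint = Pubkey::from_str("So11111111111111111111111111111111111111112").unwrap();

    // Accounts come back as jsonParsed RpcKeyedAccounts keyed by address.
    let token_accounts =
        rpc_client.get_token_accounts_by_owner(&owner, TokenAccountsFilter::Mint(mint))?;
    for keyed_account in &token_accounts {
        println!("{}", keyed_account.pubkey);
    }
    Ok(())
}
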
@@ -1,3 +1,5 @@
|
||||
use crate::rpc_filter::RpcFilterType;
|
||||
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
|
||||
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
@@ -35,8 +37,39 @@ pub struct RpcLargestAccountsConfig {
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcInflationConfig {
|
||||
pub struct RpcStakeConfig {
|
||||
pub epoch: Option<Epoch>,
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccountInfoConfig {
|
||||
pub encoding: Option<UiAccountEncoding>,
|
||||
pub data_slice: Option<UiDataSliceConfig>,
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcProgramAccountsConfig {
|
||||
pub filters: Option<Vec<RpcFilterType>>,
|
||||
#[serde(flatten)]
|
||||
pub account_config: RpcAccountInfoConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcTokenAccountsFilter {
|
||||
Mint(String),
|
||||
ProgramId(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
pub before: Option<String>, // Signature as base-58 string
pub limit: Option<usize>,
}

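The struct above is the second positional parameter of the getConfirmedSignaturesForAddress2 request; a small sketch (an assumption for illustration, not taken from the diff) of how it serializes with serde_json, using a placeholder address:

use serde_json::json;
use solana_client::rpc_config::RpcGetConfirmedSignaturesForAddress2Config;

fn main() {
    let config = RpcGetConfirmedSignaturesForAddress2Config {
        before: None, // or Some(signature.to_string()) to page backwards
        limit: Some(500),
    };
    // Serializes roughly as: ["<address>", {"before":null,"limit":500}]
    let params = json!(["Vote111111111111111111111111111111111111111", config]);
    println!("{}", params);
}
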
client/src/rpc_filter.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcFilterType {
|
||||
DataSize(u64),
|
||||
Memcmp(Memcmp),
|
||||
}
|
||||
|
||||
impl RpcFilterType {
|
||||
pub fn verify(&self) -> Result<(), RpcFilterError> {
|
||||
match self {
|
||||
RpcFilterType::DataSize(_) => Ok(()),
|
||||
RpcFilterType::Memcmp(compare) => {
|
||||
let encoding = compare.encoding.as_ref().unwrap_or(&MemcmpEncoding::Binary);
|
||||
match encoding {
|
||||
MemcmpEncoding::Binary => {
|
||||
let MemcmpEncodedBytes::Binary(bytes) = &compare.bytes;
|
||||
bs58::decode(&bytes)
|
||||
.into_vec()
|
||||
.map(|_| ())
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RpcFilterError {
|
||||
#[error("bs58 decode error")]
|
||||
DecodeError(#[from] bs58::decode::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum MemcmpEncoding {
|
||||
Binary,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", untagged)]
|
||||
pub enum MemcmpEncodedBytes {
|
||||
Binary(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Memcmp {
|
||||
/// Data offset to begin match
|
||||
pub offset: usize,
|
||||
/// Bytes, encoded with specified encoding, or default Binary
|
||||
pub bytes: MemcmpEncodedBytes,
|
||||
/// Optional encoding specification
|
||||
pub encoding: Option<MemcmpEncoding>,
|
||||
}
|
||||
|
||||
impl Memcmp {
|
||||
pub fn bytes_match(&self, data: &[u8]) -> bool {
|
||||
match &self.bytes {
|
||||
MemcmpEncodedBytes::Binary(bytes) => {
|
||||
let bytes = bs58::decode(bytes).into_vec();
|
||||
if bytes.is_err() {
|
||||
return false;
|
||||
}
|
||||
let bytes = bytes.unwrap();
|
||||
if self.offset > data.len() {
|
||||
return false;
|
||||
}
|
||||
if data[self.offset..].len() < bytes.len() {
|
||||
return false;
|
||||
}
|
||||
data[self.offset..self.offset + bytes.len()] == bytes[..]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_bytes_match() {
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
// Exact match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2, 3, 4, 5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Incorrect partial match of data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Bytes overrun data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4, 5, 6]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset outside data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 6,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Invalid base-58 fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary("III".to_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
}
|
||||
}
|
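A short sketch (not part of the new file above) of how the rpc_filter types might be exercised client-side; it assumes the bs58 crate is available to the caller, as it is within this crate.

use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType};

fn main() {
    // Filter for accounts whose data begins with the bytes [1, 2, 3],
    // expressed as base-58 per MemcmpEncodedBytes::Binary.
    let filter = RpcFilterType::Memcmp(Memcmp {
        offset: 0,
        bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2, 3]).into_string()),
        encoding: None,
    });

    // `verify` only checks that the encoded bytes are valid base-58.
    assert!(filter.verify().is_ok());

    // `bytes_match` compares the decoded bytes against raw account data at `offset`.
    if let RpcFilterType::Memcmp(compare) = &filter {
        assert!(compare.bytes_match(&[1, 2, 3, 4, 5]));
        assert!(!compare.bytes_match(&[9, 9, 9]));
    }
}
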
@@ -1,4 +1,5 @@
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
|
||||
@@ -13,6 +14,7 @@ pub enum RpcRequest {
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
@@ -36,6 +38,10 @@ pub enum RpcRequest {
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetSupply,
|
||||
GetTokenAccountBalance,
|
||||
GetTokenAccountsByDelegate,
|
||||
GetTokenAccountsByOwner,
|
||||
GetTokenSupply,
|
||||
GetTotalSupply,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
@@ -60,6 +66,7 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
@@ -83,6 +90,10 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetSupply => "getSupply",
|
||||
RpcRequest::GetTokenAccountBalance => "getTokenAccountBalance",
|
||||
RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
|
||||
RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
|
||||
RpcRequest::GetTokenSupply => "getTokenSupply",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
@@ -102,6 +113,8 @@ impl fmt::Display for RpcRequest {
|
||||
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
|
||||
|
||||
// Validators that are this number of slots behind are considered delinquent
|
||||
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
|
||||
@@ -130,9 +143,16 @@ pub enum RpcError {
|
||||
ForUser(String), /* "direct-to-user message" */
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum TokenAccountsFilter {
|
||||
Mint(Pubkey),
|
||||
ProgramId(Pubkey),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::rpc_config::RpcTokenAccountsFilter;
|
||||
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
|
||||
|
||||
#[test]
|
||||
@@ -197,5 +217,16 @@ mod tests {
|
||||
let request =
|
||||
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
|
||||
assert_eq!(request["params"], json!([addr, commitment_config]));
|
||||
|
||||
// Test request with CommitmentConfig and params
|
||||
let test_request = RpcRequest::GetTokenAccountsByOwner;
|
||||
let mint = Pubkey::new_rand();
|
||||
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
|
||||
let request = test_request
|
||||
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));
|
||||
assert_eq!(
|
||||
request["params"],
|
||||
json!([addr, token_account_filter, commitment_config])
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -1,13 +1,13 @@
|
||||
use crate::{client_error, rpc_request::RpcError};
|
||||
use crate::client_error;
|
||||
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
inflation::Inflation,
|
||||
pubkey::Pubkey,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
|
||||
use std::{collections::HashMap, net::SocketAddr};
|
||||
|
||||
pub type RpcResult<T> = client_error::Result<Response<T>>;
|
||||
|
||||
@@ -91,7 +91,7 @@ pub struct RpcInflationRate {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcKeyedAccount {
|
||||
pub pubkey: String,
|
||||
pub account: RpcAccount,
|
||||
pub account: UiAccount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
@@ -100,43 +100,6 @@ pub struct RpcSignatureResult {
|
||||
pub err: Option<TransactionError>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccount {
|
||||
pub lamports: u64,
|
||||
pub data: String,
|
||||
pub owner: String,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: Epoch,
|
||||
}
|
||||
|
||||
impl RpcAccount {
|
||||
pub fn encode(account: Account) -> Self {
|
||||
RpcAccount {
|
||||
lamports: account.lamports,
|
||||
data: bs58::encode(account.data.clone()).into_string(),
|
||||
owner: account.owner.to_string(),
|
||||
executable: account.executable,
|
||||
rent_epoch: account.rent_epoch,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> std::result::Result<Account, RpcError> {
|
||||
Ok(Account {
|
||||
lamports: self.lamports,
|
||||
data: bs58::decode(self.data.clone()).into_vec().map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account data".to_string())
|
||||
})?,
|
||||
owner: Pubkey::from_str(&self.owner).map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account owner".to_string())
|
||||
})?,
|
||||
executable: self.executable,
|
||||
rent_epoch: self.rent_epoch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct RpcContactInfo {
|
||||
/// Pubkey of the node as a base-58 string
|
||||
@@ -240,3 +203,54 @@ pub struct RpcSupply {
|
||||
pub non_circulating: u64,
|
||||
pub non_circulating_accounts: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum StakeActivationState {
|
||||
Activating,
|
||||
Active,
|
||||
Deactivating,
|
||||
Inactive,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStakeActivation {
|
||||
pub state: StakeActivationState,
|
||||
pub active: u64,
|
||||
pub inactive: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTokenAccountBalance {
|
||||
pub address: String,
|
||||
#[serde(flatten)]
|
||||
pub amount: UiTokenAmount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedTransactionStatusWithSignature {
|
||||
pub signature: String,
|
||||
pub slot: Slot,
|
||||
pub err: Option<TransactionError>,
|
||||
pub memo: Option<String>,
|
||||
}
|
||||
|
||||
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
|
||||
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
|
||||
let ConfirmedTransactionStatusWithSignature {
|
||||
signature,
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
} = value;
|
||||
Self {
|
||||
signature: signature.to_string(),
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.8"
|
||||
version = "1.2.23"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -21,7 +21,7 @@ byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
ed25519-dalek = "=1.0.0-pre.4"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
@@ -42,38 +42,42 @@ regex = "1.3.7"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.8" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.8" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.8" }
|
||||
solana-client = { path = "../client", version = "1.2.8" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.8" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.8" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.8" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.8" }
|
||||
solana-logger = { path = "../logger", version = "1.2.8" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.8" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.8" }
|
||||
solana-measure = { path = "../measure", version = "1.2.8" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.8" }
|
||||
solana-perf = { path = "../perf", version = "1.2.8" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.8" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.8" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.8" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.8" }
|
||||
solana-version = { path = "../version", version = "1.2.8" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.8" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.8" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.8" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.23" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-client = { path = "../client", version = "1.2.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.23" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.23" }
|
||||
solana-measure = { path = "../measure", version = "1.2.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
|
||||
solana-perf = { path = "../perf", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.2.23" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.23" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.23" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.23" }
|
||||
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.8" }
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
base64 = "0.12.3"
|
||||
matches = "0.1.6"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serial_test = "0.4.0"
|
||||
|
@@ -18,25 +18,32 @@ const INTERVAL_MS: u64 = 100;
|
||||
const SHRUNKEN_ACCOUNT_PER_SEC: usize = 250;
|
||||
const SHRUNKEN_ACCOUNT_PER_INTERVAL: usize =
|
||||
SHRUNKEN_ACCOUNT_PER_SEC / (1000 / INTERVAL_MS as usize);
|
||||
const CLEAN_INTERVAL_SLOTS: u64 = 100;
|
||||
|
||||
impl AccountsBackgroundService {
|
||||
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
|
||||
info!("AccountsBackgroundService active");
|
||||
let exit = exit.clone();
|
||||
let mut consumed_budget = 0;
|
||||
let mut last_cleaned_slot = 0;
|
||||
let t_background = Builder::new()
|
||||
.name("solana-accounts-background".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
|
||||
bank.process_dead_slots();
|
||||
|
||||
consumed_budget = bank
|
||||
.process_stale_slot_with_budget(consumed_budget, SHRUNKEN_ACCOUNT_PER_INTERVAL);
|
||||
|
||||
if bank.block_height() - last_cleaned_slot > CLEAN_INTERVAL_SLOTS {
|
||||
bank.clean_accounts();
|
||||
last_cleaned_slot = bank.block_height();
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(INTERVAL_MS));
|
||||
})
|
||||
.unwrap();
|
||||
|
@@ -509,7 +509,7 @@ impl BankingStage {
|
||||
// expires.
|
||||
let txs = batch.transactions();
|
||||
let pre_balances = if transaction_status_sender.is_some() {
|
||||
bank.collect_balances(txs)
|
||||
bank.collect_balances(batch)
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
@@ -545,10 +545,11 @@ impl BankingStage {
|
||||
.processing_results;
|
||||
|
||||
if let Some(sender) = transaction_status_sender {
|
||||
let post_balances = bank.collect_balances(txs);
|
||||
let post_balances = bank.collect_balances(batch);
|
||||
send_transaction_status_batch(
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
transaction_statuses,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
sender,
|
||||
|
@@ -1490,12 +1490,6 @@ impl ClusterInfo {
|
||||
.time_gossip_write_lock("purge", &self.stats.purge)
|
||||
.purge(timestamp(), &timeouts);
|
||||
inc_new_counter_info!("cluster_info-purge-count", num_purged);
|
||||
let table_size = self.gossip.read().unwrap().crds.table.len();
|
||||
datapoint_debug!(
|
||||
"cluster_info-purge",
|
||||
("table_size", table_size as i64, i64),
|
||||
("purge_stake_timeout", timeout as i64, i64)
|
||||
);
|
||||
}
|
||||
|
||||
/// randomly pick a node and ask them for updates asynchronously
|
||||
@@ -1743,7 +1737,7 @@ impl ClusterInfo {
|
||||
"generate_pull_responses",
|
||||
&self.stats.generate_pull_responses,
|
||||
)
|
||||
.generate_pull_responses(&caller_and_filters);
|
||||
.generate_pull_responses(&caller_and_filters, now);
|
||||
|
||||
self.time_gossip_write_lock("process_pull_reqs", &self.stats.process_pull_requests)
|
||||
.process_pull_requests(caller_and_filters, now);
|
||||
@@ -2085,6 +2079,10 @@ impl ClusterInfo {
|
||||
|
||||
fn print_reset_stats(&self, last_print: &mut Instant) {
|
||||
if last_print.elapsed().as_millis() > 2000 {
|
||||
let (table_size, purged_values_size) = {
|
||||
let r_gossip = self.gossip.read().unwrap();
|
||||
(r_gossip.crds.table.len(), r_gossip.pull.purged_values.len())
|
||||
};
|
||||
datapoint_info!(
|
||||
"cluster_info_stats",
|
||||
("entrypoint", self.stats.entrypoint.clear(), i64),
|
||||
@@ -2108,6 +2106,8 @@ impl ClusterInfo {
|
||||
self.stats.new_push_requests_num.clear(),
|
||||
i64
|
||||
),
|
||||
("table_size", table_size as i64, i64),
|
||||
("purged_values_size", purged_values_size as i64, i64),
|
||||
);
|
||||
datapoint_info!(
|
||||
"cluster_info_stats2",
|
||||
|
@@ -1,16 +1,18 @@
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
consensus::PubkeyVotes,
|
||||
crds_value::CrdsValueLabel,
|
||||
poh_recorder::PohRecorder,
|
||||
pubkey_references::LockedPubkeyReferences,
|
||||
replay_stage::ReplayVotesReceiver,
|
||||
result::{Error, Result},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify,
|
||||
verified_vote_packets::VerifiedVotePackets,
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select, Sender as CrossbeamSender,
|
||||
};
|
||||
use itertools::izip;
|
||||
use log::*;
|
||||
@@ -30,7 +32,7 @@ use solana_sdk::{
|
||||
};
|
||||
use solana_vote_program::vote_instruction::VoteInstruction;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
{Arc, Mutex, RwLock},
|
||||
@@ -40,16 +42,22 @@ use std::{
|
||||
};
|
||||
|
||||
// Map from a vote account to the authorized voter for an epoch
|
||||
pub type VerifiedVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
|
||||
pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
|
||||
pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec<Slot>)>;
|
||||
pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec<Slot>)>;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct SlotVoteTracker {
|
||||
voted: HashSet<Arc<Pubkey>>,
|
||||
// Maps pubkeys that have voted for this slot
|
||||
// to whether or not we've seen the vote on gossip.
|
||||
// True if seen on gossip, false if only seen in replay.
|
||||
voted: HashMap<Arc<Pubkey>, bool>,
|
||||
updates: Option<Vec<Arc<Pubkey>>>,
|
||||
pub total_stake: u64,
|
||||
total_stake: u64,
|
||||
gossip_only_stake: u64,
|
||||
}
|
||||
|
||||
impl SlotVoteTracker {
|
||||
@@ -62,7 +70,7 @@ impl SlotVoteTracker {
|
||||
#[derive(Default)]
|
||||
pub struct VoteTracker {
|
||||
// Map from a slot to a set of validators who have voted for that slot
|
||||
pub slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
|
||||
slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
|
||||
// Don't track votes from people who are not staked, acts as a spam filter
|
||||
epoch_authorized_voters: RwLock<HashMap<Epoch, Arc<EpochAuthorizedVoters>>>,
|
||||
leader_schedule_epoch: RwLock<Epoch>,
|
||||
@@ -126,7 +134,7 @@ impl VoteTracker {
|
||||
|
||||
let mut w_slot_vote_tracker = slot_vote_tracker.write().unwrap();
|
||||
|
||||
w_slot_vote_tracker.voted.insert(pubkey.clone());
|
||||
w_slot_vote_tracker.voted.insert(pubkey.clone(), true);
|
||||
if let Some(ref mut updates) = w_slot_vote_tracker.updates {
|
||||
updates.push(pubkey.clone())
|
||||
} else {
|
||||
@@ -202,15 +210,18 @@ impl ClusterInfoVoteListener {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
sender: CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: CrossbeamSender<Vec<Packets>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Self {
|
||||
let exit_ = exit.clone();
|
||||
|
||||
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
|
||||
let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) =
|
||||
unbounded();
|
||||
let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded();
|
||||
let listen_thread = Builder::new()
|
||||
.name("solana-cluster_info_vote_listener".to_string())
|
||||
@@ -218,7 +229,7 @@ impl ClusterInfoVoteListener {
|
||||
let _ = Self::recv_loop(
|
||||
exit_,
|
||||
&cluster_info,
|
||||
verified_vote_packets_sender,
|
||||
verified_vote_label_packets_sender,
|
||||
verified_vote_transactions_sender,
|
||||
);
|
||||
})
|
||||
@@ -231,9 +242,9 @@ impl ClusterInfoVoteListener {
|
||||
.spawn(move || {
|
||||
let _ = Self::bank_send_loop(
|
||||
exit_,
|
||||
verified_vote_packets_receiver,
|
||||
verified_vote_label_packets_receiver,
|
||||
poh_recorder,
|
||||
&sender,
|
||||
&verified_packets_sender,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@@ -248,6 +259,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker,
|
||||
&bank_forks,
|
||||
subscriptions,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@@ -267,7 +280,7 @@ impl ClusterInfoVoteListener {
|
||||
fn recv_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: &ClusterInfo,
|
||||
verified_vote_packets_sender: VerifiedVotePacketsSender,
|
||||
verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender,
|
||||
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
|
||||
) -> Result<()> {
|
||||
let mut last_ts = 0;
|
||||
@@ -282,7 +295,7 @@ impl ClusterInfoVoteListener {
|
||||
if !votes.is_empty() {
|
||||
let (vote_txs, packets) = Self::verify_votes(votes, labels);
|
||||
verified_vote_transactions_sender.send(vote_txs)?;
|
||||
verified_vote_packets_sender.send(packets)?;
|
||||
verified_vote_label_packets_sender.send(packets)?;
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
|
||||
@@ -322,9 +335,9 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
fn bank_send_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
verified_vote_packets_receiver: VerifiedVotePacketsReceiver,
|
||||
verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
) -> Result<()> {
|
||||
let mut verified_vote_packets = VerifiedVotePackets::default();
|
||||
let mut time_since_lock = Instant::now();
|
||||
@@ -334,9 +347,10 @@ impl ClusterInfoVoteListener {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(e) = verified_vote_packets
|
||||
.get_and_process_vote_packets(&verified_vote_packets_receiver, &mut update_version)
|
||||
{
|
||||
if let Err(e) = verified_vote_packets.get_and_process_vote_packets(
|
||||
&verified_vote_label_packets_receiver,
|
||||
&mut update_version,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
@@ -353,7 +367,7 @@ impl ClusterInfoVoteListener {
|
||||
if let Some(bank) = bank {
|
||||
let last_version = bank.last_vote_sync.load(Ordering::Relaxed);
|
||||
let (new_version, msgs) = verified_vote_packets.get_latest_votes(last_version);
|
||||
packets_sender.send(msgs)?;
|
||||
verified_packets_sender.send(msgs)?;
|
||||
bank.last_vote_sync.compare_and_swap(
|
||||
last_version,
|
||||
new_version,
|
||||
@@ -371,6 +385,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@@ -380,19 +396,18 @@ impl ClusterInfoVoteListener {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
|
||||
|
||||
if let Err(e) = Self::get_and_process_votes(
|
||||
&vote_txs_receiver,
|
||||
&vote_tracker,
|
||||
root_bank.slot(),
|
||||
subscriptions.clone(),
|
||||
epoch_stakes,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
}
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::ReadyTimeoutError => (),
|
||||
_ => {
|
||||
error!("thread {:?} error {:?}", thread::current().name(), e);
|
||||
}
|
||||
@@ -407,6 +422,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
Self::get_and_process_votes(
|
||||
vote_txs_receiver,
|
||||
@@ -414,6 +431,8 @@ impl ClusterInfoVoteListener {
|
||||
last_root,
|
||||
subscriptions,
|
||||
None,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -423,19 +442,41 @@ impl ClusterInfoVoteListener {
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
|
||||
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
|
||||
vote_txs.extend(new_txs);
|
||||
let mut sel = Select::new();
|
||||
sel.recv(vote_txs_receiver);
|
||||
sel.recv(replay_votes_receiver);
|
||||
let mut remaining_wait_time = 200;
|
||||
loop {
|
||||
if remaining_wait_time == 0 {
|
||||
break;
|
||||
}
|
||||
let start = Instant::now();
|
||||
// Wait for one of the receivers to be ready. `ready_timeout`
|
||||
// will return if channels either have something, or are
|
||||
// disconnected. `ready_timeout` can wake up spuriously,
|
||||
// hence the loop
|
||||
let _ = sel.ready_timeout(Duration::from_millis(remaining_wait_time))?;
|
||||
let vote_txs: Vec<_> = vote_txs_receiver.try_iter().flatten().collect();
|
||||
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
|
||||
if !vote_txs.is_empty() || !replay_votes.is_empty() {
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
verified_vote_sender,
|
||||
&replay_votes,
|
||||
);
|
||||
break;
|
||||
} else {
|
||||
remaining_wait_time = remaining_wait_time
|
||||
.saturating_sub(std::cmp::max(start.elapsed().as_millis() as u64, 1));
|
||||
}
|
||||
}
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
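A self-contained sketch of the `ready_timeout` wait loop introduced above: block on two crossbeam receivers under a shrinking millisecond budget, tolerate spurious wakeups, and drain both channels once either is ready. It assumes only the crossbeam-channel crate; every other name is hypothetical.

use crossbeam_channel::{unbounded, Receiver, Select};
use std::time::{Duration, Instant};

// Wait for either receiver to become ready within `budget_ms`, then drain both.
// `ready_timeout` can wake spuriously, hence the loop and the shrinking budget.
fn drain_two<T>(a: &Receiver<T>, b: &Receiver<T>, budget_ms: u64) -> Vec<T> {
    let mut sel = Select::new();
    sel.recv(a);
    sel.recv(b);
    let mut remaining = budget_ms;
    loop {
        if remaining == 0 {
            return vec![];
        }
        let start = Instant::now();
        if sel.ready_timeout(Duration::from_millis(remaining)).is_err() {
            // Timed out without anything becoming ready: give up this round.
            return vec![];
        }
        let mut out: Vec<T> = a.try_iter().collect();
        out.extend(b.try_iter());
        if !out.is_empty() {
            return out;
        }
        remaining =
            remaining.saturating_sub(std::cmp::max(start.elapsed().as_millis() as u64, 1));
    }
}

fn main() {
    let (tx_a, rx_a) = unbounded();
    let (_tx_b, rx_b) = unbounded::<u64>();
    tx_a.send(42u64).unwrap();
    assert_eq!(drain_two(&rx_a, &rx_b, 200), vec![42]);
}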
@@ -445,10 +486,11 @@ impl ClusterInfoVoteListener {
|
||||
root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes: &[Arc<PubkeyVotes>],
|
||||
) {
|
||||
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
|
||||
let mut diff: HashMap<Slot, HashMap<Arc<Pubkey>, bool>> = HashMap::new();
|
||||
{
|
||||
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
|
||||
for tx in vote_txs {
|
||||
if let (Some(vote_pubkey), Some(vote_instruction)) = tx
|
||||
.message
|
||||
@@ -502,25 +544,33 @@ impl ClusterInfoVoteListener {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't insert if we already have marked down this pubkey
|
||||
// voting for this slot
|
||||
let maybe_slot_tracker =
|
||||
all_slot_trackers.read().unwrap().get(&slot).cloned();
|
||||
if let Some(slot_tracker) = maybe_slot_tracker {
|
||||
if slot_tracker.read().unwrap().voted.contains(vote_pubkey) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(vote_pubkey);
|
||||
diff.entry(slot).or_default().insert(unduplicated_pubkey);
|
||||
diff.entry(slot)
|
||||
.or_default()
|
||||
.insert(unduplicated_pubkey, true);
|
||||
}
|
||||
|
||||
subscriptions.notify_vote(&vote);
|
||||
let _ = verified_vote_sender.send((*vote_pubkey, vote.slots));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, slot_diff) in diff {
|
||||
// Process the replay votes
|
||||
for votes in replay_votes {
|
||||
for (pubkey, slot) in votes.iter() {
|
||||
if *slot <= root {
|
||||
continue;
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(pubkey);
|
||||
diff.entry(*slot)
|
||||
.or_default()
|
||||
.entry(unduplicated_pubkey)
|
||||
.or_default();
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, mut slot_diff) in diff {
|
||||
let slot_tracker = vote_tracker
|
||||
.slot_vote_trackers
|
||||
.read()
|
||||
@@ -528,15 +578,55 @@ impl ClusterInfoVoteListener {
|
||||
.get(&slot)
|
||||
.cloned();
|
||||
if let Some(slot_tracker) = slot_tracker {
|
||||
{
|
||||
let r_slot_tracker = slot_tracker.read().unwrap();
|
||||
// Only keep the pubkeys we haven't seen voting for this slot
|
||||
slot_diff.retain(|pubkey, seen_in_gossip_above| {
|
||||
let seen_in_gossip_previously = r_slot_tracker.voted.get(pubkey);
|
||||
let is_new = seen_in_gossip_previously.is_none();
|
||||
if is_new && !*seen_in_gossip_above {
|
||||
// If this vote wasn't seen in gossip, then it must be a
|
||||
// replay vote, and we haven't sent a notification for
|
||||
// those yet
|
||||
let _ = verified_vote_sender.send((**pubkey, vec![slot]));
|
||||
}
|
||||
|
||||
// `is_new_from_gossip` means we observed a vote for this slot
|
||||
// for the first time in gossip
|
||||
let is_new_from_gossip =
|
||||
!seen_in_gossip_previously.cloned().unwrap_or(false)
|
||||
&& *seen_in_gossip_above;
|
||||
is_new || is_new_from_gossip
|
||||
});
|
||||
}
|
||||
let mut w_slot_tracker = slot_tracker.write().unwrap();
|
||||
if w_slot_tracker.updates.is_none() {
|
||||
w_slot_tracker.updates = Some(vec![]);
|
||||
}
|
||||
let mut current_stake = 0;
|
||||
for pubkey in slot_diff {
|
||||
Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
|
||||
let mut gossip_only_stake = 0;
|
||||
for (pubkey, seen_in_gossip_above) in slot_diff {
|
||||
let is_new = !w_slot_tracker.voted.contains_key(&pubkey);
|
||||
Self::sum_stake(
|
||||
&mut current_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
// By this point we know if the vote was seen in gossip above,
|
||||
// it was not seen in gossip at any point in the past, so it's
|
||||
// safe to pass this in here as an overall indicator of whether
|
||||
// this vote is new
|
||||
seen_in_gossip_above,
|
||||
is_new,
|
||||
);
|
||||
|
||||
w_slot_tracker.voted.insert(pubkey.clone());
|
||||
// From the `slot_diff.retain` earlier, we know because there are
|
||||
// no other writers to `slot_vote_tracker` that
|
||||
// `is_new || is_new_from_gossip`. In both cases we want to record
|
||||
// `is_new_from_gossip` for the `pubkey` entry.
|
||||
w_slot_tracker
|
||||
.voted
|
||||
.insert(pubkey.clone(), seen_in_gossip_above);
|
||||
w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
|
||||
}
|
||||
Self::notify_for_stake_change(
|
||||
@@ -547,20 +637,33 @@ impl ClusterInfoVoteListener {
|
||||
slot,
|
||||
);
|
||||
w_slot_tracker.total_stake += current_stake;
|
||||
w_slot_tracker.gossip_only_stake += gossip_only_stake
|
||||
} else {
|
||||
let mut total_stake = 0;
|
||||
let voted: HashSet<_> = slot_diff
|
||||
let mut gossip_only_stake = 0;
|
||||
let voted: HashMap<_, _> = slot_diff
|
||||
.into_iter()
|
||||
.map(|pubkey| {
|
||||
Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
|
||||
pubkey
|
||||
.map(|(pubkey, seen_in_gossip_above)| {
|
||||
if !seen_in_gossip_above {
|
||||
let _ = verified_vote_sender.send((*pubkey, vec![slot]));
|
||||
}
|
||||
Self::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
seen_in_gossip_above,
|
||||
true,
|
||||
);
|
||||
(pubkey, seen_in_gossip_above)
|
||||
})
|
||||
.collect();
|
||||
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
|
||||
let new_slot_tracker = SlotVoteTracker {
|
||||
voted: voted.clone(),
|
||||
updates: Some(voted.into_iter().collect()),
|
||||
updates: Some(voted.keys().cloned().collect()),
|
||||
voted,
|
||||
total_stake,
|
||||
gossip_only_stake,
|
||||
};
|
||||
vote_tracker
|
||||
.slot_vote_trackers
|
||||
@@ -588,10 +691,26 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
|
||||
fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
|
||||
fn sum_stake(
|
||||
sum: &mut u64,
|
||||
gossip_only_stake: &mut u64,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
pubkey: &Pubkey,
|
||||
is_new_from_gossip: bool,
|
||||
is_new: bool,
|
||||
) {
|
||||
if !is_new_from_gossip && !is_new {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(stakes) = epoch_stakes {
|
||||
if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
|
||||
*sum += vote_account.0;
|
||||
if is_new {
|
||||
*sum += vote_account.0;
|
||||
}
|
||||
if is_new_from_gossip {
|
||||
*gossip_only_stake += vote_account.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
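The core of the dedup logic above is the `retain` over `slot_diff`: keep a pubkey only if it is new for the slot or newly seen in gossip. A small standalone sketch with integer ids standing in for pubkeys (all names hypothetical):

use std::collections::HashMap;

fn main() {
    // Votes already recorded for a slot: voter id -> was it seen in gossip?
    let previously_voted: HashMap<u64, bool> =
        [(1, true), (2, false)].iter().cloned().collect();
    // Votes observed this round, with the same "seen in gossip" flag.
    let mut diff: HashMap<u64, bool> =
        [(1, true), (2, true), (3, false)].iter().cloned().collect();

    // Keep only entries that are either brand new for this slot, or were seen
    // in gossip for the first time (previously known from replay only).
    diff.retain(|voter, seen_in_gossip_now| {
        let seen_in_gossip_previously = previously_voted.get(voter);
        let is_new = seen_in_gossip_previously.is_none();
        let is_new_from_gossip =
            !seen_in_gossip_previously.cloned().unwrap_or(false) && *seen_in_gossip_now;
        is_new || is_new_from_gossip
    });

    // Voter 1 was already seen in gossip, so it drops out; voter 2 upgrades
    // from replay-only to gossip; voter 3 is entirely new.
    assert_eq!(diff.len(), 2);
    assert!(diff.contains_key(&2) && diff.contains_key(&3));
}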
@@ -611,6 +730,7 @@ mod tests {
|
||||
use solana_sdk::signature::Signature;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
#[test]
|
||||
fn test_max_vote_tx_fits() {
|
||||
@@ -783,8 +903,11 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slots = vec![1, 2];
|
||||
let replay_vote_slots = vec![3, 4];
|
||||
validator_voting_keypairs.iter().for_each(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
@@ -797,6 +920,15 @@ mod tests {
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx]).unwrap();
|
||||
for vote_slot in &replay_vote_slots {
|
||||
// Send twice, should only expect to be notified once later
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
@@ -806,14 +938,42 @@ mod tests {
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
for vote_slot in vote_slots {
|
||||
|
||||
// Check that the received votes were pushed to other components
|
||||
// subscribing via `verified_vote_receiver`
|
||||
let all_expected_slots: BTreeSet<_> = vote_slots
|
||||
.into_iter()
|
||||
.chain(replay_vote_slots.into_iter())
|
||||
.collect();
|
||||
let mut pubkey_to_votes: HashMap<Pubkey, BTreeSet<Slot>> = HashMap::new();
|
||||
for (received_pubkey, new_votes) in verified_vote_receiver.try_iter() {
|
||||
let already_received_votes = pubkey_to_votes.entry(received_pubkey).or_default();
|
||||
for new_vote in new_votes {
|
||||
// `new_vote` should only be received once
|
||||
assert!(already_received_votes.insert(new_vote));
|
||||
}
|
||||
}
|
||||
assert_eq!(pubkey_to_votes.len(), validator_voting_keypairs.len());
|
||||
for keypairs in &validator_voting_keypairs {
|
||||
assert_eq!(
|
||||
*pubkey_to_votes
|
||||
.get(&keypairs.vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
all_expected_slots
|
||||
);
|
||||
}
|
||||
|
||||
// Check the vote trackers were updated correctly
|
||||
for vote_slot in all_expected_slots {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in &validator_voting_keypairs {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@@ -828,14 +988,18 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
// Send some votes to process
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (votes_txs_sender, votes_txs_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (_replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let mut expected_votes = vec![];
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let validator_votes: Vec<_> = keyset
|
||||
.iter()
|
||||
.map(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
expected_votes.push((vote_keypair.pubkey(), vec![i as Slot + 1]));
|
||||
vote_transaction::new_vote_transaction(
|
||||
vec![i as u64 + 1],
|
||||
Hash::default(),
|
||||
@@ -846,24 +1010,38 @@ mod tests {
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
votes_sender.send(validator_votes).unwrap();
|
||||
votes_txs_sender.send(validator_votes).unwrap();
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
// Read and process votes from channel `votes_receiver`
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&votes_txs_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check that the received votes were pushed to other components
|
||||
// subscribing via a channel
|
||||
let received_votes: Vec<_> = verified_vote_receiver.try_iter().collect();
|
||||
assert_eq!(received_votes.len(), validator_voting_keypairs.len());
|
||||
for (expected_pubkey_vote, received_pubkey_vote) in
|
||||
expected_votes.iter().zip(received_votes.iter())
|
||||
{
|
||||
assert_eq!(expected_pubkey_vote, received_pubkey_vote);
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in keyset {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@@ -873,6 +1051,79 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_votes3() {
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slot = 1;
|
||||
|
||||
// Events:
|
||||
// 0: Send gossip vote
|
||||
// 1: Send replay vote
|
||||
// 2: Send both
|
||||
let ordered_events = vec![
|
||||
vec![0],
|
||||
vec![1],
|
||||
vec![0, 1],
|
||||
vec![1, 0],
|
||||
vec![2],
|
||||
vec![0, 1, 2],
|
||||
vec![1, 0, 2],
|
||||
];
|
||||
for events in ordered_events {
|
||||
let (vote_tracker, bank, validator_voting_keypairs, subscriptions) = setup();
|
||||
let node_keypair = &validator_voting_keypairs[0].node_keypair;
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
for &e in &events {
|
||||
if e == 0 || e == 2 {
|
||||
// Create vote transaction
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![vote_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
node_keypair,
|
||||
vote_keypair,
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx.clone()]).unwrap();
|
||||
}
|
||||
if e == 1 || e == 2 {
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
let _ = ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
Some(
|
||||
// Make sure `epoch_stakes` exists for this slot by unwrapping
|
||||
bank.epoch_stakes(bank.epoch_schedule().get_epoch(vote_slot))
|
||||
.unwrap(),
|
||||
),
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
);
|
||||
}
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
|
||||
if events == vec![1] {
|
||||
// Check `gossip_only_stake` is not incremented
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 0);
|
||||
} else {
|
||||
// Check that both the `gossip_only_stake` and `total_stake` both
|
||||
// increased
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_voters_by_epoch() {
|
||||
// Create some voters at genesis
|
||||
@@ -936,14 +1187,14 @@ mod tests {
|
||||
let ref_count_per_vote = 2;
|
||||
|
||||
// Create some voters at genesis
|
||||
let validator_voting_keypairs: Vec<_> = (0..2)
|
||||
let validator_keypairs: Vec<_> = (0..2)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_voting_keypairs,
|
||||
&validator_keypairs,
|
||||
100,
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
@@ -957,16 +1208,17 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
// Send a vote to process, should add a reference to the pubkey for that voter
|
||||
// in the tracker
|
||||
let validator0_keypairs = &validator_voting_keypairs[0];
|
||||
let validator0_keypairs = &validator_keypairs[0];
|
||||
let voted_slot = bank.slot() + 1;
|
||||
let vote_tx = vec![vote_transaction::new_vote_transaction(
|
||||
// Must vote > root to be processed
|
||||
vec![bank.slot() + 1],
|
||||
vec![voted_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&validator0_keypairs.node_keypair,
|
||||
@@ -974,12 +1226,19 @@ mod tests {
|
||||
&validator0_keypairs.vote_keypair,
|
||||
)];
|
||||
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_tx,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
// Add vote for same slot, should not affect outcome
|
||||
&[Arc::new(vec![(
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
voted_slot,
|
||||
)])],
|
||||
);
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
@@ -1014,8 +1273,9 @@ mod tests {
|
||||
// Test with votes across two epochs
|
||||
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
|
||||
|
||||
// Make 2 new votes in two different epochs, ref count should go up
|
||||
// by 2 * ref_count_per_vote
|
||||
// Make 2 new votes in two different epochs for the same pubkey,
|
||||
// the ref count should go up by 3 * ref_count_per_vote
|
||||
// Add 1 vote through the replay channel for a new pubkey; its ref count is checked separately below
|
||||
let vote_txs: Vec<_> = [bank.slot() + 2, first_slot_in_new_epoch]
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
@@ -1031,8 +1291,32 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_txs,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&[Arc::new(vec![(
|
||||
validator_keypairs[1].vote_keypair.pubkey(),
|
||||
first_slot_in_new_epoch,
|
||||
)])],
|
||||
);
|
||||
|
||||
// Check new replay vote pubkey first
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
.0
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&validator_keypairs[1].vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(ref_count, current_ref_count);
|
||||
|
||||
// Check the existing pubkey
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
@@ -1072,7 +1356,7 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
@@ -1158,4 +1442,78 @@ mod tests {
|
||||
assert_eq!(vote_txs.len(), 2);
|
||||
verify_packets_len(&packets, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum_stake() {
|
||||
let (_, bank, validator_voting_keypairs, _) = setup();
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
let epoch_stakes = bank.epoch_stakes(bank.epoch()).unwrap();
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both true, both fields
|
||||
// should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both false, neither should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new`, but not `is_new_from_gossip` then
|
||||
// `total_stake` will increase, but `gossip_only_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new_from_gossip`, but not `is_new` then
|
||||
// `gossip_only_stake` will increase, but `total_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
|
193
core/src/cluster_slots_service.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use crate::{cluster_info::ClusterInfo, cluster_slots::ClusterSlots};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver},
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
{Arc, RwLock},
|
||||
},
|
||||
thread::sleep,
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct ClusterSlotsServiceTiming {
|
||||
pub lowest_slot_elapsed: u64,
|
||||
pub update_completed_slots_elapsed: u64,
|
||||
}
|
||||
|
||||
impl ClusterSlotsServiceTiming {
|
||||
fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) {
|
||||
self.lowest_slot_elapsed += lowest_slot_elapsed;
|
||||
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ClusterSlotsService {
|
||||
t_cluster_slots_service: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl ClusterSlotsService {
|
||||
pub fn new(
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, &blockstore, &cluster_info);
|
||||
Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver);
|
||||
let t_cluster_slots_service = Builder::new()
|
||||
.name("solana-cluster-slots-service".to_string())
|
||||
.spawn(move || {
|
||||
Self::run(
|
||||
blockstore,
|
||||
cluster_slots,
|
||||
bank_forks,
|
||||
cluster_info,
|
||||
completed_slots_receiver,
|
||||
exit,
|
||||
)
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
ClusterSlotsService {
|
||||
t_cluster_slots_service,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cluster_slots_service.join()
|
||||
}
|
||||
|
||||
fn run(
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) {
|
||||
let mut cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
|
||||
let mut last_stats = Instant::now();
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let new_root = bank_forks.read().unwrap().root();
|
||||
let id = cluster_info.id();
|
||||
let mut lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
lowest_slot_elapsed.stop();
|
||||
let mut update_completed_slots_elapsed =
|
||||
Measure::start("update_completed_slots_elapsed");
|
||||
Self::update_completed_slots(&completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, &cluster_info, &bank_forks);
|
||||
update_completed_slots_elapsed.stop();
|
||||
|
||||
cluster_slots_service_timing.update(
|
||||
lowest_slot_elapsed.as_us(),
|
||||
update_completed_slots_elapsed.as_us(),
|
||||
);
|
||||
|
||||
if last_stats.elapsed().as_secs() > 2 {
|
||||
datapoint_info!(
|
||||
"cluster_slots_service-timing",
|
||||
(
|
||||
"lowest_slot_elapsed",
|
||||
cluster_slots_service_timing.lowest_slot_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"update_completed_slots_elapsed",
|
||||
cluster_slots_service_timing.update_completed_slots_elapsed,
|
||||
i64
|
||||
),
|
||||
);
|
||||
cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(200));
|
||||
}
|
||||
}
|
||||
|
||||
fn update_completed_slots(
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
let mut slots: Vec<Slot> = vec![];
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
// will provide a schedule to window_service for any incoming shreds up to the
|
||||
// last_confirmed_epoch.
|
||||
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
}
|
||||
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::cluster_info::Node;
|
||||
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
|
||||
ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
|
||||
let lowest = cluster_info
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
|
||||
lowest_slot.clone()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(lowest.lowest, 5);
|
||||
}
|
||||
}
|
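The service above drains the completed-slots channel, then sorts and dedups before a single gossip push. A minimal sketch of that drain step using a plain std channel (names are illustrative):

use std::sync::mpsc::{channel, Receiver};

type Slot = u64;

// Drain every pending batch of completed slots, then sort and dedup so they
// can be published in a single gossip push, as the service loop above does.
fn collect_completed_slots(receiver: &Receiver<Vec<Slot>>) -> Vec<Slot> {
    let mut slots: Vec<Slot> = vec![];
    while let Ok(mut more) = receiver.try_recv() {
        slots.append(&mut more);
    }
    slots.sort();
    slots.dedup();
    slots
}

fn main() {
    let (sender, receiver) = channel();
    sender.send(vec![3, 1]).unwrap();
    sender.send(vec![2, 3]).unwrap();
    assert_eq!(collect_completed_slots(&receiver), vec![1, 2, 3]);
}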
@@ -8,6 +8,7 @@ use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::vote_state::VoteState;
|
||||
use std::{
|
||||
cmp::max,
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
@@ -103,27 +104,8 @@ impl AggregateCommitmentService {
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
let cache_slot_info =
|
||||
Self::update_commitment_cache(block_commitment_cache, aggregation_data, ancestors);
|
||||
aggregate_commitment_time.stop();
|
||||
datapoint_info!(
|
||||
"block-commitment-cache",
|
||||
@@ -134,12 +116,50 @@ impl AggregateCommitmentService {
|
||||
)
|
||||
);
|
||||
|
||||
subscriptions.notify_subscribers(CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
});
|
||||
// Triggers rpc_subscription notifications as soon as new commitment data is available,
|
||||
// sending just the commitment cache slot information that the notifications thread
|
||||
// needs
|
||||
subscriptions.notify_subscribers(cache_slot_info);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_commitment_cache(
|
||||
block_commitment_cache: &RwLock<BlockCommitmentCache>,
|
||||
aggregation_data: CommitmentAggregationData,
|
||||
ancestors: Vec<u64>,
|
||||
) -> CacheSlotInfo {
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
let largest_confirmed_root = max(
|
||||
new_block_commitment.largest_confirmed_root(),
|
||||
w_block_commitment_cache.largest_confirmed_root(),
|
||||
);
|
||||
new_block_commitment.set_largest_confirmed_root(largest_confirmed_root);
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,10 +245,24 @@ impl AggregateCommitmentService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_runtime::genesis_utils::{
|
||||
create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
use solana_vote_program::{
|
||||
vote_state::{self, VoteStateVersions},
|
||||
vote_transaction,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_get_largest_confirmed_root() {
|
||||
@@ -451,4 +485,163 @@ mod tests {
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_highest_confirmed_root_advance() {
|
||||
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
|
||||
let account = &bank.vote_accounts()[&vote_pubkey].1;
|
||||
let vote_state = VoteState::from(account).unwrap();
|
||||
vote_state.root_slot.unwrap()
|
||||
}
|
||||
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
);
|
||||
|
||||
let node_keypair = Keypair::new().to_bytes();
|
||||
let vote_keypair = Keypair::new().to_bytes();
|
||||
let stake_keypair = Keypair::new().to_bytes();
|
||||
let validator_keypairs = vec![ValidatorVoteKeypairs {
|
||||
node_keypair: Keypair::from_bytes(&node_keypair).unwrap(),
|
||||
vote_keypair: Keypair::from_bytes(&vote_keypair).unwrap(),
|
||||
stake_keypair: Keypair::from_bytes(&stake_keypair).unwrap(),
|
||||
}];
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
voting_keypair: _,
|
||||
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs, 100);
|
||||
|
||||
let node_keypair = Keypair::from_bytes(&node_keypair).unwrap();
|
||||
let vote_keypair = Keypair::from_bytes(&vote_keypair).unwrap();
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
bank0
|
||||
.transfer(100_000, &mint_keypair, &node_keypair.pubkey())
|
||||
.unwrap();
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
|
||||
// Fill bank_forks with banks with votes landing in the next slot
|
||||
// Create enough banks such that vote account will root slots 0 and 1
|
||||
for x in 0..33 {
|
||||
let previous_bank = bank_forks.get(x).unwrap();
|
||||
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
|
||||
let vote = vote_transaction::new_vote_transaction(
|
||||
vec![x],
|
||||
previous_bank.hash(),
|
||||
previous_bank.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank.process_transaction(&vote).unwrap();
|
||||
bank_forks.insert(bank);
|
||||
}
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
for x in 0..root {
|
||||
bank_forks.set_root(x, &None, None);
|
||||
}
|
||||
|
||||
// Add an additional bank/vote that will root slot 2
|
||||
let bank33 = bank_forks.get(33).unwrap();
|
||||
let bank34 = Bank::new_from_parent(bank33, &Pubkey::default(), 34);
|
||||
let vote33 = vote_transaction::new_vote_transaction(
|
||||
vec![33],
|
||||
bank33.hash(),
|
||||
bank33.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank34.process_transaction(&vote33).unwrap();
|
||||
bank_forks.insert(bank34);
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 0,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
bank_forks.set_root(root, &None, Some(largest_confirmed_root));
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
|
||||
// Add a forked bank. Because the vote for bank 33 landed in the non-ancestor, the vote
|
||||
// account's root (and thus the highest_confirmed_root) rolls back to slot 1
|
||||
let bank33 = bank_forks.get(33).unwrap();
|
||||
let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35);
|
||||
bank_forks.insert(bank35);
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 1,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
|
||||
// Add additional banks beyond lockout built on the new fork to ensure that behavior
|
||||
// continues normally
|
||||
for x in 35..=37 {
|
||||
let previous_bank = bank_forks.get(x).unwrap();
|
||||
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
|
||||
let vote = vote_transaction::new_vote_transaction(
|
||||
vec![x],
|
||||
previous_bank.hash(),
|
||||
previous_bank.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank.process_transaction(&vote).unwrap();
|
||||
bank_forks.insert(bank);
|
||||
}
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 0,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
bank_forks.set_root(root, &None, Some(largest_confirmed_root));
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
}
|
||||
Blockstore::destroy(&ledger_path).unwrap();
|
||||
}
|
||||
}
|
||||
|
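The refactor above ensures the published `largest_confirmed_root` never moves backwards by taking the max of the old and new values before swapping the cache. A trimmed-down sketch of that pattern (the struct and names are hypothetical):

use std::cmp::max;
use std::sync::RwLock;

// A stand-in for the commitment cache, keeping only the field relevant to the
// monotonicity fix.
#[derive(Default)]
struct Cache {
    largest_confirmed_root: u64,
}

// Swap in freshly aggregated data, but never let the published
// largest_confirmed_root move backwards.
fn update(cache: &RwLock<Cache>, mut new_cache: Cache) {
    let mut w = cache.write().unwrap();
    new_cache.largest_confirmed_root =
        max(new_cache.largest_confirmed_root, w.largest_confirmed_root);
    std::mem::swap(&mut *w, &mut new_cache);
}

fn main() {
    let cache = RwLock::new(Cache { largest_confirmed_root: 10 });
    // A temporary fork switch reports an older root; the cache keeps 10.
    update(&cache, Cache { largest_confirmed_root: 1 });
    assert_eq!(cache.read().unwrap().largest_confirmed_root, 10);
}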
@@ -61,6 +61,8 @@ impl SwitchForkDecision {
|
||||
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
|
||||
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
|
||||
|
||||
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct StakeLockout {
|
||||
lockout: u64,
|
||||
@@ -84,7 +86,7 @@ pub(crate) struct ComputedBankState {
|
||||
pub total_staked: u64,
|
||||
pub bank_weight: u128,
|
||||
pub lockout_intervals: LockoutIntervals,
|
||||
pub pubkey_votes: Vec<(Pubkey, Slot)>,
|
||||
pub pubkey_votes: Arc<PubkeyVotes>,
|
||||
}
|
||||
|
||||
pub struct Tower {
|
||||
@@ -258,7 +260,7 @@ impl Tower {
|
||||
total_staked,
|
||||
bank_weight,
|
||||
lockout_intervals,
|
||||
pubkey_votes,
|
||||
pubkey_votes: Arc::new(pubkey_votes),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -666,6 +668,7 @@ pub mod test {
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
@@ -785,6 +788,7 @@ pub mod test {
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let (replay_slot_sender, _replay_slot_receiver) = unbounded();
|
||||
let _ = ReplayStage::compute_bank_stats(
|
||||
&my_pubkey,
|
||||
&ancestors,
|
||||
@@ -797,6 +801,7 @@ pub mod test {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_slot_sender,
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@@ -1353,7 +1358,7 @@ pub mod test {
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: Vec<(Pubkey, Slot)> =
|
||||
let account_latest_votes: PubkeyVotes =
|
||||
accounts.iter().map(|(pubkey, _)| (*pubkey, 0)).collect();
|
||||
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
@@ -1363,7 +1368,7 @@ pub mod test {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
mut pubkey_votes,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
@@ -1375,6 +1380,7 @@ pub mod test {
|
||||
assert_eq!(stake_lockouts[&0].stake, 2);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(total_staked, 2);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
|
||||
@@ -1390,7 +1396,7 @@ pub mod test {
|
||||
//two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
|
||||
let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: Vec<(Pubkey, Slot)> = accounts
|
||||
let account_latest_votes: PubkeyVotes = accounts
|
||||
.iter()
|
||||
.map(|(pubkey, _)| (*pubkey, (MAX_LOCKOUT_HISTORY - 1) as Slot))
|
||||
.collect();
|
||||
@@ -1417,7 +1423,7 @@ pub mod test {
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
bank_weight,
|
||||
mut pubkey_votes,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
@@ -1433,6 +1439,7 @@ pub mod test {
|
||||
// should be the sum of all the weights for root
|
||||
assert!(stake_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
|
||||
assert_eq!(bank_weight, expected_bank_weight);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
}
|
||||
|
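Wrapping `pubkey_votes` in an `Arc` lets replay hand the same vote list to the vote listener without copying it, and the tests recover the inner `Vec` with `Arc::try_unwrap` once all clones are gone. A small sketch, with integer ids in place of real pubkeys:

use std::sync::Arc;

type Slot = u64;
// (voter id, slot); integer ids stand in for real pubkeys.
type PubkeyVotes = Vec<(u64, Slot)>;

fn main() {
    // Wrapping the vote list in an Arc lets it be shared over a channel
    // without cloning the underlying Vec.
    let pubkey_votes: Arc<PubkeyVotes> = Arc::new(vec![(2, 5), (1, 5)]);
    let shared = Arc::clone(&pubkey_votes); // cheap: only bumps the refcount
    drop(shared);

    // Once every clone is gone, a test can recover the Vec and sort it in
    // place, as the `Arc::try_unwrap(pubkey_votes).unwrap()` calls above do.
    let mut votes = Arc::try_unwrap(pubkey_votes).unwrap();
    votes.sort();
    assert_eq!(votes, vec![(1, 5), (2, 5)]);
}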
@@ -159,8 +159,9 @@ impl CrdsGossip {
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.pull.generate_pull_responses(&self.crds, filters)
|
||||
self.pull.generate_pull_responses(&self.crds, filters, now)
|
||||
}
|
||||
|
||||
pub fn filter_pull_responses(
|
||||
|
@@ -131,7 +131,7 @@ pub struct CrdsGossipPull {
|
||||
/// timestamp of last request
|
||||
pub pull_request_time: HashMap<Pubkey, u64>,
|
||||
/// hash and insert time
|
||||
purged_values: VecDeque<(Hash, u64)>,
|
||||
pub purged_values: VecDeque<(Hash, u64)>,
|
||||
pub crds_timeout: u64,
|
||||
pub msg_timeout: u64,
|
||||
pub num_pulls: usize,
|
||||
@@ -237,8 +237,9 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &Crds,
|
||||
requests: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.filter_crds_values(crds, requests)
|
||||
self.filter_crds_values(crds, requests, now)
|
||||
}
|
||||
|
||||
// Checks if responses should be inserted and
|
||||
@@ -364,22 +365,50 @@ impl CrdsGossipPull {
|
||||
for (value_hash, _insert_timestamp) in &self.purged_values {
|
||||
filters.iter_mut().for_each(|filter| filter.add(value_hash));
|
||||
}
|
||||
|
||||
filters
|
||||
}
|
||||
|
||||
/// filter values that fail the bloom filter up to max_bytes
|
||||
fn filter_crds_values(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
let mut ret = vec![vec![]; filters.len()];
|
||||
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4);
|
||||
let start = filters.len();
|
||||
//skip filters from callers that are too old
|
||||
let future = now.saturating_add(msg_timeout);
|
||||
let past = now.saturating_sub(msg_timeout);
|
||||
let recent: Vec<_> = filters
|
||||
.iter()
|
||||
.filter(|(caller, _)| caller.wallclock() < future && caller.wallclock() >= past)
|
||||
.collect();
|
||||
inc_new_counter_info!(
|
||||
"gossip_filter_crds_values-dropped_requests",
|
||||
start - recent.len()
|
||||
);
|
||||
if recent.is_empty() {
|
||||
return ret;
|
||||
}
|
||||
let mut total_skipped = 0;
|
||||
for v in crds.table.values() {
|
||||
filters.iter().enumerate().for_each(|(i, (_, filter))| {
|
||||
recent.iter().enumerate().for_each(|(i, (caller, filter))| {
|
||||
//skip values that are too new
|
||||
if v.value.wallclock() > caller.wallclock().checked_add(jitter).unwrap_or_else(|| 0)
|
||||
{
|
||||
total_skipped += 1;
|
||||
return;
|
||||
}
|
||||
if !filter.contains(&v.value_hash) {
|
||||
ret[i].push(v.value.clone());
|
||||
}
|
||||
});
|
||||
}
|
||||
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
|
||||
ret
|
||||
}
|
||||
pub fn make_timeouts_def(
|
||||
@@ -636,6 +665,62 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generate_pull_responses() {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let node = CrdsGossipPull::default();
|
||||
node_crds.insert(entry, 0).unwrap();
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
node_crds.insert(new, 0).unwrap();
|
||||
let req = node.new_pull_request(
|
||||
&node_crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
0,
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
|
||||
let mut dest_crds = Crds::default();
|
||||
let dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
)));
|
||||
dest_crds
|
||||
.insert(new, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS)
|
||||
.unwrap();
|
||||
|
||||
//should skip new value since caller is too old
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
//should return new value since caller is new
|
||||
filters[0].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
|
||||
)));
|
||||
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp[0].len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_pull_request() {
|
||||
let mut node_crds = Crds::default();
|
||||
@@ -664,7 +749,7 @@ mod test {
|
||||
let mut dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 1);
|
||||
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
|
||||
assert!(dest_crds.lookup(&caller.label()).is_some());
|
||||
@@ -688,7 +773,7 @@ mod test {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
1,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
@@ -696,7 +781,7 @@ mod test {
|
||||
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
1,
|
||||
)));
|
||||
node_crds.insert(new, 0).unwrap();
|
||||
|
||||
@@ -735,7 +820,7 @@ mod test {
|
||||
);
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 0);
|
||||
// if there is a false positive this is empty
|
||||
// prob should be around 0.1 per iteration
|
||||
|
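The new `now` parameter lets `filter_crds_values` ignore requests from callers whose wallclock is outside a window around the local time, and skip stored values newer than the caller plus some jitter. A standalone sketch of those two checks (constants and names are assumptions, not the crate's):

// Plain numbers stand in for CrdsValues here.
const MSG_TIMEOUT_MS: u64 = 15_000;

// Keep only requests whose caller wallclock falls inside the allowed window
// around `now`, like the `recent` filter above.
fn recent_requests(request_wallclocks: &[u64], now: u64) -> Vec<u64> {
    let future = now.saturating_add(MSG_TIMEOUT_MS);
    let past = now.saturating_sub(MSG_TIMEOUT_MS);
    request_wallclocks
        .iter()
        .copied()
        .filter(|wc| *wc < future && *wc >= past)
        .collect()
}

// A stored value is only returned if it is not newer than the caller's
// wallclock plus some jitter, mirroring the "skip values that are too new" check.
fn visible_to_caller(value_wallclock: u64, caller_wallclock: u64, jitter: u64) -> bool {
    value_wallclock <= caller_wallclock.checked_add(jitter).unwrap_or(0)
}

fn main() {
    let now = 100_000;
    assert_eq!(recent_requests(&[1_000, 99_000, 200_000], now), vec![99_000]);
    assert!(visible_to_caller(99_500, 99_000, 1_000));
    assert!(!visible_to_caller(150_000, 99_000, 1_000));
}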
@@ -2,6 +2,7 @@ use crate::{
|
||||
consensus::{ComputedBankState, Tower},
|
||||
fork_choice::ForkChoice,
|
||||
progress_map::ProgressMap,
|
||||
tree_diff::TreeDiff,
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{bank::Bank, epoch_stakes::EpochStakes};
|
||||
@@ -142,6 +143,20 @@ impl HeaviestSubtreeForkChoice {
|
||||
.map(|fork_info| fork_info.stake_voted_subtree)
|
||||
}
|
||||
|
||||
pub fn root(&self) -> Slot {
|
||||
self.root
|
||||
}
|
||||
|
||||
pub fn max_by_weight(&self, slot1: Slot, slot2: Slot) -> std::cmp::Ordering {
|
||||
let weight1 = self.stake_voted_subtree(slot1).unwrap();
|
||||
let weight2 = self.stake_voted_subtree(slot2).unwrap();
|
||||
if weight1 == weight2 {
|
||||
slot1.cmp(&slot2).reverse()
|
||||
} else {
|
||||
weight1.cmp(&weight2)
|
||||
}
|
||||
}
|
||||
|
||||
// Add new votes, returns the best slot
|
||||
pub fn add_votes(
|
||||
&mut self,
|
||||
@@ -159,28 +174,40 @@ impl HeaviestSubtreeForkChoice {
|
||||
self.best_overall_slot()
|
||||
}
|
||||
|
||||
pub fn set_root(&mut self, root: Slot) {
|
||||
self.last_root_time = Instant::now();
|
||||
self.root = root;
|
||||
let mut pending_slots = vec![root];
|
||||
let mut new_fork_infos = HashMap::new();
|
||||
while !pending_slots.is_empty() {
|
||||
let current_slot = pending_slots.pop().unwrap();
|
||||
let fork_info = self
|
||||
.fork_infos
|
||||
.remove(¤t_slot)
|
||||
.expect("Anything reachable from root must exist in the map");
|
||||
for child in &fork_info.children {
|
||||
pending_slots.push(*child);
|
||||
}
|
||||
new_fork_infos.insert(current_slot, fork_info);
|
||||
pub fn set_root(&mut self, new_root: Slot) {
|
||||
// Remove everything reachable from `self.root` but not `new_root`,
|
||||
// as those are now unrooted.
|
||||
let remove_set = self.subtree_diff(self.root, new_root);
|
||||
for slot in remove_set {
|
||||
self.fork_infos
|
||||
.remove(&slot)
|
||||
.expect("Slots reachable from old root must exist in tree");
|
||||
}
|
||||
|
||||
std::mem::swap(&mut self.fork_infos, &mut new_fork_infos);
|
||||
self.fork_infos
|
||||
.get_mut(&root)
|
||||
.get_mut(&new_root)
|
||||
.expect("new root must exist in fork_infos map")
|
||||
.parent = None;
|
||||
self.root = new_root;
|
||||
}
|
||||
|
||||
pub fn add_root_parent(&mut self, root_parent: Slot) {
|
||||
assert!(root_parent < self.root);
|
||||
assert!(self.fork_infos.get(&root_parent).is_none());
|
||||
let root_info = self
|
||||
.fork_infos
|
||||
.get_mut(&self.root)
|
||||
.expect("entry for root must exist");
|
||||
root_info.parent = Some(root_parent);
|
||||
let root_parent_info = ForkInfo {
|
||||
stake_voted_at: 0,
|
||||
stake_voted_subtree: root_info.stake_voted_subtree,
|
||||
// The `best_slot` of a leaf is itself
|
||||
best_slot: root_info.best_slot,
|
||||
children: vec![self.root],
|
||||
parent: None,
|
||||
};
|
||||
self.fork_infos.insert(root_parent, root_parent_info);
|
||||
self.root = root_parent;
|
||||
}
|
||||
|
||||
pub fn add_new_leaf_slot(&mut self, slot: Slot, parent: Option<Slot>) {
|
||||
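The new `set_root` prunes every slot reachable from the old root but not from the new one via `subtree_diff`. A sketch of that computation over a plain children map (names hypothetical, not the actual `TreeDiff` implementation):

use std::collections::{HashMap, HashSet};

type Slot = u64;

// Collect every slot reachable from `from` through the children map.
fn reachable(children: &HashMap<Slot, Vec<Slot>>, from: Slot) -> HashSet<Slot> {
    let mut seen = HashSet::new();
    let mut pending = vec![from];
    while let Some(slot) = pending.pop() {
        if seen.insert(slot) {
            if let Some(kids) = children.get(&slot) {
                pending.extend(kids.iter().copied());
            }
        }
    }
    seen
}

// Slots reachable from the old root but not from the new root are exactly the
// ones a `set_root(new_root)` call would prune.
fn subtree_diff(
    children: &HashMap<Slot, Vec<Slot>>,
    old_root: Slot,
    new_root: Slot,
) -> HashSet<Slot> {
    let keep = reachable(children, new_root);
    reachable(children, old_root)
        .into_iter()
        .filter(|slot| !keep.contains(slot))
        .collect()
}

fn main() {
    // 0 -> {1, 2}, 1 -> {3}: re-rooting at 1 prunes 0 and 2 but keeps 1 and 3.
    let mut children: HashMap<Slot, Vec<Slot>> = HashMap::new();
    children.insert(0, vec![1, 2]);
    children.insert(1, vec![3]);
    let pruned = subtree_diff(&children, 0, 1);
    let expected: HashSet<Slot> = [0, 2].iter().copied().collect();
    assert_eq!(pruned, expected);
}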
@@ -241,6 +268,59 @@ impl HeaviestSubtreeForkChoice {
|
||||
|
||||
true
|
||||
}
|
||||
pub fn all_slots_stake_voted_subtree(&self) -> Vec<(Slot, u64)> {
|
||||
self.fork_infos
|
||||
.iter()
|
||||
.map(|(slot, fork_info)| (*slot, fork_info.stake_voted_subtree))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn ancestors(&self, start_slot: Slot) -> Vec<Slot> {
|
||||
AncestorIterator::new(start_slot, &self.fork_infos).collect()
|
||||
}
|
||||
|
||||
pub fn merge(
|
||||
&mut self,
|
||||
other: HeaviestSubtreeForkChoice,
|
||||
merge_leaf: Slot,
|
||||
epoch_stakes: &HashMap<Epoch, EpochStakes>,
|
||||
epoch_schedule: &EpochSchedule,
|
||||
) {
|
||||
assert!(self.fork_infos.contains_key(&merge_leaf));
|
||||
|
||||
// Add all the nodes from `other` into our tree
|
||||
let mut other_slots_nodes: Vec<_> = other
|
||||
.fork_infos
|
||||
.iter()
|
||||
.map(|(slot, fork_info)| (slot, fork_info.parent.unwrap_or(merge_leaf)))
|
||||
.collect();
|
||||
|
||||
other_slots_nodes.sort_by_key(|(slot, _)| *slot);
|
||||
for (slot, parent) in other_slots_nodes {
|
||||
self.add_new_leaf_slot(*slot, Some(parent));
|
||||
}
|
||||
|
||||
// Add all the latest votes from `other` that are newer than the ones
|
||||
// in the current tree
|
||||
let new_votes: Vec<_> = other
|
||||
.latest_votes
|
||||
.into_iter()
|
||||
.filter(|(pk, other_latest_slot)| {
|
||||
self.latest_votes
|
||||
.get(&pk)
|
||||
.map(|latest_slot| other_latest_slot > latest_slot)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.collect();
|
||||
|
||||
self.add_votes(&new_votes, epoch_stakes, epoch_schedule);
|
||||
}
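
The filter inside `merge` above only carries over votes from `other` that are strictly newer than a vote already recorded for the same pubkey; pubkeys unknown to the current tree are dropped. A standalone sketch of that predicate (illustrative, not part of the diff):

// `mine` is our latest recorded vote slot for a pubkey (if any),
// `theirs` is that pubkey's latest vote slot in `other`.
fn should_adopt(mine: Option<Slot>, theirs: Slot) -> bool {
    mine.map(|latest| theirs > latest).unwrap_or(false)
}

assert!(should_adopt(Some(3), 5));   // newer vote wins
assert!(!should_adopt(Some(5), 3));  // older vote is ignored
assert!(!should_adopt(None, 3));     // unknown pubkeys are dropped, matching the filter above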
|
||||
|
||||
pub fn stake_voted_at(&self, slot: Slot) -> Option<u64> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| fork_info.stake_voted_at)
|
||||
}
|
||||
|
||||
fn propagate_new_leaf(&mut self, slot: Slot, parent: Slot) {
|
||||
let parent_best_slot = self
|
||||
@@ -401,12 +481,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
}
|
||||
}
|
||||
|
||||
fn children(&self, slot: Slot) -> Option<&[Slot]> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| &fork_info.children[..])
|
||||
}
|
||||
|
||||
fn parent(&self, slot: Slot) -> Option<Slot> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
@@ -425,13 +499,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn stake_voted_at(&self, slot: Slot) -> Option<u64> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| fork_info.stake_voted_at)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn set_stake_voted_at(&mut self, slot: Slot, stake_voted_at: u64) {
|
||||
self.fork_infos.get_mut(&slot).unwrap().stake_voted_at = stake_voted_at;
|
||||
@@ -443,6 +510,18 @@ impl HeaviestSubtreeForkChoice {
|
||||
}
|
||||
}
|
||||
|
||||
impl TreeDiff for HeaviestSubtreeForkChoice {
|
||||
fn contains_slot(&self, slot: Slot) -> bool {
|
||||
self.fork_infos.contains_key(&slot)
|
||||
}
|
||||
|
||||
fn children(&self, slot: Slot) -> Option<&[Slot]> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| &fork_info.children[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl ForkChoice for HeaviestSubtreeForkChoice {
|
||||
fn compute_bank_stats(
|
||||
&mut self,
|
||||
@@ -539,14 +618,75 @@ impl<'a> Iterator for AncestorIterator<'a> {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::consensus::test::VoteSimulator;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
|
||||
};
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_runtime::{bank::Bank, bank_utils};
|
||||
use std::{collections::HashSet, ops::Range};
|
||||
use trees::tr;
|
||||
|
||||
#[test]
|
||||
fn test_max_by_weight() {
|
||||
/*
|
||||
Build fork structure:
|
||||
slot 0
|
||||
|
|
||||
slot 4
|
||||
|
|
||||
slot 5
|
||||
*/
|
||||
let forks = tr(0) / (tr(4) / (tr(5)));
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
&[(vote_pubkeys[0], 4)],
|
||||
bank.epoch_stakes_map(),
|
||||
bank.epoch_schedule(),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.max_by_weight(4, 5),
|
||||
std::cmp::Ordering::Greater
|
||||
);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.max_by_weight(4, 0),
|
||||
std::cmp::Ordering::Less
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_root_parent() {
|
||||
/*
|
||||
Build fork structure:
|
||||
slot 3
|
||||
|
|
||||
slot 4
|
||||
|
|
||||
slot 5
|
||||
*/
|
||||
let forks = tr(3) / (tr(4) / (tr(5)));
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
&[(vote_pubkeys[0], 5)],
|
||||
bank.epoch_stakes_map(),
|
||||
bank.epoch_schedule(),
|
||||
);
|
||||
heaviest_subtree_fork_choice.add_root_parent(2);
|
||||
assert_eq!(heaviest_subtree_fork_choice.parent(3).unwrap(), 2);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.stake_voted_subtree(3).unwrap(),
|
||||
stake
|
||||
);
|
||||
assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(2).unwrap(), 0);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.children(2).unwrap().to_vec(),
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_slot(2).unwrap(), 5);
|
||||
assert!(heaviest_subtree_fork_choice.parent(2).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ancestor_iterator() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
@@ -634,7 +774,7 @@ mod test {
|
||||
fn test_set_root_and_add_votes() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(1, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
|
||||
// Vote for slot 2
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
@@ -681,7 +821,7 @@ mod test {
|
||||
fn test_set_root_and_add_outdated_votes() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(1, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
|
||||
// Vote for slot 0
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
@@ -792,7 +932,7 @@ mod test {
|
||||
|
||||
// Add a vote for the other branch at slot 3.
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(2, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(2, stake);
|
||||
let leaf6 = 6;
|
||||
// Leaf slot 9 stops being the `best_slot` at slot 1 because there
|
||||
// are now votes for the branch at slot 3
|
||||
@@ -858,7 +998,7 @@ mod test {
|
||||
let forks = tr(0) / (tr(4) / (tr(6)));
|
||||
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(1, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
|
||||
// slot 6 should be the best because it's the only leaf
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6);
|
||||
@@ -984,7 +1124,7 @@ mod test {
|
||||
fn test_process_update_operations() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(3, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake);
|
||||
|
||||
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
|
||||
(vote_pubkeys[0], 3),
|
||||
@@ -1057,7 +1197,7 @@ mod test {
|
||||
fn test_generate_update_operations() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(3, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake);
|
||||
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
|
||||
(vote_pubkeys[0], 3),
|
||||
(vote_pubkeys[1], 4),
|
||||
@@ -1170,7 +1310,7 @@ mod test {
|
||||
fn test_add_votes() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(3, stake);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake);
|
||||
|
||||
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
|
||||
(vote_pubkeys[0], 3),
|
||||
@@ -1215,7 +1355,7 @@ mod test {
|
||||
assert!(!heaviest_subtree_fork_choice.is_best_child(10));
|
||||
|
||||
// Add vote for 9, it's the best again
|
||||
let (bank, vote_pubkeys) = setup_bank_and_vote_pubkeys(3, 100);
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, 100);
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
&[(vote_pubkeys[0], 9)],
|
||||
bank.epoch_stakes_map(),
|
||||
@@ -1226,6 +1366,138 @@ mod test {
|
||||
assert!(!heaviest_subtree_fork_choice.is_best_child(10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge() {
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake);
|
||||
/*
|
||||
Build fork structure:
|
||||
slot 0
|
||||
|
|
||||
slot 3
|
||||
/ \
|
||||
slot 5 |
|
||||
| slot 9
|
||||
slot 7 |
|
||||
slot 11
|
||||
|
|
||||
slot 12 (vote pubkey 2)
|
||||
*/
|
||||
let forks = tr(0) / (tr(3) / (tr(5) / (tr(7))) / (tr(9) / (tr(11) / (tr(12)))));
|
||||
let mut tree1 = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
|
||||
(vote_pubkeys[0], 5),
|
||||
(vote_pubkeys[1], 3),
|
||||
(vote_pubkeys[2], 12),
|
||||
];
|
||||
tree1.add_votes(
|
||||
&pubkey_votes,
|
||||
bank.epoch_stakes_map(),
|
||||
bank.epoch_schedule(),
|
||||
);
|
||||
|
||||
/*
|
||||
Build fork structure:
|
||||
slot 10
|
||||
|
|
||||
slot 15
|
||||
/ \
|
||||
(vote pubkey 0) slot 16 |
|
||||
| slot 18
|
||||
slot 17 |
|
||||
slot 19 (vote pubkey 1)
|
||||
|
|
||||
slot 20
|
||||
*/
|
||||
let forks = tr(10) / (tr(15) / (tr(16) / (tr(17))) / (tr(18) / (tr(19) / (tr(20)))));
|
||||
let mut tree2 = HeaviestSubtreeForkChoice::new_from_tree(forks);
|
||||
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
|
||||
// more than tree1
|
||||
(vote_pubkeys[0], 16),
|
||||
// more than tree1
|
||||
(vote_pubkeys[1], 19),
|
||||
// less than tree1
|
||||
(vote_pubkeys[2], 10),
|
||||
];
|
||||
tree2.add_votes(
|
||||
&pubkey_votes,
|
||||
bank.epoch_stakes_map(),
|
||||
bank.epoch_schedule(),
|
||||
);
|
||||
|
||||
// Merge tree2 at leaf 7 of tree1
|
||||
tree1.merge(tree2, 7, bank.epoch_stakes_map(), bank.epoch_schedule());
|
||||
|
||||
// Check ancestry information is correct
|
||||
let ancestors: Vec<_> = tree1.ancestor_iterator(20).collect();
|
||||
assert_eq!(ancestors, vec![19, 18, 15, 10, 7, 5, 3, 0]);
|
||||
let ancestors: Vec<_> = tree1.ancestor_iterator(17).collect();
|
||||
assert_eq!(ancestors, vec![16, 15, 10, 7, 5, 3, 0]);
|
||||
|
||||
// Check correctness of votes
|
||||
// Pubkey 0
|
||||
assert_eq!(tree1.stake_voted_at(16).unwrap(), stake);
|
||||
assert_eq!(tree1.stake_voted_at(5).unwrap(), 0);
|
||||
// Pubkey 1
|
||||
assert_eq!(tree1.stake_voted_at(19).unwrap(), stake);
|
||||
assert_eq!(tree1.stake_voted_at(3).unwrap(), 0);
|
||||
// Pubkey 2
|
||||
assert_eq!(tree1.stake_voted_at(10).unwrap(), 0);
|
||||
assert_eq!(tree1.stake_voted_at(12).unwrap(), stake);
|
||||
|
||||
for slot in &[0, 3] {
|
||||
assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 3 * stake);
|
||||
}
|
||||
for slot in &[5, 7, 10, 15] {
|
||||
assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 2 * stake);
|
||||
}
|
||||
for slot in &[9, 11, 12, 16, 18, 19] {
|
||||
assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), stake);
|
||||
}
|
||||
for slot in &[17, 20] {
|
||||
assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 0);
|
||||
}
|
||||
|
||||
assert_eq!(tree1.best_overall_slot(), 17);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subtree_diff() {
|
||||
let mut heaviest_subtree_fork_choice = setup_forks();
|
||||
|
||||
// Diff of a slot with itself is empty, whether it's the root, an intermediate node, or a leaf
|
||||
assert!(heaviest_subtree_fork_choice.subtree_diff(0, 0).is_empty());
|
||||
assert!(heaviest_subtree_fork_choice.subtree_diff(5, 5).is_empty());
|
||||
assert!(heaviest_subtree_fork_choice.subtree_diff(6, 6).is_empty());
|
||||
|
||||
// The set reachable from slot 3, excluding subtree 1, is just everything
|
||||
// in slot 3 since subtree 1 is an ancestor
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.subtree_diff(3, 1),
|
||||
vec![3, 5, 6].into_iter().collect::<HashSet<_>>()
|
||||
);
|
||||
|
||||
// The set reachable from slot 1, excluding subtree 3, is just 1 and
|
||||
// the subtree at 2
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.subtree_diff(1, 3),
|
||||
vec![1, 2, 4].into_iter().collect::<HashSet<_>>()
|
||||
);
|
||||
|
||||
// The set reachable from slot 0, excluding leaf 6, is just everything
|
||||
// except leaf 6
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.subtree_diff(0, 6),
|
||||
vec![0, 1, 3, 5, 2, 4].into_iter().collect::<HashSet<_>>()
|
||||
);
|
||||
|
||||
// Set root at 1
|
||||
heaviest_subtree_fork_choice.set_root(1);
|
||||
|
||||
// Slot 0 no longer exists, so the set reachable from 0 is empty
|
||||
assert!(heaviest_subtree_fork_choice.subtree_diff(0, 6).is_empty());
|
||||
}
|
||||
|
||||
fn setup_forks() -> HeaviestSubtreeForkChoice {
|
||||
/*
|
||||
Build fork structure:
|
||||
@@ -1244,26 +1516,6 @@ mod test {
|
||||
HeaviestSubtreeForkChoice::new_from_tree(forks)
|
||||
}
|
||||
|
||||
fn setup_bank_and_vote_pubkeys(num_vote_accounts: usize, stake: u64) -> (Bank, Vec<Pubkey>) {
|
||||
// Create some voters at genesis
|
||||
let validator_voting_keypairs: Vec<_> = (0..num_vote_accounts)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
|
||||
let vote_pubkeys: Vec<_> = validator_voting_keypairs
|
||||
.iter()
|
||||
.map(|k| k.vote_keypair.pubkey())
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_voting_keypairs,
|
||||
stake,
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
(bank, vote_pubkeys)
|
||||
}
|
||||
|
||||
fn check_process_update_correctness<F>(
|
||||
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
pubkey_votes: &[(Pubkey, Slot)],
|
||||
|
@@ -19,6 +19,7 @@ pub mod contact_info;
|
||||
pub mod bank_weight_fork_choice;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_slots;
|
||||
pub mod cluster_slots_service;
|
||||
pub mod consensus;
|
||||
pub mod crds;
|
||||
pub mod crds_gossip;
|
||||
@@ -41,6 +42,8 @@ pub mod progress_map;
|
||||
pub mod pubkey_references;
|
||||
pub mod repair_response;
|
||||
pub mod repair_service;
|
||||
pub mod repair_weight;
|
||||
pub mod repair_weighted_traversal;
|
||||
pub mod replay_stage;
|
||||
mod result;
|
||||
pub mod retransmit_stage;
|
||||
@@ -61,6 +64,7 @@ pub mod sigverify_stage;
|
||||
pub mod snapshot_packager_service;
|
||||
pub mod tpu;
|
||||
pub mod transaction_status_service;
|
||||
pub mod tree_diff;
|
||||
pub mod tvu;
|
||||
pub mod validator;
|
||||
pub mod verified_vote_packets;
|
||||
|
@@ -23,14 +23,14 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
StakeState::Initialized(meta) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
StakeState::Stake(meta, _stake) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
@@ -77,6 +77,7 @@ solana_sdk::pubkeys!(
|
||||
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
|
||||
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
|
||||
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
|
||||
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
|
||||
]
|
||||
);
|
||||
|
||||
|
@@ -2,21 +2,21 @@
|
||||
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
repair_weight::RepairWeight,
|
||||
repair_weighted_traversal::Contains,
|
||||
result::Result,
|
||||
serve_repair::{RepairType, ServeRepair, DEFAULT_NONCE},
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::{thread_rng, Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
|
||||
blockstore::{Blockstore, SlotMeta},
|
||||
shred::Nonce,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
|
||||
use std::{
|
||||
@@ -71,6 +71,33 @@ pub struct RepairStats {
|
||||
pub shred: RepairStatsGroup,
|
||||
pub highest_shred: RepairStatsGroup,
|
||||
pub orphan: RepairStatsGroup,
|
||||
pub get_best_orphans_us: u64,
|
||||
pub get_best_shreds_us: u64,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct RepairTiming {
|
||||
pub set_root_elapsed: u64,
|
||||
pub get_votes_elapsed: u64,
|
||||
pub add_votes_elapsed: u64,
|
||||
pub get_best_orphans_elapsed: u64,
|
||||
pub get_best_shreds_elapsed: u64,
|
||||
pub send_repairs_elapsed: u64,
|
||||
}
|
||||
|
||||
impl RepairTiming {
|
||||
fn update(
|
||||
&mut self,
|
||||
set_root_elapsed: u64,
|
||||
get_votes_elapsed: u64,
|
||||
add_votes_elapsed: u64,
|
||||
send_repairs_elapsed: u64,
|
||||
) {
|
||||
self.set_root_elapsed += set_root_elapsed;
|
||||
self.get_votes_elapsed += get_votes_elapsed;
|
||||
self.add_votes_elapsed += add_votes_elapsed;
|
||||
self.send_repairs_elapsed += send_repairs_elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
pub const MAX_REPAIR_LENGTH: usize = 512;
|
||||
@@ -81,7 +108,6 @@ pub const MAX_ORPHANS: usize = 5;
|
||||
|
||||
pub struct RepairInfo {
|
||||
pub bank_forks: Arc<RwLock<BankForks>>,
|
||||
pub completed_slots_receiver: CompletedSlotsReceiver,
|
||||
pub epoch_schedule: EpochSchedule,
|
||||
pub duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
}
|
||||
@@ -118,7 +144,7 @@ impl RepairService {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) -> Self {
|
||||
let t_repair = Builder::new()
|
||||
.name("solana-repair-service".to_string())
|
||||
@@ -130,7 +156,7 @@ impl RepairService {
|
||||
cluster_info,
|
||||
repair_info,
|
||||
&cluster_slots,
|
||||
vote_tracker,
|
||||
verified_vote_receiver,
|
||||
)
|
||||
})
|
||||
.unwrap();
|
||||
@@ -145,31 +171,57 @@ impl RepairService {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: &ClusterSlots,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) {
|
||||
let mut repair_weight = RepairWeight::new(repair_info.bank_forks.read().unwrap().root());
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut repair_timing = RepairTiming::default();
|
||||
let mut last_stats = Instant::now();
|
||||
let duplicate_slot_repair_statuses = HashMap::new();
|
||||
Self::initialize_epoch_slots(
|
||||
blockstore,
|
||||
&cluster_info,
|
||||
&repair_info.completed_slots_receiver,
|
||||
);
|
||||
let duplicate_slot_repair_statuses: HashMap<Slot, DuplicateSlotRepairStatus> =
|
||||
HashMap::new();
|
||||
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
let mut set_root_elapsed;
|
||||
let mut get_votes_elapsed;
|
||||
let mut add_votes_elapsed;
|
||||
let repairs = {
|
||||
let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone();
|
||||
let new_root = root_bank.slot();
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, &cluster_info, &repair_info.bank_forks);
|
||||
|
||||
// Purge outdated slots from the weighting heuristic
|
||||
set_root_elapsed = Measure::start("set_root_elapsed");
|
||||
repair_weight.set_root(new_root);
|
||||
set_root_elapsed.stop();
|
||||
|
||||
// Add new votes to the weighting heuristic
|
||||
get_votes_elapsed = Measure::start("get_votes_elapsed");
|
||||
let mut slot_to_vote_pubkeys: HashMap<Slot, Vec<Pubkey>> = HashMap::new();
|
||||
verified_vote_receiver
|
||||
.try_iter()
|
||||
.for_each(|(vote_pubkey, vote_slots)| {
|
||||
for slot in vote_slots {
|
||||
slot_to_vote_pubkeys
|
||||
.entry(slot)
|
||||
.or_default()
|
||||
.push(vote_pubkey);
|
||||
}
|
||||
});
|
||||
get_votes_elapsed.stop();
|
||||
|
||||
add_votes_elapsed = Measure::start("add_votes");
|
||||
repair_weight.add_votes(
|
||||
&blockstore,
|
||||
slot_to_vote_pubkeys.into_iter(),
|
||||
root_bank.epoch_stakes_map(),
|
||||
root_bank.epoch_schedule(),
|
||||
);
|
||||
add_votes_elapsed.stop();
|
||||
/*let new_duplicate_slots = Self::find_new_duplicate_slots(
|
||||
&duplicate_slot_repair_statuses,
|
||||
blockstore,
|
||||
@@ -193,31 +245,40 @@ impl RepairService {
|
||||
&mut repair_stats,
|
||||
&repair_socket,
|
||||
);*/
|
||||
Self::generate_repairs(
|
||||
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
blockstore,
|
||||
root_bank.slot(),
|
||||
root_bank.epoch_stakes_map(),
|
||||
root_bank.epoch_schedule(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&duplicate_slot_repair_statuses,
|
||||
&vote_tracker,
|
||||
Some(&mut repair_timing),
|
||||
)
|
||||
};
|
||||
|
||||
if let Ok(repairs) = repairs {
|
||||
let mut cache = HashMap::new();
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
repair_request,
|
||||
&mut cache,
|
||||
&mut repair_stats,
|
||||
) {
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
let mut cache = HashMap::new();
|
||||
let mut send_repairs_elapsed = Measure::start("send_repairs_elapsed");
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
repair_request,
|
||||
&mut cache,
|
||||
&mut repair_stats,
|
||||
) {
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
}
|
||||
});
|
||||
send_repairs_elapsed.stop();
|
||||
repair_timing.update(
|
||||
set_root_elapsed.as_us(),
|
||||
get_votes_elapsed.as_us(),
|
||||
add_votes_elapsed.as_us(),
|
||||
send_repairs_elapsed.as_us(),
|
||||
);
|
||||
|
||||
if last_stats.elapsed().as_secs() > 2 {
|
||||
let repair_total = repair_stats.shred.count
|
||||
@@ -235,7 +296,29 @@ impl RepairService {
|
||||
("repair-orphan", repair_stats.orphan.max, i64),
|
||||
);
|
||||
}
|
||||
datapoint_info!(
|
||||
"serve_repair-repair-timing",
|
||||
("set-root-elapsed", repair_timing.set_root_elapsed, i64),
|
||||
("get-votes-elapsed", repair_timing.get_votes_elapsed, i64),
|
||||
("add-votes-elapsed", repair_timing.add_votes_elapsed, i64),
|
||||
(
|
||||
"get-best-orphans-elapsed",
|
||||
repair_timing.get_best_orphans_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get-best-shreds-elapsed",
|
||||
repair_timing.get_best_shreds_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"send-repairs-elapsed",
|
||||
repair_timing.send_repairs_elapsed,
|
||||
i64
|
||||
),
|
||||
);
|
||||
repair_stats = RepairStats::default();
|
||||
repair_timing = RepairTiming::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(REPAIR_MS));
|
||||
@@ -275,29 +358,59 @@ impl RepairService {
|
||||
Ok(repairs)
|
||||
}
|
||||
|
||||
fn generate_repairs(
|
||||
pub fn generate_repairs_for_slot(
|
||||
blockstore: &Blockstore,
|
||||
root: Slot,
|
||||
slot: Slot,
|
||||
slot_meta: &SlotMeta,
|
||||
max_repairs: usize,
|
||||
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
) -> Result<Vec<RepairType>> {
|
||||
// Slot height and shred indexes for shreds we want to repair
|
||||
let mut repairs: Vec<RepairType> = vec![];
|
||||
Self::generate_repairs_for_fork(
|
||||
blockstore,
|
||||
&mut repairs,
|
||||
max_repairs,
|
||||
root,
|
||||
duplicate_slot_repair_statuses,
|
||||
vote_tracker,
|
||||
);
|
||||
) -> Vec<RepairType> {
|
||||
if max_repairs == 0 || slot_meta.is_full() {
|
||||
vec![]
|
||||
} else if slot_meta.consumed == slot_meta.received {
|
||||
vec![RepairType::HighestShred(slot, slot_meta.received)]
|
||||
} else {
|
||||
let reqs = blockstore.find_missing_data_indexes(
|
||||
slot,
|
||||
slot_meta.first_shred_timestamp,
|
||||
slot_meta.consumed,
|
||||
slot_meta.received,
|
||||
max_repairs,
|
||||
);
|
||||
reqs.into_iter()
|
||||
.map(|i| RepairType::Shred(slot, i))
|
||||
.collect()
|
||||
}
|
||||
}
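
To summarize the new `generate_repairs_for_slot` above: a full slot needs nothing, a slot with no gaps (`consumed == received`) asks for the highest shred, and anything else asks for the specific missing data indexes, capped at `max_repairs`. A hedged usage sketch; the blockstore handle, slot, and cap of 32 are hypothetical:

if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
    let repairs = RepairService::generate_repairs_for_slot(&blockstore, slot, &slot_meta, 32);
    // Never more requests than the cap, and none at all for a completed slot
    assert!(repairs.len() <= 32);
    if slot_meta.is_full() {
        assert!(repairs.is_empty());
    }
}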
|
||||
|
||||
// Try to resolve orphans in blockstore
|
||||
let orphans = blockstore.orphans_iterator(root + 1).unwrap();
|
||||
Self::generate_repairs_for_orphans(orphans, &mut repairs);
|
||||
|
||||
Ok(repairs)
|
||||
/// Repairs any fork starting at the input slot
|
||||
pub fn generate_repairs_for_fork(
|
||||
blockstore: &Blockstore,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
max_repairs: usize,
|
||||
slot: Slot,
|
||||
duplicate_slot_repair_statuses: &dyn Contains<Slot>,
|
||||
) {
|
||||
let mut pending_slots = vec![slot];
|
||||
while repairs.len() < max_repairs && !pending_slots.is_empty() {
|
||||
let slot = pending_slots.pop().unwrap();
|
||||
if duplicate_slot_repair_statuses.contains(&slot) {
|
||||
// These are repaired through a different path
|
||||
continue;
|
||||
}
|
||||
if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
|
||||
let new_repairs = Self::generate_repairs_for_slot(
|
||||
blockstore,
|
||||
slot,
|
||||
&slot_meta,
|
||||
max_repairs - repairs.len(),
|
||||
);
|
||||
repairs.extend(new_repairs);
|
||||
let next_slots = slot_meta.next_slots;
|
||||
pending_slots.extend(next_slots);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
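
The public `generate_repairs_for_fork` above walks the fork through blockstore `next_slots`, skipping slots handled by the duplicate-repair path, until `max_repairs` requests have been collected. A usage sketch with hypothetical values (`blockstore`, `start_slot`); note that a plain `HashSet<Slot>` works as the ignore set because of the `Contains` impl added in repair_weighted_traversal.rs:

let mut repairs: Vec<RepairType> = vec![];
let ignore: HashSet<Slot> = HashSet::new();
RepairService::generate_repairs_for_fork(&blockstore, &mut repairs, 64, start_slot, &ignore);
assert!(repairs.len() <= 64);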
|
||||
|
||||
#[allow(dead_code)]
|
||||
@@ -503,158 +616,6 @@ impl RepairService {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_repairs_for_slot(
|
||||
blockstore: &Blockstore,
|
||||
slot: Slot,
|
||||
slot_meta: &SlotMeta,
|
||||
max_repairs: usize,
|
||||
) -> Vec<RepairType> {
|
||||
if slot_meta.is_full() {
|
||||
vec![]
|
||||
} else if slot_meta.consumed == slot_meta.received {
|
||||
vec![RepairType::HighestShred(slot, slot_meta.received)]
|
||||
} else {
|
||||
let reqs = blockstore.find_missing_data_indexes(
|
||||
slot,
|
||||
slot_meta.first_shred_timestamp,
|
||||
slot_meta.consumed,
|
||||
slot_meta.received,
|
||||
max_repairs,
|
||||
);
|
||||
reqs.into_iter()
|
||||
.map(|i| RepairType::Shred(slot, i))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_repairs_for_orphans(
|
||||
orphans: impl Iterator<Item = u64>,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
) {
|
||||
repairs.extend(orphans.take(MAX_ORPHANS).map(RepairType::Orphan));
|
||||
}
|
||||
|
||||
/// Repairs any fork starting at the input slot
|
||||
fn generate_repairs_for_fork(
|
||||
blockstore: &Blockstore,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
max_repairs: usize,
|
||||
slot: Slot,
|
||||
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
) {
|
||||
let mut seed = [0u8; 32];
|
||||
thread_rng().fill(&mut seed);
|
||||
let rng = &mut ChaChaRng::from_seed(seed);
|
||||
let mut pending_slots = vec![slot];
|
||||
while repairs.len() < max_repairs && !pending_slots.is_empty() {
|
||||
pending_slots.retain(|slot| !duplicate_slot_repair_statuses.contains_key(slot));
|
||||
let mut next_pending_slots = vec![];
|
||||
let mut level_repairs = HashMap::new();
|
||||
for slot in &pending_slots {
|
||||
if let Some(slot_meta) = blockstore.meta(*slot).unwrap() {
|
||||
let new_repairs = Self::generate_repairs_for_slot(
|
||||
blockstore,
|
||||
*slot,
|
||||
&slot_meta,
|
||||
std::usize::MAX,
|
||||
);
|
||||
if !new_repairs.is_empty() {
|
||||
level_repairs.insert(*slot, new_repairs);
|
||||
}
|
||||
next_pending_slots.extend(slot_meta.next_slots);
|
||||
}
|
||||
}
|
||||
|
||||
if !level_repairs.is_empty() {
|
||||
let mut slots_to_repair: Vec<_> = level_repairs.keys().cloned().collect();
|
||||
let mut weights: Vec<_> = {
|
||||
let r_vote_tracker = vote_tracker.slot_vote_trackers.read().unwrap();
|
||||
slots_to_repair
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
if let Some(slot_vote_tracker) = r_vote_tracker.get(slot) {
|
||||
std::cmp::max(slot_vote_tracker.read().unwrap().total_stake, 1)
|
||||
} else {
|
||||
// should it be something else?
|
||||
1
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
|
||||
let mut weighted_index = WeightedIndex::new(weights.clone()).unwrap();
|
||||
while repairs.len() < max_repairs && !level_repairs.is_empty() {
|
||||
let index = weighted_index.sample(rng);
|
||||
let slot_repairs = level_repairs.get_mut(&slots_to_repair[index]).unwrap();
|
||||
repairs.push(slot_repairs.remove(0));
|
||||
if slot_repairs.is_empty() {
|
||||
level_repairs.remove(&slots_to_repair[index]);
|
||||
slots_to_repair.remove(index);
|
||||
weights.remove(index);
|
||||
if !weights.is_empty() {
|
||||
weighted_index = WeightedIndex::new(weights.clone()).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pending_slots = next_pending_slots;
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
// will provide a schedule to window_service for any incoming shreds up to the
|
||||
// last_confirmed_epoch.
|
||||
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
}
|
||||
|
||||
fn update_completed_slots(
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
let mut slots: Vec<Slot> = vec![];
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_repair.join()
|
||||
}
|
||||
@@ -673,6 +634,7 @@ mod test {
|
||||
use solana_runtime::genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs};
|
||||
use solana_sdk::signature::Signer;
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[test]
|
||||
pub fn test_repair_orphan() {
|
||||
@@ -685,11 +647,18 @@ mod test {
|
||||
let (shreds2, _) = make_slot_entries(5, 2, 1);
|
||||
shreds.extend(shreds2);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new(), &vote_tracker)
|
||||
.unwrap(),
|
||||
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
&HashMap::new(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
vec![RepairType::Orphan(2), RepairType::HighestShred(0, 0)]
|
||||
);
|
||||
}
|
||||
|
||||
@@ -707,12 +676,19 @@ mod test {
|
||||
// Write this shred to slot 2, should chain to slot 0, which we haven't received
|
||||
// any shreds for
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
// Check that repair tries to patch the empty slot
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new(), &vote_tracker)
|
||||
.unwrap(),
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
&HashMap::new(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
vec![RepairType::HighestShred(0, 0)]
|
||||
);
|
||||
}
|
||||
@@ -757,83 +733,36 @@ mod test {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
std::usize::MAX,
|
||||
&HashMap::new(),
|
||||
&vote_tracker
|
||||
)
|
||||
.unwrap(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
expected
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
expected.len() - 2,
|
||||
&HashMap::new(),
|
||||
&vote_tracker,
|
||||
)
|
||||
.unwrap()[..],
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
expected.len() - 2,
|
||||
&HashSet::new(),
|
||||
None
|
||||
)[..],
|
||||
expected[0..expected.len() - 2]
|
||||
);
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_repairs_distributed_across_slots() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
|
||||
let num_entries_per_slot = 100;
|
||||
|
||||
// Create some shreds
|
||||
for i in 1..10 {
|
||||
let (shreds, _) = make_slot_entries(i, 0, num_entries_per_slot as u64);
|
||||
|
||||
// Only insert the first shred
|
||||
blockstore
|
||||
.insert_shreds(shreds[..1].to_vec(), None, false)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let repairs = RepairService::generate_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
num_entries_per_slot,
|
||||
&HashMap::new(),
|
||||
&vote_tracker,
|
||||
)
|
||||
.unwrap();
|
||||
let mut repairs_slots = HashMap::new();
|
||||
for repair in repairs {
|
||||
match repair {
|
||||
RepairType::Shred(slot, _shred_index) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
RepairType::HighestShred(slot, _shred_index) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
for i in 1..10 {
|
||||
assert!(repairs_slots.contains_key(&i));
|
||||
}
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_generate_highest_repair() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
@@ -855,16 +784,17 @@ mod test {
|
||||
let expected: Vec<RepairType> =
|
||||
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
std::usize::MAX,
|
||||
&HashMap::new(),
|
||||
&vote_tracker
|
||||
)
|
||||
.unwrap(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
expected
|
||||
);
|
||||
}
|
||||
@@ -963,19 +893,6 @@ mod test {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
|
||||
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
|
||||
let lowest = cluster_info
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
|
||||
lowest_slot.clone()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(lowest.lowest, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_generate_duplicate_repairs_for_slot() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
|
1271
core/src/repair_weight.rs
Normal file
File diff suppressed because it is too large
428
core/src/repair_weighted_traversal.rs
Normal file
@@ -0,0 +1,428 @@
|
||||
use crate::{
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, repair_service::RepairService,
|
||||
serve_repair::RepairType, tree_diff::TreeDiff,
|
||||
};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::clock::Slot;
|
||||
use std::{
|
||||
cmp::Eq,
|
||||
collections::{HashMap, HashSet},
|
||||
hash::{BuildHasher, Hash},
|
||||
};
|
||||
|
||||
pub trait Contains<T: Eq + Hash> {
|
||||
fn contains(&self, key: &T) -> bool;
|
||||
}
|
||||
|
||||
impl<T: Eq + Hash, U, S: BuildHasher> Contains<T> for HashMap<T, U, S> {
|
||||
fn contains(&self, key: &T) -> bool {
|
||||
self.contains_key(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Eq + Hash, S: BuildHasher> Contains<T> for HashSet<T, S> {
|
||||
fn contains(&self, key: &T) -> bool {
|
||||
self.contains(key)
|
||||
}
|
||||
}
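
The `Contains` trait above exists so the repair code can accept either a `HashSet<Slot>` or the key set of a `HashMap<Slot, DuplicateSlotRepairStatus>` as its ignore set. A small illustrative sketch (not part of the diff) of a function that takes both:

fn is_ignored(ignore: &dyn Contains<Slot>, slot: Slot) -> bool {
    ignore.contains(&slot)
}

let set: HashSet<Slot> = vec![3, 5].into_iter().collect();
let map: HashMap<Slot, &str> = vec![(7, "dup")].into_iter().collect();
assert!(is_ignored(&set, 3));
assert!(is_ignored(&map, 7)); // the HashMap impl checks contains_key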
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum Visit {
|
||||
Visited(Slot),
|
||||
Unvisited(Slot),
|
||||
}
|
||||
|
||||
impl Visit {
|
||||
pub fn slot(&self) -> Slot {
|
||||
match self {
|
||||
Visit::Visited(slot) => *slot,
|
||||
Visit::Unvisited(slot) => *slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Iterates through slots in order of weight
|
||||
struct RepairWeightTraversal<'a> {
|
||||
tree: &'a HeaviestSubtreeForkChoice,
|
||||
pending: Vec<Visit>,
|
||||
}
|
||||
|
||||
impl<'a> RepairWeightTraversal<'a> {
|
||||
pub fn new(tree: &'a HeaviestSubtreeForkChoice) -> Self {
|
||||
Self {
|
||||
tree,
|
||||
pending: vec![Visit::Unvisited(tree.root())],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterator for RepairWeightTraversal<'a> {
|
||||
type Item = Visit;
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let next = self.pending.pop();
|
||||
next.map(|next| {
|
||||
if let Visit::Unvisited(slot) = next {
|
||||
// Add a bookmark to communicate all child
|
||||
// slots have been visited
|
||||
self.pending.push(Visit::Visited(slot));
|
||||
let mut children: Vec<_> = self
|
||||
.tree
|
||||
.children(slot)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|child_slot| Visit::Unvisited(*child_slot))
|
||||
.collect();
|
||||
|
||||
// Sort children by weight to prioritize visiting the heaviest
|
||||
// ones first
|
||||
children
|
||||
.sort_by(|slot1, slot2| self.tree.max_by_weight(slot1.slot(), slot2.slot()));
|
||||
self.pending.extend(children);
|
||||
}
|
||||
next
|
||||
})
|
||||
}
|
||||
}
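
The iterator above is a stack-based DFS: popping an `Unvisited(slot)` pushes a `Visited(slot)` bookmark first and then the children in ascending weight, so the heaviest child is popped and explored first, and the bookmark only resurfaces once the whole subtree has been emitted. An equivalent recursive formulation, for illustration only (it assumes the tree API shown earlier in this diff):

fn visit_in_weight_order(tree: &HeaviestSubtreeForkChoice, slot: Slot, out: &mut Vec<Visit>) {
    out.push(Visit::Unvisited(slot));
    let mut children: Vec<Slot> = tree.children(slot).unwrap().to_vec();
    // Heaviest (or, on ties, lowest-numbered) child first, matching the iterator's pop order
    children.sort_by(|a, b| tree.max_by_weight(*a, *b).reverse());
    for child in children {
        visit_in_weight_order(tree, child, out);
    }
    out.push(Visit::Visited(slot));
}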
|
||||
|
||||
// Generate shred repairs for main subtree rooted at `self.slot`
|
||||
pub fn get_best_repair_shreds(
|
||||
tree: &HeaviestSubtreeForkChoice,
|
||||
blockstore: &Blockstore,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
max_new_shreds: usize,
|
||||
ignore_slots: &dyn Contains<Slot>,
|
||||
) {
|
||||
let initial_len = repairs.len();
|
||||
let max_repairs = initial_len + max_new_shreds;
|
||||
let weighted_iter = RepairWeightTraversal::new(tree);
|
||||
let mut visited_set = HashSet::new();
|
||||
let mut slot_meta_cache = HashMap::new();
|
||||
for next in weighted_iter {
|
||||
if repairs.len() > max_repairs {
|
||||
break;
|
||||
}
|
||||
|
||||
let slot_meta = slot_meta_cache
|
||||
.entry(next.slot())
|
||||
.or_insert_with(|| blockstore.meta(next.slot()).unwrap());
|
||||
|
||||
if let Some(slot_meta) = slot_meta {
|
||||
match next {
|
||||
Visit::Unvisited(slot) => {
|
||||
if !ignore_slots.contains(&slot) {
|
||||
let new_repairs = RepairService::generate_repairs_for_slot(
|
||||
blockstore,
|
||||
slot,
|
||||
&slot_meta,
|
||||
max_repairs - repairs.len(),
|
||||
);
|
||||
repairs.extend(new_repairs);
|
||||
}
|
||||
visited_set.insert(slot);
|
||||
}
|
||||
Visit::Visited(_) => {
|
||||
// By the time we reach here, this means all the children of this slot
|
||||
// have been explored/repaired. Although this slot has already been visited,
|
||||
// this slot is still the heaviest slot left in the traversal. Thus any
|
||||
// remaining children that have not been explored should now be repaired.
|
||||
for new_child_slot in &slot_meta.next_slots {
|
||||
// If the `new_child_slot` has not been visited by now, it must
|
||||
// not exist in `tree`
|
||||
if !visited_set.contains(new_child_slot) {
|
||||
// Generate repairs for entire subtree rooted at `new_child_slot`
|
||||
RepairService::generate_repairs_for_fork(
|
||||
blockstore,
|
||||
repairs,
|
||||
max_repairs,
|
||||
*new_child_slot,
|
||||
ignore_slots,
|
||||
);
|
||||
}
|
||||
visited_set.insert(*new_child_slot);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use solana_ledger::{get_tmp_ledger_path, shred::Shred};
|
||||
use solana_runtime::bank_utils;
|
||||
use trees::tr;
|
||||
|
||||
#[test]
|
||||
fn test_weighted_repair_traversal_single() {
|
||||
let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(42);
|
||||
let weighted_traversal = RepairWeightTraversal::new(&heaviest_subtree_fork_choice);
|
||||
let steps: Vec<_> = weighted_traversal.collect();
|
||||
assert_eq!(steps, vec![Visit::Unvisited(42), Visit::Visited(42)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_weighted_repair_traversal() {
|
||||
let stake = 100;
|
||||
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake);
|
||||
let (_, mut heaviest_subtree_fork_choice) = setup_forks();
|
||||
let weighted_traversal = RepairWeightTraversal::new(&heaviest_subtree_fork_choice);
|
||||
let steps: Vec<_> = weighted_traversal.collect();
|
||||
|
||||
// When every node has a weight of zero, visit
|
||||
// smallest children first
|
||||
assert_eq!(
|
||||
steps,
|
||||
vec![
|
||||
Visit::Unvisited(0),
|
||||
Visit::Unvisited(1),
|
||||
Visit::Unvisited(2),
|
||||
Visit::Unvisited(4),
|
||||
Visit::Visited(4),
|
||||
Visit::Visited(2),
|
||||
Visit::Unvisited(3),
|
||||
Visit::Unvisited(5),
|
||||
Visit::Visited(5),
|
||||
Visit::Visited(3),
|
||||
Visit::Visited(1),
|
||||
Visit::Visited(0)
|
||||
]
|
||||
);
|
||||
|
||||
// Add a vote to branch with slot 5,
|
||||
// should prioritize that branch
|
||||
heaviest_subtree_fork_choice.add_votes(
|
||||
&[(vote_pubkeys[0], 5)],
|
||||
bank.epoch_stakes_map(),
|
||||
bank.epoch_schedule(),
|
||||
);
|
||||
|
||||
let weighted_traversal = RepairWeightTraversal::new(&heaviest_subtree_fork_choice);
|
||||
let steps: Vec<_> = weighted_traversal.collect();
|
||||
assert_eq!(
|
||||
steps,
|
||||
vec![
|
||||
Visit::Unvisited(0),
|
||||
Visit::Unvisited(1),
|
||||
Visit::Unvisited(3),
|
||||
Visit::Unvisited(5),
|
||||
Visit::Visited(5),
|
||||
// Prioritizes heavier child 3 over 2
|
||||
Visit::Visited(3),
|
||||
Visit::Unvisited(2),
|
||||
Visit::Unvisited(4),
|
||||
Visit::Visited(4),
|
||||
Visit::Visited(2),
|
||||
Visit::Visited(1),
|
||||
Visit::Visited(0)
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_best_repair_shreds() {
|
||||
let (blockstore, heaviest_subtree_fork_choice) = setup_forks();
|
||||
|
||||
// `blockstore` and `heaviest_subtree_fork_choice` match exactly, so should
|
||||
// return repairs for all slots (none are completed) in order of traversal
|
||||
let mut repairs = vec![];
|
||||
let last_shred = blockstore.meta(0).unwrap().unwrap().received;
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
6,
|
||||
&HashSet::new(),
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 1, 2, 4, 3, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Add some leaves to blockstore, attached to the current best leaf, should prioritize
|
||||
// repairing those new leaves before trying other branches
|
||||
repairs = vec![];
|
||||
let best_overall_slot = heaviest_subtree_fork_choice.best_overall_slot();
|
||||
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4);
|
||||
blockstore.add_tree(tr(best_overall_slot) / (tr(6) / tr(7)), true, false);
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
6,
|
||||
&HashSet::new(),
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 1, 2, 4, 6, 7]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Completing slots should remove them from the repair list
|
||||
repairs = vec![];
|
||||
let completed_shreds: Vec<Shred> = [0, 2, 4, 6]
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
let mut shred = Shred::new_from_serialized_shred(
|
||||
blockstore
|
||||
.get_data_shred(*slot, last_shred - 1)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
shred.set_index(last_shred as u32);
|
||||
shred.set_last_in_slot();
|
||||
shred
|
||||
})
|
||||
.collect();
|
||||
blockstore
|
||||
.insert_shreds(completed_shreds, None, false)
|
||||
.unwrap();
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
4,
|
||||
&HashSet::new(),
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[1, 7, 3, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Incomplete children should still be repaired, even if their
// higher-weighted parents are already complete
|
||||
repairs = vec![];
|
||||
blockstore.add_tree(tr(2) / (tr(8)), true, false);
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
4,
|
||||
&HashSet::new(),
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[1, 7, 8, 3]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_best_repair_shreds_no_duplicates() {
|
||||
let (blockstore, heaviest_subtree_fork_choice) = setup_forks();
|
||||
// Add a branch to slot 2, make sure it doesn't repair child
|
||||
// 4 again when the Unvisited(2) event happens
|
||||
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
|
||||
let mut repairs = vec![];
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
std::usize::MAX,
|
||||
&HashSet::new(),
|
||||
);
|
||||
let last_shred = blockstore.meta(0).unwrap().unwrap().received;
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 1, 2, 4, 6, 7, 3, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_best_repair_shreds_ignore() {
|
||||
let (blockstore, heaviest_subtree_fork_choice) = setup_forks();
|
||||
|
||||
// Adding slots to ignore should remove them from the repair set, but
|
||||
// should not remove their children
|
||||
let mut repairs = vec![];
|
||||
let mut ignore_set: HashSet<Slot> = vec![1, 3].into_iter().collect();
|
||||
let last_shred = blockstore.meta(0).unwrap().unwrap().received;
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
std::usize::MAX,
|
||||
&ignore_set,
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 2, 4, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Adding slot 2 to ignore should not remove its unexplored children from
|
||||
// the repair set
|
||||
repairs = vec![];
|
||||
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
|
||||
ignore_set.insert(2);
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
std::usize::MAX,
|
||||
&ignore_set,
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 4, 6, 7, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Adding unexplored child 6 to the ignore set should remove it and its
|
||||
// child 7 from the repair set
|
||||
repairs = vec![];
|
||||
ignore_set.insert(6);
|
||||
get_best_repair_shreds(
|
||||
&heaviest_subtree_fork_choice,
|
||||
&blockstore,
|
||||
&mut repairs,
|
||||
std::usize::MAX,
|
||||
&ignore_set,
|
||||
);
|
||||
assert_eq!(
|
||||
repairs,
|
||||
[0, 4, 5]
|
||||
.iter()
|
||||
.map(|slot| RepairType::HighestShred(*slot, last_shred))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
fn setup_forks() -> (Blockstore, HeaviestSubtreeForkChoice) {
|
||||
/*
|
||||
Build fork structure:
|
||||
slot 0
|
||||
|
|
||||
slot 1
|
||||
/ \
|
||||
slot 2 |
|
||||
| slot 3
|
||||
slot 4 |
|
||||
slot 5
|
||||
*/
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5))));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
blockstore.add_tree(forks.clone(), false, false);
|
||||
|
||||
(blockstore, HeaviestSubtreeForkChoice::new_from_tree(forks))
|
||||
}
|
||||
}
|
@@ -8,7 +8,7 @@ use crate::{
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::BlockCommitmentCache,
|
||||
commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
|
||||
consensus::{ComputedBankState, StakeLockout, SwitchForkDecision, Tower},
|
||||
consensus::{ComputedBankState, PubkeyVotes, StakeLockout, SwitchForkDecision, Tower},
|
||||
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
@@ -19,6 +19,7 @@ use crate::{
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
block_error::BlockError,
|
||||
@@ -61,6 +62,9 @@ pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
|
||||
pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64;
|
||||
pub const MAX_UNCONFIRMED_SLOTS: usize = 5;
|
||||
|
||||
pub type ReplayVotesSender = CrossbeamSender<Arc<PubkeyVotes>>;
|
||||
pub type ReplayVotesReceiver = CrossbeamReceiver<Arc<PubkeyVotes>>;
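
The two aliases above wire a crossbeam channel through which `ReplayStage` publishes the votes observed while replaying each bank (`compute_bank_stats` sends `computed_bank_state.pubkey_votes`, as shown later in this diff). A consumer-side sketch, assuming `PubkeyVotes` is the `Vec<(Pubkey, Slot)>` defined in consensus.rs (an assumption; only the alias is visible in this hunk):

let (replay_votes_sender, replay_votes_receiver): (ReplayVotesSender, ReplayVotesReceiver) =
    crossbeam_channel::unbounded();
// ... hand `replay_votes_sender` to ReplayStage::new(...) ...
while let Ok(votes) = replay_votes_receiver.try_recv() {
    for (vote_pubkey, voted_slot) in votes.iter() {
        // e.g. feed each (vote pubkey, voted slot) pair into the repair weighting heuristic
        let _ = (vote_pubkey, voted_slot);
    }
}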
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub(crate) enum HeaviestForkFailures {
|
||||
LockedOut(u64),
|
||||
@@ -221,6 +225,7 @@ impl ReplayStage {
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
|
||||
replay_votes_sender: ReplayVotesSender,
|
||||
) -> Self {
|
||||
let ReplayStageConfig {
|
||||
my_pubkey,
|
||||
@@ -387,6 +392,7 @@ impl ReplayStage {
|
||||
&mut all_pubkeys,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut bank_weight_fork_choice,
|
||||
&replay_votes_sender,
|
||||
);
|
||||
compute_bank_stats_time.stop();
|
||||
|
||||
@@ -1303,6 +1309,7 @@ impl ReplayStage {
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
heaviest_subtree_fork_choice: &mut dyn ForkChoice,
|
||||
bank_weight_fork_choice: &mut dyn ForkChoice,
|
||||
replay_votes_sender: &ReplayVotesSender,
|
||||
) -> Vec<Slot> {
|
||||
frozen_banks.sort_by_key(|bank| bank.slot());
|
||||
let mut new_stats = vec![];
|
||||
@@ -1324,6 +1331,9 @@ impl ReplayStage {
|
||||
&ancestors,
|
||||
all_pubkeys,
|
||||
);
|
||||
// Notify any listeners of the votes found in this newly computed
|
||||
// bank
|
||||
let _ = replay_votes_sender.send(computed_bank_state.pubkey_votes.clone());
|
||||
heaviest_subtree_fork_choice.compute_bank_stats(
|
||||
&bank,
|
||||
tower,
|
||||
@@ -1853,7 +1863,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_switch_vote_slot(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
|
||||
@@ -1862,7 +1873,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_heaviest_subtree_fork_choice(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
|
||||
@@ -1938,146 +1950,179 @@ pub(crate) mod tests {
|
||||
assert!(ReplayStage::is_partition_detected(&ancestors, 4, 3));
|
||||
}
|
||||
|
||||
struct ReplayBlockstoreComponents {
|
||||
blockstore: Arc<Blockstore>,
|
||||
validator_voting_keys: HashMap<Pubkey, Pubkey>,
|
||||
progress: ProgressMap,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: Arc<LeaderScheduleCache>,
|
||||
rpc_subscriptions: Arc<RpcSubscriptions>,
|
||||
}
|
||||
|
||||
fn replay_blockstore_components() -> ReplayBlockstoreComponents {
|
||||
// Setup blockstore
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let validator_authorized_voter_keypairs: Vec<_> =
|
||||
(0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
|
||||
|
||||
let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
|
||||
.iter()
|
||||
.map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_authorized_voter_keypairs,
|
||||
100,
|
||||
);
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
|
||||
// ProgressMap
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
|
||||
// Leader schedule cache
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
|
||||
// BankForks
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
|
||||
|
||||
// RpcSubscriptions
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
|
||||
ReplayBlockstoreComponents {
|
||||
blockstore,
|
||||
validator_voting_keys,
|
||||
progress,
|
||||
bank_forks,
|
||||
leader_schedule_cache,
|
||||
rpc_subscriptions,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_child_slots_of_same_parent() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
// Setup
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let validator_authorized_voter_keypairs: Vec<_> = (0..20)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
let ReplayBlockstoreComponents {
|
||||
blockstore,
|
||||
validator_voting_keys,
|
||||
mut progress,
|
||||
bank_forks,
|
||||
leader_schedule_cache,
|
||||
rpc_subscriptions,
|
||||
} = replay_blockstore_components();
|
||||
|
||||
let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
|
||||
.iter()
|
||||
.map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_authorized_voter_keypairs,
|
||||
100,
|
||||
);
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
// Insert a non-root bank so that the propagation logic will update this
|
||||
// bank
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(0).unwrap(),
|
||||
&leader_schedule_cache.slot_leader_at(1, None).unwrap(),
|
||||
1,
|
||||
);
|
||||
progress.insert(
|
||||
1,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank1,
|
||||
bank1.collector_id(),
|
||||
validator_voting_keys.get(&bank1.collector_id()).unwrap(),
|
||||
Some(0),
|
||||
0,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
0,
|
||||
),
|
||||
);
|
||||
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
|
||||
bank1.freeze();
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
|
||||
// Insert a non-root bank so that the propagation logic will update this
|
||||
// bank
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.get(0).unwrap(),
|
||||
&leader_schedule_cache.slot_leader_at(1, None).unwrap(),
|
||||
1,
|
||||
);
|
||||
progress.insert(
|
||||
1,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank1,
|
||||
bank1.collector_id(),
|
||||
&validator_voting_keys.get(&bank1.collector_id()).unwrap(),
|
||||
Some(0),
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
|
||||
bank1.freeze();
|
||||
bank_forks.insert(bank1);
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
// Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
// // There are 20 equally staked accounts, of which 3 have built
|
||||
// banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
|
||||
// we should see 3 validators in bank 1's propagated_validator set.
|
||||
let expected_leader_slots = vec![
|
||||
1,
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
];
|
||||
for slot in expected_leader_slots {
|
||||
let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
|
||||
let vote_key = validator_voting_keys.get(&leader).unwrap();
|
||||
assert!(progress
|
||||
.get_propagated_stats(1)
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// // There are 20 equally staked accounts, of which 3 have built
|
||||
// banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
|
||||
// we should see 3 validators in bank 1's propagated_validator set.
|
||||
let expected_leader_slots = vec![
|
||||
1,
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
];
|
||||
for slot in expected_leader_slots {
|
||||
let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
|
||||
let vote_key = validator_voting_keys.get(&leader).unwrap();
|
||||
assert!(progress
|
||||
.get_propagated_stats(1)
|
||||
.unwrap()
|
||||
.propagated_validators
|
||||
.contains(vote_key));
|
||||
}
|
||||
.propagated_validators
|
||||
.contains(vote_key));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2695,6 +2740,7 @@ pub(crate) mod tests {
|
||||
.cloned()
|
||||
.collect();
|
||||
let tower = Tower::new_for_tests(0, 0.67);
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
let newly_computed = ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&ancestors,
|
||||
@@ -2707,8 +2753,13 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
// bank 0 has no votes, should not send any votes on the channel
|
||||
assert_eq!(replay_votes_receiver.try_recv().unwrap(), Arc::new(vec![]));
|
||||
assert_eq!(newly_computed, vec![0]);
|
||||
|
||||
// The only vote is in bank 1, and bank_forks does not currently contain
|
||||
// bank 1, so no slot should be confirmed.
|
||||
{
|
||||
@@ -2750,8 +2801,15 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
// Bank 1 had one vote, ensure that `compute_bank_stats` notifies listeners
|
||||
// via `replay_votes_receiver`.
|
||||
assert_eq!(
|
||||
replay_votes_receiver.try_recv().unwrap(),
|
||||
Arc::new(vec![(my_keypairs.vote_keypair.pubkey(), 0)])
|
||||
);
|
||||
assert_eq!(newly_computed, vec![1]);
|
||||
{
|
||||
let fork_progress = progress.get(&1).unwrap();
|
||||
@@ -2785,8 +2843,10 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
// No new stats should have been computed
|
||||
assert!(replay_votes_receiver.try_iter().next().is_none());
|
||||
assert!(newly_computed.is_empty());
|
||||
}
|
||||
|
||||
@@ -2811,6 +2871,7 @@ pub(crate) mod tests {
|
||||
.collect();
|
||||
|
||||
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&ancestors,
|
||||
@@ -2823,6 +2884,7 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
@@ -2885,6 +2947,7 @@ pub(crate) mod tests {
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
@@ -2897,6 +2960,7 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut vote_simulator.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
frozen_banks.sort_by_key(|bank| bank.slot());
|
||||
|
@@ -17,6 +17,7 @@ pub enum Error {
    RecvError(std::sync::mpsc::RecvError),
    TryCrossbeamRecvError(crossbeam_channel::TryRecvError),
    CrossbeamRecvTimeoutError(crossbeam_channel::RecvTimeoutError),
    ReadyTimeoutError,
    RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
    CrossbeamSendError,
    TryRecvError(std::sync::mpsc::TryRecvError),
@@ -61,6 +62,11 @@ impl std::convert::From<crossbeam_channel::RecvTimeoutError> for Error {
        Error::CrossbeamRecvTimeoutError(e)
    }
}
impl std::convert::From<crossbeam_channel::ReadyTimeoutError> for Error {
    fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error {
        Error::ReadyTimeoutError
    }
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
    fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
        Error::RecvTimeoutError(e)
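The new ReadyTimeoutError variant and its From impl let callers use `?` on crossbeam_channel::Select::ready_timeout, which returns Result<usize, ReadyTimeoutError>. A minimal, self-contained sketch of that pattern follows; the local Error enum and the wait_any_ready helper are illustrative stand-ins, not the crate's shared error type.

use crossbeam_channel::{unbounded, ReadyTimeoutError, Receiver, Select};
use std::time::Duration;

// Local error type mirroring the variant added in the hunk above.
#[derive(Debug)]
enum Error {
    ReadyTimeoutError,
}

impl From<ReadyTimeoutError> for Error {
    fn from(_e: ReadyTimeoutError) -> Self {
        Error::ReadyTimeoutError
    }
}

// Hypothetical helper: wait until one of the receivers is ready, or time out.
fn wait_any_ready(receivers: &[Receiver<u64>], timeout: Duration) -> Result<usize, Error> {
    let mut select = Select::new();
    for r in receivers {
        select.recv(r);
    }
    // `?` routes the timeout through the From<ReadyTimeoutError> impl above.
    Ok(select.ready_timeout(timeout)?)
}

fn main() {
    let (_sender, receiver) = unbounded::<u64>();
    // Nothing is sent and the sender stays alive, so this times out and maps
    // into Error::ReadyTimeoutError.
    assert!(matches!(
        wait_any_ready(&[receiver], Duration::from_millis(10)),
        Err(Error::ReadyTimeoutError)
    ));
}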
@@ -1,9 +1,10 @@
|
||||
//! The `retransmit_stage` retransmits shreds between validators
|
||||
|
||||
use crate::cluster_info_vote_listener::VoteTracker;
|
||||
use crate::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
cluster_slots_service::ClusterSlotsService,
|
||||
contact_info::ContactInfo,
|
||||
repair_service::DuplicateSlotsResetSender,
|
||||
repair_service::RepairInfo,
|
||||
@@ -394,6 +395,7 @@ pub fn retransmitter(
|
||||
pub struct RetransmitStage {
|
||||
thread_hdls: Vec<JoinHandle<()>>,
|
||||
window_service: WindowService,
|
||||
cluster_slots_service: ClusterSlotsService,
|
||||
}
|
||||
|
||||
impl RetransmitStage {
|
||||
@@ -414,7 +416,7 @@ impl RetransmitStage {
|
||||
shred_version: u16,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) -> Self {
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
@@ -427,9 +429,16 @@ impl RetransmitStage {
|
||||
retransmit_receiver,
|
||||
);
|
||||
|
||||
let cluster_slots_service = ClusterSlotsService::new(
|
||||
blockstore.clone(),
|
||||
cluster_slots.clone(),
|
||||
bank_forks.clone(),
|
||||
cluster_info.clone(),
|
||||
completed_slots_receiver,
|
||||
exit.clone(),
|
||||
);
|
||||
let repair_info = RepairInfo {
|
||||
bank_forks,
|
||||
completed_slots_receiver,
|
||||
epoch_schedule,
|
||||
duplicate_slots_reset_sender,
|
||||
};
|
||||
@@ -459,13 +468,14 @@ impl RetransmitStage {
|
||||
rv && is_connected
|
||||
},
|
||||
cluster_slots,
|
||||
vote_tracker,
|
||||
verified_vote_receiver,
|
||||
);
|
||||
|
||||
let thread_hdls = t_retransmit;
|
||||
Self {
|
||||
thread_hdls,
|
||||
window_service,
|
||||
cluster_slots_service,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -474,6 +484,7 @@ impl RetransmitStage {
|
||||
thread_hdl.join()?;
|
||||
}
|
||||
self.window_service.join()?;
|
||||
self.cluster_slots_service.join()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
core/src/rpc.rs — 1843 changed lines (diff suppressed because it is too large)
@@ -4,6 +4,7 @@ use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;

pub enum RpcCustomError {
    NonexistentClusterRoot {
@@ -17,6 +18,7 @@ pub enum RpcCustomError {
    SendTransactionPreflightFailure {
        message: String,
    },
    SendTransactionIsNotSigned,
}

impl From<RpcCustomError> for Error {
@@ -49,6 +51,11 @@ impl From<RpcCustomError> for Error {
                message,
                data: None,
            },
            RpcCustomError::SendTransactionIsNotSigned => Self {
                code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
                message: "Transaction is not signed".to_string(),
                data: None,
            },
        }
    }
}
@@ -4,8 +4,10 @@ use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
use solana_client::rpc_response::{
|
||||
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_client::{
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult},
|
||||
};
|
||||
#[cfg(test)]
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
@@ -37,9 +39,9 @@ pub trait RpcSolPubSub {
|
||||
fn account_subscribe(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcAccount>>,
|
||||
subscriber: Subscriber<RpcResponse<UiAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
);
|
||||
|
||||
// Unsubscribe from account notification subscription.
|
||||
@@ -63,7 +65,7 @@ pub trait RpcSolPubSub {
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
);
|
||||
|
||||
// Unsubscribe from account notification subscription.
|
||||
@@ -177,9 +179,9 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
fn account_subscribe(
|
||||
&self,
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcAccount>>,
|
||||
subscriber: Subscriber<RpcResponse<UiAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
) {
|
||||
match param::<Pubkey>(&pubkey_str, "pubkey") {
|
||||
Ok(pubkey) => {
|
||||
@@ -187,7 +189,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
let sub_id = SubscriptionId::Number(id as u64);
|
||||
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
|
||||
self.subscriptions
|
||||
.add_account_subscription(pubkey, commitment, sub_id, subscriber)
|
||||
.add_account_subscription(pubkey, config, sub_id, subscriber)
|
||||
}
|
||||
Err(e) => subscriber.reject(e).unwrap(),
|
||||
}
|
||||
@@ -215,7 +217,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
) {
|
||||
match param::<Pubkey>(&pubkey_str, "pubkey") {
|
||||
Ok(pubkey) => {
|
||||
@@ -223,7 +225,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
let sub_id = SubscriptionId::Number(id as u64);
|
||||
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
|
||||
self.subscriptions
|
||||
.add_program_subscription(pubkey, commitment, sub_id, subscriber)
|
||||
.add_program_subscription(pubkey, config, sub_id, subscriber)
|
||||
}
|
||||
Err(e) => subscriber.reject(e).unwrap(),
|
||||
}
|
||||
@@ -362,6 +364,7 @@ mod tests {
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
use solana_account_decoder::{parse_account_data::parse_account_data, UiAccountEncoding};
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
@@ -377,7 +380,7 @@ mod tests {
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_program, system_transaction,
|
||||
system_instruction, system_program, system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_vote_program::vote_transaction;
|
||||
@@ -556,7 +559,11 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
contract_state.pubkey().to_string(),
|
||||
Some(CommitmentConfig::recent()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
|
||||
@@ -629,6 +636,101 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_subscribe_with_encoding() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair: alice,
|
||||
..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let nonce_account = Keypair::new();
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore_bank(
|
||||
blockstore,
|
||||
bank_forks.read().unwrap().get(1).unwrap().clone(),
|
||||
1,
|
||||
),
|
||||
)),
|
||||
)),
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(
|
||||
session,
|
||||
subscriber,
|
||||
nonce_account.pubkey().to_string(),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let ixs = system_instruction::create_nonce_account(
|
||||
&alice.pubkey(),
|
||||
&nonce_account.pubkey(),
|
||||
&alice.pubkey(),
|
||||
100,
|
||||
);
|
||||
let message = Message::new(&ixs, Some(&alice.pubkey()));
|
||||
let tx = Transaction::new(&[&alice, &nonce_account], message, blockhash);
|
||||
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
// Test signature confirmation notification #1
|
||||
let expected_data = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(1)
|
||||
.unwrap()
|
||||
.get_account(&nonce_account.pubkey())
|
||||
.unwrap()
|
||||
.data;
|
||||
let expected_data = parse_account_data(
|
||||
&nonce_account.pubkey(),
|
||||
&system_program::id(),
|
||||
&expected_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 1 },
|
||||
"value": {
|
||||
"owner": system_program::id().to_string(),
|
||||
"lamports": 100,
|
||||
"data": expected_data,
|
||||
"executable": false,
|
||||
"rentEpoch": 1,
|
||||
},
|
||||
},
|
||||
"subscription": 0,
|
||||
}
|
||||
});
|
||||
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_unsubscribe() {
|
||||
@@ -703,7 +805,11 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
bob.pubkey().to_string(),
|
||||
Some(CommitmentConfig::root()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
|
||||
@@ -756,7 +862,11 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
bob.pubkey().to_string(),
|
||||
Some(CommitmentConfig::root()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
|
||||
@@ -921,11 +1031,15 @@ mod tests {
|
||||
});
|
||||
|
||||
// Process votes and check they were notified.
|
||||
let (s, _r) = unbounded();
|
||||
let (_replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::get_and_process_votes_for_tests(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
rpc.subscriptions.clone(),
|
||||
&s,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
|
@@ -24,7 +24,7 @@ use std::{
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
use tokio::runtime;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@@ -33,6 +33,7 @@ pub struct JsonRpcService {
|
||||
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
|
||||
|
||||
close_handle: Option<CloseHandle>,
|
||||
runtime: runtime::Runtime,
|
||||
}
|
||||
|
||||
struct RpcRequestMiddleware {
|
||||
@@ -98,6 +99,9 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
|
||||
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
// Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2
|
||||
use tokio_01::prelude::*;
|
||||
|
||||
let stem = path.split_at(1).1; // Drop leading '/' from path
|
||||
let filename = {
|
||||
match path {
|
||||
@@ -116,10 +120,10 @@ impl RpcRequestMiddleware {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(
|
||||
tokio_fs::file::File::open(filename)
|
||||
tokio_fs_01::file::File::open(filename)
|
||||
.and_then(|file| {
|
||||
let buf: Vec<u8> = Vec::new();
|
||||
tokio_io::io::read_to_end(file, buf)
|
||||
tokio_io_01::io::read_to_end(file, buf)
|
||||
.and_then(|item| Ok(hyper::Response::new(item.1.into())))
|
||||
.or_else(|_| Ok(RpcRequestMiddleware::internal_server_error()))
|
||||
})
|
||||
@@ -256,6 +260,27 @@ impl JsonRpcService {
|
||||
&exit_send_transaction_service,
|
||||
));
|
||||
|
||||
let mut runtime = runtime::Builder::new()
|
||||
.threaded_scheduler()
|
||||
.thread_name("rpc-runtime")
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("Runtime");
|
||||
|
||||
let bigtable_ledger_storage = if config.enable_bigtable_ledger_storage {
|
||||
runtime
|
||||
.block_on(solana_storage_bigtable::LedgerStorage::new(false))
|
||||
.map(|x| {
|
||||
info!("BigTable ledger storage initialized");
|
||||
Some(x)
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
error!("Failed to initialize BigTable ledger storage: {:?}", err);
|
||||
None
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let request_processor = JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
bank_forks.clone(),
|
||||
@@ -266,6 +291,8 @@ impl JsonRpcService {
|
||||
cluster_info,
|
||||
genesis_hash,
|
||||
send_transaction_service,
|
||||
&runtime,
|
||||
bigtable_ledger_storage,
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -325,6 +352,7 @@ impl JsonRpcService {
|
||||
.register_exit(Box::new(move || close_handle_.close()));
|
||||
Self {
|
||||
thread_hdl,
|
||||
runtime,
|
||||
#[cfg(test)]
|
||||
request_processor: test_request_processor,
|
||||
close_handle: Some(close_handle),
|
||||
@@ -338,6 +366,7 @@ impl JsonRpcService {
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.runtime.shutdown_background();
|
||||
self.thread_hdl.join()
|
||||
}
|
||||
}
|
||||
|
@@ -1,6 +1,9 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use crate::{
|
||||
commitment::BlockCommitmentCache,
|
||||
rpc::{get_parsed_token_account, get_parsed_token_accounts},
|
||||
};
|
||||
use core::hash::Hash;
|
||||
use jsonrpc_core::futures::Future;
|
||||
use jsonrpc_pubsub::{
|
||||
@@ -8,8 +11,11 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_client::rpc_response::{
|
||||
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
|
||||
use solana_account_decoder::{parse_token::spl_token_id_v1_0, UiAccount, UiAccountEncoding};
|
||||
use solana_client::{
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_filter::RpcFilterType,
|
||||
rpc_response::{Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult},
|
||||
};
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::bank::Bank;
|
||||
@@ -33,7 +39,9 @@ use std::{
|
||||
iter,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
};
|
||||
use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2
|
||||
use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
const RECEIVE_DELAY_MILLIS: u64 = 100;
|
||||
|
||||
@@ -86,29 +94,44 @@ impl std::fmt::Debug for NotificationEntry {
|
||||
}
|
||||
}
|
||||
|
||||
struct SubscriptionData<S> {
|
||||
struct SubscriptionData<S, T> {
|
||||
sink: Sink<S>,
|
||||
commitment: CommitmentConfig,
|
||||
last_notified_slot: RwLock<Slot>,
|
||||
config: Option<T>,
|
||||
}
|
||||
type RpcAccountSubscriptions =
|
||||
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<RpcAccount>>>>>;
|
||||
type RpcProgramSubscriptions =
|
||||
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<RpcKeyedAccount>>>>>;
|
||||
#[derive(Default, Clone)]
|
||||
struct ProgramConfig {
|
||||
filters: Vec<RpcFilterType>,
|
||||
encoding: Option<UiAccountEncoding>,
|
||||
}
|
||||
type RpcAccountSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Pubkey,
|
||||
HashMap<SubscriptionId, SubscriptionData<Response<UiAccount>, UiAccountEncoding>>,
|
||||
>,
|
||||
>;
|
||||
type RpcProgramSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Pubkey,
|
||||
HashMap<SubscriptionId, SubscriptionData<Response<RpcKeyedAccount>, ProgramConfig>>,
|
||||
>,
|
||||
>;
|
||||
type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>>>>,
|
||||
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, ()>>>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
||||
type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>;
|
||||
type RpcRootSubscriptions = RwLock<HashMap<SubscriptionId, Sink<Slot>>>;
|
||||
|
||||
fn add_subscription<K, S>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S>>>,
|
||||
fn add_subscription<K, S, T>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>,
|
||||
hashmap_key: K,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<S>,
|
||||
last_notified_slot: Slot,
|
||||
config: Option<T>,
|
||||
) where
|
||||
K: Eq + Hash,
|
||||
S: Clone,
|
||||
@@ -119,6 +142,7 @@ fn add_subscription<K, S>(
|
||||
sink,
|
||||
commitment,
|
||||
last_notified_slot: RwLock::new(last_notified_slot),
|
||||
config,
|
||||
};
|
||||
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
|
||||
current_hashmap.insert(sub_id, subscription_data);
|
||||
@@ -129,8 +153,8 @@ fn add_subscription<K, S>(
|
||||
subscriptions.insert(hashmap_key, hashmap);
|
||||
}
|
||||
|
||||
fn remove_subscription<K, S>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S>>>,
|
||||
fn remove_subscription<K, S, T>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>,
|
||||
sub_id: &SubscriptionId,
|
||||
) -> bool
|
||||
where
|
||||
@@ -152,8 +176,8 @@ where
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn check_commitment_and_notify<K, S, B, F, X>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, SubscriptionData<Response<S>>>>,
|
||||
fn check_commitment_and_notify<K, S, B, F, X, T>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, SubscriptionData<Response<S>, T>>>,
|
||||
hashmap_key: &K,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cache_slot_info: &CacheSlotInfo,
|
||||
@@ -165,8 +189,9 @@ where
|
||||
K: Eq + Hash + Clone + Copy,
|
||||
S: Clone + Serialize,
|
||||
B: Fn(&Bank, &K) -> X,
|
||||
F: Fn(X, Slot) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
F: Fn(X, &K, Slot, Option<T>, Option<Arc<Bank>>) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
X: Clone + Serialize + Default,
|
||||
T: Clone,
|
||||
{
|
||||
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
|
||||
if let Some(hashmap) = subscriptions.get(hashmap_key) {
|
||||
@@ -176,6 +201,7 @@ where
|
||||
sink,
|
||||
commitment,
|
||||
last_notified_slot,
|
||||
config,
|
||||
},
|
||||
) in hashmap.iter()
|
||||
{
|
||||
@@ -187,15 +213,19 @@ where
|
||||
cache_slot_info.highest_confirmed_slot
|
||||
}
|
||||
};
|
||||
let results = {
|
||||
let bank_forks = bank_forks.read().unwrap();
|
||||
bank_forks
|
||||
.get(slot)
|
||||
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
|
||||
.unwrap_or_default()
|
||||
};
|
||||
let bank = bank_forks.read().unwrap().get(slot).cloned();
|
||||
let results = bank
|
||||
.clone()
|
||||
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
|
||||
.unwrap_or_default();
|
||||
let mut w_last_notified_slot = last_notified_slot.write().unwrap();
|
||||
let (filter_results, result_slot) = filter_results(results, *w_last_notified_slot);
|
||||
let (filter_results, result_slot) = filter_results(
|
||||
results,
|
||||
hashmap_key,
|
||||
*w_last_notified_slot,
|
||||
config.as_ref().cloned(),
|
||||
bank,
|
||||
);
|
||||
for result in filter_results {
|
||||
notifier.notify(
|
||||
Response {
|
||||
@@ -226,13 +256,30 @@ impl RpcNotifier {
|
||||
|
||||
fn filter_account_result(
|
||||
result: Option<(Account, Slot)>,
|
||||
pubkey: &Pubkey,
|
||||
last_notified_slot: Slot,
|
||||
) -> (Box<dyn Iterator<Item = RpcAccount>>, Slot) {
|
||||
encoding: Option<UiAccountEncoding>,
|
||||
bank: Option<Arc<Bank>>,
|
||||
) -> (Box<dyn Iterator<Item = UiAccount>>, Slot) {
|
||||
if let Some((account, fork)) = result {
|
||||
// If fork < last_notified_slot this means that we last notified for a fork
|
||||
// and should notify that the account state has been reverted.
|
||||
if fork != last_notified_slot {
|
||||
return (Box::new(iter::once(RpcAccount::encode(account))), fork);
|
||||
let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
|
||||
if account.owner == spl_token_id_v1_0() && encoding == UiAccountEncoding::JsonParsed {
|
||||
let bank = bank.unwrap(); // If result.is_some(), bank must also be Some
|
||||
return (
|
||||
Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))),
|
||||
fork,
|
||||
);
|
||||
} else {
|
||||
return (
|
||||
Box::new(iter::once(UiAccount::encode(
|
||||
pubkey, account, encoding, None, None,
|
||||
))),
|
||||
fork,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
(Box::new(iter::empty()), last_notified_slot)
|
||||
@@ -240,7 +287,10 @@ fn filter_account_result(
|
||||
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
_signature: &Signature,
|
||||
last_notified_slot: Slot,
|
||||
_config: Option<()>,
|
||||
_bank: Option<Arc<Bank>>,
|
||||
) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) {
|
||||
(
|
||||
Box::new(
|
||||
@@ -254,19 +304,33 @@

fn filter_program_results(
    accounts: Vec<(Pubkey, Account)>,
    _program_id: &Pubkey,
    last_notified_slot: Slot,
    config: Option<ProgramConfig>,
    bank: Option<Arc<Bank>>,
) -> (Box<dyn Iterator<Item = RpcKeyedAccount>>, Slot) {
    (
        Box::new(
            accounts
                .into_iter()
                .map(|(pubkey, account)| RpcKeyedAccount {
    let config = config.unwrap_or_default();
    let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary);
    let filters = config.filters;
    let keyed_accounts = accounts.into_iter().filter(move |(_, account)| {
        filters.iter().all(|filter_type| match filter_type {
            RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
            RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
        })
    });
    let accounts: Box<dyn Iterator<Item = RpcKeyedAccount>> =
        if encoding == UiAccountEncoding::JsonParsed {
            let bank = bank.unwrap(); // If !accounts.is_empty(), bank must be Some
            Box::new(get_parsed_token_accounts(bank, keyed_accounts))
        } else {
            Box::new(
                keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount {
                    pubkey: pubkey.to_string(),
                    account: RpcAccount::encode(account),
                    account: UiAccount::encode(&pubkey, account, encoding.clone(), None, None),
                }),
            ),
        last_notified_slot,
    )
            )
        };
    (accounts, last_notified_slot)
}
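The hunk above makes program subscriptions honor RpcFilterType filters: an account is kept only if every filter matches, mirroring the filtering used elsewhere in the RPC API. Below is a self-contained sketch of that all-filters-must-pass logic with simplified stand-in types; the DataSize/Memcmp variants here mimic, but are not, solana_client::rpc_filter.

// Simplified stand-ins for the filter types used above.
enum RpcFilterType {
    // Keep accounts whose data is exactly this many bytes.
    DataSize(u64),
    // Keep accounts whose data at `offset` equals `bytes`.
    Memcmp { offset: usize, bytes: Vec<u8> },
}

impl RpcFilterType {
    fn matches(&self, data: &[u8]) -> bool {
        match self {
            RpcFilterType::DataSize(size) => data.len() as u64 == *size,
            RpcFilterType::Memcmp { offset, bytes } => data
                .get(*offset..*offset + bytes.len())
                .map(|slice| slice == bytes.as_slice())
                .unwrap_or(false),
        }
    }
}

// An account survives only if *all* filters match, mirroring `filters.iter().all(...)`.
fn keep(data: &[u8], filters: &[RpcFilterType]) -> bool {
    filters.iter().all(|f| f.matches(data))
}

fn main() {
    let filters = vec![
        RpcFilterType::DataSize(4),
        RpcFilterType::Memcmp { offset: 0, bytes: vec![0xde, 0xad] },
    ];
    assert!(keep(&[0xde, 0xad, 0x00, 0x01], &filters));
    assert!(!keep(&[0xde, 0xad, 0x00], &filters)); // wrong size
}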
#[derive(Clone)]
|
||||
@@ -454,11 +518,13 @@ impl RpcSubscriptions {
|
||||
pub fn add_account_subscription(
|
||||
&self,
|
||||
pubkey: Pubkey,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<RpcAccount>>,
|
||||
subscriber: Subscriber<Response<UiAccount>>,
|
||||
) {
|
||||
let commitment_level = commitment
|
||||
let config = config.unwrap_or_default();
|
||||
let commitment_level = config
|
||||
.commitment
|
||||
.unwrap_or_else(CommitmentConfig::single)
|
||||
.commitment;
|
||||
let slot = match commitment_level {
|
||||
@@ -504,10 +570,11 @@ impl RpcSubscriptions {
|
||||
add_subscription(
|
||||
&mut subscriptions,
|
||||
pubkey,
|
||||
commitment,
|
||||
config.commitment,
|
||||
sub_id,
|
||||
subscriber,
|
||||
last_notified_slot,
|
||||
config.encoding,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -528,11 +595,14 @@ impl RpcSubscriptions {
|
||||
pub fn add_program_subscription(
|
||||
&self,
|
||||
program_id: Pubkey,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<RpcKeyedAccount>>,
|
||||
) {
|
||||
let commitment_level = commitment
|
||||
let config = config.unwrap_or_default();
|
||||
let commitment_level = config
|
||||
.account_config
|
||||
.commitment
|
||||
.unwrap_or_else(CommitmentConfig::recent)
|
||||
.commitment;
|
||||
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
|
||||
@@ -546,10 +616,14 @@ impl RpcSubscriptions {
|
||||
add_subscription(
|
||||
&mut subscriptions,
|
||||
program_id,
|
||||
commitment,
|
||||
config.account_config.commitment,
|
||||
sub_id,
|
||||
subscriber,
|
||||
0, // last_notified_slot is not utilized for program subscriptions
|
||||
Some(ProgramConfig {
|
||||
filters: config.filters.unwrap_or_default(),
|
||||
encoding: config.account_config.encoding,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -592,6 +666,7 @@ impl RpcSubscriptions {
|
||||
sub_id,
|
||||
subscriber,
|
||||
0, // last_notified_slot is not utilized for signature subscriptions
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -711,6 +786,9 @@ impl RpcSubscriptions {
|
||||
notifier.notify(slot_info, sink);
|
||||
}
|
||||
}
|
||||
// These notifications are only triggered by votes observed on gossip,
|
||||
// unlike `NotificationEntry::Gossip`, which also accounts for slots seen
|
||||
// in VoteState's from bank states built in ReplayStage.
|
||||
NotificationEntry::Vote(ref vote_info) => {
|
||||
let subscriptions = subscriptions.vote_subscriptions.read().unwrap();
|
||||
for (_, sink) in subscriptions.iter() {
|
||||
@@ -819,7 +897,7 @@ impl RpcSubscriptions {
|
||||
&subscriptions.gossip_account_subscriptions,
|
||||
&subscriptions.gossip_program_subscriptions,
|
||||
&subscriptions.gossip_signature_subscriptions,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
&cache_slot_info,
|
||||
¬ifier,
|
||||
);
|
||||
@@ -840,7 +918,7 @@ impl RpcSubscriptions {
|
||||
for pubkey in &pubkeys {
|
||||
Self::check_account(
|
||||
pubkey,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
account_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -854,7 +932,7 @@ impl RpcSubscriptions {
|
||||
for program_id in &programs {
|
||||
Self::check_program(
|
||||
program_id,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
program_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -868,7 +946,7 @@ impl RpcSubscriptions {
|
||||
for signature in &signatures {
|
||||
Self::check_signature(
|
||||
signature,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
signature_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -913,7 +991,7 @@ pub(crate) mod tests {
|
||||
system_transaction,
|
||||
};
|
||||
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
|
||||
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
|
||||
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
|
||||
receiver: futures::sync::mpsc::Receiver<T>,
|
||||
@@ -975,7 +1053,11 @@ pub(crate) mod tests {
|
||||
);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::recent()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id.clone(),
|
||||
subscriber,
|
||||
);
|
||||
@@ -1385,7 +1467,7 @@ pub(crate) mod tests {
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, SubscriptionData<()>>> =
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, SubscriptionData<(), ()>>> =
|
||||
HashMap::new();
|
||||
|
||||
let num_keys = 5;
|
||||
@@ -1393,7 +1475,7 @@ pub(crate) mod tests {
|
||||
let (subscriber, _id_receiver, _transport_receiver) =
|
||||
Subscriber::new_test("notification");
|
||||
let sub_id = SubscriptionId::Number(key);
|
||||
add_subscription(&mut subscriptions, key, None, sub_id, subscriber, 0);
|
||||
add_subscription(&mut subscriptions, key, None, sub_id, subscriber, 0, None);
|
||||
}
|
||||
|
||||
// Add another subscription to the "0" key
|
||||
@@ -1406,6 +1488,7 @@ pub(crate) mod tests {
|
||||
extra_sub_id.clone(),
|
||||
subscriber,
|
||||
0,
|
||||
None,
|
||||
);
|
||||
|
||||
assert_eq!(subscriptions.len(), num_keys as usize);
|
||||
@@ -1467,7 +1550,11 @@ pub(crate) mod tests {
|
||||
let sub_id0 = SubscriptionId::Number(0 as u64);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::single_gossip()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id0.clone(),
|
||||
subscriber0,
|
||||
);
|
||||
@@ -1532,7 +1619,11 @@ pub(crate) mod tests {
|
||||
let sub_id1 = SubscriptionId::Number(1 as u64);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::single_gossip()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id1.clone(),
|
||||
subscriber1,
|
||||
);
|
||||
|
@@ -33,7 +33,7 @@ use std::{
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
pub const DEFAULT_NONCE: u32 = 42;

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum RepairType {
    Orphan(Slot),
    HighestShred(Slot, u64),
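The only change in this hunk is adding Hash to the derive list, which lets RepairType values be used as keys in HashMap/HashSet, for example to deduplicate outstanding repair requests. A minimal sketch of why the derive matters; the Slot alias and the dedup use case are illustrative assumptions, not taken from this diff.

use std::collections::HashSet;

type Slot = u64;

// Without `Hash` in the derive list, `HashSet<RepairType>` would not compile.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
enum RepairType {
    Orphan(Slot),
    HighestShred(Slot, u64),
}

fn main() {
    let mut outstanding: HashSet<RepairType> = HashSet::new();
    outstanding.insert(RepairType::Orphan(5));
    // A duplicate request collapses into the existing entry.
    assert!(!outstanding.insert(RepairType::Orphan(5)));
    assert!(outstanding.insert(RepairType::HighestShred(5, 3)));
    assert_eq!(outstanding.len(), 2);
}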
@@ -5,9 +5,10 @@ use crate::{
|
||||
banking_stage::BankingStage,
|
||||
broadcast_stage::{BroadcastStage, BroadcastStageType, RetransmitSlotsReceiver},
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
|
||||
cluster_info_vote_listener::{ClusterInfoVoteListener, VerifiedVoteSender, VoteTracker},
|
||||
fetch_stage::FetchStage,
|
||||
poh_recorder::{PohRecorder, WorkingBankEntry},
|
||||
replay_stage::ReplayVotesReceiver,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify::TransactionSigVerifier,
|
||||
sigverify_stage::SigVerifyStage,
|
||||
@@ -52,6 +53,8 @@ impl Tpu {
|
||||
shred_version: u16,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Self {
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let fetch_stage = FetchStage::new_with_sender(
|
||||
@@ -68,22 +71,24 @@ impl Tpu {
|
||||
SigVerifyStage::new(packet_receiver, verified_sender, verifier)
|
||||
};
|
||||
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
|
||||
let cluster_info_vote_listener = ClusterInfoVoteListener::new(
|
||||
&exit,
|
||||
cluster_info.clone(),
|
||||
verified_vote_sender,
|
||||
verified_vote_packets_sender,
|
||||
&poh_recorder,
|
||||
vote_tracker,
|
||||
bank_forks,
|
||||
subscriptions.clone(),
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
poh_recorder,
|
||||
verified_receiver,
|
||||
verified_vote_receiver,
|
||||
verified_vote_packets_receiver,
|
||||
transaction_status_sender,
|
||||
);
|
||||
|
||||
|
@@ -3,6 +3,7 @@ use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionSta
use solana_runtime::{
    bank::{Bank, HashAgeKind},
    nonce_utils,
    transaction_utils::OrderedIterator,
};
use solana_transaction_status::TransactionStatusMeta;
use std::{
@@ -50,16 +51,17 @@ impl TransactionStatusService {
            let TransactionStatusBatch {
                bank,
                transactions,
                iteration_order,
                statuses,
                balances,
            } = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;

            let slot = bank.slot();
            for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in transactions
                .iter()
                .zip(statuses)
                .zip(balances.pre_balances)
                .zip(balances.post_balances)
            for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
                OrderedIterator::new(&transactions, iteration_order.as_deref())
                    .zip(statuses)
                    .zip(balances.pre_balances)
                    .zip(balances.post_balances)
            {
                if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
                    let fee_calculator = match hash_age_kind {
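The batch now carries an optional iteration_order, and the loop walks the transactions through OrderedIterator, apparently so that statuses and balances, which were produced in execution order, line up with the right transactions. A self-contained sketch of that idea with a hypothetical ordered_iter helper; this is not the solana_runtime::transaction_utils API, just the underlying pattern.

// Walk `items` in `order` if one was recorded, otherwise front to back.
fn ordered_iter<'a, T>(
    items: &'a [T],
    order: Option<&'a [usize]>,
) -> Box<dyn Iterator<Item = &'a T> + 'a> {
    match order {
        Some(order) => Box::new(order.iter().map(move |&i| &items[i])),
        None => Box::new(items.iter()),
    }
}

fn main() {
    let transactions = ["tx_a", "tx_b", "tx_c"];
    // Statuses were produced in execution order: b, c, a.
    let statuses = ["ok_b", "ok_c", "ok_a"];
    let iteration_order = Some(vec![1usize, 2, 0]);

    // Zipping the re-ordered transactions with the statuses keeps each pair
    // aligned, which is what the loop in the hunk above relies on.
    for (tx, status) in ordered_iter(&transactions, iteration_order.as_deref()).zip(statuses.iter()) {
        println!("{} -> {}", tx, status);
    }
}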
core/src/tree_diff.rs — new file, 32 lines
@@ -0,0 +1,32 @@
use solana_sdk::clock::Slot;
use std::collections::HashSet;

pub trait TreeDiff {
    fn children(&self, slot: Slot) -> Option<&[Slot]>;

    fn contains_slot(&self, slot: Slot) -> bool;

    // Find all nodes reachable from `root1`, excluding subtree at `root2`
    fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet<Slot> {
        if !self.contains_slot(root1) {
            return HashSet::new();
        }
        let mut pending_slots = vec![root1];
        let mut reachable_set = HashSet::new();
        while !pending_slots.is_empty() {
            let current_slot = pending_slots.pop().unwrap();
            if current_slot == root2 {
                continue;
            }
            reachable_set.insert(current_slot);
            for child in self
                .children(current_slot)
                .expect("slot was discovered earlier, must exist")
            {
                pending_slots.push(*child);
            }
        }

        reachable_set
    }
}
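The default subtree_diff is an iterative depth-first walk over the children relation that skips the subtree rooted at root2. A usage sketch follows, re-declaring the trait locally (with an equivalent while-let loop) and implementing it for a toy HashMap-backed tree; the ToyTree type is illustrative, not part of the crate.

use std::collections::{HashMap, HashSet};

type Slot = u64;

// Local re-declaration of the trait above so the sketch compiles on its own.
trait TreeDiff {
    fn children(&self, slot: Slot) -> Option<&[Slot]>;
    fn contains_slot(&self, slot: Slot) -> bool;

    fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet<Slot> {
        if !self.contains_slot(root1) {
            return HashSet::new();
        }
        let mut pending_slots = vec![root1];
        let mut reachable_set = HashSet::new();
        while let Some(current_slot) = pending_slots.pop() {
            if current_slot == root2 {
                continue;
            }
            reachable_set.insert(current_slot);
            for child in self
                .children(current_slot)
                .expect("slot was discovered earlier, must exist")
            {
                pending_slots.push(*child);
            }
        }
        reachable_set
    }
}

// Toy tree: each slot maps to its children.
struct ToyTree(HashMap<Slot, Vec<Slot>>);

impl TreeDiff for ToyTree {
    fn children(&self, slot: Slot) -> Option<&[Slot]> {
        self.0.get(&slot).map(|c| c.as_slice())
    }
    fn contains_slot(&self, slot: Slot) -> bool {
        self.0.contains_key(&slot)
    }
}

fn main() {
    // 0 -> {1, 2}, 2 -> {3}
    let tree = ToyTree(
        vec![(0, vec![1, 2]), (1, vec![]), (2, vec![3]), (3, vec![])]
            .into_iter()
            .collect(),
    );
    // Everything reachable from 0, minus the subtree rooted at 2.
    assert_eq!(tree.subtree_diff(0, 2), vec![0, 1].into_iter().collect());
}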
@@ -6,12 +6,12 @@ use crate::{
|
||||
accounts_hash_verifier::AccountsHashVerifier,
|
||||
broadcast_stage::RetransmitSlotsSender,
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker},
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::BlockCommitmentCache,
|
||||
ledger_cleanup_service::LedgerCleanupService,
|
||||
poh_recorder::PohRecorder,
|
||||
replay_stage::{ReplayStage, ReplayStageConfig},
|
||||
replay_stage::{ReplayStage, ReplayStageConfig, ReplayVotesSender},
|
||||
retransmit_stage::RetransmitStage,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
@@ -96,6 +96,8 @@ impl Tvu {
|
||||
snapshot_package_sender: Option<AccountsPackageSender>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
replay_votes_sender: ReplayVotesSender,
|
||||
tvu_config: TvuConfig,
|
||||
) -> Self {
|
||||
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
|
||||
@@ -146,7 +148,7 @@ impl Tvu {
|
||||
tvu_config.shred_version,
|
||||
cluster_slots.clone(),
|
||||
duplicate_slots_reset_sender,
|
||||
vote_tracker.clone(),
|
||||
verified_vote_receiver,
|
||||
);
|
||||
|
||||
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
|
||||
@@ -196,6 +198,7 @@ impl Tvu {
|
||||
cluster_slots,
|
||||
retransmit_slots_sender,
|
||||
duplicate_slots_reset_receiver,
|
||||
replay_votes_sender,
|
||||
);
|
||||
|
||||
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
|
||||
@@ -266,7 +269,7 @@ pub mod tests {
|
||||
|
||||
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
|
||||
let (blockstore, l_receiver, completed_slots_receiver) =
|
||||
Blockstore::open_with_signal(&blockstore_path)
|
||||
Blockstore::open_with_signal(&blockstore_path, None)
|
||||
.expect("Expected to successfully open ledger");
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let bank = bank_forks.working_bank();
|
||||
@@ -278,6 +281,8 @@ pub mod tests {
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
|
||||
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let tvu = Tvu::new(
|
||||
&vote_keypair.pubkey(),
|
||||
@@ -310,6 +315,8 @@ pub mod tests {
|
||||
None,
|
||||
Arc::new(VoteTracker::new(&bank)),
|
||||
retransmit_slots_sender,
|
||||
verified_vote_receiver,
|
||||
replay_votes_sender,
|
||||
TvuConfig::default(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
|
@@ -9,7 +9,7 @@ use crate::{
|
||||
gossip_service::{discover_cluster, GossipService},
|
||||
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
poh_service::PohService,
|
||||
rewards_recorder_service::RewardsRecorderService,
|
||||
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
|
||||
rpc::JsonRpcConfig,
|
||||
rpc_pubsub_service::PubSubService,
|
||||
rpc_service::JsonRpcService,
|
||||
@@ -28,11 +28,14 @@ use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
|
||||
blockstore_processor, create_new_tmp_ledger,
|
||||
blockstore_db::BlockstoreRecoveryMode,
|
||||
blockstore_processor::{self, TransactionStatusSender},
|
||||
create_new_tmp_ledger,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
leader_schedule::FixedSchedule,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
@@ -81,6 +84,7 @@ pub struct ValidatorConfig {
|
||||
pub no_rocksdb_compaction: bool,
|
||||
pub accounts_hash_interval_slots: u64,
|
||||
pub max_genesis_archive_unpacked_size: u64,
|
||||
pub wal_recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
}
|
||||
|
||||
impl Default for ValidatorConfig {
|
||||
@@ -108,6 +112,7 @@ impl Default for ValidatorConfig {
|
||||
no_rocksdb_compaction: false,
|
||||
accounts_hash_interval_slots: std::u64::MAX,
|
||||
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
|
||||
wal_recovery_mode: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -129,6 +134,14 @@ impl ValidatorExit {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct TransactionHistoryServices {
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
transaction_status_service: Option<TransactionStatusService>,
|
||||
rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
rewards_recorder_service: Option<RewardsRecorderService>,
|
||||
}
|
||||
|
||||
pub struct Validator {
|
||||
pub id: Pubkey,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
@@ -193,7 +206,21 @@ impl Validator {
|
||||
}
|
||||
}
|
||||
|
||||
info!("Cleaning accounts paths..");
|
||||
let mut start = Measure::start("clean_accounts_paths");
|
||||
for accounts_path in &config.account_paths {
|
cleanup_accounts_path(accounts_path);
}
start.stop();
info!("done. {}", start);

info!("creating bank...");
let mut validator_exit = ValidatorExit::default();
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));

let (
genesis_config,
bank_forks,
@@ -202,10 +229,15 @@ impl Validator {
completed_slots_receiver,
leader_schedule_cache,
snapshot_hash,
) = new_banks_from_blockstore(config, ledger_path, poh_verify);
TransactionHistoryServices {
transaction_status_sender,
transaction_status_service,
rewards_recorder_sender,
rewards_recorder_service,
},
) = new_banks_from_blockstore(config, ledger_path, poh_verify, &exit);

let leader_schedule_cache = Arc::new(leader_schedule_cache);
let exit = Arc::new(AtomicBool::new(false));
let bank = bank_forks.working_bank();
let bank_forks = Arc::new(RwLock::new(bank_forks));

@@ -217,11 +249,6 @@ impl Validator {
}
}

let mut validator_exit = ValidatorExit::default();
let exit_ = exit.clone();
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));

node.info.wallclock = timestamp();
node.info.shred_version = compute_shred_version(
&genesis_config.hash(),
@@ -240,7 +267,6 @@ impl Validator {
}

let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone()));
let blockstore = Arc::new(blockstore);
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
@@ -283,36 +309,6 @@ impl Validator {
)
});

let (transaction_status_sender, transaction_status_service) =
if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
let (transaction_status_sender, transaction_status_receiver) = unbounded();
(
Some(transaction_status_sender),
Some(TransactionStatusService::new(
transaction_status_receiver,
blockstore.clone(),
&exit,
)),
)
} else {
(None, None)
};

let (rewards_recorder_sender, rewards_recorder_service) =
if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
let (rewards_recorder_sender, rewards_receiver) = unbounded();
(
Some(rewards_recorder_sender),
Some(RewardsRecorderService::new(
rewards_receiver,
blockstore.clone(),
&exit,
)),
)
} else {
(None, None)
};

info!(
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
bank.epoch(),
@@ -407,6 +403,8 @@ impl Validator {
let vote_tracker = Arc::new(VoteTracker::new(bank_forks.read().unwrap().root_bank()));

let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let (verified_vote_sender, verified_vote_receiver) = unbounded();
let (replay_votes_sender, replay_votes_receiver) = unbounded();
let tvu = Tvu::new(
vote_account,
authorized_voter_keypairs,
@@ -451,6 +449,8 @@ impl Validator {
snapshot_package_sender,
vote_tracker.clone(),
retransmit_slots_sender,
verified_vote_receiver,
replay_votes_sender,
TvuConfig {
max_ledger_shreds: config.max_ledger_shreds,
halt_on_trusted_validators_accounts_hash_mismatch: config
@@ -477,6 +477,8 @@ impl Validator {
node.info.shred_version,
vote_tracker,
bank_forks,
verified_vote_sender,
replay_votes_receiver,
);

datapoint_info!("validator-new", ("id", id.to_string(), String));
@@ -568,14 +570,16 @@ fn new_banks_from_blockstore(
config: &ValidatorConfig,
blockstore_path: &Path,
poh_verify: bool,
exit: &Arc<AtomicBool>,
) -> (
GenesisConfig,
BankForks,
Blockstore,
Arc<Blockstore>,
Receiver<bool>,
CompletedSlotsReceiver,
LeaderScheduleCache,
Option<(Slot, Hash)>,
TransactionHistoryServices,
) {
let genesis_config =
open_genesis_config(blockstore_path, config.max_genesis_archive_unpacked_size);
@@ -602,7 +606,8 @@ fn new_banks_from_blockstore(
}

let (mut blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");
Blockstore::open_with_signal(blockstore_path, config.wal_recovery_mode.clone())
.expect("Failed to open ledger database");
blockstore.set_no_compaction(config.no_rocksdb_compaction);

let process_options = blockstore_processor::ProcessOptions {
@@ -613,12 +618,23 @@ fn new_banks_from_blockstore(
..blockstore_processor::ProcessOptions::default()
};

let blockstore = Arc::new(blockstore);
let transaction_history_services =
if config.rpc_ports.is_some() && config.rpc_config.enable_rpc_transaction_history {
initialize_rpc_transaction_history_services(blockstore.clone(), exit)
} else {
TransactionHistoryServices::default()
};

let (mut bank_forks, mut leader_schedule_cache, snapshot_hash) = bank_forks_utils::load(
&genesis_config,
&blockstore,
config.account_paths.clone(),
config.snapshot_config.as_ref(),
process_options,
transaction_history_services
.transaction_status_sender
.clone(),
)
.unwrap_or_else(|err| {
error!("Failed to load ledger: {:?}", err);
@@ -638,6 +654,7 @@ fn new_banks_from_blockstore(
completed_slots_receiver,
leader_schedule_cache,
snapshot_hash,
transaction_history_services,
)
}

@@ -701,6 +718,33 @@ fn backup_and_clear_blockstore(ledger_path: &Path, start_slot: Slot, shred_versi
drop(blockstore);
}

fn initialize_rpc_transaction_history_services(
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
) -> TransactionHistoryServices {
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_sender = Some(transaction_status_sender);
let transaction_status_service = Some(TransactionStatusService::new(
transaction_status_receiver,
blockstore.clone(),
exit,
));

let (rewards_recorder_sender, rewards_receiver) = unbounded();
let rewards_recorder_sender = Some(rewards_recorder_sender);
let rewards_recorder_service = Some(RewardsRecorderService::new(
rewards_receiver,
blockstore,
exit,
));
TransactionHistoryServices {
transaction_status_sender,
transaction_status_service,
rewards_recorder_sender,
rewards_recorder_service,
}
}

// Return true on error, indicating the validator should exit.
fn wait_for_supermajority(
config: &ValidatorConfig,
@@ -958,6 +1002,16 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
online_stake * 100 / total_activated_stake
}

// Cleanup anything that looks like an accounts append-vec
fn cleanup_accounts_path(account_path: &std::path::Path) {
if std::fs::remove_dir_all(account_path).is_err() {
warn!(
"encountered error removing accounts path: {:?}",
account_path
);
}
}

#[cfg(test)]
mod tests {
use super::*;

@@ -1,5 +1,5 @@
use crate::{
cluster_info_vote_listener::VerifiedVotePacketsReceiver, crds_value::CrdsValueLabel,
cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, crds_value::CrdsValueLabel,
result::Result,
};
use solana_perf::packet::Packets;
@@ -18,7 +18,7 @@ impl Deref for VerifiedVotePackets {
impl VerifiedVotePackets {
pub fn get_and_process_vote_packets(
&mut self,
vote_packets_receiver: &VerifiedVotePacketsReceiver,
vote_packets_receiver: &VerifiedLabelVotePacketsReceiver,
last_update_version: &mut u64,
) -> Result<()> {
let timer = Duration::from_millis(200);

@@ -3,7 +3,7 @@
//!
use crate::{
cluster_info::ClusterInfo,
cluster_info_vote_listener::VoteTracker,
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
repair_response,
repair_service::{RepairInfo, RepairService},
@@ -302,7 +302,7 @@ impl WindowService {
leader_schedule_cache: &Arc<LeaderScheduleCache>,
shred_filter: F,
cluster_slots: Arc<ClusterSlots>,
vote_tracker: Arc<VoteTracker>,
verified_vote_receiver: VerifiedVoteReceiver,
) -> WindowService
where
F: 'static
@@ -319,7 +319,7 @@ impl WindowService {
cluster_info.clone(),
repair_info,
cluster_slots,
vote_tracker,
verified_vote_receiver,
);

let (insert_sender, insert_receiver) = unbounded();

@@ -436,7 +436,7 @@ fn network_run_pull(
let rsp = node
.lock()
.unwrap()
.generate_pull_responses(&filters)
.generate_pull_responses(&filters, now)
.into_iter()
.flatten()
.collect();

@@ -7,9 +7,10 @@ use jsonrpc_core_client::transports::ws;
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use serde_json::{json, Value};
use solana_account_decoder::UiAccount;
use solana_client::{
rpc_client::{get_rpc_request_str, RpcClient},
rpc_response::{Response, RpcAccount, RpcSignatureResult},
rpc_response::{Response, RpcSignatureResult},
};
use solana_core::contact_info::ContactInfo;
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
@@ -25,7 +26,7 @@ use std::{
thread::sleep,
time::{Duration, Instant},
};
use tokio::runtime::Runtime;
use tokio_01::runtime::Runtime;

macro_rules! json_req {
($method: expr, $params: expr) => {{
@@ -99,6 +100,20 @@ fn test_rpc_send_tx() {

assert_eq!(confirmed_tx, true);

use solana_account_decoder::UiAccountEncoding;
use solana_client::rpc_config::RpcAccountInfoConfig;
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Binary64),
commitment: None,
data_slice: None,
};
let req = json_req!(
"getAccountInfo",
json!([bs58::encode(bob_pubkey).into_string(), config])
);
let json: Value = post_rpc(req, &leader_data);
info!("{:?}", json["result"]["value"]);

server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
@@ -120,14 +135,14 @@ fn test_rpc_invalid_requests() {
let json = post_rpc(req, &leader_data);

let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid");
assert_eq!(the_error, "Invalid param: Invalid");

// test invalid get_account_info request
let req = json_req!("getAccountInfo", json!(["invalid9999"]));
let json = post_rpc(req, &leader_data);

let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid");
assert_eq!(the_error, "Invalid param: Invalid");

// test invalid get_account_info request
let req = json_req!("getAccountInfo", json!([bob_pubkey.to_string()]));
@@ -172,7 +187,7 @@ fn test_rpc_subscriptions() {
// Track when subscriptions are ready
let (ready_sender, ready_receiver) = channel::<()>();
// Track account notifications are received
let (account_sender, account_receiver) = channel::<Response<RpcAccount>>();
let (account_sender, account_receiver) = channel::<Response<UiAccount>>();
// Track when status notifications are received
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();

@@ -188,7 +203,7 @@ fn test_rpc_subscriptions() {
.and_then(move |client| {
for sig in signature_set {
let status_sender = status_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.signature_subscribe(sig.clone(), None)
.and_then(move |sig_stream| {
@@ -202,7 +217,7 @@ fn test_rpc_subscriptions() {
}),
);
}
tokio::spawn(
tokio_01::spawn(
client
.slot_subscribe()
.and_then(move |slot_stream| {
@@ -217,7 +232,7 @@ fn test_rpc_subscriptions() {
);
for pubkey in account_set {
let account_sender = account_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.account_subscribe(pubkey, None)
.and_then(move |account_stream| {

@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.2.8"
version = "1.2.23"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -21,7 +21,7 @@ rand_chacha = { version = "0.2.2" }
regex-syntax = { version = "0.6.12" }
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = { version = "1.0.100", features = ["rc"] }
ed25519-dalek = { version = "=1.0.0-pre.3", features = ["serde"] }
ed25519-dalek = { version = "=1.0.0-pre.4", features = ["serde"] }
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }

docs/.eslintrc (Normal file, 21 lines)
@@ -0,0 +1,21 @@
{
"env": {
"browser": true,
"node": true
},
"parser": "babel-eslint",
"rules": {
"strict": 0,
"no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
"no-trailing-spaces": ["error", { "skipBlankLines": true }]
},
"settings": {
"react": {
"version": "detect", // React version. "detect" automatically picks the version you have installed.
}
},
"extends": [
"eslint:recommended",
"plugin:react/recommended"
]
}

docs/.gitattributes (vendored, 1 line)
@@ -1 +0,0 @@
theme/highlight.js binary

docs/.gitignore (vendored, Normal file, 24 lines)
@@ -0,0 +1,24 @@
# Dependencies
/node_modules

# Production
/build

# Generated files
.docusaurus
.cache-loader
.vercel
/static/img/*.svg
/static/img/*.png
vercel.json

# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*

docs/.travis/before_install.sh (Normal file, 9 lines)
@@ -0,0 +1,9 @@
# |source| this file

curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
sudo apt install -y nodejs

npm install --global docusaurus-init
docusaurus-init

npm install --global vercel

docs/.travis/script.sh (Normal file, 4 lines)
@@ -0,0 +1,4 @@
# |source| this file

set -ex
./build.sh

@@ -1,31 +1,39 @@
Building the Solana Docs
---
# Docs Readme

Install dependencies, build, and test the docs:
Solana's Docs are built using [Docusaurus 2](https://v2.docusaurus.io/) with `npm`.
Static content delivery is handled using `vercel`.

```bash
$ brew install coreutils
$ brew install mscgen
$ cargo install svgbob_cli
$ cargo install mdbook-linkcheck
$ cargo install mdbook
$ ./build.sh
### Installing Docusaurus

```
$ npm install
```

Run any Rust tests in the markdown:
### Local Development

```bash
$ make test
This command starts a local development server and opens up a browser window.
Most changes are reflected live without having to restart the server.

```
$ npm run start
```

Render markdown as HTML:
### Build Locally

```bash
$ make build
This command generates static content into the `build` directory and can be
served using any static content hosting service.

```
$ docs/build.sh
```

Render and view the docs:
### CI Build Flow
The docs are built and published in Travis CI with the `docs/build.sh` script.
On each PR, the docs are built, but not published.

```bash
$ make open
```
In each post-commit build, docs are built and published using `vercel` to their
respective domain depending on the build branch.

- Master branch docs are published to `edge.docs.solana.com`
- Beta branch docs are published to `beta.docs.solana.com`
- Latest release tag docs are published to `docs.solana.com`

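As an aside on the "Build Locally" step above: `docs/build.sh` leaves the generated site in `docs/build`, and any static file server can preview it. A minimal sketch, not part of the diff; it assumes Python 3 is available on the machine:

```bash
# Preview the generated docs from docs/build at http://localhost:8080
$ cd docs/build
$ python3 -m http.server 8080
```
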
docs/babel.config.js (Normal file, 3 lines)
@@ -0,0 +1,3 @@
module.exports = {
presets: [require.resolve("@docusaurus/core/lib/babel/preset")],
};

@@ -1,12 +0,0 @@
[book]
title = "Solana: Blockchain Rebuilt for Scale"
authors = ["The Solana Team"]

[build]
build-dir = "html"
create-missing = false

[output.html]
theme = "theme"

[output.linkcheck]

@@ -3,6 +3,9 @@ set -e

cd "$(dirname "$0")"

# shellcheck source=ci/rust-version.sh
source ../ci/rust-version.sh stable

: "${rust_stable:=}" # Pacify shellcheck

usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')

@@ -1,17 +1,25 @@
#!/usr/bin/env bash
set -e
set -ex

cd "$(dirname "$0")"

# md check
find src -name '*.md' -a \! -name SUMMARY.md |
while read -r file; do
if ! grep -q '('"${file#src/}"')' src/SUMMARY.md; then
echo "Error: $file missing from SUMMARY.md"
exit 1
fi
done
# shellcheck source=ci/env.sh
source ../ci/env.sh

mdbook --version
mdbook-linkcheck --version
make -j"$(nproc)"
: "${rust_stable_docker_image:=}" # Pacify shellcheck

# shellcheck source=ci/rust-version.sh
source ../ci/rust-version.sh
../ci/docker-run.sh "$rust_stable_docker_image" docs/build-cli-usage.sh
../ci/docker-run.sh "$rust_stable_docker_image" docs/convert-ascii-to-svg.sh
./set-solana-release-tag.sh

# Build from /src into /build
npm run build

# Publish only from merge commits and release tags
if [[ -n $CI ]]; then
if [[ -z $CI_PULL_REQUEST ]]; then
./publish-docs.sh
fi
fi

docs/convert-ascii-to-svg.sh (Executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

# Convert .bob and .msc files in docs/art to .svg files located where the
# site build will find them.

set -e

cd "$(dirname "$0")"
output_dir=static/img

mkdir -p "$output_dir"

while read -r bob_file; do
out_file=$(basename "${bob_file%.*}".svg)
svgbob "$bob_file" --output "$output_dir/$out_file"
done < <(find art/*.bob)

while read -r msc_file; do
out_file=$(basename "${msc_file%.*}".png)
mscgen -T png -o "$output_dir/$out_file" -i "$msc_file"
done < <(find art/*.msc)