Compare commits
124 Commits
Commit SHA1s:

55836d133e
277e402d55
0ab8312b23
bc4c5c5a97
1a9aa78129
798a6db915
0a4a3fd37e
66242eab41
7f0d4f0656
acba8d6026
1ff9555099
72a13e2a72
74cdfc2213
7b8e5a9f47
80525ac862
c14f98c6fc
c6edfc3944
b95c493d66
5871462241
53bb826375
c769bcc418
f06a4c7861
0cae099d12
4bc3653906
3e7050983a
9f1bb75445
139bb32dba
158f6f3725
e33f9ea6b5
473037db86
b0e14ea83c
782a549613
c805f7dc4e
782829152e
da6f09afb8
004b1b9c3f
2f8d0f88d6
177d241160
5323622842
c852923347
5dc4410d58
da4642d634
a264be1791
9aff121949
a7f4d1487a
11e43e1654
82be47bc18
6498e4fbf6
9978971bd9
e28ac2c377
ef296aa7db
43e7107f65
752fa29390
7bb7b42356
2a7fc744f9
90e3da0389
1a62bcee42
b83a4cae90
05ef21cd3b
dfa27b04d7
880b04906e
1fe0b1e516
f9fd4bd24c
c55a11d160
92118de0e1
0d9802a2cd
f6beede01b
ff48ea20de
dd9cb18d65
71932aed0a
24dc6680e1
61d9d40e48
e9b40db319
316356861d
e07c00710a
bc47c80610
14baa511f0
e773faeb24
42847516a2
47e9a1ae4f
549a154394
dca00d1bde
45ce1b4f96
a9232c0633
3da254c745
9ba3ee9683
b0addba2a9
bb59525ff8
acd25124d4
d718ab2491
1860aacd1f
d4bbb7f516
d1c0f4b4f1
b72b837ba2
fde85c96c0
121418dad2
f44f94fe23
55a4481022
e859ad37a8
1a28c7fc12
c706a07764
59568e5776
33ca8fa72a
4bb66a81fb
468c14b14f
03e505897a
5205eb382e
b07b6e56fa
bcc890e705
07d14f6f07
03b213e296
1bfce24c9f
94b2565969
2896fdb603
50970bc8f9
10df45b173
d3b8129593
f7fb5aebac
9311a6e356
8c706892df
7f2b11756c
f324547600
36e8977f1d
b88db2689e
.buildkite/env/secrets.ejson (vendored, 14 lines changed)

@@ -1,12 +1,12 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
"CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
"GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
}
}
@@ -1,18 +0,0 @@
root: ./docs/src

structure:
readme: introduction.md
summary: SUMMARY.md

redirects:
wallet: ./wallet-guide/README.md
wallet/app-wallets: ./wallet-guide/apps.md
wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
wallet/cli-wallets: ./wallet-guide/cli.md
wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
wallet/support: ./wallet-guide/support.md
.gitignore (vendored, 4 lines changed)

@@ -23,3 +23,7 @@ log-*/
/.idea/
/solana.iml
/.vscode/

# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
.travis.yml (97 lines changed)

@@ -1,46 +1,71 @@
os:
- osx
- windows

language: rust
rust:
- stable

install:
- source ci/rust-version.sh

script:
- source ci/env.sh
- ci/publish-tarball.sh


branches:
only:
- master
- /^v\d+\.\d+/

if: type IN (api, cron) OR tag IS present

notifications:
slack:
on_success: change
secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=

deploy:
- provider: s3
access_key_id: $AWS_ACCESS_KEY_ID
secret_access_key: $AWS_SECRET_ACCESS_KEY
bucket: release.solana.com
region: us-west-1
skip_cleanup: true
acl: public_read
local_dir: travis-s3-upload
on:
all_branches: true
- provider: releases
api_key: $GITHUB_TOKEN
skip_cleanup: true
file_glob: true
file: travis-release-upload/*
on:
tags: true
os: linux
dist: bionic
language: minimal

jobs:
include:
- &release-artifacts
if: type = push
name: "macOS release artifacts"
os: osx
language: rust
rust:
- stable
install:
- source ci/rust-version.sh
script:
- source ci/env.sh
- ci/publish-tarball.sh
deploy:
- provider: s3
access_key_id: $AWS_ACCESS_KEY_ID
secret_access_key: $AWS_SECRET_ACCESS_KEY
bucket: release.solana.com
region: us-west-1
skip_cleanup: true
acl: public_read
local_dir: travis-s3-upload
on:
all_branches: true
- provider: releases
token: $GITHUB_TOKEN
skip_cleanup: true
file_glob: true
file: travis-release-upload/*
on:
tags: true
- <<: *release-artifacts
name: "Windows release artifacts"
os: windows

# docs pull request or commit
- name: "docs"
if: type IN (push, pull_request) OR tag IS present
language: node_js
node_js:
- "node"

services:
- docker

cache:
directories:
- ~/.npm

before_install:
- .travis/affects.sh docs/ .travis || travis_terminate 0
- cd docs/
- source .travis/before_install.sh

script:
- source .travis/script.sh
.travis/affects.sh (executable file, 25 lines)

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
#
# Check if files in the commit range match one or more prefixes
#

# Always run the job if we are on a tagged release
if [[ -n "$TRAVIS_TAG" ]]; then
exit 0
fi

(
set -x
git diff --name-only "$TRAVIS_COMMIT_RANGE"
)

for file in $(git diff --name-only "$TRAVIS_COMMIT_RANGE"); do
for prefix in "$@"; do
if [[ $file =~ ^"$prefix" ]]; then
exit 0
fi
done
done

echo "No modifications to $*"
exit 1
Cargo.lock (generated, 2536 lines changed)
File diff suppressed because it is too large

@@ -25,6 +25,7 @@ members = [
"log-analyzer",
"merkle-tree",
"stake-o-matic",
"storage-bigtable",
"streamer",
"measure",
"metrics",
@@ -52,6 +53,7 @@ members = [
"sys-tuner",
"tokens",
"transaction-status",
"account-decoder",
"upload-perf",
"net-utils",
"version",
@@ -63,6 +65,4 @@ members = [

exclude = [
"programs/bpf",
"programs/move_loader",
"programs/librapay",
]
account-decoder/Cargo.toml (new file, 25 lines)

@@ -0,0 +1,25 @@
[package]
name = "solana-account-decoder"
version = "1.2.20"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[dependencies]
bincode = "1.2.1"
bs58 = "0.3.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-sdk = { path = "../sdk", version = "1.2.20" }
solana-vote-program = { path = "../programs/vote", version = "1.2.20" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.54"
thiserror = "1.0"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
account-decoder/src/lib.rs (new file, 80 lines)

@@ -0,0 +1,80 @@
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;

pub mod parse_account_data;
pub mod parse_nonce;
pub mod parse_token;
pub mod parse_vote;

use crate::parse_account_data::{parse_account_data, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, pubkey::Pubkey};
use std::str::FromStr;

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct UiAccount {
pub lamports: u64,
pub data: UiAccountData,
pub owner: String,
pub executable: bool,
pub rent_epoch: Epoch,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
Binary(String),
Json(ParsedAccount),
}

impl From<Vec<u8>> for UiAccountData {
fn from(data: Vec<u8>) -> Self {
Self::Binary(bs58::encode(data).into_string())
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary,
JsonParsed,
}

impl UiAccount {
pub fn encode(account: Account, encoding: UiAccountEncoding) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => account.data.into(),
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) = parse_account_data(&account.owner, &account.data) {
UiAccountData::Json(parsed_data)
} else {
account.data.into()
}
}
};
UiAccount {
lamports: account.lamports,
data,
owner: account.owner.to_string(),
executable: account.executable,
rent_epoch: account.rent_epoch,
}
}

pub fn decode(&self) -> Option<Account> {
let data = match &self.data {
UiAccountData::Json(_) => None,
UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
}?;
Some(Account {
lamports: self.lamports,
data,
owner: Pubkey::from_str(&self.owner).ok()?,
executable: self.executable,
rent_epoch: self.rent_epoch,
})
}
}
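The following is an illustrative sketch, not part of the change set, of how the new `UiAccount` type added above might be used to round-trip an account through the binary encoding. It assumes the crate is pulled in under its package name `solana-account-decoder`; the account values are made up.

```rust
// Hypothetical usage sketch for the new solana-account-decoder crate; not part of this diff.
use solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding};
use solana_sdk::{account::Account, pubkey::Pubkey};

fn main() {
    // Build a throwaway account owned by an arbitrary program id.
    let owner = Pubkey::new_rand();
    let account = Account::new(42 /* lamports */, 8 /* data bytes */, &owner);

    // Binary encoding base58-encodes the raw account data for JSON-friendly transport.
    let ui_account = UiAccount::encode(account.clone(), UiAccountEncoding::Binary);
    assert!(matches!(ui_account.data, UiAccountData::Binary(_)));

    // decode() reverses the base58 step, so a binary-encoded account round-trips.
    let decoded = ui_account.decode().expect("binary data should decode");
    assert_eq!(decoded, account);
}
```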
account-decoder/src/parse_account_data.rs (new file, 100 lines)

@@ -0,0 +1,100 @@
use crate::{
parse_nonce::parse_nonce,
parse_token::{parse_token, spl_token_id_v1_0},
parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program};
use std::collections::HashMap;
use thiserror::Error;

lazy_static! {
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
m
};
}

#[derive(Error, Debug)]
pub enum ParseAccountError {
#[error("{0:?} account not parsable")]
AccountNotParsable(ParsableAccount),

#[error("Program not parsable")]
ProgramNotParsable,

#[error("Instruction error")]
InstructionError(#[from] InstructionError),

#[error("Serde json error")]
SerdeJsonError(#[from] serde_json::error::Error),
}

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ParsedAccount {
pub program: String,
pub parsed: Value,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ParsableAccount {
Nonce,
SplToken,
Vote,
}

pub fn parse_account_data(
program_id: &Pubkey,
data: &[u8],
) -> Result<ParsedAccount, ParseAccountError> {
let program_name = PARSABLE_PROGRAM_IDS
.get(program_id)
.ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
let parsed_json = match program_name {
ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
ParsableAccount::SplToken => serde_json::to_value(parse_token(data)?)?,
ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
};
Ok(ParsedAccount {
program: format!("{:?}", program_name).to_kebab_case(),
parsed: parsed_json,
})
}

#[cfg(test)]
mod test {
use super::*;
use solana_sdk::nonce::{
state::{Data, Versions},
State,
};
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};

#[test]
fn test_parse_account_data() {
let other_program = Pubkey::new_rand();
let data = vec![0; 4];
assert!(parse_account_data(&other_program, &data).is_err());

let vote_state = VoteState::default();
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
let versioned = VoteStateVersions::Current(Box::new(vote_state));
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
let parsed = parse_account_data(&solana_vote_program::id(), &vote_account_data).unwrap();
assert_eq!(parsed.program, "vote".to_string());

let nonce_data = Versions::new_current(State::Initialized(Data::default()));
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
let parsed = parse_account_data(&system_program::id(), &nonce_account_data).unwrap();
assert_eq!(parsed.program, "nonce".to_string());
}
}
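As an illustration only (hypothetical caller code, not taken from this diff), the owner-based dispatch in `parse_account_data` can be exercised the same way the unit test above does, and the resulting `ParsedAccount` serializes to a `{ program, parsed }` JSON object. The caller is assumed to depend on `bincode` and `serde_json` as well.

```rust
// Hypothetical caller; mirrors the unit test above rather than any RPC code in this PR.
use solana_account_decoder::parse_account_data::parse_account_data;
use solana_sdk::{
    nonce::{state::{Data, Versions}, State},
    system_program,
};

fn main() {
    // Serialize a default nonce account, exactly as the test above does.
    let nonce_data = Versions::new_current(State::Initialized(Data::default()));
    let nonce_account_data = bincode::serialize(&nonce_data).unwrap();

    // Dispatch on the owning program id (the system program owns nonce accounts).
    let parsed = parse_account_data(&system_program::id(), &nonce_account_data).unwrap();

    // `program` is the kebab-cased ParsableAccount variant; `parsed` is arbitrary JSON.
    assert_eq!(parsed.program, "nonce");
    println!("{}", serde_json::to_string_pretty(&parsed).unwrap());
    // Expected to print something like:
    // { "program": "nonce", "parsed": { "type": "initialized", "info": { ... } } }
}
```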
account-decoder/src/parse_nonce.rs (new file, 66 lines)

@@ -0,0 +1,66 @@
use crate::parse_account_data::ParseAccountError;
use solana_sdk::{
fee_calculator::FeeCalculator,
instruction::InstructionError,
nonce::{state::Versions, State},
};

pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
let nonce_state: Versions = bincode::deserialize(data)
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
let nonce_state = nonce_state.convert_to_current();
match nonce_state {
State::Uninitialized => Ok(UiNonceState::Uninitialized),
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
authority: data.authority.to_string(),
blockhash: data.blockhash.to_string(),
fee_calculator: data.fee_calculator,
})),
}
}

/// A duplicate representation of NonceState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum UiNonceState {
Uninitialized,
Initialized(UiNonceData),
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiNonceData {
pub authority: String,
pub blockhash: String,
pub fee_calculator: FeeCalculator,
}

#[cfg(test)]
mod test {
use super::*;
use solana_sdk::{
hash::Hash,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
};

#[test]
fn test_parse_nonce() {
let nonce_data = Versions::new_current(State::Initialized(Data::default()));
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
assert_eq!(
parse_nonce(&nonce_account_data).unwrap(),
UiNonceState::Initialized(UiNonceData {
authority: Pubkey::default().to_string(),
blockhash: Hash::default().to_string(),
fee_calculator: FeeCalculator::default(),
}),
);

let bad_data = vec![0; 4];
assert!(parse_nonce(&bad_data).is_err());
}
}
account-decoder/src/parse_token.rs (new file, 185 lines)

@@ -0,0 +1,185 @@
use crate::parse_account_data::{ParsableAccount, ParseAccountError};
use solana_sdk::pubkey::Pubkey;
use spl_token_v1_0::{
option::COption,
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
state::{unpack, Account, Mint, Multisig},
};
use std::{mem::size_of, str::FromStr};

// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id_v1_0() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
}

// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_v1_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
}

pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
let mut data = data.to_vec();
if data.len() == size_of::<Account>() {
let account: Account = *unpack(&mut data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Account(UiTokenAccount {
mint: account.mint.to_string(),
owner: account.owner.to_string(),
amount: account.amount,
delegate: match account.delegate {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
is_initialized: account.is_initialized,
is_native: account.is_native,
delegated_amount: account.delegated_amount,
}))
} else if data.len() == size_of::<Mint>() {
let mint: Mint = *unpack(&mut data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Mint(UiMint {
owner: match mint.owner {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
decimals: mint.decimals,
is_initialized: mint.is_initialized,
}))
} else if data.len() == size_of::<Multisig>() {
let multisig: Multisig = *unpack(&mut data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Multisig(UiMultisig {
num_required_signers: multisig.m,
num_valid_signers: multisig.n,
is_initialized: multisig.is_initialized,
signers: multisig
.signers
.iter()
.filter_map(|pubkey| {
if pubkey != &SplTokenPubkey::default() {
Some(pubkey.to_string())
} else {
None
}
})
.collect(),
}))
} else {
Err(ParseAccountError::AccountNotParsable(
ParsableAccount::SplToken,
))
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum TokenAccountType {
Account(UiTokenAccount),
Mint(UiMint),
Multisig(UiMultisig),
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiTokenAccount {
pub mint: String,
pub owner: String,
pub amount: u64,
pub delegate: Option<String>,
pub is_initialized: bool,
pub is_native: bool,
pub delegated_amount: u64,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMint {
pub owner: Option<String>,
pub decimals: u8,
pub is_initialized: bool,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMultisig {
pub num_required_signers: u8,
pub num_valid_signers: u8,
pub is_initialized: bool,
pub signers: Vec<String>,
}

#[cfg(test)]
mod test {
use super::*;
use spl_token_v1_0::state::unpack_unchecked;

#[test]
fn test_parse_token() {
let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
let mut account_data = [0; size_of::<Account>()];
let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
account.mint = mint_pubkey;
account.owner = owner_pubkey;
account.amount = 42;
account.is_initialized = true;
assert_eq!(
parse_token(&account_data).unwrap(),
TokenAccountType::Account(UiTokenAccount {
mint: mint_pubkey.to_string(),
owner: owner_pubkey.to_string(),
amount: 42,
delegate: None,
is_initialized: true,
is_native: false,
delegated_amount: 0,
}),
);

let mut mint_data = [0; size_of::<Mint>()];
let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
mint.owner = COption::Some(owner_pubkey);
mint.decimals = 3;
mint.is_initialized = true;
assert_eq!(
parse_token(&mint_data).unwrap(),
TokenAccountType::Mint(UiMint {
owner: Some(owner_pubkey.to_string()),
decimals: 3,
is_initialized: true,
}),
);

let signer1 = SplTokenPubkey::new(&[1; 32]);
let signer2 = SplTokenPubkey::new(&[2; 32]);
let signer3 = SplTokenPubkey::new(&[3; 32]);
let mut multisig_data = [0; size_of::<Multisig>()];
let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
let mut signers = [SplTokenPubkey::default(); 11];
signers[0] = signer1;
signers[1] = signer2;
signers[2] = signer3;
multisig.m = 2;
multisig.n = 3;
multisig.is_initialized = true;
multisig.signers = signers;
assert_eq!(
parse_token(&multisig_data).unwrap(),
TokenAccountType::Multisig(UiMultisig {
num_required_signers: 2,
num_valid_signers: 3,
is_initialized: true,
signers: vec![
signer1.to_string(),
signer2.to_string(),
signer3.to_string()
],
}),
);

let bad_data = vec![0; 4];
assert!(parse_token(&bad_data).is_err());
}
}
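Since `parse_token` discriminates solely on data length, a caller would normally confirm the account is owned by the SPL Token program first, using the `spl_token_id_v1_0` helper above. A minimal sketch of such a caller, assumed for illustration rather than taken from this diff:

```rust
// Hypothetical caller; demonstrates the owner check plus matching on the new TokenAccountType.
use solana_account_decoder::parse_token::{parse_token, spl_token_id_v1_0, TokenAccountType};
use solana_sdk::account::Account;

fn describe_spl_account(account: &Account) -> Option<String> {
    // parse_token only looks at data length, so gate on the owning program first.
    if account.owner != spl_token_id_v1_0() {
        return None;
    }
    match parse_token(&account.data).ok()? {
        TokenAccountType::Account(acct) => Some(format!(
            "token account holding {} of mint {}",
            acct.amount, acct.mint
        )),
        TokenAccountType::Mint(mint) => Some(format!("mint with {} decimals", mint.decimals)),
        TokenAccountType::Multisig(ms) => Some(format!(
            "multisig requiring {} of {} signers",
            ms.num_required_signers, ms.num_valid_signers
        )),
    }
}
```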
account-decoder/src/parse_vote.rs (new file, 144 lines)

@@ -0,0 +1,144 @@
use crate::parse_account_data::ParseAccountError;
use solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
let epoch_credits = vote_state
.epoch_credits()
.iter()
.map(|(epoch, credits, previous_credits)| UiEpochCredits {
epoch: *epoch,
credits: *credits,
previous_credits: *previous_credits,
})
.collect();
let votes = vote_state
.votes
.iter()
.map(|lockout| UiLockout {
slot: lockout.slot,
confirmation_count: lockout.confirmation_count,
})
.collect();
let authorized_voters = vote_state
.authorized_voters()
.iter()
.map(|(epoch, authorized_voter)| UiAuthorizedVoters {
epoch: *epoch,
authorized_voter: authorized_voter.to_string(),
})
.collect();
let prior_voters = vote_state
.prior_voters()
.buf()
.iter()
.filter(|(pubkey, _, _)| pubkey != &Pubkey::default())
.map(
|(authorized_pubkey, epoch_of_last_authorized_switch, target_epoch)| UiPriorVoters {
authorized_pubkey: authorized_pubkey.to_string(),
epoch_of_last_authorized_switch: *epoch_of_last_authorized_switch,
target_epoch: *target_epoch,
},
)
.collect();
Ok(VoteAccountType::Vote(UiVoteState {
node_pubkey: vote_state.node_pubkey.to_string(),
authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
commission: vote_state.commission,
votes,
root_slot: vote_state.root_slot,
authorized_voters,
prior_voters,
epoch_credits,
last_timestamp: vote_state.last_timestamp,
}))
}

/// A wrapper enum for consistency across programs
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum VoteAccountType {
Vote(UiVoteState),
}

/// A duplicate representation of VoteState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiVoteState {
node_pubkey: String,
authorized_withdrawer: String,
commission: u8,
votes: Vec<UiLockout>,
root_slot: Option<Slot>,
authorized_voters: Vec<UiAuthorizedVoters>,
prior_voters: Vec<UiPriorVoters>,
epoch_credits: Vec<UiEpochCredits>,
last_timestamp: BlockTimestamp,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiLockout {
slot: Slot,
confirmation_count: u32,
}

impl From<&Lockout> for UiLockout {
fn from(lockout: &Lockout) -> Self {
Self {
slot: lockout.slot,
confirmation_count: lockout.confirmation_count,
}
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiAuthorizedVoters {
epoch: Epoch,
authorized_voter: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiPriorVoters {
authorized_pubkey: String,
epoch_of_last_authorized_switch: Epoch,
target_epoch: Epoch,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
epoch: Epoch,
credits: u64,
previous_credits: u64,
}

#[cfg(test)]
mod test {
use super::*;
use solana_vote_program::vote_state::VoteStateVersions;

#[test]
fn test_parse_vote() {
let vote_state = VoteState::default();
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
let versioned = VoteStateVersions::Current(Box::new(vote_state));
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
let mut expected_vote_state = UiVoteState::default();
expected_vote_state.node_pubkey = Pubkey::default().to_string();
expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
assert_eq!(
parse_vote(&vote_account_data).unwrap(),
VoteAccountType::Vote(expected_vote_state)
);

let bad_data = vec![0; 4];
assert!(parse_vote(&bad_data).is_err());
}
}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.2.9" }
solana-runtime = { path = "../runtime", version = "1.2.9" }
solana-measure = { path = "../measure", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-logger = { path = "../logger", version = "1.2.20" }
solana-runtime = { path = "../runtime", version = "1.2.20" }
solana-measure = { path = "../measure", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,16 +13,16 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
solana-streamer = { path = "../streamer", version = "1.2.9" }
solana-perf = { path = "../perf", version = "1.2.9" }
solana-ledger = { path = "../ledger", version = "1.2.9" }
solana-logger = { path = "../logger", version = "1.2.9" }
solana-runtime = { path = "../runtime", version = "1.2.9" }
solana-measure = { path = "../measure", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-version = { path = "../version", version = "1.2.9" }
solana-core = { path = "../core", version = "1.2.20" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
solana-streamer = { path = "../streamer", version = "1.2.20" }
solana-perf = { path = "../perf", version = "1.2.20" }
solana-ledger = { path = "../ledger", version = "1.2.20" }
solana-logger = { path = "../logger", version = "1.2.20" }
solana-runtime = { path = "../runtime", version = "1.2.20" }
solana-measure = { path = "../measure", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
solana-version = { path = "../version", version = "1.2.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
solana-core = { path = "../core", version = "1.2.9" }
solana-genesis = { path = "../genesis", version = "1.2.9" }
solana-client = { path = "../client", version = "1.2.9" }
solana-faucet = { path = "../faucet", version = "1.2.9" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.9" }
solana-logger = { path = "../logger", version = "1.2.9" }
solana-metrics = { path = "../metrics", version = "1.2.9" }
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
solana-runtime = { path = "../runtime", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-version = { path = "../version", version = "1.2.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
solana-core = { path = "../core", version = "1.2.20" }
solana-genesis = { path = "../genesis", version = "1.2.20" }
solana-client = { path = "../client", version = "1.2.20" }
solana-faucet = { path = "../faucet", version = "1.2.20" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.20" }
solana-logger = { path = "../logger", version = "1.2.20" }
solana-metrics = { path = "../metrics", version = "1.2.20" }
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
solana-runtime = { path = "../runtime", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
solana-version = { path = "../version", version = "1.2.20" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.2.9" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
solana-streamer = { path = "../streamer", version = "1.2.9" }
solana-logger = { path = "../logger", version = "1.2.9" }
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
solana-version = { path = "../version", version = "1.2.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
solana-streamer = { path = "../streamer", version = "1.2.20" }
solana-logger = { path = "../logger", version = "1.2.20" }
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
solana-version = { path = "../version", version = "1.2.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,28 +14,23 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
solana-core = { path = "../core", version = "1.2.9" }
solana-genesis = { path = "../genesis", version = "1.2.9" }
solana-client = { path = "../client", version = "1.2.9" }
solana-faucet = { path = "../faucet", version = "1.2.9" }
solana-librapay = { path = "../programs/librapay", version = "1.2.9", optional = true }
solana-logger = { path = "../logger", version = "1.2.9" }
solana-metrics = { path = "../metrics", version = "1.2.9" }
solana-measure = { path = "../measure", version = "1.2.9" }
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
solana-runtime = { path = "../runtime", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.9", optional = true }
solana-version = { path = "../version", version = "1.2.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
solana-core = { path = "../core", version = "1.2.20" }
solana-genesis = { path = "../genesis", version = "1.2.20" }
solana-client = { path = "../client", version = "1.2.20" }
solana-faucet = { path = "../faucet", version = "1.2.20" }
solana-logger = { path = "../logger", version = "1.2.20" }
solana-metrics = { path = "../metrics", version = "1.2.20" }
solana-measure = { path = "../measure", version = "1.2.20" }
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
solana-runtime = { path = "../runtime", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
solana-version = { path = "../version", version = "1.2.20" }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.2.9" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
solana-local-cluster = { path = "../local-cluster", version = "1.2.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -4,8 +4,6 @@ use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
#[cfg(feature = "move")]
use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
@@ -37,9 +35,6 @@ use std::{
const MAX_TX_QUEUE_AGE: u64 =
MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;

#[cfg(feature = "move")]
use solana_librapay::librapay_transaction;

pub const MAX_SPENDS_PER_TX: u64 = 4;

#[derive(Debug)]
@@ -51,8 +46,6 @@ pub type Result<T> = std::result::Result<T, BenchTpsError>;

pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;

type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);

fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
loop {
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
@@ -122,7 +115,6 @@ fn generate_chunked_transfers(
threads: usize,
duration: Duration,
sustained: bool,
libra_args: Option<LibraKeys>,
) {
// generate and send transactions for the specified duration
let start = Instant::now();
@@ -137,7 +129,6 @@ fn generate_chunked_transfers(
&dest_keypair_chunks[chunk_index],
threads,
reclaim_lamports_back_to_source_account,
&libra_args,
);

// In sustained mode, overlap the transfers with generation. This has higher average
@@ -205,12 +196,7 @@ where
.collect()
}

pub fn do_bench_tps<T>(
client: Arc<T>,
config: Config,
gen_keypairs: Vec<Keypair>,
libra_args: Option<LibraKeys>,
) -> u64
pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
where
T: 'static + Client + Send + Sync,
{
@@ -294,7 +280,6 @@ where
threads,
duration,
sustained,
libra_args,
);

// Stop the sampling threads so it will collect the stats
@@ -340,52 +325,6 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
);
}

#[cfg(feature = "move")]
fn generate_move_txs(
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
reclaim: bool,
move_keypairs: &[Keypair],
libra_pay_program_id: &Pubkey,
libra_mint_id: &Pubkey,
blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
let count = move_keypairs.len() / 2;
let source_move = &move_keypairs[..count];
let dest_move = &move_keypairs[count..];
let pairs: Vec<_> = if !reclaim {
source_move
.iter()
.zip(dest_move.iter())
.zip(source.iter())
.collect()
} else {
dest_move
.iter()
.zip(source_move.iter())
.zip(dest.iter())
.collect()
};

pairs
.par_iter()
.map(|((from, to), payer)| {
(
librapay_transaction::transfer(
libra_pay_program_id,
libra_mint_id,
&payer,
&from,
&to.pubkey(),
1,
*blockhash,
),
timestamp(),
)
})
.collect()
}

fn generate_system_txs(
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
@@ -416,7 +355,6 @@ fn generate_txs(
dest: &VecDeque<&Keypair>,
threads: usize,
reclaim: bool,
libra_args: &Option<LibraKeys>,
) {
let blockhash = *blockhash.read().unwrap();
let tx_count = source.len();
@@ -426,33 +364,7 @@ fn generate_txs(
);
let signing_start = Instant::now();

let transactions = if let Some((
_libra_genesis_keypair,
_libra_pay_program_id,
_libra_mint_program_id,
_libra_keys,
)) = libra_args
{
#[cfg(not(feature = "move"))]
{
return;
}

#[cfg(feature = "move")]
{
generate_move_txs(
source,
dest,
reclaim,
&_libra_keys,
_libra_pay_program_id,
&_libra_genesis_keypair.pubkey(),
&blockhash,
)
}
} else {
generate_system_txs(source, dest, reclaim, &blockhash)
};
let transactions = generate_system_txs(source, dest, reclaim, &blockhash);

let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@@ -954,181 +866,13 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
(rnd.gen_n_keypairs(total_keys), extra)
}

#[cfg(feature = "move")]
fn fund_move_keys<T: Client>(
client: &T,
funding_key: &Keypair,
keypairs: &[Keypair],
total: u64,
libra_pay_program_id: &Pubkey,
libra_mint_program_id: &Pubkey,
libra_genesis_key: &Keypair,
) {
let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);

info!("creating the libra funding account..");
let libra_funding_key = Keypair::new();
let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
client
.send_and_confirm_message(&[funding_key, &libra_funding_key], tx.message)
.unwrap();

info!("minting to funding keypair");
let tx = librapay_transaction::mint_tokens(
&libra_mint_program_id,
funding_key,
libra_genesis_key,
&libra_funding_key.pubkey(),
total,
blockhash,
);
client
.send_and_confirm_message(&[funding_key, libra_genesis_key], tx.message)
.unwrap();

info!("creating {} move accounts...", keypairs.len());
let total_len = keypairs.len();
let create_len = 5;
let mut funding_time = Measure::start("funding_time");
for (i, keys) in keypairs.chunks(create_len).enumerate() {
if client
.get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
> 0
{
// already created these accounts.
break;
}

let keypairs: Vec<_> = keys.iter().map(|k| k).collect();
let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
let ser_size = bincode::serialized_size(&tx).unwrap();
let mut keys = vec![funding_key];
keys.extend(&keypairs);
client.send_and_confirm_message(&keys, tx.message).unwrap();

if i % 10 == 0 {
info!(
"created {} accounts of {} (size {})",
i,
total_len / create_len,
ser_size,
);
}
}

const NUM_FUNDING_KEYS: usize = 10;
let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
let pubkey_amounts: Vec<_> = funding_keys
.iter()
.map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
.collect();
let instructions = system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts);
let message = Message::new(&instructions, Some(&funding_key.pubkey()));
let tx = Transaction::new(&[funding_key], message, blockhash);
client
.send_and_confirm_message(&[funding_key], tx.message)
.unwrap();
let mut balance = 0;
for _ in 0..20 {
if let Ok(balance_) = client
.get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
{
if balance_ > 0 {
balance = balance_;
break;
}
}
sleep(Duration::from_millis(100));
}
assert!(balance > 0);
info!(
"funded multiple funding accounts with {:?} lanports",
balance
);

let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
for (i, key) in libra_funding_keys.iter().enumerate() {
let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
client
.send_and_confirm_message(&[&funding_keys[i], &key], tx.message)
.unwrap();

let tx = librapay_transaction::transfer(
libra_pay_program_id,
&libra_genesis_key.pubkey(),
&funding_keys[i],
&libra_funding_key,
&key.pubkey(),
total / NUM_FUNDING_KEYS as u64,
blockhash,
);
client
.send_and_confirm_message(&[&funding_keys[i], &libra_funding_key], tx.message)
.unwrap();

info!("funded libra funding key {}", i);
}

let keypair_count = keypairs.len();
let amount = total / (keypair_count as u64);
for (i, keys) in keypairs[..keypair_count]
.chunks(NUM_FUNDING_KEYS)
.enumerate()
{
for (j, key) in keys.iter().enumerate() {
let tx = librapay_transaction::transfer(
libra_pay_program_id,
&libra_genesis_key.pubkey(),
&funding_keys[j],
&libra_funding_keys[j],
&key.pubkey(),
amount,
blockhash,
);

let _sig = client
.async_send_transaction(tx.clone())
.expect("create_account in generate_and_fund_keypairs");
}

for (j, key) in keys.iter().enumerate() {
let mut times = 0;
loop {
let balance =
librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
if balance >= amount {
break;
} else if times > 20 {
info!("timed out.. {} key: {} balance: {}", i, j, balance);
break;
} else {
times += 1;
sleep(Duration::from_millis(100));
}
}
}

info!(
"funded group {} of {}",
i + 1,
keypairs.len() / NUM_FUNDING_KEYS
);
blockhash = get_recent_blockhash(client).0;
}

funding_time.stop();
info!("done funding keys, took {} ms", funding_time.as_ms());
}

pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
client: Arc<T>,
faucet_addr: Option<SocketAddr>,
funding_key: &Keypair,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
) -> Result<Vec<Keypair>> {
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");
@@ -1141,12 +885,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
let last_key = keypairs[keypair_count - 1].pubkey();
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);

#[cfg(feature = "move")]
let mut move_keypairs_ret = None;

#[cfg(not(feature = "move"))]
let move_keypairs_ret = None;

// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
// start another bench-tps run without re-funding all of the keypairs, check if the
// keypairs still have at least 80% of the expected funds. That should be enough to
@@ -1157,10 +895,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
let max_fee = fee_rate_governor.max_lamports_per_signature;
let extra_fees = extra * max_fee;
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
let mut total = lamports_per_account * total_keypairs + extra_fees;
if use_move {
total *= 3;
}
let total = lamports_per_account * total_keypairs + extra_fees;

let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
info!(
@@ -1172,40 +907,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
}

#[cfg(feature = "move")]
{
if use_move {
let libra_genesis_keypair =
create_genesis(&funding_key, client.as_ref(), 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());

// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
let seed = [0u8; 32];
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client.as_ref(),
funding_key,
&move_keypairs,
total / 3,
&libra_pay_program_id,
&libra_mint_program_id,
&libra_genesis_keypair,
);
move_keypairs_ret = Some((
libra_genesis_keypair,
libra_pay_program_id,
libra_mint_program_id,
move_keypairs,
));

// Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
total /= 3;
}
}

fund_keys(
client,
funding_key,
@@ -1219,7 +920,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(keypair_count);

Ok((keypairs, move_keypairs_ret))
Ok(keypairs)
}

#[cfg(test)]
@@ -1243,11 +944,11 @@ mod tests {
config.duration = Duration::from_secs(5);

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
let keypairs =
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
.unwrap();

do_bench_tps(client, config, keypairs, None);
do_bench_tps(client, config, keypairs);
}

#[test]
@@ -1258,9 +959,8 @@ mod tests {
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let keypairs =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();

for kp in &keypairs {
assert_eq!(
@@ -1282,9 +982,8 @@ mod tests {
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let keypairs =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();

for kp in &keypairs {
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
@@ -23,7 +23,6 @@ pub struct Config {
pub read_from_client_file: bool,
pub target_lamports_per_signature: u64,
pub multi_client: bool,
pub use_move: bool,
pub num_lamports_per_account: u64,
pub target_slots_per_epoch: u64,
}
@@ -46,7 +45,6 @@ impl Default for Config {
read_from_client_file: false,
target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
multi_client: true,
use_move: false,
num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
target_slots_per_epoch: 0,
}
@@ -109,11 +107,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
.long("sustained")
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
)
.arg(
Arg::with_name("use-move")
.long("use-move")
.help("Use Move language transactions to perform transfers."),
)
.arg(
Arg::with_name("no-multi-client")
.long("no-multi-client")
@@ -263,7 +256,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
}

args.use_move = matches.is_present("use-move");
args.multi_client = !matches.is_present("no-multi-client");

if let Some(v) = matches.value_of("num_lamports_per_account") {
@@ -29,7 +29,6 @@ fn main() {
write_to_client_file,
read_from_client_file,
target_lamports_per_signature,
use_move,
multi_client,
num_lamports_per_account,
..
@@ -86,7 +85,7 @@ fn main() {
Arc::new(get_client(&nodes))
};

let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
let keypairs = if *read_from_client_file {
let path = Path::new(&client_ids_and_stake_file);
let file = File::open(path).unwrap();

@@ -115,8 +114,8 @@ fn main() {
// Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, None)
keypairs.sort_by_key(|x| x.pubkey().to_string());
keypairs
} else {
generate_and_fund_keypairs(
client.clone(),
@@ -124,7 +123,6 @@ fn main() {
&id,
keypair_count,
*num_lamports_per_account,
*use_move,
)
.unwrap_or_else(|e| {
eprintln!("Error could not fund keys: {:?}", e);
@@ -132,5 +130,5 @@ fn main() {
})
};

do_bench_tps(client, cli_config, keypairs, move_keypairs);
do_bench_tps(client, cli_config, keypairs);
}

@@ -6,17 +6,11 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_faucet::faucet::run_local_faucet;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, Signer};
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
#[cfg(feature = "move")]
let native_instruction_processors = vec![solana_move_loader_program()];

#[cfg(not(feature = "move"))]
let native_instruction_processors = vec![];

solana_logger::setup();
@@ -48,17 +42,16 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
let keypairs = generate_and_fund_keypairs(
client.clone(),
Some(faucet_addr),
&config.id,
keypair_count,
lamports_per_account,
config.use_move,
)
.unwrap();

let _total = do_bench_tps(client, config, keypairs, move_keypairs);
let _total = do_bench_tps(client, config, keypairs);

#[cfg(not(debug_assertions))]
assert!(_total > 100);
@@ -73,14 +66,3 @@ fn test_bench_tps_local_cluster_solana() {

test_bench_tps_local_cluster(config);
}

#[test]
#[serial]
fn test_bench_tps_local_cluster_move() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(10);
config.use_move = true;

test_bench_tps_local_cluster(config);
}

@@ -211,12 +211,7 @@ pull_or_push_steps() {
all_test_steps
fi

# doc/ changes:
if affects ^docs/; then
command_step docs ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image docs/build.sh" 5
fi

# web3.js and explorer changes run on Travis...
# web3.js, explorer and docs changes run on Travis...
}

@@ -5,9 +5,6 @@ steps:
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
@@ -19,6 +16,3 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
# - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
# name: "move"
# timeout_in_minutes: 20

@@ -12,7 +12,7 @@ if [[ -n $CI ]]; then
export CI_BUILD_ID=$TRAVIS_BUILD_ID
export CI_COMMIT=$TRAVIS_COMMIT
export CI_JOB_ID=$TRAVIS_JOB_ID
if $TRAVIS_PULL_REQUEST; then
if [[ $TRAVIS_PULL_REQUEST != false ]]; then
export CI_PULL_REQUEST=true
else
export CI_PULL_REQUEST=

@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

echo --- build docs
(
set -x
. ci/rust-version.sh stable
ci/docker-run.sh "$rust_stable_docker_image" docs/build.sh
)

echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then
(
# make a local commit for the svgs and generated/updated markdown
set -x
git add -f docs/src
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$(basename "$0")"
git commit -m "gitbook-cage update $(date -Is)"
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
# pop off the local commit
git reset --hard HEAD~
fi
)
else
echo CI_BRANCH not set
fi

exit 0
@@ -45,7 +45,7 @@ linux)
TARGET=x86_64-unknown-linux-gnu
;;
windows)
TARGET=x86_64-pc-windows-gnu
TARGET=x86_64-pc-windows-msvc
;;
*)
echo CI_OS_NAME unset

@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

timeout 15 ./run.sh &
timeout 120 ./run.sh &
pid=$!

attempts=20

@@ -27,5 +27,5 @@ Alternatively, you can source it from within a script:
local PATCH=0
local SPECIAL=""

semverParseInto "1.2.9" MAJOR MINOR PATCH SPECIAL
semverParseInto "1.2.20" MAJOR MINOR PATCH SPECIAL
semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL

@@ -34,7 +34,6 @@ _ cargo +"$rust_stable" clippy --workspace -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/order-crates-for-publishing.py
_ docs/build.sh

{
cd programs/bpf

@@ -1 +0,0 @@
test-stable.sh
@@ -47,7 +47,6 @@ echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
ci/affects-files.sh \
@@ -93,27 +92,6 @@ test-stable-perf)
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
;;
test-move)
ci/affects-files.sh \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable.sh \
^ci/test-move.sh \
^programs/move_loader \
^programs/librapay \
^logger/ \
^runtime/ \
^sdk/ \
|| {
annotate --style info \
"Skipped $testName as no relevant files were modified"
exit 0
}
_ cargo +"$rust_stable" test --manifest-path programs/move_loader/Cargo.toml ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path programs/librapay/Cargo.toml ${V:+--verbose} -- --nocapture
exit 0
;;
test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.2.9"
version = "1.2.20"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.9"
version = "1.2.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.9"
|
||||
version = "1.2.20"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -27,28 +27,29 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.9" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.9" }
|
||||
solana-client = { path = "../client", version = "1.2.9" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.9" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.9" }
|
||||
solana-logger = { path = "../logger", version = "1.2.9" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.9" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.9" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.9" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.9" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.9" }
|
||||
solana-version = { path = "../version", version = "1.2.9" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.9" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.9" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.20" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.20" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.20" }
|
||||
solana-client = { path = "../client", version = "1.2.20" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.20" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.20" }
|
||||
solana-logger = { path = "../logger", version = "1.2.20" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.20" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.20" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.20" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.20" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.20" }
|
||||
solana-version = { path = "../version", version = "1.2.20" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.20" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.20" }
|
||||
thiserror = "1.0.19"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.2.9" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.9" }
|
||||
solana-core = { path = "../core", version = "1.2.20" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.20" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -15,6 +15,7 @@ use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use log::*;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_budget_program::budget_instruction::{self, BudgetError};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg_with_default, COMMITMENT_ARG},
|
||||
@@ -28,7 +29,7 @@ use solana_client::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
|
||||
rpc_response::{RpcAccount, RpcKeyedAccount},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
@@ -57,7 +58,7 @@ use solana_stake_program::{
|
||||
stake_instruction::LockupArgs,
|
||||
stake_state::{Lockup, StakeAuthorize},
|
||||
};
|
||||
use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
|
||||
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
|
||||
use solana_vote_program::vote_state::VoteAuthorize;
|
||||
use std::{
|
||||
error,
|
||||
@@ -245,8 +246,8 @@ pub enum CliCommand {
|
||||
},
|
||||
TransactionHistory {
|
||||
address: Pubkey,
|
||||
end_slot: Option<Slot>, // None == latest slot
|
||||
slot_limit: Option<u64>, // None == search full history
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
@@ -381,6 +382,7 @@ pub enum CliCommand {
|
||||
},
|
||||
// Vote Commands
|
||||
CreateVoteAccount {
|
||||
vote_account: SignerIndex,
|
||||
seed: Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: Option<Pubkey>,
|
||||
@@ -406,10 +408,12 @@ pub enum CliCommand {
|
||||
VoteUpdateValidator {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
VoteUpdateCommission {
|
||||
vote_account_pubkey: Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
},
|
||||
// Wallet Commands
|
||||
Address,
|
||||
@@ -1173,7 +1177,7 @@ fn process_confirm(
|
||||
if let Some(transaction_status) = status {
|
||||
if config.verbose {
|
||||
match rpc_client
|
||||
.get_confirmed_transaction(signature, TransactionEncoding::Binary)
|
||||
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
|
||||
{
|
||||
Ok(confirmed_transaction) => {
|
||||
println!(
|
||||
@@ -1226,7 +1230,7 @@ fn process_show_account(
|
||||
let cli_account = CliAccount {
|
||||
keyed_account: RpcKeyedAccount {
|
||||
pubkey: account_pubkey.to_string(),
|
||||
account: RpcAccount::encode(account),
|
||||
account: UiAccount::encode(account, UiAccountEncoding::Binary),
|
||||
},
|
||||
use_lamports_unit,
|
||||
};
|
||||
@@ -1833,9 +1837,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
|
||||
CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
|
||||
before,
|
||||
limit,
|
||||
} => process_transaction_history(&rpc_client, config, address, *before, *limit),
|
||||
|
||||
// Nonce Commands
|
||||
|
||||
@@ -2127,6 +2131,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
|
||||
// Create vote account
|
||||
CliCommand::CreateVoteAccount {
|
||||
vote_account,
|
||||
seed,
|
||||
identity_account,
|
||||
authorized_voter,
|
||||
@@ -2135,6 +2140,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_create_vote_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
*vote_account,
|
||||
seed,
|
||||
*identity_account,
|
||||
authorized_voter,
|
||||
@@ -2179,16 +2185,25 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account,
|
||||
withdraw_authority,
|
||||
} => process_vote_update_validator(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*new_identity_account,
|
||||
*withdraw_authority,
|
||||
),
|
||||
CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
} => process_vote_update_commission(&rpc_client, config, &vote_account_pubkey, *commission),
|
||||
withdraw_authority,
|
||||
} => process_vote_update_commission(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
*commission,
|
||||
*withdraw_authority,
|
||||
),
|
||||
|
||||
// Wallet Commands
|
||||
|
||||
@@ -3416,6 +3431,7 @@ mod tests {
|
||||
let bob_pubkey = bob_keypair.pubkey();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@@ -3441,6 +3457,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
@@ -3658,6 +3675,7 @@ mod tests {
|
||||
let bob_keypair = Keypair::new();
|
||||
let identity_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
@@ -3677,6 +3695,7 @@ mod tests {
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 1,
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@@ -16,7 +16,6 @@ use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
|
||||
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -27,6 +26,7 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
signature::Signature,
|
||||
system_instruction, system_program,
|
||||
sysvar::{
|
||||
self,
|
||||
@@ -256,9 +256,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-history")
|
||||
.about("Show historical transactions affecting the given address, \
|
||||
ordered based on the slot in which they were confirmed in \
|
||||
from lowest to highest slot")
|
||||
.about("Show historical transactions affecting the given address \
|
||||
from newest to oldest")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("address")
|
||||
.index(1)
|
||||
@@ -266,26 +265,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Account address"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("end_slot")
|
||||
.takes_value(false)
|
||||
.value_name("SLOT")
|
||||
.index(2)
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Slot to start from [default: latest slot at maximum commitment]"
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.takes_value(true)
|
||||
.value_name("NUMBER OF SLOTS")
|
||||
.value_name("LIMIT")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Limit the search to this many slots"
|
||||
),
|
||||
),
|
||||
.default_value("1000")
|
||||
.help("Maximum number of transaction signatures to return"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("before")
|
||||
.long("before")
|
||||
.value_name("TRANSACTION_SIGNATURE")
|
||||
.takes_value(true)
|
||||
.help("Start with the first signature older than this one"),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -453,14 +448,22 @@ pub fn parse_transaction_history(
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
|
||||
let end_slot = value_t!(matches, "end_slot", Slot).ok();
|
||||
let slot_limit = value_t!(matches, "limit", u64).ok();
|
||||
|
||||
let before = match matches.value_of("before") {
|
||||
Some(signature) => Some(
|
||||
signature
|
||||
.parse()
|
||||
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
|
||||
),
|
||||
None => None,
|
||||
};
|
||||
let limit = value_t_or_exit!(matches, "limit", usize);
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
before,
|
||||
limit,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
@@ -1276,41 +1279,36 @@ pub fn process_show_validators(
|
||||
|
||||
pub fn process_transaction_history(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
address: &Pubkey,
|
||||
end_slot: Option<Slot>, // None == use latest slot
|
||||
slot_limit: Option<u64>,
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
) -> ProcessResult {
|
||||
let end_slot = {
|
||||
if let Some(end_slot) = end_slot {
|
||||
end_slot
|
||||
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
|
||||
address,
|
||||
before,
|
||||
Some(limit),
|
||||
)?;
|
||||
|
||||
let transactions_found = format!("{} transactions found", results.len());
|
||||
|
||||
for result in results {
|
||||
if config.verbose {
|
||||
println!(
|
||||
"{} [slot={} status={}] {}",
|
||||
result.signature,
|
||||
result.slot,
|
||||
match result.err {
|
||||
None => "Confirmed".to_string(),
|
||||
Some(err) => format!("Failed: {:?}", err),
|
||||
},
|
||||
result.memo.unwrap_or_else(|| "".to_string()),
|
||||
);
|
||||
} else {
|
||||
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
|
||||
println!("{}", result.signature);
|
||||
}
|
||||
};
|
||||
let mut start_slot = match slot_limit {
|
||||
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
|
||||
None => rpc_client.minimum_ledger_slot()?,
|
||||
};
|
||||
|
||||
println!(
|
||||
"Transactions affecting {} within slots [{},{}]",
|
||||
address, start_slot, end_slot
|
||||
);
|
||||
|
||||
let mut transaction_count = 0;
|
||||
while start_slot < end_slot {
|
||||
let signatures = rpc_client.get_confirmed_signatures_for_address(
|
||||
address,
|
||||
start_slot,
|
||||
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
|
||||
)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
}
|
||||
transaction_count += signatures.len();
|
||||
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
|
||||
}
|
||||
Ok(format!("{} transactions found", transaction_count))
|
||||
Ok(transactions_found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -5,7 +5,7 @@ use solana_sdk::{
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use solana_transaction_status::UiTransactionStatusMeta;
use std::{fmt, io};

// Pretty print a "name value"
@@ -68,7 +68,7 @@ pub fn println_signers(
pub fn write_transaction<W: io::Write>(
w: &mut W,
transaction: &Transaction,
transaction_status: &Option<RpcTransactionStatusMeta>,
transaction_status: &Option<UiTransactionStatusMeta>,
prefix: &str,
) -> io::Result<()> {
let message = &transaction.message;
@@ -191,7 +191,7 @@ pub fn write_transaction<W: io::Write>(

pub fn println_transaction(
transaction: &Transaction,
transaction_status: &Option<RpcTransactionStatusMeta>,
transaction_status: &Option<UiTransactionStatusMeta>,
prefix: &str,
) {
let mut w = Vec::new();

@@ -106,9 +106,10 @@ mod tests {
use crate::{nonce::nonce_arg, offline::blockhash_query::BlockhashQuery};
use clap::App;
use serde_json::{self, json, Value};
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcAccount, RpcFeeCalculator, RpcResponseContext},
rpc_response::{Response, RpcFeeCalculator, RpcResponseContext},
};
use solana_sdk::{
account::Account, fee_calculator::FeeCalculator, hash::hash, nonce, system_program,
@@ -344,7 +345,7 @@ mod tests {
)
.unwrap();
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
let rpc_nonce_account = RpcAccount::encode(nonce_account);
let rpc_nonce_account = UiAccount::encode(nonce_account, UiAccountEncoding::Binary);
let get_account_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(Some(rpc_nonce_account)),

@@ -253,7 +253,7 @@ pub fn parse_create_vote_account(
|
||||
default_signer_path: &str,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let (identity_account, identity_pubkey) =
|
||||
signer_of(matches, "identity_account", wallet_manager)?;
|
||||
@@ -271,6 +271,7 @@ pub fn parse_create_vote_account(
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
|
||||
seed,
|
||||
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
|
||||
authorized_voter,
|
||||
@@ -320,7 +321,8 @@ pub fn parse_vote_update_validator(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (new_identity_account, new_identity_pubkey) =
|
||||
signer_of(matches, "new_identity_account", wallet_manager)?;
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
@@ -334,6 +336,7 @@ pub fn parse_vote_update_validator(
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@@ -346,7 +349,8 @@ pub fn parse_vote_update_commission(
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
let commission = value_t_or_exit!(matches, "commission", u8);
|
||||
|
||||
let payer_provided = None;
|
||||
@@ -361,6 +365,7 @@ pub fn parse_vote_update_commission(
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey,
|
||||
commission,
|
||||
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@@ -420,13 +425,14 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account: SignerIndex,
|
||||
seed: &Option<String>,
|
||||
identity_account: SignerIndex,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
) -> ProcessResult {
|
||||
let vote_account = config.signers[1];
|
||||
let vote_account = config.signers[vote_account];
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
let vote_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
|
||||
@@ -551,8 +557,9 @@ pub fn process_vote_update_validator(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_account: SignerIndex,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let new_identity_account = config.signers[new_identity_account];
|
||||
let new_identity_pubkey = new_identity_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
@@ -584,8 +591,9 @@ pub fn process_vote_update_commission(
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
commission: u8,
|
||||
withdraw_authority: SignerIndex,
|
||||
) -> ProcessResult {
|
||||
let authorized_withdrawer = config.signers[1];
|
||||
let authorized_withdrawer = config.signers[withdraw_authority];
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_commission(
|
||||
vote_account_pubkey,
|
||||
@@ -817,6 +825,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -845,6 +854,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -877,6 +887,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: Some(authed),
|
||||
@@ -907,6 +918,7 @@ mod tests {
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: 1,
|
||||
seed: None,
|
||||
identity_account: 2,
|
||||
authorized_voter: None,
|
||||
@@ -934,6 +946,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_account: 2,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -956,6 +969,7 @@ mod tests {
|
||||
command: CliCommand::VoteUpdateCommission {
|
||||
vote_account_pubkey: pubkey,
|
||||
commission: 42,
|
||||
withdraw_authority: 1,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
|
@@ -57,6 +57,7 @@ fn test_stake_delegation_force() {
let vote_keypair = Keypair::new();
config.signers = vec![&default_signer, &vote_keypair];
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,

@@ -49,6 +49,7 @@ fn test_vote_authorize_and_withdraw() {
let vote_account_pubkey = vote_account_keypair.pubkey();
config.signers = vec![&default_signer, &vote_account_keypair];
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
@@ -120,6 +121,7 @@ fn test_vote_authorize_and_withdraw() {
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
};
process_command(&config).unwrap();

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.2.9"
version = "1.2.20"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.110"
serde_derive = "1.0.103"
serde_json = "1.0.53"
solana-transaction-status = { path = "../transaction-status", version = "1.2.9" }
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
solana-sdk = { path = "../sdk", version = "1.2.9" }
solana-vote-program = { path = "../programs/vote", version = "1.2.9" }
solana-account-decoder = { path = "../account-decoder", version = "1.2.20" }
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
solana-sdk = { path = "../sdk", version = "1.2.20" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.20" }
solana-vote-program = { path = "../programs/vote", version = "1.2.20" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +32,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.1.0"
jsonrpc-http-server = "14.1.0"
solana-logger = { path = "../logger", version = "1.2.9" }
solana-logger = { path = "../logger", version = "1.2.20" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
pub mod rpc_sender;

@@ -2,8 +2,11 @@ use crate::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
http_sender::HttpSender,
|
||||
mock_sender::{MockSender, Mocks},
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_config::{
|
||||
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
|
||||
RpcSendTransactionConfig, RpcTokenAccountsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
@@ -11,6 +14,10 @@ use bincode::serialize;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_account_decoder::{
|
||||
parse_token::{parse_token, TokenAccountType, UiMint, UiMultisig, UiTokenAccount},
|
||||
UiAccount,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{
|
||||
@@ -28,7 +35,7 @@ use solana_sdk::{
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
@@ -238,13 +245,13 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, TransactionEncoding::Json)
|
||||
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
|
||||
}
|
||||
@@ -282,10 +289,36 @@ impl RpcClient {
|
||||
Ok(signatures)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address2(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
self.get_confirmed_signatures_for_address2_with_config(address, None, None)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address2_with_config(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
before: Option<Signature>,
|
||||
limit: Option<usize>,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
let config = RpcGetConfirmedSignaturesForAddress2Config {
|
||||
before: before.map(|signature| signature.to_string()),
|
||||
limit,
|
||||
};
|
||||
|
||||
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2,
|
||||
json!([address.to_string(), config]),
|
||||
)?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: TransactionEncoding,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<ConfirmedTransaction> {
|
||||
self.send(
|
||||
RpcRequest::GetConfirmedTransaction,
|
||||
@@ -452,9 +485,9 @@ impl RpcClient {
|
||||
let Response {
|
||||
context,
|
||||
value: rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<RpcAccount>>>(result_json)?;
|
||||
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
|
||||
trace!("Response account {:?} {:?}", pubkey, rpc_account);
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode().ok());
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account,
|
||||
@@ -510,17 +543,7 @@ impl RpcClient {
|
||||
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let accounts: Vec<RpcKeyedAccount> =
|
||||
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
RpcRequest::GetProgramAccounts,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((pubkey, account.decode().unwrap()));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
|
||||
}
|
||||
|
||||
/// Request the transaction count.
|
||||
@@ -667,6 +690,211 @@ impl RpcClient {
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
|
||||
Ok(self
|
||||
.get_token_account_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_account_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<UiTokenAccount>> {
|
||||
let Response {
|
||||
context,
|
||||
value: account,
|
||||
} = self.get_account_with_commitment(pubkey, commitment_config)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account
|
||||
.map(|account| match parse_token(&account.data) {
|
||||
Ok(TokenAccountType::Account(ui_token_account)) => Some(ui_token_account),
|
||||
_ => None,
|
||||
})
|
||||
.flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_mint(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMint>> {
|
||||
Ok(self
|
||||
.get_token_mint_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_mint_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<UiMint>> {
|
||||
let Response {
|
||||
context,
|
||||
value: account,
|
||||
} = self.get_account_with_commitment(pubkey, commitment_config)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account
|
||||
.map(|account| match parse_token(&account.data) {
|
||||
Ok(TokenAccountType::Mint(ui_token_mint)) => Some(ui_token_mint),
|
||||
_ => None,
|
||||
})
|
||||
.flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_multisig(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMultisig>> {
|
||||
Ok(self
|
||||
.get_token_multisig_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_multisig_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<UiMultisig>> {
|
||||
let Response {
|
||||
context,
|
||||
value: account,
|
||||
} = self.get_account_with_commitment(pubkey, commitment_config)?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: account
|
||||
.map(|account| match parse_token(&account.data) {
|
||||
Ok(TokenAccountType::Multisig(ui_token_multisig)) => Some(ui_token_multisig),
|
||||
_ => None,
|
||||
})
|
||||
.flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<RpcTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<RpcTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountBalance,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate_with_commitment(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
let Response {
|
||||
context,
|
||||
value: accounts,
|
||||
} = self.send(
|
||||
RpcRequest::GetTokenAccountsByDelegate,
|
||||
json!([
|
||||
delegate.to_string(),
|
||||
token_account_filter,
|
||||
commitment_config
|
||||
]),
|
||||
)?;
|
||||
let pubkey_accounts = accounts_to_token_accounts(parse_keyed_accounts(
|
||||
accounts,
|
||||
RpcRequest::GetTokenAccountsByDelegate,
|
||||
)?);
|
||||
Ok(Response {
|
||||
context,
|
||||
value: pubkey_accounts,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner_with_commitment(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
let Response {
|
||||
context,
|
||||
value: accounts,
|
||||
} = self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([owner.to_string(), token_account_filter, commitment_config]),
|
||||
)?;
|
||||
let pubkey_accounts = accounts_to_token_accounts(parse_keyed_accounts(
|
||||
accounts,
|
||||
RpcRequest::GetTokenAccountsByDelegate,
|
||||
)?);
|
||||
Ok(Response {
|
||||
context,
|
||||
value: pubkey_accounts,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<RpcTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_supply_with_commitment(
|
||||
&self,
|
||||
mint: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<RpcTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenSupply,
|
||||
json!([mint.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout_and_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
@@ -1008,6 +1236,43 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_keyed_accounts(
|
||||
accounts: Vec<RpcKeyedAccount>,
|
||||
request: RpcRequest,
|
||||
) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((
|
||||
pubkey,
|
||||
account.decode().ok_or_else(|| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Account from rpc".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?,
|
||||
));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
}
|
||||
|
||||
fn accounts_to_token_accounts(
|
||||
pubkey_accounts: Vec<(Pubkey, Account)>,
|
||||
) -> Vec<(Pubkey, UiTokenAccount)> {
|
||||
pubkey_accounts
|
||||
.into_iter()
|
||||
.filter_map(|(pubkey, account)| match parse_token(&account.data) {
|
||||
Ok(TokenAccountType::Account(ui_token_account)) => Some((pubkey, ui_token_account)),
|
||||
_ => None,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@@ -1,3 +1,5 @@
use crate::rpc_filter::RpcFilterType;
use solana_account_decoder::UiAccountEncoding;
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
@@ -35,8 +37,38 @@ pub struct RpcLargestAccountsConfig {

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationConfig {
pub struct RpcStakeConfig {
pub epoch: Option<Epoch>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccountInfoConfig {
pub encoding: Option<UiAccountEncoding>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcProgramAccountsConfig {
pub filters: Option<Vec<RpcFilterType>>,
#[serde(flatten)]
pub account_config: RpcAccountInfoConfig,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcTokenAccountsFilter {
Mint(String),
ProgramId(String),
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
pub before: Option<String>, // Signature as base-58 string
pub limit: Option<usize>,
}

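As a rough illustration (not part of the diff itself), the paging parameters introduced above can be exercised through the get_confirmed_signatures_for_address2_with_config method added to the RPC client in this same compare. This is a minimal sketch under stated assumptions: the endpoint URL and the example address are placeholders, and the returned signature field is assumed to be a base-58 string as elsewhere in this change.

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;

fn main() {
    // Placeholder endpoint and address; substitute real values.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let address = Pubkey::from_str("Vote111111111111111111111111111111111111111").unwrap();

    // First page: newest signatures for the address, capped by `limit`.
    let page = rpc_client
        .get_confirmed_signatures_for_address2_with_config(&address, None, Some(25))
        .unwrap();
    for entry in &page {
        println!("{} slot={} err={:?}", entry.signature, entry.slot, entry.err);
    }

    // Next page: pass the oldest signature from the previous page as `before`
    // (assumed to be a base-58 string that parses into a Signature).
    if let Some(last) = page.last() {
        let before = last.signature.parse().ok();
        let _older = rpc_client
            .get_confirmed_signatures_for_address2_with_config(&address, before, Some(25))
            .unwrap();
    }
}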
client/src/rpc_filter.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcFilterType {
|
||||
DataSize(u64),
|
||||
Memcmp(Memcmp),
|
||||
}
|
||||
|
||||
impl RpcFilterType {
|
||||
pub fn verify(&self) -> Result<(), RpcFilterError> {
|
||||
match self {
|
||||
RpcFilterType::DataSize(_) => Ok(()),
|
||||
RpcFilterType::Memcmp(compare) => {
|
||||
let encoding = compare.encoding.as_ref().unwrap_or(&MemcmpEncoding::Binary);
|
||||
match encoding {
|
||||
MemcmpEncoding::Binary => {
|
||||
let MemcmpEncodedBytes::Binary(bytes) = &compare.bytes;
|
||||
bs58::decode(&bytes)
|
||||
.into_vec()
|
||||
.map(|_| ())
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RpcFilterError {
|
||||
#[error("bs58 decode error")]
|
||||
DecodeError(#[from] bs58::decode::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum MemcmpEncoding {
|
||||
Binary,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", untagged)]
|
||||
pub enum MemcmpEncodedBytes {
|
||||
Binary(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Memcmp {
|
||||
/// Data offset to begin match
|
||||
pub offset: usize,
|
||||
/// Bytes, encoded with specified encoding, or default Binary
|
||||
pub bytes: MemcmpEncodedBytes,
|
||||
/// Optional encoding specification
|
||||
pub encoding: Option<MemcmpEncoding>,
|
||||
}
|
||||
|
||||
impl Memcmp {
|
||||
pub fn bytes_match(&self, data: &[u8]) -> bool {
|
||||
match &self.bytes {
|
||||
MemcmpEncodedBytes::Binary(bytes) => {
|
||||
let bytes = bs58::decode(bytes).into_vec();
|
||||
if bytes.is_err() {
|
||||
return false;
|
||||
}
|
||||
let bytes = bytes.unwrap();
|
||||
if self.offset > data.len() {
|
||||
return false;
|
||||
}
|
||||
if data[self.offset..].len() < bytes.len() {
|
||||
return false;
|
||||
}
|
||||
data[self.offset..self.offset + bytes.len()] == bytes[..]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_bytes_match() {
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
// Exact match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2, 3, 4, 5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset partial match of data succeeds
|
||||
assert!(Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Incorrect partial match of data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![2]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Bytes overrun data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 2,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 4, 5, 6]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Offset outside data fails
|
||||
assert!(!Memcmp {
|
||||
offset: 6,
|
||||
bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![5]).into_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
|
||||
// Invalid base-58 fails
|
||||
assert!(!Memcmp {
|
||||
offset: 0,
|
||||
bytes: MemcmpEncodedBytes::Binary("III".to_string()),
|
||||
encoding: None,
|
||||
}
|
||||
.bytes_match(&data));
|
||||
}
|
||||
}
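A rough usage sketch (not part of the diff): the new filter types defined above might be constructed and checked like this before being handed to RpcProgramAccountsConfig::filters. The sample account data is made up, and the bs58 dependency is assumed to be available to the caller, as it is in the tests above.

use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType};

fn main() {
    // Made-up account data; a real program account would come from an RPC query.
    let account_data = vec![0u8, 1, 2, 3, 4, 5, 6, 7];

    // Match bytes [2, 3] at offset 2, base-58 encoded as MemcmpEncodedBytes::Binary expects.
    let memcmp = Memcmp {
        offset: 2,
        bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![2, 3]).into_string()),
        encoding: None,
    };

    // Combine a size check with the byte comparison.
    let filters = vec![
        RpcFilterType::DataSize(account_data.len() as u64),
        RpcFilterType::Memcmp(memcmp),
    ];
    for filter in &filters {
        assert!(filter.verify().is_ok());
    }

    if let RpcFilterType::Memcmp(compare) = &filters[1] {
        assert!(compare.bytes_match(&account_data));
    }
}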
|
@@ -1,4 +1,5 @@
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
|
||||
@@ -13,6 +14,7 @@ pub enum RpcRequest {
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
@@ -36,6 +38,10 @@ pub enum RpcRequest {
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetSupply,
|
||||
GetTokenAccountBalance,
|
||||
GetTokenAccountsByDelegate,
|
||||
GetTokenAccountsByOwner,
|
||||
GetTokenSupply,
|
||||
GetTotalSupply,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
@@ -60,6 +66,7 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
@@ -83,6 +90,10 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetSupply => "getSupply",
|
||||
RpcRequest::GetTokenAccountBalance => "getTokenAccountBalance",
|
||||
RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
|
||||
RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
|
||||
RpcRequest::GetTokenSupply => "getTokenSupply",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
@@ -102,6 +113,8 @@ impl fmt::Display for RpcRequest {
|
||||
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
|
||||
|
||||
// Validators that are this number of slots behind are considered delinquent
|
||||
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
|
||||
@@ -130,9 +143,16 @@ pub enum RpcError {
|
||||
ForUser(String), /* "direct-to-user message" */
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum TokenAccountsFilter {
|
||||
Mint(Pubkey),
|
||||
ProgramId(Pubkey),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::rpc_config::RpcTokenAccountsFilter;
|
||||
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
|
||||
|
||||
#[test]
|
||||
@@ -197,5 +217,16 @@ mod tests {
|
||||
let request =
|
||||
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
|
||||
assert_eq!(request["params"], json!([addr, commitment_config]));
|
||||
|
||||
// Test request with CommitmentConfig and params
|
||||
let test_request = RpcRequest::GetTokenAccountsByOwner;
|
||||
let mint = Pubkey::new_rand();
|
||||
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
|
||||
let request = test_request
|
||||
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));
|
||||
assert_eq!(
|
||||
request["params"],
|
||||
json!([addr, token_account_filter, commitment_config])
|
||||
);
|
||||
}
|
||||
}
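A small usage sketch following the test pattern above, assumed to live inside the same tests module so that `json!`, `Pubkey`, and `RpcRequest` are in scope:

#[test]
fn build_request_json_sketch() {
    // Sketch: the built request is a serde_json Value whose "params" echo what was passed in.
    let test_request = RpcRequest::GetConfirmedSignaturesForAddress2;
    let addr = Pubkey::new_rand().to_string();
    let request = test_request.build_request_json(1, json!([addr.clone()]));
    assert_eq!(request["params"], json!([addr]));
}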
|
||||
|
@@ -1,15 +1,16 @@
|
||||
use crate::{client_error, rpc_request::RpcError};
|
||||
use crate::client_error;
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
inflation::Inflation,
|
||||
pubkey::Pubkey,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
|
||||
use std::{collections::HashMap, net::SocketAddr};
|
||||
|
||||
pub type RpcResult<T> = client_error::Result<Response<T>>;
|
||||
pub type RpcAmount = String;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RpcResponseContext {
|
||||
@@ -91,7 +92,7 @@ pub struct RpcInflationRate {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcKeyedAccount {
|
||||
pub pubkey: String,
|
||||
pub account: RpcAccount,
|
||||
pub account: UiAccount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
@@ -100,43 +101,6 @@ pub struct RpcSignatureResult {
|
||||
pub err: Option<TransactionError>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccount {
|
||||
pub lamports: u64,
|
||||
pub data: String,
|
||||
pub owner: String,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: Epoch,
|
||||
}
|
||||
|
||||
impl RpcAccount {
|
||||
pub fn encode(account: Account) -> Self {
|
||||
RpcAccount {
|
||||
lamports: account.lamports,
|
||||
data: bs58::encode(account.data.clone()).into_string(),
|
||||
owner: account.owner.to_string(),
|
||||
executable: account.executable,
|
||||
rent_epoch: account.rent_epoch,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> std::result::Result<Account, RpcError> {
|
||||
Ok(Account {
|
||||
lamports: self.lamports,
|
||||
data: bs58::decode(self.data.clone()).into_vec().map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account data".to_string())
|
||||
})?,
|
||||
owner: Pubkey::from_str(&self.owner).map_err(|_| {
|
||||
RpcError::RpcRequestError("Could not parse encoded account owner".to_string())
|
||||
})?,
|
||||
executable: self.executable,
|
||||
rent_epoch: self.rent_epoch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct RpcContactInfo {
|
||||
/// Pubkey of the node as a base-58 string
|
||||
@@ -240,3 +204,62 @@ pub struct RpcSupply {
|
||||
pub non_circulating: u64,
|
||||
pub non_circulating_accounts: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum StakeActivationState {
|
||||
Activating,
|
||||
Active,
|
||||
Deactivating,
|
||||
Inactive,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcStakeActivation {
|
||||
pub state: StakeActivationState,
|
||||
pub active: u64,
|
||||
pub inactive: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTokenAmount {
|
||||
pub ui_amount: f64,
|
||||
pub decimals: u8,
|
||||
pub amount: RpcAmount,
|
||||
}
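The relation between these three fields is not spelled out here; the usual convention, which this sketch assumes, is that `ui_amount` is the raw string `amount` scaled down by `decimals`:

// Hypothetical helper: derive the human-readable amount from the raw string amount.
fn ui_amount_of(raw_amount: &str, decimals: u8) -> f64 {
    raw_amount.parse::<u64>().unwrap_or(0) as f64 / 10f64.powi(decimals as i32)
}

fn example_token_amount() -> RpcTokenAmount {
    RpcTokenAmount {
        ui_amount: ui_amount_of("1500000", 6), // 1.5 tokens with 6 decimals (made-up values)
        decimals: 6,
        amount: "1500000".to_string(),
    }
}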
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTokenAccountBalance {
|
||||
pub address: String,
|
||||
#[serde(flatten)]
|
||||
pub amount: RpcTokenAmount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedTransactionStatusWithSignature {
|
||||
pub signature: String,
|
||||
pub slot: Slot,
|
||||
pub err: Option<TransactionError>,
|
||||
pub memo: Option<String>,
|
||||
}
|
||||
|
||||
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
|
||||
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
|
||||
let ConfirmedTransactionStatusWithSignature {
|
||||
signature,
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
} = value;
|
||||
Self {
|
||||
signature: signature.to_string(),
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
}
|
||||
}
|
||||
}
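A short usage sketch of the conversion above, assuming the `signature` field is a `solana_sdk::signature::Signature` (values here are placeholders):

#[test]
fn confirmed_status_conversion_sketch() {
    use solana_sdk::signature::Signature;

    // The blockstore-level status converts into the RPC-facing type via the From impl above.
    let status = ConfirmedTransactionStatusWithSignature {
        signature: Signature::default(),
        slot: 42,
        err: None,
        memo: Some("example memo".to_string()),
    };
    let rpc_status: RpcConfirmedTransactionStatusWithSignature = status.into();
    assert_eq!(rpc_status.slot, 42);
    assert_eq!(rpc_status.signature, Signature::default().to_string());
}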
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.9"
|
||||
version = "1.2.20"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -21,7 +21,7 @@ byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
ed25519-dalek = "=1.0.0-pre.4"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
@@ -42,35 +42,38 @@ regex = "1.3.7"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.9" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.9" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.9" }
|
||||
solana-client = { path = "../client", version = "1.2.9" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.9" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.9" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.9" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.9" }
|
||||
solana-logger = { path = "../logger", version = "1.2.9" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.9" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.9" }
|
||||
solana-measure = { path = "../measure", version = "1.2.9" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.9" }
|
||||
solana-perf = { path = "../perf", version = "1.2.9" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.9" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.9" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.9" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.9" }
|
||||
solana-version = { path = "../version", version = "1.2.9" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.9" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.9" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.9" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.20" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.20" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.20" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.20" }
|
||||
solana-client = { path = "../client", version = "1.2.20" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.20" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.20" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.20" }
|
||||
solana-logger = { path = "../logger", version = "1.2.20" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.20" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.20" }
|
||||
solana-measure = { path = "../measure", version = "1.2.20" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.20" }
|
||||
solana-perf = { path = "../perf", version = "1.2.20" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.20" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.20" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.20" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.2.20" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.20" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.20" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.20" }
|
||||
solana-version = { path = "../version", version = "1.2.20" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.20" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.20" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.20" }
|
||||
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.9" }
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
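One note on the manifest changes above: the `package =` renames let the crate depend on tokio 0.2 and tokio 0.1 at the same time, each reachable under its own crate name. A hedged Rust sketch (runtime paths as published in the respective tokio releases):

// Both runtimes coexist; the 0.1 series is reached through the renamed `tokio_01` crate.
use tokio::runtime::Runtime as Runtime02;      // tokio = "0.2.22"
use tokio_01::runtime::Runtime as Runtime01;   // tokio_01 = { version = "0.1", package = "tokio" }

fn build_runtimes() -> (Runtime02, Runtime01) {
    (
        Runtime02::new().expect("tokio 0.2 runtime"),
        Runtime01::new().expect("tokio 0.1 runtime"),
    )
}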
|
||||
|
@@ -509,7 +509,7 @@ impl BankingStage {
|
||||
// expires.
|
||||
let txs = batch.transactions();
|
||||
let pre_balances = if transaction_status_sender.is_some() {
|
||||
bank.collect_balances(txs)
|
||||
bank.collect_balances(batch)
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
@@ -545,10 +545,11 @@ impl BankingStage {
|
||||
.processing_results;
|
||||
|
||||
if let Some(sender) = transaction_status_sender {
|
||||
let post_balances = bank.collect_balances(txs);
|
||||
let post_balances = bank.collect_balances(batch);
|
||||
send_transaction_status_batch(
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
transaction_statuses,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
sender,
|
||||
|
@@ -1,16 +1,18 @@
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
consensus::PubkeyVotes,
|
||||
crds_value::CrdsValueLabel,
|
||||
poh_recorder::PohRecorder,
|
||||
pubkey_references::LockedPubkeyReferences,
|
||||
replay_stage::ReplayVotesReceiver,
|
||||
result::{Error, Result},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify,
|
||||
verified_vote_packets::VerifiedVotePackets,
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select, Sender as CrossbeamSender,
|
||||
};
|
||||
use itertools::izip;
|
||||
use log::*;
|
||||
@@ -30,7 +32,7 @@ use solana_sdk::{
|
||||
};
|
||||
use solana_vote_program::vote_instruction::VoteInstruction;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
{Arc, Mutex, RwLock},
|
||||
@@ -40,16 +42,22 @@ use std::{
|
||||
};
|
||||
|
||||
// Map from a vote account to the authorized voter for an epoch
|
||||
pub type VerifiedVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Packets)>>;
|
||||
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
|
||||
pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
|
||||
pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec<Slot>)>;
|
||||
pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec<Slot>)>;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct SlotVoteTracker {
|
||||
voted: HashSet<Arc<Pubkey>>,
|
||||
// Maps pubkeys that have voted for this slot
|
||||
// to whether or not we've seen the vote on gossip.
|
||||
// True if seen on gossip, false if only seen in replay.
|
||||
voted: HashMap<Arc<Pubkey>, bool>,
|
||||
updates: Option<Vec<Arc<Pubkey>>>,
|
||||
pub total_stake: u64,
|
||||
total_stake: u64,
|
||||
gossip_only_stake: u64,
|
||||
}
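To make the new `voted` map's encoding concrete, a small module-local sketch (the field is private, so this only compiles inside this module; the helper name is invented):

// Each entry records whether a validator's vote for this slot was seen via gossip.
fn describe_vote_provenance(tracker: &SlotVoteTracker, pubkey: &Arc<Pubkey>) -> &'static str {
    match tracker.voted.get(pubkey).copied() {
        Some(true) => "vote observed via gossip",
        Some(false) => "vote observed only in replay",
        None => "no vote recorded for this validator",
    }
}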
|
||||
|
||||
impl SlotVoteTracker {
|
||||
@@ -62,7 +70,7 @@ impl SlotVoteTracker {
|
||||
#[derive(Default)]
|
||||
pub struct VoteTracker {
|
||||
// Map from a slot to a set of validators who have voted for that slot
|
||||
pub slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
|
||||
slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
|
||||
// Don't track votes from people who are not staked, acts as a spam filter
|
||||
epoch_authorized_voters: RwLock<HashMap<Epoch, Arc<EpochAuthorizedVoters>>>,
|
||||
leader_schedule_epoch: RwLock<Epoch>,
|
||||
@@ -126,7 +134,7 @@ impl VoteTracker {
|
||||
|
||||
let mut w_slot_vote_tracker = slot_vote_tracker.write().unwrap();
|
||||
|
||||
w_slot_vote_tracker.voted.insert(pubkey.clone());
|
||||
w_slot_vote_tracker.voted.insert(pubkey.clone(), true);
|
||||
if let Some(ref mut updates) = w_slot_vote_tracker.updates {
|
||||
updates.push(pubkey.clone())
|
||||
} else {
|
||||
@@ -202,15 +210,18 @@ impl ClusterInfoVoteListener {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
sender: CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: CrossbeamSender<Vec<Packets>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Self {
|
||||
let exit_ = exit.clone();
|
||||
|
||||
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
|
||||
let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) =
|
||||
unbounded();
|
||||
let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded();
|
||||
let listen_thread = Builder::new()
|
||||
.name("solana-cluster_info_vote_listener".to_string())
|
||||
@@ -218,7 +229,7 @@ impl ClusterInfoVoteListener {
|
||||
let _ = Self::recv_loop(
|
||||
exit_,
|
||||
&cluster_info,
|
||||
verified_vote_packets_sender,
|
||||
verified_vote_label_packets_sender,
|
||||
verified_vote_transactions_sender,
|
||||
);
|
||||
})
|
||||
@@ -231,9 +242,9 @@ impl ClusterInfoVoteListener {
|
||||
.spawn(move || {
|
||||
let _ = Self::bank_send_loop(
|
||||
exit_,
|
||||
verified_vote_packets_receiver,
|
||||
verified_vote_label_packets_receiver,
|
||||
poh_recorder,
|
||||
&sender,
|
||||
&verified_packets_sender,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@@ -248,6 +259,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker,
|
||||
&bank_forks,
|
||||
subscriptions,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@@ -267,7 +280,7 @@ impl ClusterInfoVoteListener {
|
||||
fn recv_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: &ClusterInfo,
|
||||
verified_vote_packets_sender: VerifiedVotePacketsSender,
|
||||
verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender,
|
||||
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
|
||||
) -> Result<()> {
|
||||
let mut last_ts = 0;
|
||||
@@ -282,7 +295,7 @@ impl ClusterInfoVoteListener {
|
||||
if !votes.is_empty() {
|
||||
let (vote_txs, packets) = Self::verify_votes(votes, labels);
|
||||
verified_vote_transactions_sender.send(vote_txs)?;
|
||||
verified_vote_packets_sender.send(packets)?;
|
||||
verified_vote_label_packets_sender.send(packets)?;
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
|
||||
@@ -322,9 +335,9 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
fn bank_send_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
verified_vote_packets_receiver: VerifiedVotePacketsReceiver,
|
||||
verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
|
||||
) -> Result<()> {
|
||||
let mut verified_vote_packets = VerifiedVotePackets::default();
|
||||
let mut time_since_lock = Instant::now();
|
||||
@@ -334,9 +347,10 @@ impl ClusterInfoVoteListener {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(e) = verified_vote_packets
|
||||
.get_and_process_vote_packets(&verified_vote_packets_receiver, &mut update_version)
|
||||
{
|
||||
if let Err(e) = verified_vote_packets.get_and_process_vote_packets(
|
||||
&verified_vote_label_packets_receiver,
|
||||
&mut update_version,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
@@ -353,7 +367,7 @@ impl ClusterInfoVoteListener {
|
||||
if let Some(bank) = bank {
|
||||
let last_version = bank.last_vote_sync.load(Ordering::Relaxed);
|
||||
let (new_version, msgs) = verified_vote_packets.get_latest_votes(last_version);
|
||||
packets_sender.send(msgs)?;
|
||||
verified_packets_sender.send(msgs)?;
|
||||
bank.last_vote_sync.compare_and_swap(
|
||||
last_version,
|
||||
new_version,
|
||||
@@ -371,6 +385,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@@ -380,19 +396,18 @@ impl ClusterInfoVoteListener {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
|
||||
|
||||
if let Err(e) = Self::get_and_process_votes(
|
||||
&vote_txs_receiver,
|
||||
&vote_tracker,
|
||||
root_bank.slot(),
|
||||
subscriptions.clone(),
|
||||
epoch_stakes,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
return Ok(());
|
||||
}
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::ReadyTimeoutError => (),
|
||||
_ => {
|
||||
error!("thread {:?} error {:?}", thread::current().name(), e);
|
||||
}
|
||||
@@ -407,6 +422,8 @@ impl ClusterInfoVoteListener {
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
Self::get_and_process_votes(
|
||||
vote_txs_receiver,
|
||||
@@ -414,6 +431,8 @@ impl ClusterInfoVoteListener {
|
||||
last_root,
|
||||
subscriptions,
|
||||
None,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -423,19 +442,41 @@ impl ClusterInfoVoteListener {
|
||||
last_root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVotesReceiver,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
|
||||
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
|
||||
vote_txs.extend(new_txs);
|
||||
let mut sel = Select::new();
|
||||
sel.recv(vote_txs_receiver);
|
||||
sel.recv(replay_votes_receiver);
|
||||
let mut remaining_wait_time = 200;
|
||||
loop {
|
||||
if remaining_wait_time == 0 {
|
||||
break;
|
||||
}
|
||||
let start = Instant::now();
|
||||
// Wait for one of the receivers to be ready. `ready_timeout`
|
||||
// will return if channels either have something, or are
|
||||
// disconnected. `ready_timeout` can wake up spuriously,
|
||||
// hence the loop
|
||||
let _ = sel.ready_timeout(Duration::from_millis(remaining_wait_time))?;
|
||||
let vote_txs: Vec<_> = vote_txs_receiver.try_iter().flatten().collect();
|
||||
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
|
||||
if !vote_txs.is_empty() || !replay_votes.is_empty() {
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
verified_vote_sender,
|
||||
&replay_votes,
|
||||
);
|
||||
break;
|
||||
} else {
|
||||
remaining_wait_time = remaining_wait_time
|
||||
.saturating_sub(std::cmp::max(start.elapsed().as_millis() as u64, 1));
|
||||
}
|
||||
}
|
||||
Self::process_votes(
|
||||
vote_tracker,
|
||||
vote_txs,
|
||||
last_root,
|
||||
subscriptions,
|
||||
epoch_stakes,
|
||||
);
|
||||
Ok(())
|
||||
}
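The ready/drain loop above is the crossbeam-channel `Select` pattern; a self-contained sketch of the same idea, with invented names and toy u64 payloads:

use crossbeam_channel::{Receiver, Select};
use std::time::{Duration, Instant};

// Wait up to ~200ms total for either channel to have data, tolerating spurious wakeups.
fn drain_two(rx_a: &Receiver<u64>, rx_b: &Receiver<u64>) -> Vec<u64> {
    let mut sel = Select::new();
    sel.recv(rx_a);
    sel.recv(rx_b);
    let mut remaining_ms: u64 = 200;
    let mut out = Vec::new();
    while remaining_ms > 0 {
        let start = Instant::now();
        // `ready_timeout` returns when a channel has data or is disconnected; it can
        // also wake spuriously, hence the surrounding loop.
        if sel.ready_timeout(Duration::from_millis(remaining_ms)).is_err() {
            break; // timed out with nothing to drain
        }
        out.extend(rx_a.try_iter());
        out.extend(rx_b.try_iter());
        if !out.is_empty() {
            break;
        }
        remaining_ms = remaining_ms.saturating_sub(start.elapsed().as_millis().max(1) as u64);
    }
    out
}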
|
||||
|
||||
@@ -445,10 +486,11 @@ impl ClusterInfoVoteListener {
|
||||
root: Slot,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes: &[Arc<PubkeyVotes>],
|
||||
) {
|
||||
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
|
||||
let mut diff: HashMap<Slot, HashMap<Arc<Pubkey>, bool>> = HashMap::new();
|
||||
{
|
||||
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
|
||||
for tx in vote_txs {
|
||||
if let (Some(vote_pubkey), Some(vote_instruction)) = tx
|
||||
.message
|
||||
@@ -502,25 +544,33 @@ impl ClusterInfoVoteListener {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't insert if we already have marked down this pubkey
|
||||
// voting for this slot
|
||||
let maybe_slot_tracker =
|
||||
all_slot_trackers.read().unwrap().get(&slot).cloned();
|
||||
if let Some(slot_tracker) = maybe_slot_tracker {
|
||||
if slot_tracker.read().unwrap().voted.contains(vote_pubkey) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(vote_pubkey);
|
||||
diff.entry(slot).or_default().insert(unduplicated_pubkey);
|
||||
diff.entry(slot)
|
||||
.or_default()
|
||||
.insert(unduplicated_pubkey, true);
|
||||
}
|
||||
|
||||
subscriptions.notify_vote(&vote);
|
||||
let _ = verified_vote_sender.send((*vote_pubkey, vote.slots));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, slot_diff) in diff {
|
||||
// Process the replay votes
|
||||
for votes in replay_votes {
|
||||
for (pubkey, slot) in votes.iter() {
|
||||
if *slot <= root {
|
||||
continue;
|
||||
}
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(pubkey);
|
||||
diff.entry(*slot)
|
||||
.or_default()
|
||||
.entry(unduplicated_pubkey)
|
||||
.or_default();
|
||||
}
|
||||
}
|
||||
|
||||
for (slot, mut slot_diff) in diff {
|
||||
let slot_tracker = vote_tracker
|
||||
.slot_vote_trackers
|
||||
.read()
|
||||
@@ -528,15 +578,55 @@ impl ClusterInfoVoteListener {
|
||||
.get(&slot)
|
||||
.cloned();
|
||||
if let Some(slot_tracker) = slot_tracker {
|
||||
{
|
||||
let r_slot_tracker = slot_tracker.read().unwrap();
|
||||
// Only keep the pubkeys we haven't seen voting for this slot
|
||||
slot_diff.retain(|pubkey, seen_in_gossip_above| {
|
||||
let seen_in_gossip_previously = r_slot_tracker.voted.get(pubkey);
|
||||
let is_new = seen_in_gossip_previously.is_none();
|
||||
if is_new && !*seen_in_gossip_above {
|
||||
// If this vote wasn't seen in gossip, then it must be a
|
||||
// replay vote, and we haven't sent a notification for
|
||||
// those yet
|
||||
let _ = verified_vote_sender.send((**pubkey, vec![slot]));
|
||||
}
|
||||
|
||||
// `is_new_from_gossip` means we observed a vote for this slot
|
||||
// for the first time in gossip
|
||||
let is_new_from_gossip =
|
||||
!seen_in_gossip_previously.cloned().unwrap_or(false)
|
||||
&& *seen_in_gossip_above;
|
||||
is_new || is_new_from_gossip
|
||||
});
|
||||
}
|
||||
let mut w_slot_tracker = slot_tracker.write().unwrap();
|
||||
if w_slot_tracker.updates.is_none() {
|
||||
w_slot_tracker.updates = Some(vec![]);
|
||||
}
|
||||
let mut current_stake = 0;
|
||||
for pubkey in slot_diff {
|
||||
Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
|
||||
let mut gossip_only_stake = 0;
|
||||
for (pubkey, seen_in_gossip_above) in slot_diff {
|
||||
let is_new = !w_slot_tracker.voted.contains_key(&pubkey);
|
||||
Self::sum_stake(
|
||||
&mut current_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
// By this point we know that if the vote was seen in gossip above, it was not
// seen in gossip at any point in the past, so it's safe to pass this in here
// as an overall indicator of whether this vote is new
|
||||
seen_in_gossip_above,
|
||||
is_new,
|
||||
);
|
||||
|
||||
w_slot_tracker.voted.insert(pubkey.clone());
|
||||
// From the `slot_diff.retain` earlier, and because there are no other
// writers to `slot_vote_tracker`, we know that `is_new || is_new_from_gossip`.
// In both cases we want to record `is_new_from_gossip` for the `pubkey` entry.
|
||||
w_slot_tracker
|
||||
.voted
|
||||
.insert(pubkey.clone(), seen_in_gossip_above);
|
||||
w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
|
||||
}
|
||||
Self::notify_for_stake_change(
|
||||
@@ -547,20 +637,33 @@ impl ClusterInfoVoteListener {
|
||||
slot,
|
||||
);
|
||||
w_slot_tracker.total_stake += current_stake;
|
||||
w_slot_tracker.gossip_only_stake += gossip_only_stake
|
||||
} else {
|
||||
let mut total_stake = 0;
|
||||
let voted: HashSet<_> = slot_diff
|
||||
let mut gossip_only_stake = 0;
|
||||
let voted: HashMap<_, _> = slot_diff
|
||||
.into_iter()
|
||||
.map(|pubkey| {
|
||||
Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
|
||||
pubkey
|
||||
.map(|(pubkey, seen_in_gossip_above)| {
|
||||
if !seen_in_gossip_above {
|
||||
let _ = verified_vote_sender.send((*pubkey, vec![slot]));
|
||||
}
|
||||
Self::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
epoch_stakes,
|
||||
&pubkey,
|
||||
seen_in_gossip_above,
|
||||
true,
|
||||
);
|
||||
(pubkey, seen_in_gossip_above)
|
||||
})
|
||||
.collect();
|
||||
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
|
||||
let new_slot_tracker = SlotVoteTracker {
|
||||
voted: voted.clone(),
|
||||
updates: Some(voted.into_iter().collect()),
|
||||
updates: Some(voted.keys().cloned().collect()),
|
||||
voted,
|
||||
total_stake,
|
||||
gossip_only_stake,
|
||||
};
|
||||
vote_tracker
|
||||
.slot_vote_trackers
|
||||
@@ -588,10 +691,26 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
|
||||
fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
|
||||
fn sum_stake(
|
||||
sum: &mut u64,
|
||||
gossip_only_stake: &mut u64,
|
||||
epoch_stakes: Option<&EpochStakes>,
|
||||
pubkey: &Pubkey,
|
||||
is_new_from_gossip: bool,
|
||||
is_new: bool,
|
||||
) {
|
||||
if !is_new_from_gossip && !is_new {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(stakes) = epoch_stakes {
|
||||
if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
|
||||
*sum += vote_account.0;
|
||||
if is_new {
|
||||
*sum += vote_account.0;
|
||||
}
|
||||
if is_new_from_gossip {
|
||||
*gossip_only_stake += vote_account.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -611,6 +730,7 @@ mod tests {
|
||||
use solana_sdk::signature::Signature;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
#[test]
|
||||
fn test_max_vote_tx_fits() {
|
||||
@@ -783,8 +903,11 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slots = vec![1, 2];
|
||||
let replay_vote_slots = vec![3, 4];
|
||||
validator_voting_keypairs.iter().for_each(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
@@ -797,6 +920,15 @@ mod tests {
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx]).unwrap();
|
||||
for vote_slot in &replay_vote_slots {
|
||||
// Send twice, should only expect to be notified once later
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), *vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
@@ -806,14 +938,42 @@ mod tests {
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
for vote_slot in vote_slots {
|
||||
|
||||
// Check that the received votes were pushed to other components
|
||||
// subscribing via `verified_vote_receiver`
|
||||
let all_expected_slots: BTreeSet<_> = vote_slots
|
||||
.into_iter()
|
||||
.chain(replay_vote_slots.into_iter())
|
||||
.collect();
|
||||
let mut pubkey_to_votes: HashMap<Pubkey, BTreeSet<Slot>> = HashMap::new();
|
||||
for (received_pubkey, new_votes) in verified_vote_receiver.try_iter() {
|
||||
let already_received_votes = pubkey_to_votes.entry(received_pubkey).or_default();
|
||||
for new_vote in new_votes {
|
||||
// `new_vote` should only be received once
|
||||
assert!(already_received_votes.insert(new_vote));
|
||||
}
|
||||
}
|
||||
assert_eq!(pubkey_to_votes.len(), validator_voting_keypairs.len());
|
||||
for keypairs in &validator_voting_keypairs {
|
||||
assert_eq!(
|
||||
*pubkey_to_votes
|
||||
.get(&keypairs.vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
all_expected_slots
|
||||
);
|
||||
}
|
||||
|
||||
// Check the vote trackers were updated correctly
|
||||
for vote_slot in all_expected_slots {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in &validator_voting_keypairs {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@@ -828,14 +988,18 @@ mod tests {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
|
||||
// Send some votes to process
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (votes_txs_sender, votes_txs_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (_replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let mut expected_votes = vec![];
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let validator_votes: Vec<_> = keyset
|
||||
.iter()
|
||||
.map(|keypairs| {
|
||||
let node_keypair = &keypairs.node_keypair;
|
||||
let vote_keypair = &keypairs.vote_keypair;
|
||||
expected_votes.push((vote_keypair.pubkey(), vec![i as Slot + 1]));
|
||||
vote_transaction::new_vote_transaction(
|
||||
vec![i as u64 + 1],
|
||||
Hash::default(),
|
||||
@@ -846,24 +1010,38 @@ mod tests {
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
votes_sender.send(validator_votes).unwrap();
|
||||
votes_txs_sender.send(validator_votes).unwrap();
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
// Read and process votes from channel `votes_receiver`
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&votes_txs_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check that the received votes were pushed to other components
|
||||
// subscribing via a channel
|
||||
let received_votes: Vec<_> = verified_vote_receiver.try_iter().collect();
|
||||
assert_eq!(received_votes.len(), validator_voting_keypairs.len());
|
||||
for (expected_pubkey_vote, received_pubkey_vote) in
|
||||
expected_votes.iter().zip(received_votes.iter())
|
||||
{
|
||||
assert_eq!(expected_pubkey_vote, received_pubkey_vote);
|
||||
}
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
for voting_keypairs in keyset {
|
||||
let pubkey = voting_keypairs.vote_keypair.pubkey();
|
||||
assert!(r_slot_vote_tracker.voted.contains(&pubkey));
|
||||
assert!(r_slot_vote_tracker.voted.contains_key(&pubkey));
|
||||
assert!(r_slot_vote_tracker
|
||||
.updates
|
||||
.as_ref()
|
||||
@@ -873,6 +1051,79 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_votes3() {
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slot = 1;
|
||||
|
||||
// Events:
|
||||
// 0: Send gossip vote
|
||||
// 1: Send replay vote
|
||||
// 2: Send both
|
||||
let ordered_events = vec![
|
||||
vec![0],
|
||||
vec![1],
|
||||
vec![0, 1],
|
||||
vec![1, 0],
|
||||
vec![2],
|
||||
vec![0, 1, 2],
|
||||
vec![1, 0, 2],
|
||||
];
|
||||
for events in ordered_events {
|
||||
let (vote_tracker, bank, validator_voting_keypairs, subscriptions) = setup();
|
||||
let node_keypair = &validator_voting_keypairs[0].node_keypair;
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
for &e in &events {
|
||||
if e == 0 || e == 2 {
|
||||
// Create vote transaction
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![vote_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
node_keypair,
|
||||
vote_keypair,
|
||||
vote_keypair,
|
||||
);
|
||||
votes_sender.send(vec![vote_tx.clone()]).unwrap();
|
||||
}
|
||||
if e == 1 || e == 2 {
|
||||
replay_votes_sender
|
||||
.send(Arc::new(vec![(vote_keypair.pubkey(), vote_slot)]))
|
||||
.unwrap();
|
||||
}
|
||||
let _ = ClusterInfoVoteListener::get_and_process_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
Some(
|
||||
// Make sure `epoch_stakes` exists for this slot by unwrapping
|
||||
bank.epoch_stakes(bank.epoch_schedule().get_epoch(vote_slot))
|
||||
.unwrap(),
|
||||
),
|
||||
&verified_vote_sender,
|
||||
&replay_votes_receiver,
|
||||
);
|
||||
}
|
||||
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
|
||||
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
|
||||
|
||||
if events == vec![1] {
|
||||
// Check `gossip_only_stake` is not incremented
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 0);
|
||||
} else {
|
||||
// Check that both the `gossip_only_stake` and `total_stake` both
|
||||
// increased
|
||||
assert_eq!(r_slot_vote_tracker.total_stake, 100);
|
||||
assert_eq!(r_slot_vote_tracker.gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_voters_by_epoch() {
|
||||
// Create some voters at genesis
|
||||
@@ -936,14 +1187,14 @@ mod tests {
|
||||
let ref_count_per_vote = 2;
|
||||
|
||||
// Create some voters at genesis
|
||||
let validator_voting_keypairs: Vec<_> = (0..2)
|
||||
let validator_keypairs: Vec<_> = (0..2)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_voting_keypairs,
|
||||
&validator_keypairs,
|
||||
100,
|
||||
);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
@@ -957,16 +1208,17 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
// Send a vote to process, should add a reference to the pubkey for that voter
|
||||
// in the tracker
|
||||
let validator0_keypairs = &validator_voting_keypairs[0];
|
||||
let validator0_keypairs = &validator_keypairs[0];
|
||||
let voted_slot = bank.slot() + 1;
|
||||
let vote_tx = vec![vote_transaction::new_vote_transaction(
|
||||
// Must vote > root to be processed
|
||||
vec![bank.slot() + 1],
|
||||
vec![voted_slot],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&validator0_keypairs.node_keypair,
|
||||
@@ -974,12 +1226,19 @@ mod tests {
|
||||
&validator0_keypairs.vote_keypair,
|
||||
)];
|
||||
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_tx,
|
||||
0,
|
||||
subscriptions.clone(),
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
// Add vote for same slot, should not affect outcome
|
||||
&[Arc::new(vec![(
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
voted_slot,
|
||||
)])],
|
||||
);
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
@@ -1014,8 +1273,9 @@ mod tests {
|
||||
// Test with votes across two epochs
|
||||
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
|
||||
|
||||
// Make 2 new votes in two different epochs, ref count should go up
|
||||
// by 2 * ref_count_per_vote
|
||||
// Make 2 new votes in two different epochs for the same pubkey,
|
||||
// the ref count should go up by 3 * ref_count_per_vote
|
||||
// Add 1 vote through the replay channel for a different pubkey; its ref count should equal `current_ref_count`
|
||||
let vote_txs: Vec<_> = [bank.slot() + 2, first_slot_in_new_epoch]
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
@@ -1031,8 +1291,32 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
&vote_tracker,
|
||||
vote_txs,
|
||||
0,
|
||||
subscriptions,
|
||||
None,
|
||||
&verified_vote_sender,
|
||||
&[Arc::new(vec![(
|
||||
validator_keypairs[1].vote_keypair.pubkey(),
|
||||
first_slot_in_new_epoch,
|
||||
)])],
|
||||
);
|
||||
|
||||
// Check new replay vote pubkey first
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
.0
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&validator_keypairs[1].vote_keypair.pubkey())
|
||||
.unwrap(),
|
||||
);
|
||||
assert_eq!(ref_count, current_ref_count);
|
||||
|
||||
// Check the existing pubkey
|
||||
let ref_count = Arc::strong_count(
|
||||
&vote_tracker
|
||||
.keys
|
||||
@@ -1072,7 +1356,7 @@ mod tests {
|
||||
&exit,
|
||||
Arc::new(RwLock::new(bank_forks)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
|
||||
@@ -1158,4 +1442,78 @@ mod tests {
|
||||
assert_eq!(vote_txs.len(), 2);
|
||||
verify_packets_len(&packets, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum_stake() {
|
||||
let (_, bank, validator_voting_keypairs, _) = setup();
|
||||
let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
|
||||
let epoch_stakes = bank.epoch_stakes(bank.epoch()).unwrap();
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both true, both fields
|
||||
// should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
|
||||
// If `is_new_from_gossip` and `is_new` are both false, none should increase
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new`, but not `is_new_from_gossip` then
|
||||
// `total_stake` will increase, but `gossip_only_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = false;
|
||||
let is_new = true;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 100);
|
||||
assert_eq!(gossip_only_stake, 0);
|
||||
|
||||
// If only `is_new_from_gossip`, but not `is_new` then
|
||||
// `gossip_only_stake` will increase, but `total_stake` won't
|
||||
let mut total_stake = 0;
|
||||
let mut gossip_only_stake = 0;
|
||||
let is_new_from_gossip = true;
|
||||
let is_new = false;
|
||||
ClusterInfoVoteListener::sum_stake(
|
||||
&mut total_stake,
|
||||
&mut gossip_only_stake,
|
||||
Some(epoch_stakes),
|
||||
&vote_keypair.pubkey(),
|
||||
is_new_from_gossip,
|
||||
is_new,
|
||||
);
|
||||
assert_eq!(total_stake, 0);
|
||||
assert_eq!(gossip_only_stake, 100);
|
||||
}
|
||||
}
|
||||
|
@@ -61,6 +61,8 @@ impl SwitchForkDecision {
|
||||
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
|
||||
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
|
||||
|
||||
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct StakeLockout {
|
||||
lockout: u64,
|
||||
@@ -84,7 +86,7 @@ pub(crate) struct ComputedBankState {
|
||||
pub total_staked: u64,
|
||||
pub bank_weight: u128,
|
||||
pub lockout_intervals: LockoutIntervals,
|
||||
pub pubkey_votes: Vec<(Pubkey, Slot)>,
|
||||
pub pubkey_votes: Arc<PubkeyVotes>,
|
||||
}
|
||||
|
||||
pub struct Tower {
|
||||
@@ -258,7 +260,7 @@ impl Tower {
|
||||
total_staked,
|
||||
bank_weight,
|
||||
lockout_intervals,
|
||||
pubkey_votes,
|
||||
pubkey_votes: Arc::new(pubkey_votes),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -666,6 +668,7 @@ pub mod test {
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
@@ -785,6 +788,7 @@ pub mod test {
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let (replay_slot_sender, _replay_slot_receiver) = unbounded();
|
||||
let _ = ReplayStage::compute_bank_stats(
|
||||
&my_pubkey,
|
||||
&ancestors,
|
||||
@@ -797,6 +801,7 @@ pub mod test {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_slot_sender,
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@@ -1353,7 +1358,7 @@ pub mod test {
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: Vec<(Pubkey, Slot)> =
|
||||
let account_latest_votes: PubkeyVotes =
|
||||
accounts.iter().map(|(pubkey, _)| (*pubkey, 0)).collect();
|
||||
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
@@ -1363,7 +1368,7 @@ pub mod test {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
mut pubkey_votes,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
@@ -1375,6 +1380,7 @@ pub mod test {
|
||||
assert_eq!(stake_lockouts[&0].stake, 2);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(total_staked, 2);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
|
||||
@@ -1390,7 +1396,7 @@ pub mod test {
|
||||
//two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
|
||||
let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: Vec<(Pubkey, Slot)> = accounts
|
||||
let account_latest_votes: PubkeyVotes = accounts
|
||||
.iter()
|
||||
.map(|(pubkey, _)| (*pubkey, (MAX_LOCKOUT_HISTORY - 1) as Slot))
|
||||
.collect();
|
||||
@@ -1417,7 +1423,7 @@ pub mod test {
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
bank_weight,
|
||||
mut pubkey_votes,
|
||||
pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
@@ -1433,6 +1439,7 @@ pub mod test {
|
||||
// should be the sum of all the weights for root
|
||||
assert!(stake_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
|
||||
assert_eq!(bank_weight, expected_bank_weight);
|
||||
let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap();
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ use crate::{
|
||||
consensus::{ComputedBankState, Tower},
|
||||
fork_choice::ForkChoice,
|
||||
progress_map::ProgressMap,
|
||||
tree_diff::TreeDiff,
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::{bank::Bank, epoch_stakes::EpochStakes};
|
||||
@@ -142,10 +143,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
.map(|fork_info| fork_info.stake_voted_subtree)
|
||||
}
|
||||
|
||||
pub fn contains_slot(&self, slot: Slot) -> bool {
|
||||
self.fork_infos.contains_key(&slot)
|
||||
}
|
||||
|
||||
pub fn root(&self) -> Slot {
|
||||
self.root
|
||||
}
|
||||
@@ -249,30 +246,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
self.propagate_new_leaf(slot, parent)
|
||||
}
|
||||
|
||||
// Find all nodes reachable from `root1`, excluding subtree at `root2`
|
||||
pub fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet<Slot> {
|
||||
if !self.contains_slot(root1) {
|
||||
return HashSet::new();
|
||||
}
|
||||
let mut pending_slots = vec![root1];
|
||||
let mut reachable_set = HashSet::new();
|
||||
while !pending_slots.is_empty() {
|
||||
let current_slot = pending_slots.pop().unwrap();
|
||||
if current_slot == root2 {
|
||||
continue;
|
||||
}
|
||||
reachable_set.insert(current_slot);
|
||||
for child in self
|
||||
.children(current_slot)
|
||||
.expect("slot was discovered earlier, must exist")
|
||||
{
|
||||
pending_slots.push(*child);
|
||||
}
|
||||
}
|
||||
|
||||
reachable_set
|
||||
}
|
||||
|
||||
// Returns if the given `maybe_best_child` is the heaviest among the children
|
||||
// it's parent
|
||||
fn is_best_child(&self, maybe_best_child: Slot) -> bool {
|
||||
@@ -306,12 +279,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
AncestorIterator::new(start_slot, &self.fork_infos).collect()
|
||||
}
|
||||
|
||||
pub fn children(&self, slot: Slot) -> Option<&[Slot]> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| &fork_info.children[..])
|
||||
}
|
||||
|
||||
pub fn merge(
|
||||
&mut self,
|
||||
other: HeaviestSubtreeForkChoice,
|
||||
@@ -349,6 +316,12 @@ impl HeaviestSubtreeForkChoice {
|
||||
self.add_votes(&new_votes, epoch_stakes, epoch_schedule);
|
||||
}
|
||||
|
||||
pub fn stake_voted_at(&self, slot: Slot) -> Option<u64> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| fork_info.stake_voted_at)
|
||||
}
|
||||
|
||||
fn propagate_new_leaf(&mut self, slot: Slot, parent: Slot) {
|
||||
let parent_best_slot = self
|
||||
.best_slot(parent)
|
||||
@@ -526,13 +499,6 @@ impl HeaviestSubtreeForkChoice {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn stake_voted_at(&self, slot: Slot) -> Option<u64> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| fork_info.stake_voted_at)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn set_stake_voted_at(&mut self, slot: Slot, stake_voted_at: u64) {
|
||||
self.fork_infos.get_mut(&slot).unwrap().stake_voted_at = stake_voted_at;
|
||||
@@ -544,6 +510,18 @@ impl HeaviestSubtreeForkChoice {
|
||||
}
|
||||
}
|
||||
|
||||
impl TreeDiff for HeaviestSubtreeForkChoice {
|
||||
fn contains_slot(&self, slot: Slot) -> bool {
|
||||
self.fork_infos.contains_key(&slot)
|
||||
}
|
||||
|
||||
fn children(&self, slot: Slot) -> Option<&[Slot]> {
|
||||
self.fork_infos
|
||||
.get(&slot)
|
||||
.map(|fork_info| &fork_info.children[..])
|
||||
}
|
||||
}
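For context, a sketch of what the `tree_diff` module's trait might look like. The trait body is an assumption, pieced together from the two methods implemented here and from the `subtree_diff` routine removed from the inherent impl above; `Slot` and `HashSet` are assumed in scope as in the surrounding file:

// Assumed shape of the trait: the removed subtree_diff becomes a default method
// expressed purely in terms of contains_slot() and children().
pub trait TreeDiff {
    fn contains_slot(&self, slot: Slot) -> bool;
    fn children(&self, slot: Slot) -> Option<&[Slot]>;

    // All slots reachable from `root1`, excluding the subtree rooted at `root2`.
    fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet<Slot> {
        if !self.contains_slot(root1) {
            return HashSet::new();
        }
        let mut pending_slots = vec![root1];
        let mut reachable_set = HashSet::new();
        while let Some(current_slot) = pending_slots.pop() {
            if current_slot == root2 {
                continue;
            }
            reachable_set.insert(current_slot);
            for child in self
                .children(current_slot)
                .expect("slot was discovered earlier, must exist")
            {
                pending_slots.push(*child);
            }
        }
        reachable_set
    }
}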
|
||||
|
||||
impl ForkChoice for HeaviestSubtreeForkChoice {
|
||||
fn compute_bank_stats(
|
||||
&mut self,
|
||||
|
@@ -41,6 +41,7 @@ pub mod progress_map;
|
||||
pub mod pubkey_references;
|
||||
pub mod repair_response;
|
||||
pub mod repair_service;
|
||||
pub mod repair_weight;
|
||||
pub mod repair_weighted_traversal;
|
||||
pub mod replay_stage;
|
||||
mod result;
|
||||
@@ -62,6 +63,7 @@ pub mod sigverify_stage;
|
||||
pub mod snapshot_packager_service;
|
||||
pub mod tpu;
|
||||
pub mod transaction_status_service;
|
||||
pub mod tree_diff;
|
||||
pub mod tvu;
|
||||
pub mod validator;
|
||||
pub mod verified_vote_packets;
|
||||
|
@@ -23,14 +23,14 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
StakeState::Initialized(meta) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
StakeState::Stake(meta, _stake) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
@@ -77,6 +77,7 @@ solana_sdk::pubkeys!(
|
||||
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
|
||||
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
|
||||
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
|
||||
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
|
||||
]
|
||||
);
|
||||
|
||||
|
@@ -2,22 +2,21 @@
|
||||
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
repair_weight::RepairWeight,
|
||||
repair_weighted_traversal::Contains,
|
||||
result::Result,
|
||||
serve_repair::{RepairType, ServeRepair, DEFAULT_NONCE},
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::{thread_rng, Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
|
||||
shred::Nonce,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
|
||||
use std::{
|
||||
@@ -72,6 +71,39 @@ pub struct RepairStats {
|
||||
pub shred: RepairStatsGroup,
|
||||
pub highest_shred: RepairStatsGroup,
|
||||
pub orphan: RepairStatsGroup,
|
||||
pub get_best_orphans_us: u64,
|
||||
pub get_best_shreds_us: u64,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct RepairTiming {
|
||||
pub set_root_elapsed: u64,
|
||||
pub get_votes_elapsed: u64,
|
||||
pub add_votes_elapsed: u64,
|
||||
pub lowest_slot_elapsed: u64,
|
||||
pub update_completed_slots_elapsed: u64,
|
||||
pub get_best_orphans_elapsed: u64,
|
||||
pub get_best_shreds_elapsed: u64,
|
||||
pub send_repairs_elapsed: u64,
|
||||
}
|
||||
|
||||
impl RepairTiming {
|
||||
fn update(
|
||||
&mut self,
|
||||
set_root_elapsed: u64,
|
||||
get_votes_elapsed: u64,
|
||||
add_votes_elapsed: u64,
|
||||
lowest_slot_elapsed: u64,
|
||||
update_completed_slots_elapsed: u64,
|
||||
send_repairs_elapsed: u64,
|
||||
) {
|
||||
self.set_root_elapsed += set_root_elapsed;
|
||||
self.get_votes_elapsed += get_votes_elapsed;
|
||||
self.add_votes_elapsed += add_votes_elapsed;
|
||||
self.lowest_slot_elapsed += lowest_slot_elapsed;
|
||||
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
|
||||
self.send_repairs_elapsed += send_repairs_elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
pub const MAX_REPAIR_LENGTH: usize = 512;
|
||||
@@ -119,7 +151,7 @@ impl RepairService {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) -> Self {
|
||||
let t_repair = Builder::new()
|
||||
.name("solana-repair-service".to_string())
|
||||
@@ -131,7 +163,7 @@ impl RepairService {
|
||||
cluster_info,
|
||||
repair_info,
|
||||
&cluster_slots,
|
||||
vote_tracker,
|
||||
verified_vote_receiver,
|
||||
)
|
||||
})
|
||||
.unwrap();
|
||||
@@ -146,14 +178,18 @@ impl RepairService {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: &ClusterSlots,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) {
|
||||
let mut repair_weight = RepairWeight::new(repair_info.bank_forks.read().unwrap().root());
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut repair_timing = RepairTiming::default();
|
||||
let mut last_stats = Instant::now();
|
||||
let duplicate_slot_repair_statuses = HashMap::new();
|
||||
let duplicate_slot_repair_statuses: HashMap<Slot, DuplicateSlotRepairStatus> =
|
||||
HashMap::new();
|
||||
|
||||
Self::initialize_epoch_slots(
|
||||
blockstore,
|
||||
&cluster_info,
|
||||
@@ -164,13 +200,52 @@ impl RepairService {
|
||||
break;
|
||||
}
|
||||
|
||||
let mut set_root_elapsed;
|
||||
let mut get_votes_elapsed;
|
||||
let mut add_votes_elapsed;
|
||||
let mut lowest_slot_elapsed;
|
||||
let mut update_completed_slots_elapsed;
|
||||
let repairs = {
|
||||
let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone();
|
||||
let new_root = root_bank.slot();
|
||||
|
||||
// Purge outdated slots from the weighting heuristic
|
||||
set_root_elapsed = Measure::start("set_root_elapsed");
|
||||
repair_weight.set_root(new_root);
|
||||
set_root_elapsed.stop();
|
||||
|
||||
// Add new votes to the weighting heuristic
|
||||
get_votes_elapsed = Measure::start("get_votes_elapsed");
|
||||
let mut slot_to_vote_pubkeys: HashMap<Slot, Vec<Pubkey>> = HashMap::new();
|
||||
verified_vote_receiver
|
||||
.try_iter()
|
||||
.for_each(|(vote_pubkey, vote_slots)| {
|
||||
for slot in vote_slots {
|
||||
slot_to_vote_pubkeys
|
||||
.entry(slot)
|
||||
.or_default()
|
||||
.push(vote_pubkey);
|
||||
}
|
||||
});
|
||||
get_votes_elapsed.stop();
|
||||
|
||||
add_votes_elapsed = Measure::start("add_votes");
|
||||
repair_weight.add_votes(
|
||||
&blockstore,
|
||||
slot_to_vote_pubkeys.into_iter(),
|
||||
root_bank.epoch_stakes_map(),
|
||||
root_bank.epoch_schedule(),
|
||||
);
|
||||
add_votes_elapsed.stop();
|
||||
|
||||
lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
lowest_slot_elapsed.stop();
|
||||
update_completed_slots_elapsed = Measure::start("update_completed_slots_elapsed");
|
||||
Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, &cluster_info, &repair_info.bank_forks);
|
||||
update_completed_slots_elapsed.stop();
|
||||
/*let new_duplicate_slots = Self::find_new_duplicate_slots(
|
||||
&duplicate_slot_repair_statuses,
|
||||
blockstore,
|
||||
@@ -194,31 +269,42 @@ impl RepairService {
|
||||
&mut repair_stats,
|
||||
&repair_socket,
|
||||
);*/
|
||||
Self::generate_repairs(
|
||||
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
blockstore,
|
||||
root_bank.slot(),
|
||||
root_bank.epoch_stakes_map(),
|
||||
root_bank.epoch_schedule(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&duplicate_slot_repair_statuses,
|
||||
&vote_tracker,
|
||||
Some(&mut repair_timing),
|
||||
)
|
||||
};
|
||||
|
||||
if let Ok(repairs) = repairs {
|
||||
let mut cache = HashMap::new();
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
repair_request,
|
||||
&mut cache,
|
||||
&mut repair_stats,
|
||||
) {
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
let mut cache = HashMap::new();
|
||||
let mut send_repairs_elapsed = Measure::start("send_repairs_elapsed");
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
repair_request,
|
||||
&mut cache,
|
||||
&mut repair_stats,
|
||||
) {
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
}
|
||||
});
|
||||
send_repairs_elapsed.stop();
|
||||
repair_timing.update(
|
||||
set_root_elapsed.as_us(),
|
||||
get_votes_elapsed.as_us(),
|
||||
add_votes_elapsed.as_us(),
|
||||
lowest_slot_elapsed.as_us(),
|
||||
update_completed_slots_elapsed.as_us(),
|
||||
send_repairs_elapsed.as_us(),
|
||||
);
|
||||
|
||||
if last_stats.elapsed().as_secs() > 2 {
|
||||
let repair_total = repair_stats.shred.count
|
||||
@@ -236,7 +322,39 @@ impl RepairService {
|
||||
("repair-orphan", repair_stats.orphan.max, i64),
|
||||
);
|
||||
}
|
||||
datapoint_info!(
|
||||
"serve_repair-repair-timing",
|
||||
("set-root-elapsed", repair_timing.set_root_elapsed, i64),
|
||||
("get-votes-elapsed", repair_timing.get_votes_elapsed, i64),
|
||||
("add-votes-elapsed", repair_timing.add_votes_elapsed, i64),
|
||||
(
|
||||
"get-best-orphans-elapsed",
|
||||
repair_timing.get_best_orphans_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get-best-shreds-elapsed",
|
||||
repair_timing.get_best_shreds_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"lowest-slot-elapsed",
|
||||
repair_timing.lowest_slot_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"update-completed-slots-elapsed",
|
||||
repair_timing.update_completed_slots_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"send-repairs-elapsed",
|
||||
repair_timing.send_repairs_elapsed,
|
||||
i64
|
||||
),
|
||||
);
|
||||
repair_stats = RepairStats::default();
|
||||
repair_timing = RepairTiming::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(REPAIR_MS));
|
||||
@@ -331,31 +449,6 @@ impl RepairService {
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_repairs(
|
||||
blockstore: &Blockstore,
|
||||
root: Slot,
|
||||
max_repairs: usize,
|
||||
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
) -> Result<Vec<RepairType>> {
|
||||
// Slot height and shred indexes for shreds we want to repair
|
||||
let mut repairs: Vec<RepairType> = vec![];
|
||||
Self::generate_repairs_by_level(
|
||||
blockstore,
|
||||
&mut repairs,
|
||||
max_repairs,
|
||||
root,
|
||||
duplicate_slot_repair_statuses,
|
||||
vote_tracker,
|
||||
);
|
||||
|
||||
// Try to resolve orphans in blockstore
|
||||
let orphans = blockstore.orphans_iterator(root + 1).unwrap();
|
||||
Self::generate_repairs_for_orphans(orphans, &mut repairs);
|
||||
|
||||
Ok(repairs)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn generate_duplicate_repairs_for_slot(
|
||||
blockstore: &Blockstore,
|
||||
@@ -559,81 +652,6 @@ impl RepairService {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_repairs_for_orphans(
|
||||
orphans: impl Iterator<Item = u64>,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
) {
|
||||
repairs.extend(orphans.take(MAX_ORPHANS).map(RepairType::Orphan));
|
||||
}
|
||||
|
||||
/// Repairs any fork starting at the input slot
|
||||
fn generate_repairs_by_level(
|
||||
blockstore: &Blockstore,
|
||||
repairs: &mut Vec<RepairType>,
|
||||
max_repairs: usize,
|
||||
slot: Slot,
|
||||
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
|
||||
vote_tracker: &Arc<VoteTracker>,
|
||||
) {
|
||||
let mut seed = [0u8; 32];
|
||||
thread_rng().fill(&mut seed);
|
||||
let rng = &mut ChaChaRng::from_seed(seed);
|
||||
let mut pending_slots = vec![slot];
|
||||
while repairs.len() < max_repairs && !pending_slots.is_empty() {
|
||||
pending_slots.retain(|slot| !duplicate_slot_repair_statuses.contains_key(slot));
|
||||
let mut next_pending_slots = vec![];
|
||||
let mut level_repairs = HashMap::new();
|
||||
for slot in &pending_slots {
|
||||
if let Some(slot_meta) = blockstore.meta(*slot).unwrap() {
|
||||
let new_repairs = Self::generate_repairs_for_slot(
|
||||
blockstore,
|
||||
*slot,
|
||||
&slot_meta,
|
||||
std::usize::MAX,
|
||||
);
|
||||
if !new_repairs.is_empty() {
|
||||
level_repairs.insert(*slot, new_repairs);
|
||||
}
|
||||
next_pending_slots.extend(slot_meta.next_slots);
|
||||
}
|
||||
}
|
||||
|
||||
if !level_repairs.is_empty() {
|
||||
let mut slots_to_repair: Vec<_> = level_repairs.keys().cloned().collect();
|
||||
let mut weights: Vec<_> = {
|
||||
let r_vote_tracker = vote_tracker.slot_vote_trackers.read().unwrap();
|
||||
slots_to_repair
|
||||
.iter()
|
||||
.map(|slot| {
|
||||
if let Some(slot_vote_tracker) = r_vote_tracker.get(slot) {
|
||||
std::cmp::max(slot_vote_tracker.read().unwrap().total_stake, 1)
|
||||
} else {
|
||||
// should it be something else?
|
||||
1
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
|
||||
let mut weighted_index = WeightedIndex::new(weights.clone()).unwrap();
|
||||
while repairs.len() < max_repairs && !level_repairs.is_empty() {
|
||||
let index = weighted_index.sample(rng);
|
||||
let slot_repairs = level_repairs.get_mut(&slots_to_repair[index]).unwrap();
|
||||
repairs.push(slot_repairs.remove(0));
|
||||
if slot_repairs.is_empty() {
|
||||
level_repairs.remove(&slots_to_repair[index]);
|
||||
slots_to_repair.remove(index);
|
||||
weights.remove(index);
|
||||
if !weights.is_empty() {
|
||||
weighted_index = WeightedIndex::new(weights.clone()).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pending_slots = next_pending_slots;
|
||||
}
|
||||
}
|
||||
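generate_repairs_by_level above picks which slot to repair next by sampling slots in proportion to the stake observed voting on them, falling back to a weight of 1 for slots with no recorded votes. A minimal, self-contained sketch of that stake-weighted sampling with rand's WeightedIndex (the slot numbers and stakes are made up):

use rand::distributions::{Distribution, WeightedIndex};
use rand::thread_rng;

fn main() {
    let slots: Vec<u64> = vec![10, 11, 12];
    // Stake seen voting on each slot; unvoted slots keep a floor weight of 1.
    let weights: Vec<u64> = vec![5_000, 1, 750];

    let dist = WeightedIndex::new(&weights).unwrap();
    let mut rng = thread_rng();

    // Heavily voted slots are selected for repair more often.
    let picked = slots[dist.sample(&mut rng)];
    println!("repair slot {} next", picked);
}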
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
@@ -705,6 +723,7 @@ mod test {
|
||||
use solana_runtime::genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs};
|
||||
use solana_sdk::signature::Signer;
|
||||
use solana_vote_program::vote_transaction;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[test]
|
||||
pub fn test_repair_orphan() {
|
||||
@@ -717,11 +736,18 @@ mod test {
|
||||
let (shreds2, _) = make_slot_entries(5, 2, 1);
|
||||
shreds.extend(shreds2);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new(), &vote_tracker)
|
||||
.unwrap(),
|
||||
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
&HashMap::new(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
vec![RepairType::Orphan(2), RepairType::HighestShred(0, 0)]
|
||||
);
|
||||
}
|
||||
|
||||
@@ -739,12 +765,19 @@ mod test {
|
||||
// Write this shred to slot 2, should chain to slot 0, which we haven't received
|
||||
// any shreds for
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
// Check that repair tries to patch the empty slot
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new(), &vote_tracker)
|
||||
.unwrap(),
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
&HashMap::new(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
vec![RepairType::HighestShred(0, 0)]
|
||||
);
|
||||
}
|
||||
@@ -789,83 +822,36 @@ mod test {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
std::usize::MAX,
|
||||
&HashMap::new(),
|
||||
&vote_tracker
|
||||
)
|
||||
.unwrap(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
expected
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
expected.len() - 2,
|
||||
&HashMap::new(),
|
||||
&vote_tracker,
|
||||
)
|
||||
.unwrap()[..],
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
expected.len() - 2,
|
||||
&HashSet::new(),
|
||||
None
|
||||
)[..],
|
||||
expected[0..expected.len() - 2]
|
||||
);
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_repairs_distributed_across_slots() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
|
||||
let num_entries_per_slot = 100;
|
||||
|
||||
// Create some shreds
|
||||
for i in 1..10 {
|
||||
let (shreds, _) = make_slot_entries(i, 0, num_entries_per_slot as u64);
|
||||
|
||||
// Only insert the first shred
|
||||
blockstore
|
||||
.insert_shreds(shreds[..1].to_vec(), None, false)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let repairs = RepairService::generate_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
num_entries_per_slot,
|
||||
&HashMap::new(),
|
||||
&vote_tracker,
|
||||
)
|
||||
.unwrap();
|
||||
let mut repairs_slots = HashMap::new();
|
||||
for repair in repairs {
|
||||
match repair {
|
||||
RepairType::Shred(slot, _shred_index) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
RepairType::HighestShred(slot, _shred_index) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
*repairs_slots.entry(slot).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
for i in 1..10 {
|
||||
assert!(repairs_slots.contains_key(&i));
|
||||
}
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_generate_highest_repair() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
@@ -887,16 +873,17 @@ mod test {
|
||||
let expected: Vec<RepairType> =
|
||||
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
|
||||
|
||||
let vote_tracker = Arc::new(VoteTracker::default());
|
||||
let mut repair_weight = RepairWeight::new(0);
|
||||
assert_eq!(
|
||||
RepairService::generate_repairs(
|
||||
repair_weight.get_best_weighted_repairs(
|
||||
&blockstore,
|
||||
0,
|
||||
std::usize::MAX,
|
||||
&HashMap::new(),
|
||||
&vote_tracker
|
||||
)
|
||||
.unwrap(),
|
||||
&EpochSchedule::default(),
|
||||
MAX_ORPHANS,
|
||||
MAX_REPAIR_LENGTH,
|
||||
&HashSet::new(),
|
||||
None
|
||||
),
|
||||
expected
|
||||
);
|
||||
}
|
||||
|
core/src/repair_weight.rs: new file, 1240 lines (diff suppressed because it is too large)
@@ -1,6 +1,6 @@
|
||||
use crate::{
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, repair_service::RepairService,
|
||||
serve_repair::RepairType,
|
||||
serve_repair::RepairType, tree_diff::TreeDiff,
|
||||
};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::clock::Slot;
|
||||
|
@@ -8,7 +8,7 @@ use crate::{
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::BlockCommitmentCache,
|
||||
commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
|
||||
consensus::{ComputedBankState, StakeLockout, SwitchForkDecision, Tower},
|
||||
consensus::{ComputedBankState, PubkeyVotes, StakeLockout, SwitchForkDecision, Tower},
|
||||
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
@@ -19,6 +19,7 @@ use crate::{
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
block_error::BlockError,
|
||||
@@ -61,6 +62,9 @@ pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
|
||||
pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64;
|
||||
pub const MAX_UNCONFIRMED_SLOTS: usize = 5;
|
||||
|
||||
pub type ReplayVotesSender = CrossbeamSender<Arc<PubkeyVotes>>;
|
||||
pub type ReplayVotesReceiver = CrossbeamReceiver<Arc<PubkeyVotes>>;
|
||||
|
||||
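ReplayVotesSender and ReplayVotesReceiver are ordinary crossbeam channel endpoints carrying the votes observed while replaying a bank, wrapped in an Arc so one allocation can be handed to every listener. A minimal sketch, assuming PubkeyVotes is a list of (vote pubkey, slot) pairs as the tests later in this diff suggest:

use crossbeam_channel::unbounded;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::sync::Arc;

// Assumed shape of consensus::PubkeyVotes for this sketch.
type PubkeyVotes = Vec<(Pubkey, Slot)>;

fn main() {
    let (replay_votes_sender, replay_votes_receiver) = unbounded::<Arc<PubkeyVotes>>();

    // ReplayStage side: publish the votes found in a freshly computed bank.
    let votes: PubkeyVotes = vec![(Pubkey::default(), 1)];
    let _ = replay_votes_sender.send(Arc::new(votes));

    // Listener side (e.g. the vote listener): drain without blocking.
    for batch in replay_votes_receiver.try_iter() {
        println!("saw {} replayed votes", batch.len());
    }
}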
#[derive(PartialEq, Debug)]
|
||||
pub(crate) enum HeaviestForkFailures {
|
||||
LockedOut(u64),
|
||||
@@ -221,6 +225,7 @@ impl ReplayStage {
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
|
||||
replay_votes_sender: ReplayVotesSender,
|
||||
) -> Self {
|
||||
let ReplayStageConfig {
|
||||
my_pubkey,
|
||||
@@ -387,6 +392,7 @@ impl ReplayStage {
|
||||
&mut all_pubkeys,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut bank_weight_fork_choice,
|
||||
&replay_votes_sender,
|
||||
);
|
||||
compute_bank_stats_time.stop();
|
||||
|
||||
@@ -1303,6 +1309,7 @@ impl ReplayStage {
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
heaviest_subtree_fork_choice: &mut dyn ForkChoice,
|
||||
bank_weight_fork_choice: &mut dyn ForkChoice,
|
||||
replay_votes_sender: &ReplayVotesSender,
|
||||
) -> Vec<Slot> {
|
||||
frozen_banks.sort_by_key(|bank| bank.slot());
|
||||
let mut new_stats = vec![];
|
||||
@@ -1324,6 +1331,9 @@ impl ReplayStage {
|
||||
&ancestors,
|
||||
all_pubkeys,
|
||||
);
|
||||
// Notify any listeners of the votes found in this newly computed
|
||||
// bank
|
||||
let _ = replay_votes_sender.send(computed_bank_state.pubkey_votes.clone());
|
||||
heaviest_subtree_fork_choice.compute_bank_stats(
|
||||
&bank,
|
||||
tower,
|
||||
@@ -1853,7 +1863,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_switch_vote_slot(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
|
||||
@@ -1862,7 +1873,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_heaviest_subtree_fork_choice(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
|
||||
@@ -1938,146 +1950,179 @@ pub(crate) mod tests {
|
||||
assert!(ReplayStage::is_partition_detected(&ancestors, 4, 3));
|
||||
}
|
||||
|
||||
struct ReplayBlockstoreComponents {
|
||||
blockstore: Arc<Blockstore>,
|
||||
validator_voting_keys: HashMap<Pubkey, Pubkey>,
|
||||
progress: ProgressMap,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: Arc<LeaderScheduleCache>,
|
||||
rpc_subscriptions: Arc<RpcSubscriptions>,
|
||||
}
|
||||
|
||||
fn replay_blockstore_components() -> ReplayBlockstoreComponents {
|
||||
// Setup blockstore
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let validator_authorized_voter_keypairs: Vec<_> =
|
||||
(0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
|
||||
|
||||
let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
|
||||
.iter()
|
||||
.map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_authorized_voter_keypairs,
|
||||
100,
|
||||
);
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
|
||||
// ProgressMap
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
|
||||
// Leader schedule cache
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
|
||||
// BankForks
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
|
||||
|
||||
// RpcSubscriptions
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
|
||||
ReplayBlockstoreComponents {
|
||||
blockstore,
|
||||
validator_voting_keys,
|
||||
progress,
|
||||
bank_forks,
|
||||
leader_schedule_cache,
|
||||
rpc_subscriptions,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_child_slots_of_same_parent() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
// Setup
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let validator_authorized_voter_keypairs: Vec<_> = (0..20)
|
||||
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
|
||||
.collect();
|
||||
let ReplayBlockstoreComponents {
|
||||
blockstore,
|
||||
validator_voting_keys,
|
||||
mut progress,
|
||||
bank_forks,
|
||||
leader_schedule_cache,
|
||||
rpc_subscriptions,
|
||||
} = replay_blockstore_components();
|
||||
|
||||
let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
|
||||
.iter()
|
||||
.map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_authorized_voter_keypairs,
|
||||
100,
|
||||
);
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
// Insert a non-root bank so that the propagation logic will update this
|
||||
// bank
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(0).unwrap(),
|
||||
&leader_schedule_cache.slot_leader_at(1, None).unwrap(),
|
||||
1,
|
||||
);
|
||||
progress.insert(
|
||||
1,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank1,
|
||||
bank1.collector_id(),
|
||||
validator_voting_keys.get(&bank1.collector_id()).unwrap(),
|
||||
Some(0),
|
||||
0,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
0,
|
||||
),
|
||||
);
|
||||
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
|
||||
bank1.freeze();
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
|
||||
// Insert a non-root bank so that the propagation logic will update this
|
||||
// bank
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.get(0).unwrap(),
|
||||
&leader_schedule_cache.slot_leader_at(1, None).unwrap(),
|
||||
1,
|
||||
);
|
||||
progress.insert(
|
||||
1,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank1,
|
||||
bank1.collector_id(),
|
||||
&validator_voting_keys.get(&bank1.collector_id()).unwrap(),
|
||||
Some(0),
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
|
||||
bank1.freeze();
|
||||
bank_forks.insert(bank1);
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
// Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
// chaining to slot 1
|
||||
let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
// // There are 20 equally staked accounts, of which 3 have built
|
||||
// banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
|
||||
// we should see 3 validators in bank 1's propagated_validator set.
|
||||
let expected_leader_slots = vec![
|
||||
1,
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
];
|
||||
for slot in expected_leader_slots {
|
||||
let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
|
||||
let vote_key = validator_voting_keys.get(&leader).unwrap();
|
||||
assert!(progress
|
||||
.get_propagated_stats(1)
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
&mut progress,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_some());
|
||||
|
||||
// // There are 20 equally staked accounts, of which 3 have built
|
||||
// banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
|
||||
// we should see 3 validators in bank 1's propagated_validator set.
|
||||
let expected_leader_slots = vec![
|
||||
1,
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
2 * NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
];
|
||||
for slot in expected_leader_slots {
|
||||
let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
|
||||
let vote_key = validator_voting_keys.get(&leader).unwrap();
|
||||
assert!(progress
|
||||
.get_propagated_stats(1)
|
||||
.unwrap()
|
||||
.propagated_validators
|
||||
.contains(vote_key));
|
||||
}
|
||||
.propagated_validators
|
||||
.contains(vote_key));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2695,6 +2740,7 @@ pub(crate) mod tests {
|
||||
.cloned()
|
||||
.collect();
|
||||
let tower = Tower::new_for_tests(0, 0.67);
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
let newly_computed = ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&ancestors,
|
||||
@@ -2707,8 +2753,13 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
// bank 0 has no votes, should not send any votes on the channel
|
||||
assert_eq!(replay_votes_receiver.try_recv().unwrap(), Arc::new(vec![]));
|
||||
assert_eq!(newly_computed, vec![0]);
|
||||
|
||||
// The only vote is in bank 1, and bank_forks does not currently contain
|
||||
// bank 1, so no slot should be confirmed.
|
||||
{
|
||||
@@ -2750,8 +2801,15 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
// Bank 1 had one vote, ensure that `compute_bank_stats` notifies listeners
|
||||
// via `replay_votes_receiver`.
|
||||
assert_eq!(
|
||||
replay_votes_receiver.try_recv().unwrap(),
|
||||
Arc::new(vec![(my_keypairs.vote_keypair.pubkey(), 0)])
|
||||
);
|
||||
assert_eq!(newly_computed, vec![1]);
|
||||
{
|
||||
let fork_progress = progress.get(&1).unwrap();
|
||||
@@ -2785,8 +2843,10 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
// No new stats should have been computed
|
||||
assert!(replay_votes_receiver.try_iter().next().is_none());
|
||||
assert!(newly_computed.is_empty());
|
||||
}
|
||||
|
||||
@@ -2811,6 +2871,7 @@ pub(crate) mod tests {
|
||||
.collect();
|
||||
|
||||
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&ancestors,
|
||||
@@ -2823,6 +2884,7 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
@@ -2885,6 +2947,7 @@ pub(crate) mod tests {
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
ReplayStage::compute_bank_stats(
|
||||
&node_pubkey,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
@@ -2897,6 +2960,7 @@ pub(crate) mod tests {
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut vote_simulator.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
&replay_votes_sender,
|
||||
);
|
||||
|
||||
frozen_banks.sort_by_key(|bank| bank.slot());
|
||||
|
@@ -17,6 +17,7 @@ pub enum Error {
|
||||
RecvError(std::sync::mpsc::RecvError),
|
||||
TryCrossbeamRecvError(crossbeam_channel::TryRecvError),
|
||||
CrossbeamRecvTimeoutError(crossbeam_channel::RecvTimeoutError),
|
||||
ReadyTimeoutError,
|
||||
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
|
||||
CrossbeamSendError,
|
||||
TryRecvError(std::sync::mpsc::TryRecvError),
|
||||
@@ -61,6 +62,11 @@ impl std::convert::From<crossbeam_channel::RecvTimeoutError> for Error {
|
||||
Error::CrossbeamRecvTimeoutError(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<crossbeam_channel::ReadyTimeoutError> for Error {
|
||||
fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error {
|
||||
Error::ReadyTimeoutError
|
||||
}
|
||||
}
|
||||
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
|
||||
fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
|
||||
Error::RecvTimeoutError(e)
|
||||
|
@@ -1,8 +1,8 @@
|
||||
//! The `retransmit_stage` retransmits shreds between validators
|
||||
|
||||
use crate::cluster_info_vote_listener::VoteTracker;
|
||||
use crate::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
contact_info::ContactInfo,
|
||||
repair_service::DuplicateSlotsResetSender,
|
||||
@@ -414,7 +414,7 @@ impl RetransmitStage {
|
||||
shred_version: u16,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) -> Self {
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
@@ -459,7 +459,7 @@ impl RetransmitStage {
|
||||
rv && is_connected
|
||||
},
|
||||
cluster_slots,
|
||||
vote_tracker,
|
||||
verified_vote_receiver,
|
||||
);
|
||||
|
||||
let thread_hdls = t_retransmit;
|
||||
|
core/src/rpc.rs: 1484 lines changed (diff suppressed because it is too large)
@@ -4,6 +4,7 @@ use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;

pub enum RpcCustomError {
    NonexistentClusterRoot {
@@ -17,6 +18,7 @@ pub enum RpcCustomError {
    SendTransactionPreflightFailure {
        message: String,
    },
    SendTransactionIsNotSigned,
}

impl From<RpcCustomError> for Error {
@@ -49,6 +51,11 @@ impl From<RpcCustomError> for Error {
                message,
                data: None,
            },
            RpcCustomError::SendTransactionIsNotSigned => Self {
                code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
                message: "Transaction is not signed".to_string(),
                data: None,
            },
        }
    }
}
|
||||
|
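The new SendTransactionIsNotSigned variant maps onto the next reserved JSON-RPC server error code, following the same pattern as the existing arms. A sketch of roughly what the conversion produces, assuming the jsonrpc_core::Error shape used above:

use jsonrpc_core::{Error, ErrorCode};

const JSON_RPC_SERVER_ERROR_3: i64 = -32003;

// Roughly what Error::from(RpcCustomError::SendTransactionIsNotSigned) yields.
fn not_signed_error() -> Error {
    Error {
        code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
        message: "Transaction is not signed".to_string(),
        data: None,
    }
}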
@@ -4,8 +4,10 @@ use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
use solana_client::rpc_response::{
|
||||
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_client::{
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult},
|
||||
};
|
||||
#[cfg(test)]
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
@@ -37,9 +39,9 @@ pub trait RpcSolPubSub {
|
||||
fn account_subscribe(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcAccount>>,
|
||||
subscriber: Subscriber<RpcResponse<UiAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
);
|
||||
|
||||
// Unsubscribe from account notification subscription.
|
||||
@@ -63,7 +65,7 @@ pub trait RpcSolPubSub {
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
);
|
||||
|
||||
// Unsubscribe from account notification subscription.
|
||||
@@ -177,9 +179,9 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
fn account_subscribe(
|
||||
&self,
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcAccount>>,
|
||||
subscriber: Subscriber<RpcResponse<UiAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
) {
|
||||
match param::<Pubkey>(&pubkey_str, "pubkey") {
|
||||
Ok(pubkey) => {
|
||||
@@ -187,7 +189,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
let sub_id = SubscriptionId::Number(id as u64);
|
||||
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
|
||||
self.subscriptions
|
||||
.add_account_subscription(pubkey, commitment, sub_id, subscriber)
|
||||
.add_account_subscription(pubkey, config, sub_id, subscriber)
|
||||
}
|
||||
Err(e) => subscriber.reject(e).unwrap(),
|
||||
}
|
||||
@@ -215,7 +217,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
|
||||
pubkey_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
) {
|
||||
match param::<Pubkey>(&pubkey_str, "pubkey") {
|
||||
Ok(pubkey) => {
|
||||
@@ -223,7 +225,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
let sub_id = SubscriptionId::Number(id as u64);
|
||||
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
|
||||
self.subscriptions
|
||||
.add_program_subscription(pubkey, commitment, sub_id, subscriber)
|
||||
.add_program_subscription(pubkey, config, sub_id, subscriber)
|
||||
}
|
||||
Err(e) => subscriber.reject(e).unwrap(),
|
||||
}
|
||||
@@ -362,6 +364,7 @@ mod tests {
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
use solana_account_decoder::{parse_account_data::parse_account_data, UiAccountEncoding};
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
@@ -377,7 +380,7 @@ mod tests {
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_program, system_transaction,
|
||||
system_instruction, system_program, system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_vote_program::vote_transaction;
|
||||
@@ -556,7 +559,10 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
contract_state.pubkey().to_string(),
|
||||
Some(CommitmentConfig::recent()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
|
||||
@@ -629,6 +635,94 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_subscribe_with_encoding() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair: alice,
|
||||
..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let nonce_account = Keypair::new();
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore_bank(
|
||||
blockstore,
|
||||
bank_forks.read().unwrap().get(1).unwrap().clone(),
|
||||
1,
|
||||
),
|
||||
)),
|
||||
)),
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(
|
||||
session,
|
||||
subscriber,
|
||||
nonce_account.pubkey().to_string(),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
}),
|
||||
);
|
||||
|
||||
let ixs = system_instruction::create_nonce_account(
|
||||
&alice.pubkey(),
|
||||
&nonce_account.pubkey(),
|
||||
&alice.pubkey(),
|
||||
100,
|
||||
);
|
||||
let message = Message::new(&ixs, Some(&alice.pubkey()));
|
||||
let tx = Transaction::new(&[&alice, &nonce_account], message, blockhash);
|
||||
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
// Test signature confirmation notification #1
|
||||
let expected_data = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(1)
|
||||
.unwrap()
|
||||
.get_account(&nonce_account.pubkey())
|
||||
.unwrap()
|
||||
.data;
|
||||
let expected_data = parse_account_data(&system_program::id(), &expected_data).unwrap();
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 1 },
|
||||
"value": {
|
||||
"owner": system_program::id().to_string(),
|
||||
"lamports": 100,
|
||||
"data": expected_data,
|
||||
"executable": false,
|
||||
"rentEpoch": 1,
|
||||
},
|
||||
},
|
||||
"subscription": 0,
|
||||
}
|
||||
});
|
||||
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
}
|
||||
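With this change a subscriber passes an RpcAccountInfoConfig rather than a bare commitment, which is how the test above requests jsonParsed account data. A sketch of building that config, assuming the struct's fields at this point are exactly commitment and encoding as shown in this diff:

use solana_account_decoder::UiAccountEncoding;
use solana_client::rpc_config::RpcAccountInfoConfig;
use solana_sdk::commitment_config::CommitmentConfig;

fn parsed_account_config() -> RpcAccountInfoConfig {
    RpcAccountInfoConfig {
        // Notify at "recent" commitment and decode known program accounts to JSON.
        commitment: Some(CommitmentConfig::recent()),
        encoding: Some(UiAccountEncoding::JsonParsed),
    }
}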
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_unsubscribe() {
|
||||
@@ -703,7 +797,10 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
bob.pubkey().to_string(),
|
||||
Some(CommitmentConfig::root()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
|
||||
@@ -756,7 +853,10 @@ mod tests {
|
||||
session,
|
||||
subscriber,
|
||||
bob.pubkey().to_string(),
|
||||
Some(CommitmentConfig::root()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
}),
|
||||
);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
|
||||
@@ -921,11 +1021,15 @@ mod tests {
|
||||
});
|
||||
|
||||
// Process votes and check they were notified.
|
||||
let (s, _r) = unbounded();
|
||||
let (_replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::get_and_process_votes_for_tests(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
0,
|
||||
rpc.subscriptions.clone(),
|
||||
&s,
|
||||
&replay_votes_receiver,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
|
@@ -24,7 +24,7 @@ use std::{
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
use tokio::runtime;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@@ -33,6 +33,7 @@ pub struct JsonRpcService {
|
||||
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
|
||||
|
||||
close_handle: Option<CloseHandle>,
|
||||
runtime: runtime::Runtime,
|
||||
}
|
||||
|
||||
struct RpcRequestMiddleware {
|
||||
@@ -98,6 +99,9 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
|
||||
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
// Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2
|
||||
use tokio_01::prelude::*;
|
||||
|
||||
let stem = path.split_at(1).1; // Drop leading '/' from path
|
||||
let filename = {
|
||||
match path {
|
||||
@@ -116,10 +120,10 @@ impl RpcRequestMiddleware {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(
|
||||
tokio_fs::file::File::open(filename)
|
||||
tokio_fs_01::file::File::open(filename)
|
||||
.and_then(|file| {
|
||||
let buf: Vec<u8> = Vec::new();
|
||||
tokio_io::io::read_to_end(file, buf)
|
||||
tokio_io_01::io::read_to_end(file, buf)
|
||||
.and_then(|item| Ok(hyper::Response::new(item.1.into())))
|
||||
.or_else(|_| Ok(RpcRequestMiddleware::internal_server_error()))
|
||||
})
|
||||
@@ -256,6 +260,27 @@ impl JsonRpcService {
|
||||
&exit_send_transaction_service,
|
||||
));
|
||||
|
||||
let mut runtime = runtime::Builder::new()
|
||||
.threaded_scheduler()
|
||||
.thread_name("rpc-runtime")
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("Runtime");
|
||||
|
||||
let bigtable_ledger_storage = if config.enable_bigtable_ledger_storage {
|
||||
runtime
|
||||
.block_on(solana_storage_bigtable::LedgerStorage::new(false))
|
||||
.map(|x| {
|
||||
info!("BigTable ledger storage initialized");
|
||||
Some(x)
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
error!("Failed to initialize BigTable ledger storage: {:?}", err);
|
||||
None
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
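The BigTable client is async, so the service now builds a multi-threaded tokio runtime up front and blocks on the one-off storage initialization before the synchronous RPC machinery starts. A minimal sketch of that pattern with the tokio 0.2-style builder shown above; the async init function here is a stand-in, not the real LedgerStorage constructor:

use tokio::runtime;

// Stand-in for solana_storage_bigtable::LedgerStorage::new(..).
async fn init_storage() -> Result<String, String> {
    Ok("bigtable ledger storage".to_string())
}

fn main() {
    let mut rt = runtime::Builder::new()
        .threaded_scheduler()
        .thread_name("rpc-runtime")
        .enable_all()
        .build()
        .expect("Runtime");

    // Block once at startup; keep the runtime around for later async calls.
    let storage = rt.block_on(init_storage()).ok();
    println!("initialized: {}", storage.is_some());
}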
let request_processor = JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
bank_forks.clone(),
|
||||
@@ -266,6 +291,8 @@ impl JsonRpcService {
|
||||
cluster_info,
|
||||
genesis_hash,
|
||||
send_transaction_service,
|
||||
&runtime,
|
||||
bigtable_ledger_storage,
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -325,6 +352,7 @@ impl JsonRpcService {
|
||||
.register_exit(Box::new(move || close_handle_.close()));
|
||||
Self {
|
||||
thread_hdl,
|
||||
runtime,
|
||||
#[cfg(test)]
|
||||
request_processor: test_request_processor,
|
||||
close_handle: Some(close_handle),
|
||||
@@ -338,6 +366,7 @@ impl JsonRpcService {
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.runtime.shutdown_background();
|
||||
self.thread_hdl.join()
|
||||
}
|
||||
}
|
||||
|
@@ -8,8 +8,11 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_client::rpc_response::{
|
||||
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_client::{
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_filter::RpcFilterType,
|
||||
rpc_response::{Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult},
|
||||
};
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::bank::Bank;
|
||||
@@ -33,7 +36,9 @@ use std::{
|
||||
iter,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
};
|
||||
use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2
|
||||
use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
const RECEIVE_DELAY_MILLIS: u64 = 100;
|
||||
|
||||
@@ -86,29 +91,44 @@ impl std::fmt::Debug for NotificationEntry {
|
||||
}
|
||||
}
|
||||
|
||||
struct SubscriptionData<S> {
|
||||
struct SubscriptionData<S, T> {
|
||||
sink: Sink<S>,
|
||||
commitment: CommitmentConfig,
|
||||
last_notified_slot: RwLock<Slot>,
|
||||
config: Option<T>,
|
||||
}
|
||||
type RpcAccountSubscriptions =
|
||||
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<RpcAccount>>>>>;
|
||||
type RpcProgramSubscriptions =
|
||||
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<RpcKeyedAccount>>>>>;
|
||||
#[derive(Default, Clone)]
|
||||
struct ProgramConfig {
|
||||
filters: Vec<RpcFilterType>,
|
||||
encoding: Option<UiAccountEncoding>,
|
||||
}
|
||||
type RpcAccountSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Pubkey,
|
||||
HashMap<SubscriptionId, SubscriptionData<Response<UiAccount>, UiAccountEncoding>>,
|
||||
>,
|
||||
>;
|
||||
type RpcProgramSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Pubkey,
|
||||
HashMap<SubscriptionId, SubscriptionData<Response<RpcKeyedAccount>, ProgramConfig>>,
|
||||
>,
|
||||
>;
|
||||
type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>>>>,
|
||||
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, ()>>>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
||||
type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>;
|
||||
type RpcRootSubscriptions = RwLock<HashMap<SubscriptionId, Sink<Slot>>>;
|
||||
|
||||
fn add_subscription<K, S>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S>>>,
|
||||
fn add_subscription<K, S, T>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>,
|
||||
hashmap_key: K,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<S>,
|
||||
last_notified_slot: Slot,
|
||||
config: Option<T>,
|
||||
) where
|
||||
K: Eq + Hash,
|
||||
S: Clone,
|
||||
@@ -119,6 +139,7 @@ fn add_subscription<K, S>(
|
||||
sink,
|
||||
commitment,
|
||||
last_notified_slot: RwLock::new(last_notified_slot),
|
||||
config,
|
||||
};
|
||||
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
|
||||
current_hashmap.insert(sub_id, subscription_data);
|
||||
@@ -129,8 +150,8 @@ fn add_subscription<K, S>(
|
||||
subscriptions.insert(hashmap_key, hashmap);
|
||||
}
|
||||
|
||||
fn remove_subscription<K, S>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S>>>,
|
||||
fn remove_subscription<K, S, T>(
|
||||
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>,
|
||||
sub_id: &SubscriptionId,
|
||||
) -> bool
|
||||
where
|
||||
@@ -152,8 +173,8 @@ where
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn check_commitment_and_notify<K, S, B, F, X>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, SubscriptionData<Response<S>>>>,
|
||||
fn check_commitment_and_notify<K, S, B, F, X, T>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, SubscriptionData<Response<S>, T>>>,
|
||||
hashmap_key: &K,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cache_slot_info: &CacheSlotInfo,
|
||||
@@ -165,8 +186,9 @@ where
|
||||
K: Eq + Hash + Clone + Copy,
|
||||
S: Clone + Serialize,
|
||||
B: Fn(&Bank, &K) -> X,
|
||||
F: Fn(X, Slot) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
F: Fn(X, Slot, Option<T>) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
X: Clone + Serialize + Default,
|
||||
T: Clone,
|
||||
{
|
||||
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
|
||||
if let Some(hashmap) = subscriptions.get(hashmap_key) {
|
||||
@@ -176,6 +198,7 @@ where
|
||||
sink,
|
||||
commitment,
|
||||
last_notified_slot,
|
||||
config,
|
||||
},
|
||||
) in hashmap.iter()
|
||||
{
|
||||
@@ -195,7 +218,8 @@ where
|
||||
.unwrap_or_default()
|
||||
};
|
||||
let mut w_last_notified_slot = last_notified_slot.write().unwrap();
|
||||
let (filter_results, result_slot) = filter_results(results, *w_last_notified_slot);
|
||||
let (filter_results, result_slot) =
|
||||
filter_results(results, *w_last_notified_slot, config.as_ref().cloned());
|
||||
for result in filter_results {
|
||||
notifier.notify(
|
||||
Response {
|
||||
@@ -227,12 +251,17 @@ impl RpcNotifier {
|
||||
fn filter_account_result(
|
||||
result: Option<(Account, Slot)>,
|
||||
last_notified_slot: Slot,
|
||||
) -> (Box<dyn Iterator<Item = RpcAccount>>, Slot) {
|
||||
encoding: Option<UiAccountEncoding>,
|
||||
) -> (Box<dyn Iterator<Item = UiAccount>>, Slot) {
|
||||
if let Some((account, fork)) = result {
|
||||
// If fork < last_notified_slot this means that we last notified for a fork
|
||||
// and should notify that the account state has been reverted.
|
||||
if fork != last_notified_slot {
|
||||
return (Box::new(iter::once(RpcAccount::encode(account))), fork);
|
||||
let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
|
||||
return (
|
||||
Box::new(iter::once(UiAccount::encode(account, encoding))),
|
||||
fork,
|
||||
);
|
||||
}
|
||||
}
|
||||
(Box::new(iter::empty()), last_notified_slot)
|
||||
@@ -241,6 +270,7 @@ fn filter_account_result(
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
last_notified_slot: Slot,
|
||||
_config: Option<()>,
|
||||
) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) {
|
||||
(
|
||||
Box::new(
|
||||
@@ -255,14 +285,24 @@ fn filter_signature_result(
|
||||
fn filter_program_results(
|
||||
accounts: Vec<(Pubkey, Account)>,
|
||||
last_notified_slot: Slot,
|
||||
config: Option<ProgramConfig>,
|
||||
) -> (Box<dyn Iterator<Item = RpcKeyedAccount>>, Slot) {
|
||||
let config = config.unwrap_or_default();
|
||||
let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary);
|
||||
let filters = config.filters;
|
||||
(
|
||||
Box::new(
|
||||
accounts
|
||||
.into_iter()
|
||||
.map(|(pubkey, account)| RpcKeyedAccount {
|
||||
.filter(move |(_, account)| {
|
||||
filters.iter().all(|filter_type| match filter_type {
|
||||
RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
|
||||
RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
|
||||
})
|
||||
})
|
||||
.map(move |(pubkey, account)| RpcKeyedAccount {
|
||||
pubkey: pubkey.to_string(),
|
||||
account: RpcAccount::encode(account),
|
||||
account: UiAccount::encode(account, encoding.clone()),
|
||||
}),
|
||||
),
|
||||
last_notified_slot,
|
||||
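filter_program_results now applies the subscription's filter list before encoding, so an account is only pushed to the subscriber when every filter matches. A simplified, self-contained sketch of that semantics using a local stand-in for RpcFilterType (DataSize only):

// Hypothetical stand-in for solana_client::rpc_filter::RpcFilterType.
enum Filter {
    DataSize(u64),
}

fn matches(filters: &[Filter], data: &[u8]) -> bool {
    // Mirrors the diff: all filters must pass for the account to be notified.
    filters.iter().all(|f| match f {
        Filter::DataSize(size) => data.len() as u64 == *size,
    })
}

fn main() {
    let filters = vec![Filter::DataSize(8)];
    let accounts: Vec<(&str, Vec<u8>)> = vec![("keyed_a", vec![0; 8]), ("keyed_b", vec![0; 4])];

    let notified: Vec<_> = accounts
        .into_iter()
        .filter(|(_, data)| matches(&filters, data))
        .map(|(pubkey, _)| pubkey)
        .collect();

    assert_eq!(notified, vec!["keyed_a"]);
}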
@@ -454,11 +494,13 @@ impl RpcSubscriptions {
|
||||
pub fn add_account_subscription(
|
||||
&self,
|
||||
pubkey: Pubkey,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcAccountInfoConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<RpcAccount>>,
|
||||
subscriber: Subscriber<Response<UiAccount>>,
|
||||
) {
|
||||
let commitment_level = commitment
|
||||
let config = config.unwrap_or_default();
|
||||
let commitment_level = config
|
||||
.commitment
|
||||
.unwrap_or_else(CommitmentConfig::single)
|
||||
.commitment;
|
||||
let slot = match commitment_level {
|
||||
@@ -504,10 +546,11 @@ impl RpcSubscriptions {
|
||||
add_subscription(
|
||||
&mut subscriptions,
|
||||
pubkey,
|
||||
commitment,
|
||||
config.commitment,
|
||||
sub_id,
|
||||
subscriber,
|
||||
last_notified_slot,
|
||||
config.encoding,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -528,11 +571,14 @@ impl RpcSubscriptions {
|
||||
pub fn add_program_subscription(
|
||||
&self,
|
||||
program_id: Pubkey,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcProgramAccountsConfig>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<RpcKeyedAccount>>,
|
||||
) {
|
||||
let commitment_level = commitment
|
||||
let config = config.unwrap_or_default();
|
||||
let commitment_level = config
|
||||
.account_config
|
||||
.commitment
|
||||
.unwrap_or_else(CommitmentConfig::recent)
|
||||
.commitment;
|
||||
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
|
||||
@@ -546,10 +592,14 @@ impl RpcSubscriptions {
|
||||
add_subscription(
|
||||
&mut subscriptions,
|
||||
program_id,
|
||||
commitment,
|
||||
config.account_config.commitment,
|
||||
sub_id,
|
||||
subscriber,
|
||||
0, // last_notified_slot is not utilized for program subscriptions
|
||||
Some(ProgramConfig {
|
||||
filters: config.filters.unwrap_or_default(),
|
||||
encoding: config.account_config.encoding,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -592,6 +642,7 @@ impl RpcSubscriptions {
|
||||
sub_id,
|
||||
subscriber,
|
||||
0, // last_notified_slot is not utilized for signature subscriptions
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -711,6 +762,9 @@ impl RpcSubscriptions {
|
||||
notifier.notify(slot_info, sink);
|
||||
}
|
||||
}
|
||||
// These notifications are only triggered by votes observed on gossip,
|
||||
// unlike `NotificationEntry::Gossip`, which also accounts for slots seen
|
||||
// in VoteState's from bank states built in ReplayStage.
|
||||
NotificationEntry::Vote(ref vote_info) => {
|
||||
let subscriptions = subscriptions.vote_subscriptions.read().unwrap();
|
||||
for (_, sink) in subscriptions.iter() {
|
||||
@@ -913,7 +967,7 @@ pub(crate) mod tests {
|
||||
system_transaction,
|
||||
};
|
||||
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
|
||||
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
|
||||
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
|
||||
receiver: futures::sync::mpsc::Receiver<T>,
|
||||
@@ -975,7 +1029,10 @@ pub(crate) mod tests {
|
||||
);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::recent()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
}),
|
||||
sub_id.clone(),
|
||||
subscriber,
|
||||
);
|
||||
@@ -1385,7 +1442,7 @@ pub(crate) mod tests {
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, SubscriptionData<()>>> =
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, SubscriptionData<(), ()>>> =
|
||||
HashMap::new();
|
||||
|
||||
let num_keys = 5;
|
||||
@@ -1393,7 +1450,7 @@ pub(crate) mod tests {
|
||||
let (subscriber, _id_receiver, _transport_receiver) =
|
||||
Subscriber::new_test("notification");
|
||||
let sub_id = SubscriptionId::Number(key);
|
||||
add_subscription(&mut subscriptions, key, None, sub_id, subscriber, 0);
|
||||
add_subscription(&mut subscriptions, key, None, sub_id, subscriber, 0, None);
|
||||
}
|
||||
|
||||
// Add another subscription to the "0" key
|
||||
@@ -1406,6 +1463,7 @@ pub(crate) mod tests {
|
||||
extra_sub_id.clone(),
|
||||
subscriber,
|
||||
0,
|
||||
None,
|
||||
);
|
||||
|
||||
assert_eq!(subscriptions.len(), num_keys as usize);
|
||||
@@ -1467,7 +1525,10 @@ pub(crate) mod tests {
|
||||
let sub_id0 = SubscriptionId::Number(0 as u64);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::single_gossip()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
}),
|
||||
sub_id0.clone(),
|
||||
subscriber0,
|
||||
);
|
||||
@@ -1532,7 +1593,10 @@ pub(crate) mod tests {
|
||||
let sub_id1 = SubscriptionId::Number(1 as u64);
|
||||
subscriptions.add_account_subscription(
|
||||
alice.pubkey(),
|
||||
Some(CommitmentConfig::single_gossip()),
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
}),
|
||||
sub_id1.clone(),
|
||||
subscriber1,
|
||||
);
|
||||
|
@@ -33,7 +33,7 @@ use std::{
|
||||
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
|
||||
pub const DEFAULT_NONCE: u32 = 42;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
|
||||
pub enum RepairType {
|
||||
Orphan(Slot),
|
||||
HighestShred(Slot, u64),
|
||||
|
@@ -5,9 +5,10 @@ use crate::{
|
||||
banking_stage::BankingStage,
|
||||
broadcast_stage::{BroadcastStage, BroadcastStageType, RetransmitSlotsReceiver},
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
|
||||
cluster_info_vote_listener::{ClusterInfoVoteListener, VerifiedVoteSender, VoteTracker},
|
||||
fetch_stage::FetchStage,
|
||||
poh_recorder::{PohRecorder, WorkingBankEntry},
|
||||
replay_stage::ReplayVotesReceiver,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify::TransactionSigVerifier,
|
||||
sigverify_stage::SigVerifyStage,
|
||||
@@ -52,6 +53,8 @@ impl Tpu {
|
||||
shred_version: u16,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
verified_vote_sender: VerifiedVoteSender,
|
||||
replay_votes_receiver: ReplayVotesReceiver,
|
||||
) -> Self {
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let fetch_stage = FetchStage::new_with_sender(
|
||||
@@ -68,22 +71,24 @@ impl Tpu {
|
||||
SigVerifyStage::new(packet_receiver, verified_sender, verifier)
|
||||
};
|
||||
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
|
||||
let cluster_info_vote_listener = ClusterInfoVoteListener::new(
|
||||
&exit,
|
||||
cluster_info.clone(),
|
||||
verified_vote_sender,
|
||||
verified_vote_packets_sender,
|
||||
&poh_recorder,
|
||||
vote_tracker,
|
||||
bank_forks,
|
||||
subscriptions.clone(),
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
poh_recorder,
|
||||
verified_receiver,
|
||||
verified_vote_receiver,
|
||||
verified_vote_packets_receiver,
|
||||
transaction_status_sender,
|
||||
);
|
||||
|
||||
|
@@ -3,6 +3,7 @@ use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionSta
|
||||
use solana_runtime::{
|
||||
bank::{Bank, HashAgeKind},
|
||||
nonce_utils,
|
||||
transaction_utils::OrderedIterator,
|
||||
};
|
||||
use solana_transaction_status::TransactionStatusMeta;
|
||||
use std::{
|
||||
@@ -50,16 +51,17 @@ impl TransactionStatusService {
|
||||
let TransactionStatusBatch {
|
||||
bank,
|
||||
transactions,
|
||||
iteration_order,
|
||||
statuses,
|
||||
balances,
|
||||
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
let slot = bank.slot();
|
||||
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in transactions
|
||||
.iter()
|
||||
.zip(statuses)
|
||||
.zip(balances.pre_balances)
|
||||
.zip(balances.post_balances)
|
||||
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
|
||||
OrderedIterator::new(&transactions, iteration_order.as_deref())
|
||||
.zip(statuses)
|
||||
.zip(balances.pre_balances)
|
||||
.zip(balances.post_balances)
|
||||
{
|
||||
if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
|
||||
let fee_calculator = match hash_age_kind {
|
||||
|
core/src/tree_diff.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
use solana_sdk::clock::Slot;
|
||||
use std::collections::HashSet;
|
||||
|
||||
pub trait TreeDiff {
|
||||
fn children(&self, slot: Slot) -> Option<&[Slot]>;
|
||||
|
||||
fn contains_slot(&self, slot: Slot) -> bool;
|
||||
|
||||
// Find all nodes reachable from `root1`, excluding subtree at `root2`
|
||||
fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet<Slot> {
|
||||
if !self.contains_slot(root1) {
|
||||
return HashSet::new();
|
||||
}
|
||||
let mut pending_slots = vec![root1];
|
||||
let mut reachable_set = HashSet::new();
|
||||
while !pending_slots.is_empty() {
|
||||
let current_slot = pending_slots.pop().unwrap();
|
||||
if current_slot == root2 {
|
||||
continue;
|
||||
}
|
||||
reachable_set.insert(current_slot);
|
||||
for child in self
|
||||
.children(current_slot)
|
||||
.expect("slot was discovered earlier, must exist")
|
||||
{
|
||||
pending_slots.push(*child);
|
||||
}
|
||||
}
|
||||
|
||||
reachable_set
|
||||
}
|
||||
}
|
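The new `TreeDiff` trait above only asks implementors for `children` and `contains_slot`; the provided `subtree_diff` then walks the tree iteratively with an explicit stack. As a quick illustration (not part of this diff), here is a minimal sketch of an implementor, assuming the new file is exported as `solana_core::tree_diff::TreeDiff`; the `ToyTree` type and the slot numbers are made up for the example.

```rust
use solana_core::tree_diff::TreeDiff; // assumed module path for the new file above
use solana_sdk::clock::Slot;
use std::collections::HashMap;

// Hypothetical adjacency-map tree used only for illustration.
struct ToyTree {
    edges: HashMap<Slot, Vec<Slot>>,
}

impl TreeDiff for ToyTree {
    fn children(&self, slot: Slot) -> Option<&[Slot]> {
        self.edges.get(&slot).map(|c| c.as_slice())
    }

    fn contains_slot(&self, slot: Slot) -> bool {
        self.edges.contains_key(&slot)
    }
}

fn main() {
    // 0 -> {1, 2}, 1 -> {3}; every reachable slot needs an entry, because
    // subtree_diff expects children() to be Some for slots it has discovered.
    let mut edges = HashMap::new();
    edges.insert(0, vec![1, 2]);
    edges.insert(1, vec![3]);
    edges.insert(2, vec![]);
    edges.insert(3, vec![]);
    let tree = ToyTree { edges };

    // Slots reachable from 0, skipping the subtree rooted at 2: {0, 1, 3}.
    let diff = tree.subtree_diff(0, 2);
    assert_eq!(diff.len(), 3);
}
```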
@@ -6,12 +6,12 @@ use crate::{
|
||||
accounts_hash_verifier::AccountsHashVerifier,
|
||||
broadcast_stage::RetransmitSlotsSender,
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker},
|
||||
cluster_slots::ClusterSlots,
|
||||
commitment::BlockCommitmentCache,
|
||||
ledger_cleanup_service::LedgerCleanupService,
|
||||
poh_recorder::PohRecorder,
|
||||
replay_stage::{ReplayStage, ReplayStageConfig},
|
||||
replay_stage::{ReplayStage, ReplayStageConfig, ReplayVotesSender},
|
||||
retransmit_stage::RetransmitStage,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
@@ -96,6 +96,8 @@ impl Tvu {
|
||||
snapshot_package_sender: Option<AccountsPackageSender>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
replay_votes_sender: ReplayVotesSender,
|
||||
tvu_config: TvuConfig,
|
||||
) -> Self {
|
||||
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
|
||||
@@ -146,7 +148,7 @@ impl Tvu {
|
||||
tvu_config.shred_version,
|
||||
cluster_slots.clone(),
|
||||
duplicate_slots_reset_sender,
|
||||
vote_tracker.clone(),
|
||||
verified_vote_receiver,
|
||||
);
|
||||
|
||||
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
|
||||
@@ -196,6 +198,7 @@ impl Tvu {
|
||||
cluster_slots,
|
||||
retransmit_slots_sender,
|
||||
duplicate_slots_reset_receiver,
|
||||
replay_votes_sender,
|
||||
);
|
||||
|
||||
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
|
||||
@@ -278,6 +281,8 @@ pub mod tests {
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
|
||||
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let tvu = Tvu::new(
|
||||
&vote_keypair.pubkey(),
|
||||
@@ -310,6 +315,8 @@ pub mod tests {
|
||||
None,
|
||||
Arc::new(VoteTracker::new(&bank)),
|
||||
retransmit_slots_sender,
|
||||
verified_vote_receiver,
|
||||
replay_votes_sender,
|
||||
TvuConfig::default(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
|
@@ -9,7 +9,7 @@ use crate::{
|
||||
gossip_service::{discover_cluster, GossipService},
|
||||
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
poh_service::PohService,
|
||||
rewards_recorder_service::RewardsRecorderService,
|
||||
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
|
||||
rpc::JsonRpcConfig,
|
||||
rpc_pubsub_service::PubSubService,
|
||||
rpc_service::JsonRpcService,
|
||||
@@ -28,11 +28,13 @@ use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
|
||||
blockstore_processor, create_new_tmp_ledger,
|
||||
blockstore_processor::{self, TransactionStatusSender},
|
||||
create_new_tmp_ledger,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
leader_schedule::FixedSchedule,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
@@ -129,6 +131,14 @@ impl ValidatorExit {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct TransactionHistoryServices {
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
transaction_status_service: Option<TransactionStatusService>,
|
||||
rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
rewards_recorder_service: Option<RewardsRecorderService>,
|
||||
}
|
||||
|
||||
pub struct Validator {
|
||||
pub id: Pubkey,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
@@ -193,7 +203,21 @@ impl Validator {
|
||||
}
|
||||
}
|
||||
|
||||
info!("Cleaning accounts paths..");
|
||||
let mut start = Measure::start("clean_accounts_paths");
|
||||
for accounts_path in &config.account_paths {
|
||||
cleanup_accounts_path(accounts_path);
|
||||
}
|
||||
start.stop();
|
||||
info!("done. {}", start);
|
||||
|
||||
info!("creating bank...");
|
||||
let mut validator_exit = ValidatorExit::default();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let exit_ = exit.clone();
|
||||
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
|
||||
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
|
||||
|
||||
let (
|
||||
genesis_config,
|
||||
bank_forks,
|
||||
@@ -202,10 +226,15 @@ impl Validator {
|
||||
completed_slots_receiver,
|
||||
leader_schedule_cache,
|
||||
snapshot_hash,
|
||||
) = new_banks_from_blockstore(config, ledger_path, poh_verify);
|
||||
TransactionHistoryServices {
|
||||
transaction_status_sender,
|
||||
transaction_status_service,
|
||||
rewards_recorder_sender,
|
||||
rewards_recorder_service,
|
||||
},
|
||||
) = new_banks_from_blockstore(config, ledger_path, poh_verify, &exit);
|
||||
|
||||
let leader_schedule_cache = Arc::new(leader_schedule_cache);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bank = bank_forks.working_bank();
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
|
||||
@@ -217,11 +246,6 @@ impl Validator {
|
||||
}
|
||||
}
|
||||
|
||||
let mut validator_exit = ValidatorExit::default();
|
||||
let exit_ = exit.clone();
|
||||
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
|
||||
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
|
||||
|
||||
node.info.wallclock = timestamp();
|
||||
node.info.shred_version = compute_shred_version(
|
||||
&genesis_config.hash(),
|
||||
@@ -240,7 +264,6 @@ impl Validator {
|
||||
}
|
||||
|
||||
let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone()));
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
@@ -283,36 +306,6 @@ impl Validator {
|
||||
)
|
||||
});
|
||||
|
||||
let (transaction_status_sender, transaction_status_service) =
|
||||
if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
|
||||
let (transaction_status_sender, transaction_status_receiver) = unbounded();
|
||||
(
|
||||
Some(transaction_status_sender),
|
||||
Some(TransactionStatusService::new(
|
||||
transaction_status_receiver,
|
||||
blockstore.clone(),
|
||||
&exit,
|
||||
)),
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
let (rewards_recorder_sender, rewards_recorder_service) =
|
||||
if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
|
||||
let (rewards_recorder_sender, rewards_receiver) = unbounded();
|
||||
(
|
||||
Some(rewards_recorder_sender),
|
||||
Some(RewardsRecorderService::new(
|
||||
rewards_receiver,
|
||||
blockstore.clone(),
|
||||
&exit,
|
||||
)),
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
info!(
|
||||
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
|
||||
bank.epoch(),
|
||||
@@ -407,6 +400,8 @@ impl Validator {
|
||||
let vote_tracker = Arc::new(VoteTracker::new(bank_forks.read().unwrap().root_bank()));
|
||||
|
||||
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
let tvu = Tvu::new(
|
||||
vote_account,
|
||||
authorized_voter_keypairs,
|
||||
@@ -451,6 +446,8 @@ impl Validator {
|
||||
snapshot_package_sender,
|
||||
vote_tracker.clone(),
|
||||
retransmit_slots_sender,
|
||||
verified_vote_receiver,
|
||||
replay_votes_sender,
|
||||
TvuConfig {
|
||||
max_ledger_shreds: config.max_ledger_shreds,
|
||||
halt_on_trusted_validators_accounts_hash_mismatch: config
|
||||
@@ -477,6 +474,8 @@ impl Validator {
|
||||
node.info.shred_version,
|
||||
vote_tracker,
|
||||
bank_forks,
|
||||
verified_vote_sender,
|
||||
replay_votes_receiver,
|
||||
);
|
||||
|
||||
datapoint_info!("validator-new", ("id", id.to_string(), String));
|
||||
@@ -568,14 +567,16 @@ fn new_banks_from_blockstore(
|
||||
config: &ValidatorConfig,
|
||||
blockstore_path: &Path,
|
||||
poh_verify: bool,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> (
|
||||
GenesisConfig,
|
||||
BankForks,
|
||||
Blockstore,
|
||||
Arc<Blockstore>,
|
||||
Receiver<bool>,
|
||||
CompletedSlotsReceiver,
|
||||
LeaderScheduleCache,
|
||||
Option<(Slot, Hash)>,
|
||||
TransactionHistoryServices,
|
||||
) {
|
||||
let genesis_config =
|
||||
open_genesis_config(blockstore_path, config.max_genesis_archive_unpacked_size);
|
||||
@@ -613,12 +614,23 @@ fn new_banks_from_blockstore(
|
||||
..blockstore_processor::ProcessOptions::default()
|
||||
};
|
||||
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let transaction_history_services =
|
||||
if config.rpc_ports.is_some() && config.rpc_config.enable_rpc_transaction_history {
|
||||
initialize_rpc_transaction_history_services(blockstore.clone(), exit)
|
||||
} else {
|
||||
TransactionHistoryServices::default()
|
||||
};
|
||||
|
||||
let (mut bank_forks, mut leader_schedule_cache, snapshot_hash) = bank_forks_utils::load(
|
||||
&genesis_config,
|
||||
&blockstore,
|
||||
config.account_paths.clone(),
|
||||
config.snapshot_config.as_ref(),
|
||||
process_options,
|
||||
transaction_history_services
|
||||
.transaction_status_sender
|
||||
.clone(),
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
error!("Failed to load ledger: {:?}", err);
|
||||
@@ -638,6 +650,7 @@ fn new_banks_from_blockstore(
|
||||
completed_slots_receiver,
|
||||
leader_schedule_cache,
|
||||
snapshot_hash,
|
||||
transaction_history_services,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -701,6 +714,33 @@ fn backup_and_clear_blockstore(ledger_path: &Path, start_slot: Slot, shred_versi
|
||||
drop(blockstore);
|
||||
}
|
||||
|
||||
fn initialize_rpc_transaction_history_services(
|
||||
blockstore: Arc<Blockstore>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> TransactionHistoryServices {
|
||||
let (transaction_status_sender, transaction_status_receiver) = unbounded();
|
||||
let transaction_status_sender = Some(transaction_status_sender);
|
||||
let transaction_status_service = Some(TransactionStatusService::new(
|
||||
transaction_status_receiver,
|
||||
blockstore.clone(),
|
||||
exit,
|
||||
));
|
||||
|
||||
let (rewards_recorder_sender, rewards_receiver) = unbounded();
|
||||
let rewards_recorder_sender = Some(rewards_recorder_sender);
|
||||
let rewards_recorder_service = Some(RewardsRecorderService::new(
|
||||
rewards_receiver,
|
||||
blockstore,
|
||||
exit,
|
||||
));
|
||||
TransactionHistoryServices {
|
||||
transaction_status_sender,
|
||||
transaction_status_service,
|
||||
rewards_recorder_sender,
|
||||
rewards_recorder_service,
|
||||
}
|
||||
}
|
||||
|
||||
// Return true on error, indicating the validator should exit.
|
||||
fn wait_for_supermajority(
|
||||
config: &ValidatorConfig,
|
||||
@@ -958,6 +998,16 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
|
||||
online_stake * 100 / total_activated_stake
|
||||
}
|
||||
|
||||
// Cleanup anything that looks like an accounts append-vec
|
||||
fn cleanup_accounts_path(account_path: &std::path::Path) {
|
||||
if std::fs::remove_dir_all(account_path).is_err() {
|
||||
warn!(
|
||||
"encountered error removing accounts path: {:?}",
|
||||
account_path
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
cluster_info_vote_listener::VerifiedVotePacketsReceiver, crds_value::CrdsValueLabel,
|
||||
cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, crds_value::CrdsValueLabel,
|
||||
result::Result,
|
||||
};
|
||||
use solana_perf::packet::Packets;
|
||||
@@ -18,7 +18,7 @@ impl Deref for VerifiedVotePackets {
|
||||
impl VerifiedVotePackets {
|
||||
pub fn get_and_process_vote_packets(
|
||||
&mut self,
|
||||
vote_packets_receiver: &VerifiedVotePacketsReceiver,
|
||||
vote_packets_receiver: &VerifiedLabelVotePacketsReceiver,
|
||||
last_update_version: &mut u64,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
|
@@ -3,7 +3,7 @@
|
||||
//!
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
repair_response,
|
||||
repair_service::{RepairInfo, RepairService},
|
||||
@@ -302,7 +302,7 @@ impl WindowService {
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
shred_filter: F,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
) -> WindowService
|
||||
where
|
||||
F: 'static
|
||||
@@ -319,7 +319,7 @@ impl WindowService {
|
||||
cluster_info.clone(),
|
||||
repair_info,
|
||||
cluster_slots,
|
||||
vote_tracker,
|
||||
verified_vote_receiver,
|
||||
);
|
||||
|
||||
let (insert_sender, insert_receiver) = unbounded();
|
||||
|
@@ -7,9 +7,10 @@ use jsonrpc_core_client::transports::ws;
|
||||
use log::*;
|
||||
use reqwest::{self, header::CONTENT_TYPE};
|
||||
use serde_json::{json, Value};
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_client::{
|
||||
rpc_client::{get_rpc_request_str, RpcClient},
|
||||
rpc_response::{Response, RpcAccount, RpcSignatureResult},
|
||||
rpc_response::{Response, RpcSignatureResult},
|
||||
};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
|
||||
@@ -25,7 +26,7 @@ use std::{
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio_01::runtime::Runtime;
|
||||
|
||||
macro_rules! json_req {
|
||||
($method: expr, $params: expr) => {{
|
||||
@@ -120,14 +121,14 @@ fn test_rpc_invalid_requests() {
|
||||
let json = post_rpc(req, &leader_data);
|
||||
|
||||
let the_error = json["error"]["message"].as_str().unwrap();
|
||||
assert_eq!(the_error, "Invalid");
|
||||
assert_eq!(the_error, "Invalid param: Invalid");
|
||||
|
||||
// test invalid get_account_info request
|
||||
let req = json_req!("getAccountInfo", json!(["invalid9999"]));
|
||||
let json = post_rpc(req, &leader_data);
|
||||
|
||||
let the_error = json["error"]["message"].as_str().unwrap();
|
||||
assert_eq!(the_error, "Invalid");
|
||||
assert_eq!(the_error, "Invalid param: Invalid");
|
||||
|
||||
// test invalid get_account_info request
|
||||
let req = json_req!("getAccountInfo", json!([bob_pubkey.to_string()]));
|
||||
@@ -172,7 +173,7 @@ fn test_rpc_subscriptions() {
|
||||
// Track when subscriptions are ready
|
||||
let (ready_sender, ready_receiver) = channel::<()>();
|
||||
// Track account notifications are received
|
||||
let (account_sender, account_receiver) = channel::<Response<RpcAccount>>();
|
||||
let (account_sender, account_receiver) = channel::<Response<UiAccount>>();
|
||||
// Track when status notifications are received
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
|
||||
|
||||
@@ -188,7 +189,7 @@ fn test_rpc_subscriptions() {
|
||||
.and_then(move |client| {
|
||||
for sig in signature_set {
|
||||
let status_sender = status_sender.clone();
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.signature_subscribe(sig.clone(), None)
|
||||
.and_then(move |sig_stream| {
|
||||
@@ -202,7 +203,7 @@ fn test_rpc_subscriptions() {
|
||||
}),
|
||||
);
|
||||
}
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.slot_subscribe()
|
||||
.and_then(move |slot_stream| {
|
||||
@@ -217,7 +218,7 @@ fn test_rpc_subscriptions() {
|
||||
);
|
||||
for pubkey in account_set {
|
||||
let account_sender = account_sender.clone();
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.account_subscribe(pubkey, None)
|
||||
.and_then(move |account_stream| {
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.2.9"
|
||||
version = "1.2.20"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -21,7 +21,7 @@ rand_chacha = { version = "0.2.2" }
|
||||
regex-syntax = { version = "0.6.12" }
|
||||
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde = { version = "1.0.100", features = ["rc"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.3", features = ["serde"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.4", features = ["serde"] }
|
||||
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }
|
||||
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
|
||||
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
|
||||
|
docs/.eslintrc (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"env": {
|
||||
"browser": true,
|
||||
"node": true
|
||||
},
|
||||
"parser": "babel-eslint",
|
||||
"rules": {
|
||||
"strict": 0,
|
||||
"no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
|
||||
"no-trailing-spaces": ["error", { "skipBlankLines": true }]
|
||||
},
|
||||
"settings": {
|
||||
"react": {
|
||||
"version": "detect", // React version. "detect" automatically picks the version you have installed.
|
||||
}
|
||||
},
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"plugin:react/recommended"
|
||||
]
|
||||
}
|
docs/.gitattributes (vendored, 1 line)
@@ -1 +0,0 @@
|
||||
theme/highlight.js binary
|
docs/.gitignore (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Dependencies
|
||||
/node_modules
|
||||
|
||||
# Production
|
||||
/build
|
||||
|
||||
# Generated files
|
||||
.docusaurus
|
||||
.cache-loader
|
||||
.vercel
|
||||
/static/img/*.svg
|
||||
/static/img/*.png
|
||||
vercel.json
|
||||
|
||||
# Misc
|
||||
.DS_Store
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
docs/.travis/before_install.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
# |source| this file
|
||||
|
||||
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
|
||||
sudo apt install -y nodejs
|
||||
|
||||
npm install --global docusaurus-init
|
||||
docusaurus-init
|
||||
|
||||
npm install --global vercel
|
docs/.travis/script.sh (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
# |source| this file
|
||||
|
||||
set -ex
|
||||
./build.sh
|
@@ -1,31 +1,39 @@
|
||||
Building the Solana Docs
|
||||
---
|
||||
# Docs Readme
|
||||
|
||||
Install dependencies, build, and test the docs:
|
||||
Solana's Docs are built using [Docusaurus 2](https://v2.docusaurus.io/) with `npm`.
|
||||
Static content delivery is handled using `vercel`.
|
||||
|
||||
```bash
|
||||
$ brew install coreutils
|
||||
$ brew install mscgen
|
||||
$ cargo install svgbob_cli
|
||||
$ cargo install mdbook-linkcheck
|
||||
$ cargo install mdbook
|
||||
$ ./build.sh
|
||||
### Installing Docusaurus
|
||||
|
||||
```
|
||||
$ npm install
|
||||
```
|
||||
|
||||
Run any Rust tests in the markdown:
|
||||
### Local Development
|
||||
|
||||
```bash
|
||||
$ make test
|
||||
This command starts a local development server and opens up a browser window.
|
||||
Most changes are reflected live without having to restart the server.
|
||||
|
||||
```
|
||||
$ npm run start
|
||||
```
|
||||
|
||||
Render markdown as HTML:
|
||||
### Build Locally
|
||||
|
||||
```bash
|
||||
$ make build
|
||||
This command generates static content into the `build` directory and can be
|
||||
served using any static content hosting service.
|
||||
|
||||
```
|
||||
$ docs/build.sh
|
||||
```
|
||||
|
||||
Render and view the docs:
|
||||
### CI Build Flow
|
||||
The docs are built and published in Travis CI with the `docs/build.sh` script.
|
||||
On each PR, the docs are built, but not published.
|
||||
|
||||
```bash
|
||||
$ make open
|
||||
```
|
||||
In each post-commit build, docs are built and published using `vercel` to their
|
||||
respective domain depending on the build branch.
|
||||
|
||||
- Master branch docs are published to `edge.docs.solana.com`
|
||||
- Beta branch docs are published to `beta.docs.solana.com`
|
||||
- Latest release tag docs are published to `docs.solana.com`
|
||||
|
docs/babel.config.js (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
module.exports = {
|
||||
presets: [require.resolve("@docusaurus/core/lib/babel/preset")],
|
||||
};
|
@@ -1,12 +0,0 @@
|
||||
[book]
|
||||
title = "Solana: Blockchain Rebuilt for Scale"
|
||||
authors = ["The Solana Team"]
|
||||
|
||||
[build]
|
||||
build-dir = "html"
|
||||
create-missing = false
|
||||
|
||||
[output.html]
|
||||
theme = "theme"
|
||||
|
||||
[output.linkcheck]
|
@@ -3,6 +3,9 @@ set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
# shellcheck source=ci/rust-version.sh
|
||||
source ../ci/rust-version.sh stable
|
||||
|
||||
: "${rust_stable:=}" # Pacify shellcheck
|
||||
|
||||
usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')
|
||||
|
@@ -1,17 +1,25 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
set -ex
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
# md check
|
||||
find src -name '*.md' -a \! -name SUMMARY.md |
|
||||
while read -r file; do
|
||||
if ! grep -q '('"${file#src/}"')' src/SUMMARY.md; then
|
||||
echo "Error: $file missing from SUMMARY.md"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
# shellcheck source=ci/env.sh
|
||||
source ../ci/env.sh
|
||||
|
||||
mdbook --version
|
||||
mdbook-linkcheck --version
|
||||
make -j"$(nproc)"
|
||||
: "${rust_stable_docker_image:=}" # Pacify shellcheck
|
||||
|
||||
# shellcheck source=ci/rust-version.sh
|
||||
source ../ci/rust-version.sh
|
||||
../ci/docker-run.sh "$rust_stable_docker_image" docs/build-cli-usage.sh
|
||||
../ci/docker-run.sh "$rust_stable_docker_image" docs/convert-ascii-to-svg.sh
|
||||
./set-solana-release-tag.sh
|
||||
|
||||
# Build from /src into /build
|
||||
npm run build
|
||||
|
||||
# Publish only from merge commits and release tags
|
||||
if [[ -n $CI ]]; then
|
||||
if [[ -z $CI_PULL_REQUEST ]]; then
|
||||
./publish-docs.sh
|
||||
fi
|
||||
fi
|
||||
|
docs/convert-ascii-to-svg.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Convert .bob and .msc files in docs/art to .svg files located where the
|
||||
# site build will find them.
|
||||
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
output_dir=static/img
|
||||
|
||||
mkdir -p "$output_dir"
|
||||
|
||||
while read -r bob_file; do
|
||||
out_file=$(basename "${bob_file%.*}".svg)
|
||||
svgbob "$bob_file" --output "$output_dir/$out_file"
|
||||
done < <(find art/*.bob)
|
||||
|
||||
while read -r msc_file; do
|
||||
out_file=$(basename "${msc_file%.*}".png)
|
||||
mscgen -T png -o "$output_dir/$out_file" -i "$msc_file"
|
||||
done < <(find art/*.msc)
|
docs/docusaurus.config.js (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
module.exports = {
|
||||
title: "Solana Docs",
|
||||
tagline:
|
||||
"Solana is an open source project implementing a new, high-performance, permissionless blockchain.",
|
||||
url: "https://docs.solana.com",
|
||||
baseUrl: "/",
|
||||
favicon: "img/favicon.ico",
|
||||
organizationName: "solana-labs", // Usually your GitHub org/user name.
|
||||
projectName: "solana", // Usually your repo name.
|
||||
themeConfig: {
|
||||
navbar: {
|
||||
logo: {
|
||||
alt: "Solana Logo",
|
||||
src: "img/logo-horizontal.svg",
|
||||
srcDark: "img/logo-horizontal-dark.svg",
|
||||
},
|
||||
links: [
|
||||
{
|
||||
to: "introduction",
|
||||
label: "Introduction",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
to: "apps",
|
||||
label: "Developers",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
to: "running-validator",
|
||||
label: "Validators",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
to: "clusters",
|
||||
label: "Clusters",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
href: "https://discordapp.com/invite/pquxPsq",
|
||||
label: "Chat",
|
||||
position: "right",
|
||||
},
|
||||
|
||||
{
|
||||
href: "https://github.com/solana-labs/solana",
|
||||
label: "GitHub",
|
||||
position: "right",
|
||||
},
|
||||
],
|
||||
},
|
||||
algolia: {
|
||||
apiKey: "d58e0d68c875346d52645d68b13f3ac0",
|
||||
indexName: "solana",
|
||||
},
|
||||
footer: {
|
||||
style: "dark",
|
||||
links: [
|
||||
{
|
||||
title: "Docs",
|
||||
items: [
|
||||
{
|
||||
label: "Introduction",
|
||||
to: "introduction",
|
||||
},
|
||||
{
|
||||
label: "Tour de SOL",
|
||||
to: "tour-de-sol",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
title: "Community",
|
||||
items: [
|
||||
{
|
||||
label: "Discord",
|
||||
href: "https://discordapp.com/invite/pquxPsq",
|
||||
},
|
||||
{
|
||||
label: "Twitter",
|
||||
href: "https://twitter.com/solana",
|
||||
},
|
||||
{
|
||||
label: "Forums",
|
||||
href: "https://forums.solana.com",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
title: "More",
|
||||
items: [
|
||||
{
|
||||
label: "GitHub",
|
||||
href: "https://github.com/solana-labs/solana",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
copyright: `Copyright © ${new Date().getFullYear()} Solana Foundation`,
|
||||
},
|
||||
},
|
||||
presets: [
|
||||
[
|
||||
"@docusaurus/preset-classic",
|
||||
{
|
||||
docs: {
|
||||
path: "src",
|
||||
routeBasePath: "/",
|
||||
sidebarPath: require.resolve("./sidebars.js"),
|
||||
},
|
||||
theme: {
|
||||
customCss: require.resolve("./src/css/custom.css"),
|
||||
},
|
||||
},
|
||||
],
|
||||
],
|
||||
};
|
@@ -1,51 +0,0 @@
|
||||
BOB_SRCS=$(wildcard art/*.bob)
|
||||
MSC_SRCS=$(wildcard art/*.msc)
|
||||
MD_SRCS=$(wildcard src/*.md src/*/*.md) src/cli/usage.md
|
||||
|
||||
SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg)
|
||||
|
||||
TARGET=html/index.html
|
||||
TEST_STAMP=src/tests.ok
|
||||
|
||||
all: $(TARGET)
|
||||
|
||||
svg: $(SVG_IMGS)
|
||||
|
||||
test: $(TEST_STAMP)
|
||||
|
||||
open: $(TEST_STAMP)
|
||||
mdbook build --open
|
||||
./set-solana-release-tag.sh
|
||||
|
||||
watch: $(SVG_IMGS)
|
||||
mdbook watch
|
||||
|
||||
src/.gitbook/assets/%.svg: art/%.bob
|
||||
@mkdir -p $(@D)
|
||||
svgbob < $< > $@
|
||||
|
||||
src/.gitbook/assets/%.svg: art/%.msc
|
||||
@mkdir -p $(@D)
|
||||
mscgen -T svg -i $< -o $@
|
||||
|
||||
../target/debug/solana:
|
||||
cd ../cli && cargo build
|
||||
|
||||
src/cli/usage.md: build-cli-usage.sh ../target/debug/solana
|
||||
./$<
|
||||
|
||||
src/%.md: %.md
|
||||
@mkdir -p $(@D)
|
||||
@cp $< $@
|
||||
|
||||
$(TEST_STAMP): $(TARGET)
|
||||
mdbook test
|
||||
touch $@
|
||||
|
||||
$(TARGET): $(SVG_IMGS) $(MD_SRCS)
|
||||
mdbook build
|
||||
./set-solana-release-tag.sh
|
||||
|
||||
clean:
|
||||
rm -f $(SVG_IMGS) src/tests.ok
|
||||
rm -rf html
|
docs/package-lock.json (generated, new file, 13659 lines; diff not shown because it is too large)
docs/package.json (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"name": "solana-docs",
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"start": "docusaurus start",
|
||||
"build": "docusaurus build",
|
||||
"swizzle": "docusaurus swizzle",
|
||||
"deploy": "docusaurus deploy",
|
||||
"format": "prettier --check \"**/*.{js,jsx,json,md,scss}\"",
|
||||
"format:fix": "prettier --write \"**/*.{js,jsx,json,md,scss}\"",
|
||||
"lint": "set -ex; eslint .",
|
||||
"lint:fix": "npm run lint -- --fix"
|
||||
},
|
||||
"dependencies": {
|
||||
"@docusaurus/core": "^2.0.0-alpha.58",
|
||||
"@docusaurus/preset-classic": "^2.0.0-alpha.58",
|
||||
"@docusaurus/theme-search-algolia": "^2.0.0-alpha.32",
|
||||
"babel-eslint": "^10.1.0",
|
||||
"clsx": "^1.1.1",
|
||||
"eslint": "^7.3.1",
|
||||
"eslint-plugin-react": "^7.20.0",
|
||||
"prettier": "^2.0.5",
|
||||
"react": "^16.8.4",
|
||||
"react-dom": "^16.8.4"
|
||||
},
|
||||
"browserslist": {
|
||||
"production": [
|
||||
">0.2%",
|
||||
"not dead",
|
||||
"not op_mini all"
|
||||
],
|
||||
"development": [
|
||||
"last 1 chrome version",
|
||||
"last 1 firefox version",
|
||||
"last 1 safari version"
|
||||
]
|
||||
}
|
||||
}
|
docs/publish-docs.sh (new executable file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
if [[ -d .vercel ]]; then
|
||||
rm -r .vercel
|
||||
fi
|
||||
|
||||
CONFIG_FILE=vercel.json
|
||||
|
||||
if [[ -n $CI_TAG ]]; then
|
||||
PROJECT_NAME=docs-solana-com
|
||||
else
|
||||
eval "$(../ci/channel-info.sh)"
|
||||
case $CHANNEL in
|
||||
edge)
|
||||
PROJECT_NAME=edge-docs-solana-com
|
||||
;;
|
||||
beta)
|
||||
PROJECT_NAME=beta-docs-solana-com
|
||||
;;
|
||||
*)
|
||||
PROJECT_NAME=docs
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
cat > "$CONFIG_FILE" <<EOF
|
||||
{
|
||||
"name": "$PROJECT_NAME",
|
||||
"scope": "solana-labs"
|
||||
}
|
||||
EOF
|
||||
|
||||
[[ -n $VERCEL_TOKEN ]] || {
|
||||
echo "VERCEL_TOKEN is undefined. Needed for Vercel authentication."
|
||||
exit 1
|
||||
}
|
||||
vercel deploy . --local-config="$CONFIG_FILE" --confirm --token "$VERCEL_TOKEN" --prod
|
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [[ -n $CI_TAG ]]; then
|
||||
@@ -23,7 +23,6 @@ if [[ -z "$LATEST_SOLANA_RELEASE_VERSION" ]]; then
|
||||
fi
|
||||
|
||||
set -x
|
||||
find html/ -name \*.html -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
if [[ -n $CI ]]; then
|
||||
find src/ -name \*.md -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
fi
|
||||
|
docs/sidebars.js (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
module.exports = {
|
||||
docs: {
|
||||
"Introduction": ["introduction"],
|
||||
"Wallet Guide": [
|
||||
"wallet-guide",
|
||||
{
|
||||
type: "category",
|
||||
label: "App Wallets",
|
||||
items: [
|
||||
"wallet-guide/apps",
|
||||
"wallet-guide/trust-wallet",
|
||||
"wallet-guide/ledger-live",
|
||||
],
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Command-line Wallets",
|
||||
items: [
|
||||
"wallet-guide/cli",
|
||||
"wallet-guide/paper-wallet",
|
||||
{
|
||||
type: "category",
|
||||
label: "Hardware Wallets",
|
||||
items: ["wallet-guide/hardware-wallets", "wallet-guide/hardware-wallets/ledger"],
|
||||
},
|
||||
"wallet-guide/file-system-wallet",
|
||||
],
|
||||
},
|
||||
"wallet-guide/support",
|
||||
],
|
||||
"Staking Guide": [
|
||||
"staking",
|
||||
"staking/stake-accounts",
|
||||
],
|
||||
"Command Line Guide": [
|
||||
"cli",
|
||||
"cli/install-solana-cli-tools",
|
||||
"cli/conventions",
|
||||
"cli/choose-a-cluster",
|
||||
"cli/transfer-tokens",
|
||||
"cli/delegate-stake",
|
||||
"cli/manage-stake-accounts",
|
||||
"offline-signing",
|
||||
"offline-signing/durable-nonce",
|
||||
"cli/usage",
|
||||
],
|
||||
"Solana Clusters": ["clusters"],
|
||||
"Develop Applications": [
|
||||
"apps",
|
||||
"apps/rent",
|
||||
"apps/hello-world",
|
||||
"apps/break",
|
||||
"apps/webwallet",
|
||||
"apps/drones",
|
||||
"transaction",
|
||||
"apps/jsonrpc-api",
|
||||
"apps/javascript-api",
|
||||
"apps/builtins",
|
||||
],
|
||||
"Integration Guides": ["integrations/exchange"],
|
||||
"Run a Validator": [
|
||||
"running-validator",
|
||||
"running-validator/validator-reqs",
|
||||
"running-validator/validator-start",
|
||||
"running-validator/vote-accounts",
|
||||
"running-validator/validator-stake",
|
||||
"running-validator/validator-monitor",
|
||||
"running-validator/validator-info",
|
||||
"running-validator/validator-troubleshoot",
|
||||
],
|
||||
"Tour de SOL": [
|
||||
"tour-de-sol",
|
||||
{
|
||||
type: "category",
|
||||
label: "Registration",
|
||||
items: [
|
||||
"tour-de-sol/registration/how-to-register",
|
||||
"tour-de-sol/registration/terms-of-participation",
|
||||
"tour-de-sol/registration/rewards",
|
||||
"tour-de-sol/registration/confidentiality",
|
||||
"tour-de-sol/registration/validator-registration-and-rewards-faq",
|
||||
],
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Participation",
|
||||
items: [
|
||||
"tour-de-sol/participation/validator-technical-requirements",
|
||||
"tour-de-sol/participation/validator-public-key-registration",
|
||||
"tour-de-sol/participation/steps-to-create-a-validator",
|
||||
],
|
||||
},
|
||||
"tour-de-sol/useful-links",
|
||||
"tour-de-sol/submitting-bugs",
|
||||
],
|
||||
"Benchmark a Cluster": ["cluster/bench-tps", "cluster/performance-metrics"],
|
||||
"Solana's Architecture": [
|
||||
"cluster/overview",
|
||||
"cluster/synchronization",
|
||||
"cluster/leader-rotation",
|
||||
"cluster/fork-generation",
|
||||
"cluster/managing-forks",
|
||||
"cluster/turbine-block-propagation",
|
||||
"cluster/vote-signing",
|
||||
"cluster/stake-delegation-and-rewards",
|
||||
],
|
||||
"Anatomy of a Validator": [
|
||||
"validator/anatomy",
|
||||
"validator/tpu",
|
||||
"validator/tvu",
|
||||
"validator/blockstore",
|
||||
"validator/gossip",
|
||||
"validator/runtime",
|
||||
],
|
||||
Terminology: ["terminology"],
|
||||
History: ["history"],
|
||||
"Implemented Design Proposals": [
|
||||
"implemented-proposals/implemented-proposals",
|
||||
{
|
||||
type: "category",
|
||||
label: "Economic Design",
|
||||
items: [
|
||||
"implemented-proposals/ed_overview/ed_overview",
|
||||
{
|
||||
type: "category",
|
||||
label: "Validation Client Economics",
|
||||
items: [
|
||||
"implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_overview",
|
||||
"implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards",
|
||||
"implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees",
|
||||
"implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation",
|
||||
],
|
||||
},
|
||||
"implemented-proposals/ed_overview/ed_storage_rent_economics",
|
||||
"implemented-proposals/ed_overview/ed_economic_sustainability",
|
||||
"implemented-proposals/ed_overview/ed_mvp",
|
||||
"implemented-proposals/ed_overview/ed_references",
|
||||
],
|
||||
},
|
||||
"implemented-proposals/transaction-fees",
|
||||
"implemented-proposals/tower-bft",
|
||||
"implemented-proposals/leader-leader-transition",
|
||||
"implemented-proposals/leader-validator-transition",
|
||||
"implemented-proposals/persistent-account-storage",
|
||||
"implemented-proposals/reliable-vote-transmission",
|
||||
"implemented-proposals/repair-service",
|
||||
"implemented-proposals/testing-programs",
|
||||
"implemented-proposals/readonly-accounts",
|
||||
"implemented-proposals/staking-rewards",
|
||||
"implemented-proposals/rent",
|
||||
"implemented-proposals/durable-tx-nonces",
|
||||
"implemented-proposals/validator-timestamp-oracle",
|
||||
"implemented-proposals/commitment",
|
||||
"implemented-proposals/snapshot-verification",
|
||||
"implemented-proposals/cross-program-invocation",
|
||||
"implemented-proposals/program-derived-addresses",
|
||||
],
|
||||
"Accepted Design Proposals": [
|
||||
"proposals/accepted-design-proposals",
|
||||
"proposals/ledger-replication-to-implement",
|
||||
"proposals/optimistic-confirmation-and-slashing",
|
||||
"proposals/vote-signing-to-implement",
|
||||
"proposals/cluster-test-framework",
|
||||
"proposals/validator-proposal",
|
||||
"proposals/simple-payment-and-state-verification",
|
||||
"proposals/interchain-transaction-verification",
|
||||
"proposals/snapshot-verification",
|
||||
"proposals/bankless-leader",
|
||||
"proposals/slashing",
|
||||
"proposals/tick-verification",
|
||||
"proposals/block-confirmation",
|
||||
"proposals/rust-clients",
|
||||
"proposals/optimistic_confirmation",
|
||||
],
|
||||
},
|
||||
};
|
Binary file not shown.
@@ -1,4 +1,6 @@
|
||||
# Table of contents
|
||||
---
|
||||
title: Table of contents
|
||||
---
|
||||
|
||||
* [Introduction](introduction.md)
|
||||
* [Wallet Guide](wallet-guide/README.md)
|
||||
|
@@ -1,4 +1,6 @@
|
||||
# Programming Model
|
||||
---
|
||||
title: Programming Model
|
||||
---
|
||||
|
||||
An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to _programs_ deployed by app developers beforehand. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed sequentially and atomically for each transaction. If any instruction is invalid, all account changes in the transaction are discarded.
|
||||
|
||||
@@ -18,9 +20,9 @@ Each instruction specifies a single program account \(which must be marked execu
|
||||
|
||||
## Deploying Programs to a Cluster
|
||||
|
||||

|
||||

|
||||
|
||||
As shown in the diagram above, a program author creates a program and compiles it to an ELF shared object containing BPF bytecode and uploads it to the Solana cluster with a special _deploy_ transaction. The cluster makes it available to clients via a _program ID_. The program ID is a _address_ specified when deploying and is used to reference the program in subsequent transactions.
|
||||
As shown in the diagram above, a program author creates a program and compiles it to an ELF shared object containing BPF bytecode and uploads it to the Solana cluster with a special _deploy_ transaction. The cluster makes it available to clients via a _program ID_. The program ID is an _address_ specified when deploying and is used to reference the program in subsequent transactions.
|
||||
|
||||
A program may be written in any programming language that can target the Berkeley Packet Filter \(BPF\) safe execution environment. The Solana SDK offers the best support for C/C++ and Rust programs, which are compiled to BPF using the [LLVM compiler infrastructure](https://llvm.org).
|
||||
|
||||
@@ -28,7 +30,7 @@ A program may be written in any programming language that can target the Berkley
|
||||
|
||||
If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how.
|
||||
|
||||
Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay ["rent"](rent.md) to stay there. Each validator periodically scans all accounts and collects rent. Any account that drops to zero lamports is purged.
|
||||
Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay ["rent"](apps/rent.md) to stay there. Each validator periodically scans all accounts and collects rent. Any account that drops to zero lamports is purged.
|
||||
|
||||
In the same way that a Linux user uses a path to look up a file, a Solana client uses an _address_ to look up an account. The address is usually a 256-bit public key. To create an account with a public-key address, the client generates a _keypair_ and registers its public key using the `CreateAccount` instruction with preallocated fixed storage size in bytes. In fact, the account address can be an arbitrary 32 bytes, and there is a mechanism for advanced users to create derived addresses (`CreateAccountWithSeed`). Addresses are presented in Base58 encoding on user interfaces.
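For a concrete picture of that flow, here is a minimal sketch (not part of this diff) using the `solana_sdk` Rust crate: the client generates a keypair and builds a `CreateAccount` instruction for the corresponding address. The payer keypair, lamport amount, data size, and owner program are illustrative values only.

```rust
use solana_sdk::{
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction, system_program,
};

fn main() {
    // An existing, funded keypair that pays for the new account (illustrative).
    let payer = Keypair::new();
    // The generated keypair whose public key becomes the new account's address.
    let new_account = Keypair::new();

    let lamports: u64 = 1_000_000; // enough to cover rent (illustrative amount)
    let space: u64 = 64; // preallocated data size in bytes
    let owner: Pubkey = system_program::id(); // program that will own the account

    // Build the CreateAccount instruction; in a real client it is placed in a
    // transaction signed by both `payer` and `new_account` and sent to a cluster.
    let instruction = system_instruction::create_account(
        &payer.pubkey(),
        &new_account.pubkey(),
        lamports,
        space,
        &owner,
    );

    println!("new account address: {}", new_account.pubkey());
    println!("instruction targets program: {}", instruction.program_id);
}
```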
|
||||
|
docs/src/apps/break.md (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
---
|
||||
title: "Example: Break"
|
||||
---
|
||||
|
||||
[Break](https://break.solana.com/) is a React app that gives users a visceral
|
||||
feeling for just how fast and high-performance the Solana network really is.
|
||||
Can you _break_ the Solana blockchain?
|
||||
During a 15-second playthrough, each click of a button or keystroke
|
||||
sends a new transaction to the cluster. Smash the keyboard as fast as you can
|
||||
and watch your transactions get finalized in real time while the network takes
|
||||
it all in stride!
|
||||
|
||||
Break can be played on our Devnet, Testnet and Mainnet Beta networks. Plays are
|
||||
free on Devnet and Testnet, where the session is funded by a network faucet.
|
||||
On Mainnet Beta, users pay 0.08 SOL per game to play. The session account can
|
||||
be funded by a local keystore wallet or by scanning a QR code from Trust Wallet
|
||||
to transfer the tokens.
|
||||
|
||||
[Click here to play Break](https://break.solana.com/)
|
||||
|
||||
## Build and run Break locally
|
||||
|
||||
First fetch the latest version of the example code:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/solana-labs/break.git
|
||||
$ cd break
|
||||
```
|
||||
|
||||
Next, follow the steps in the git repository's
|
||||
[README](https://github.com/solana-labs/break/blob/master/README.md).
|
@@ -1,4 +1,6 @@
|
||||
# Builtin Programs
|
||||
---
|
||||
title: Builtin Programs
|
||||
---
|
||||
|
||||
Solana contains a small handful of builtin programs, which are required to run
|
||||
validator nodes. Unlike third-party programs, the builtin programs are part of
|
||||
@@ -18,15 +20,15 @@ programs, as well include instructions from third-party programs.
|
||||
|
||||
Create accounts and transfer lamports between them
|
||||
|
||||
* Program ID: `11111111111111111111111111111111`
|
||||
* Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html)
|
||||
- Program ID: `11111111111111111111111111111111`
|
||||
- Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html)
|
||||
|
||||
## Config Program
|
||||
|
||||
Add configuration data to the chain and the list of public keys that are permitted to modify it
|
||||
|
||||
* Program ID: `Config1111111111111111111111111111111111111`
|
||||
* Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html)
|
||||
- Program ID: `Config1111111111111111111111111111111111111`
|
||||
- Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html)
|
||||
|
||||
Unlike the other programs, the Config program does not define any individual
|
||||
instructions. It has just one implicit instruction, a "store" instruction. Its
|
||||
@@ -37,25 +39,25 @@ data to store in it.
|
||||
|
||||
Create stake accounts and delegate it to validators
|
||||
|
||||
* Program ID: `Stake11111111111111111111111111111111111111`
|
||||
* Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html)
|
||||
- Program ID: `Stake11111111111111111111111111111111111111`
|
||||
- Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html)
|
||||
|
||||
## Vote Program
|
||||
|
||||
Create vote accounts and vote on blocks
|
||||
|
||||
* Program ID: `Vote111111111111111111111111111111111111111`
|
||||
* Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html)
|
||||
- Program ID: `Vote111111111111111111111111111111111111111`
|
||||
- Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html)
|
||||
|
||||
## BPF Loader
|
||||
|
||||
Add programs to the chain.
|
||||
|
||||
* Program ID: `BPFLoader1111111111111111111111111111111111`
|
||||
* Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html)
|
||||
- Program ID: `BPFLoader1111111111111111111111111111111111`
|
||||
- Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html)
|
||||
|
||||
The BPF Loader marks itself as its "owner" of the executable account it
|
||||
creates to store your program. When a user invokes an instruction via a
|
||||
program ID, the Solana runtime will load both your executable account and its
|
||||
owner, the BPF Loader. The runtime then passes your program to the BPF Loader
|
||||
to process the instruction.
|
||||
to process the instruction.
|
Some files were not shown because too many files have changed in this diff.