Compare commits
69 Commits
6f098e0145
f90bc20a8b
60074c9d36
5d9354fca7
0ea09d75ed
f475a46df6
5681a24896
214aba6d2f
fa551e5fc1
d9a5a86d10
83ad921ad6
5753c719bd
322e2e0c6a
371fdc6495
d23f2b5754
a50a015542
353cfb1980
79d737e760
8745034cec
db979b30c4
a92855c995
5b006eba57
32a728d585
1b3be91e3c
2509002fe4
9c9a690d0d
216cc34224
71f1459ef9
f84bdb7d81
ed59c58a72
de941f4074
b7fb050d09
9ee2e768d6
d6d3a3c3d8
3e229b248f
0470072436
f74fa60c8b
c189767090
c82c18353d
da58a272dd
001f5fbb6b
63cd452ab5
6ee77e9754
cee22262fc
0d13352916
78a9832f13
795cf14650
8c112e8bc4
8e6d213459
b33df42640
e0462e6933
1f5e30a366
633eeb1586
c1148a6da3
713e86670d
c004c726e7
5ffb8631e0
fd32a0280e
e76f202eb3
ba4558cb92
74e5577dd4
b878002cf5
f111250e3b
3d91f650db
91a88cda6a
2128c17ed0
7b819c9b74
eec5c661af
0398f6b87a
.mergify.yml (16 changes)

@@ -19,6 +19,14 @@ pull_request_rules:
label:
add:
- automerge
- name: v0.21 backport
conditions:
- base=master
- label=v0.21
actions:
backport:
branches:
- v0.21
- name: v0.22 backport
conditions:
- base=master
@@ -35,11 +43,3 @@ pull_request_rules:
backport:
branches:
- v0.23
- name: v0.24 backport
conditions:
- base=master
- label=v0.24
actions:
backport:
branches:
- v0.24
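For reference, a complete backport rule in `.mergify.yml` follows the pattern visible in the hunk above: a `base=master` condition, a version label condition, and a `backport` action targeting the matching release branch. A minimal sketch using the v0.21 rule from this diff, with the YAML nesting restored (the flattened diff above does not preserve indentation):

```yaml
pull_request_rules:
  - name: v0.21 backport
    conditions:
      - base=master
      - label=v0.21
    actions:
      backport:
        branches:
          - v0.21
```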
Cargo.lock (generated, 1446 changes)

File diff suppressed because it is too large
@@ -4,10 +4,7 @@ members = [
"bench-streamer",
"bench-tps",
"banking-bench",
"chacha",
"chacha-cuda",
"chacha-sys",
"cli-config",
"client",
"core",
"faucet",
@@ -41,8 +38,6 @@ members = [
"programs/vest",
"programs/vote",
"archiver",
"archiver-lib",
"archiver-utils",
"runtime",
"sdk",
"sdk-c",
@@ -50,6 +45,7 @@ members = [
"sys-tuner",
"upload-perf",
"net-utils",
"fixed-buf",
"vote-signer",
"cli",
"rayon-threadlimit",
RELEASE.md (19 changes)

@@ -140,6 +140,25 @@ TODO: Documentation update procedure is WIP as we move to gitbook

Document the new recommended version by updating `book/src/running-archiver.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.

#### Publish updated Book
We maintain three copies of the "book" as official documentation:

1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
https://solana-labs.github.io/book/

2) "Book-edge" tracks the tip of the master branch and updates automatically.
https://solana-labs.github.io/book-edge/

3) "Book-beta" tracks the tip of the beta branch and updates automatically.
https://solana-labs.github.io/book-beta/

To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
```bash
PUBLISH_BOOK_TAG=v0.16.6
```
https://buildkite.com/solana-labs/manual-update-book

### Update software on testnet.solana.com

The testnet running on testnet.solana.com is set to use a fixed release tag
@@ -1,39 +0,0 @@
[package]
name = "solana-archiver-lib"
version = "0.23.2"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "0.23.2" }
solana-storage-program = { path = "../programs/storage", version = "0.23.2" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.44"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-chacha = { path = "../chacha", version = "0.23.2" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.2" }
solana-ledger = { path = "../ledger", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-perf = { path = "../perf", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-core = { path = "../core", version = "0.23.2" }
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.2" }
solana-metrics = { path = "../metrics", version = "0.23.2" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_lib"
@@ -1,11 +0,0 @@
#[macro_use]
extern crate log;

#[macro_use]
extern crate serde_derive;

#[macro_use]
extern crate solana_metrics;

pub mod archiver;
mod result;
@@ -1,48 +0,0 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
use std::any::Any;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum ArchiverError {
#[error("IO error")]
IO(#[from] std::io::Error),
#[error("blockstore error")]
BlockstoreError(#[from] blockstore::BlockstoreError),
#[error("crossbeam error")]
CrossbeamSendError(#[from] crossbeam_channel::SendError<u64>),
#[error("send error")]
SendError(#[from] std::sync::mpsc::SendError<u64>),
#[error("join error")]
JoinError(Box<dyn Any + Send + 'static>),
#[error("transport error")]
TransportError(#[from] transport::TransportError),
#[error("client error")]
ClientError(#[from] client_error::ClientError),
#[error("Json parsing error")]
JsonError(#[from] serde_json::error::Error),
#[error("Storage account has no balance")]
EmptyStorageAccountBalance,
#[error("No RPC peers..")]
NoRpcPeers,
#[error("Couldn't download full segment")]
SegmentDownloadError,
}

impl std::convert::From<Box<dyn Any + Send + 'static>> for ArchiverError {
fn from(e: Box<dyn Any + Send + 'static>) -> ArchiverError {
ArchiverError::JoinError(e)
}
}
@@ -1,26 +0,0 @@
[package]
name = "solana-archiver-utils"
version = "0.23.2"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha = { path = "../chacha", version = "0.23.2" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.2" }
solana-ledger = { path = "../ledger", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-perf = { path = "../perf", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_utils"
@@ -1,120 +0,0 @@
#[macro_use]
extern crate log;

use solana_sdk::hash::{Hash, Hasher};
use std::fs::File;
use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;

pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
let in_file = File::open(in_path)?;
let metadata = in_file.metadata()?;
let mut buffer_file = BufReader::new(in_file);

let mut hasher = Hasher::default();
let sample_size = size_of::<Hash>();
let sample_size64 = sample_size as u64;
let mut buf = vec![0; sample_size];

let file_len = metadata.len();
if file_len < sample_size64 {
return Err(io::Error::new(ErrorKind::Other, "file too short!"));
}
for offset in sample_offsets {
if *offset > (file_len - sample_size64) / sample_size64 {
return Err(io::Error::new(ErrorKind::Other, "offset too large"));
}
buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
trace!("sampling @ {} ", *offset);
match buffer_file.read(&mut buf) {
Ok(size) => {
assert_eq!(size, buf.len());
hasher.hash(&buf);
}
Err(e) => {
warn!("Error sampling file");
return Err(e);
}
}
}

Ok(hasher.result())
}

#[cfg(test)]
mod tests {
use super::*;
use rand::{thread_rng, Rng};
use std::fs::{create_dir_all, remove_file};
use std::io::Write;
use std::path::PathBuf;

extern crate hex;

fn tmp_file_path(name: &str) -> PathBuf {
use std::env;
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let mut rand_bits = [0u8; 32];
thread_rng().fill(&mut rand_bits[..]);

let mut path = PathBuf::new();
path.push(out_dir);
path.push("tmp");
create_dir_all(&path).unwrap();

path.push(format!("{}-{:?}", name, hex::encode(rand_bits)));
println!("path: {:?}", path);
path
}

#[test]
fn test_sample_file() {
solana_logger::setup();
let in_path = tmp_file_path("test_sample_file_input.txt");
let num_strings = 4096;
let string = "12foobar";
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..num_strings {
in_file.write(string.as_bytes()).unwrap();
}
}
let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
let samples: Vec<_> = (0..num_samples).collect();
let res = sample_file(&in_path, samples.as_slice());
let ref_hash: Hash = Hash::new(&[
173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
]);
let res = res.unwrap();
assert_eq!(res, ref_hash);

// Sample just past the end
assert!(sample_file(&in_path, &[num_samples]).is_err());
remove_file(&in_path).unwrap();
}

#[test]
fn test_sample_file_invalid_offset() {
let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..4096 {
in_file.write("123456foobar".as_bytes()).unwrap();
}
}
let samples = [0, 200000];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
remove_file(in_path).unwrap();
}

#[test]
fn test_sample_file_missing_file() {
let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
let samples = [0, 5];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
}
}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.23.2" }
solana-core = { path = "../core", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-metrics = { path = "../metrics", version = "0.23.2" }
solana-archiver-lib = { path = "../archiver-lib", version = "0.23.2" }
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
@@ -1,6 +1,5 @@
use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{
input_validators::is_keypair,
keypair::{
@@ -9,6 +8,7 @@ use solana_clap_utils::{
},
};
use solana_core::{
archiver::Archiver,
cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
};
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.23.2" }
solana-ledger = { path = "../ledger", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-runtime = { path = "../runtime", version = "0.23.2" }
solana-measure = { path = "../measure", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-core = { path = "../core", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
rand = "0.6.5"
crossbeam-channel = "0.3"
@@ -162,8 +162,8 @@ fn main() {
// If it is dropped before poh_service, then poh_service will error when
// calling send() on the channel.
let signal_receiver = Arc::new(signal_receiver);
let mut total_us = 0;
let mut tx_total_us = 0;
let mut total = 0;
let mut tx_total = 0;
let mut txs_processed = 0;
let mut root = 1;
let collector = Pubkey::new_rand();
@@ -173,7 +173,6 @@ fn main() {
chunk_len,
num_threads,
};
let mut total_sent = 0;
for _ in 0..ITERS {
let now = Instant::now();
let mut sent = 0;
@@ -224,7 +223,7 @@ fn main() {
);
assert!(txs_processed < bank.transaction_count());
txs_processed = bank.transaction_count();
tx_total_us += duration_as_us(&now.elapsed());
tx_total += duration_as_us(&now.elapsed());

let mut poh_time = Measure::start("poh_time");
poh_recorder.lock().unwrap().reset(
@@ -256,21 +255,20 @@ fn main() {
poh_time.as_us(),
);
} else {
tx_total_us += duration_as_us(&now.elapsed());
tx_total += duration_as_us(&now.elapsed());
}

// This signature clear may not actually clear the signatures
// in this chunk, but since we rotate between CHUNKS then
// we should clear them by the time we come around again to re-use that chunk.
bank.clear_signatures();
total_us += duration_as_us(&now.elapsed());
total += duration_as_us(&now.elapsed());
debug!(
"time: {} us checked: {} sent: {}",
duration_as_us(&now.elapsed()),
txes / CHUNKS,
sent,
);
total_sent += sent;

if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() {
@@ -286,11 +284,11 @@ fn main() {
}
eprintln!(
"{{'name': 'banking_bench_total', 'median': '{}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
total / ITERS as u64,
);
eprintln!(
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
tx_total / ITERS as u64,
);

drop(verified_sender);
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.2" }
solana-core = { path = "../core", version = "0.23.2" }
solana-genesis = { path = "../genesis", version = "0.23.2" }
solana-client = { path = "../client", version = "0.23.2" }
solana-faucet = { path = "../faucet", version = "0.23.2" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-metrics = { path = "../metrics", version = "0.23.2" }
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-runtime = { path = "../runtime", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-genesis = { path = "../genesis", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
untrusted = "0.7.0"
ws = "0.9.1"

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.23.2" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }
@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.23.2" }
solana-core = { path = "../core", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
@@ -1,5 +1,6 @@
use clap::{crate_description, crate_name, App, Arg};
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_core::result::Result;
use solana_core::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
@@ -7,7 +8,7 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle, Result};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.2" }
solana-core = { path = "../core", version = "0.23.2" }
solana-genesis = { path = "../genesis", version = "0.23.2" }
solana-client = { path = "../client", version = "0.23.2" }
solana-faucet = { path = "../faucet", version = "0.23.2" }
solana-librapay = { path = "../programs/librapay", version = "0.23.2", optional = true }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-metrics = { path = "../metrics", version = "0.23.2" }
solana-measure = { path = "../measure", version = "0.23.2" }
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-runtime = { path = "../runtime", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.23.2", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-genesis = { path = "../genesis", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-librapay = { path = "../programs/librapay", version = "0.22.3", optional = true }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.3", optional = true }

[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.3.1"
solana-local-cluster = { path = "../local-cluster", version = "0.23.2" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
@@ -21,7 +21,8 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{
collections::{HashSet, VecDeque},
cmp,
collections::VecDeque,
net::SocketAddr,
process::exit,
sync::{
@@ -65,9 +66,10 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
}

pub fn do_bench_tps<T>(
client: Arc<T>,
clients: Vec<T>,
config: Config,
gen_keypairs: Vec<Keypair>,
keypair0_balance: u64,
libra_args: Option<LibraKeys>,
) -> u64
where
@@ -80,9 +82,13 @@ where
duration,
tx_count,
sustained,
num_lamports_per_account,
..
} = config;

let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];

let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
assert!(gen_keypairs.len() >= 2 * tx_count);
@@ -109,17 +115,20 @@ where
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
info!("Sampling TPS every {} second...", sample_period);
let sample_thread = {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
};
let v_threads: Vec<_> = clients
.iter()
.map(|client| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
})
.collect();

let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

@@ -165,10 +174,11 @@ where

// generate and send transactions for the specified duration
let start = Instant::now();
let keypair_chunks = source_keypair_chunks.len();
let keypair_chunks = source_keypair_chunks.len() as u64;
let mut reclaim_lamports_back_to_source_account = false;
let mut chunk_index = 0;
let mut i = keypair0_balance;
while start.elapsed() < duration {
let chunk_index = (i % keypair_chunks) as usize;
generate_txs(
&shared_txs,
&recent_blockhash,
@@ -187,9 +197,7 @@ where
sleep(Duration::from_millis(1));
}
} else {
while !shared_txs.read().unwrap().is_empty()
|| shared_tx_active_thread_count.load(Ordering::Relaxed) > 0
{
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(1));
}
}
@@ -198,11 +206,8 @@ where
// transaction signatures even when blockhash is reused.
dest_keypair_chunks[chunk_index].rotate_left(1);

// Move on to next chunk
chunk_index = (chunk_index + 1) % keypair_chunks;

// Switch directions after transfering for each "chunk"
if chunk_index == 0 {
i += 1;
if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
@@ -210,9 +215,11 @@ where
// Stop the sampling threads so it will collect the stats
exit_signal.store(true, Ordering::Relaxed);

info!("Waiting for sampler threads...");
if let Err(err) = sample_thread.join() {
info!(" join() failed with: {:?}", err);
info!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
info!(" join() failed with: {:?}", err);
}
}

// join the tx send threads
@@ -493,218 +500,177 @@ fn do_tx_transfers<T: Client>(
}
}

fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
Ok(balance) => return balance >= amount,
Err(err) => error!("failed to get balance {:?}", err),
if client
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
return true;
}
}
false
}

trait FundingTransactions<'a> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
);
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
fn sign(&mut self, blockhash: Hash);
fn send<T: Client>(&self, client: &Arc<T>);
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
}

impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
) {
self.make(to_fund);

let mut tries = 0;
while !self.is_empty() {
info!(
"{} {} each to {} accounts in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
to_lamports,
self.len() * MAX_SPENDS_PER_TX as usize,
self.len(),
);

let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());

// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(&client);

// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));

self.verify(&client, to_lamports);

// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
}

fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) {
let mut make_txs = Measure::start("make_txs");
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
&k.pubkey(),
&t,
));
(*k, tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
);
self.extend(to_fund_txs);
}

fn sign(&mut self, blockhash: Hash) {
let mut sign_txs = Measure::start("sign_txs");
self.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
}

fn send<T: Client>(&self, client: &Arc<T>) {
let mut send_txs = Measure::start("send_txs");
self.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", self.len(), send_txs.as_us());
}

fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
let starting_txs = self.len();
let verified_txs = Arc::new(AtomicUsize::new(0));
let too_many_failures = Arc::new(AtomicBool::new(false));
let loops = if starting_txs < 1000 { 3 } else { 1 };
// Only loop multiple times for small (quick) transaction batches
for _ in 0..loops {
let failed_verify = Arc::new(AtomicUsize::new(0));
let client = client.clone();
let verified_txs = &verified_txs;
let failed_verify = &failed_verify;
let too_many_failures = &too_many_failures;
let verified_set: HashSet<Pubkey> = self
.par_iter()
.filter_map(move |(k, tx)| {
if too_many_failures.load(Ordering::Relaxed) {
return None;
}

let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
failed_verify.fetch_add(1, Ordering::Relaxed);
None
};

let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
if failed_verify > 100 && failed_verify > verified_txs {
too_many_failures.store(true, Ordering::Relaxed);
warn!(
"Too many failed transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}
if remaining_count % 100 == 0 {
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}

verified
})
.collect();

self.retain(|(k, _)| !verified_set.contains(&k.pubkey()));
if self.is_empty() {
break;
}
info!("Looping verifications");

let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
sleep(Duration::from_millis(100));
}
}
}

/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
/// on every iteration. This allows us to replay the transfers because the source is either empty,
/// or full
pub fn fund_keys<T: 'static + Client + Send + Sync>(
client: Arc<T>,
pub fn fund_keys<T: Client>(
client: &T,
source: &Keypair,
dests: &[Keypair],
total: u64,
max_fee: u64,
lamports_per_account: u64,
mut extra: u64,
) {
let mut funded: Vec<&Keypair> = vec![source];
let mut funded_funds = total;
let mut not_funded: Vec<&Keypair> = dests.iter().collect();
while !not_funded.is_empty() {
// Build to fund list and prepare funding sources for next iteration
let mut new_funded: Vec<&Keypair> = vec![];
let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![];
let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX;
for f in funded {
let start = not_funded.len() - MAX_SPENDS_PER_TX as usize;
let dests: Vec<_> = not_funded.drain(start..).collect();
let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect();
to_fund.push((f, spends));
new_funded.extend(dests.into_iter());
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);

info!(
"funding keys {} with lamports: {:?} total: {}",
dests.len(),
client.get_balance(&source.pubkey()),
total
);
while !notfunded.is_empty() {
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
let mut to_fund = vec![];
info!("creating from... {}", funded.len());
let mut build_to_fund = Measure::start("build_to_fund");
for f in &mut funded {
let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
if max_units == 0 {
break;
}
let start = notfunded.len() - max_units as usize;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
.collect();
notfunded[start..]
.iter()
.for_each(|k| new_funded.push((k, per_unit)));
notfunded.truncate(start);
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
}
build_to_fund.stop();
debug!("build to_fund vec: {}us", build_to_fund.as_us());

// try to transfer a "few" at a time with recent blockhash
// assume 4MB network buffers, and 512 byte packets
const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;

to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund(
&client,
chunk,
to_lamports,
);
});
let mut tries = 0;

info!("funded: {} left: {}", new_funded.len(), not_funded.len());
let mut make_txs = Measure::start("make_txs");
// this set of transactions just initializes us for bookkeeping
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let tx = Transaction::new_unsigned_instructions(
system_instruction::transfer_many(&k.pubkey(), &m),
);
(k.clone(), tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
);

let amount = chunk[0].1[0].1;

while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.message().instructions.len());

info!(
"{} {} to {} in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
amount,
receivers,
to_fund_txs.len(),
);

let (blockhash, _fee_calculator) = get_recent_blockhash(client);

// re-sign retained to_fund_txes with updated blockhash
let mut sign_txs = Measure::start("sign_txs");
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());

let mut send_txs = Measure::start("send_txs");
to_fund_txs.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());

let mut verify_txs = Measure::start("verify_txs");
let mut starting_txs = to_fund_txs.len();
let mut verified_txs = 0;
let mut failed_verify = 0;
// Only loop multiple times for small (quick) transaction batches
for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
let mut timer = Instant::now();
to_fund_txs.retain(|(_, tx)| {
if timer.elapsed() >= Duration::from_secs(5) {
if failed_verify > 0 {
debug!("total txs failed verify: {}", failed_verify);
}
info!(
"Verifying transfers... {} remaining",
starting_txs - verified_txs
);
timer = Instant::now();
}
let verified = verify_funding_transfer(client, &tx, amount);
if verified {
verified_txs += 1;
} else {
failed_verify += 1;
}
!verified
});
if to_fund_txs.is_empty() {
break;
}
debug!("Looping verifications");
info!("Verifying transfers... {} remaining", to_fund_txs.len());
sleep(Duration::from_millis(100));
}
starting_txs -= to_fund_txs.len();
verify_txs.stop();
debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());

// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
});
info!("funded: {} left: {}", new_funded.len(), notfunded.len());
funded = new_funded;
funded_funds = to_lamports;
}
}
@@ -712,14 +678,14 @@ pub fn airdrop_lamports<T: Client>(
client: &T,
faucet_addr: &SocketAddr,
id: &Keypair,
desired_balance: u64,
tx_count: u64,
) -> Result<()> {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(starting_balance);
info!("starting balance {}", starting_balance);

if starting_balance < desired_balance {
let airdrop_amount = desired_balance - starting_balance;
if starting_balance < tx_count {
let airdrop_amount = tx_count - starting_balance;
info!(
"Airdropping {:?} lamports from {} for {}",
airdrop_amount,
@@ -844,6 +810,17 @@ fn compute_and_report_stats(
);
}

// First transfer 2/3 of the lamports to the dest accounts
// then ping-pong 1/3 of the lamports back to the other account
// this leaves 1/3 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
return false;
}

i % (keypair_chunks * num_lamports_per_account / 3) == 0
}

pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
@@ -1027,25 +1004,23 @@ fn fund_move_keys<T: Client>(
info!("done funding keys, took {} ms", funding_time.as_ms());
}

pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
client: Arc<T>,
pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
faucet_addr: Option<SocketAddr>,
funding_key: &Keypair,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");

// Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
let first_key = keypairs[0].pubkey();
let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);

// Sample the last keypair, to check if funding was already completed
let last_key = keypairs[keypair_count - 1].pubkey();
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
// Sample the first keypair, see if it has lamports, if so then resume.
// This logic is to prevent lamport loss on repeated solana-bench-tps executions
let last_keypair_balance = client
.get_balance(&keypairs[keypair_count - 1].pubkey())
.unwrap_or(0);

#[cfg(feature = "move")]
let mut move_keypairs_ret = None;
@@ -1053,38 +1028,31 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
#[cfg(not(feature = "move"))]
let move_keypairs_ret = None;

// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
// start another bench-tps run without re-funding all of the keypairs, check if the
// keypairs still have at least 80% of the expected funds. That should be enough to
// pay for the transaction fees in a new run.
let enough_lamports = 8 * lamports_per_account / 10;
if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
let (_blockhash, fee_calculator) = get_recent_blockhash(client.as_ref());
let max_fee = fee_calculator.max_lamports_per_signature;
let extra_fees = extra * max_fee;
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
let mut total = lamports_per_account * total_keypairs + extra_fees;
if lamports_per_account > last_keypair_balance {
let (_blockhash, fee_calculator) = get_recent_blockhash(client);
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
if use_move {
total *= 3;
}

let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
info!(
"Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}",
funding_key_balance, max_fee, lamports_per_account, extra, total
);
info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
account_desired_balance, total
);

if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
}

#[cfg(feature = "move")]
{
if use_move {
let libra_genesis_keypair =
create_genesis(&funding_key, client.as_ref(), 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client);
let libra_pay_program_id = upload_payment_script(&funding_key, client);

// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
@@ -1092,7 +1060,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client.as_ref(),
client,
funding_key,
&move_keypairs,
total / 3,
@@ -1117,15 +1085,15 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
funding_key,
&keypairs,
total,
max_fee,
lamports_per_account,
fee_calculator.max_lamports_per_signature,
extra,
);
}

// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(keypair_count);

Ok((keypairs, move_keypairs_ret))
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
}

#[cfg(test)]
@@ -1137,11 +1105,30 @@ mod tests {
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_config::create_genesis_config;

#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(30, 1, 0), false);
assert_eq!(should_switch_directions(30, 1, 1), false);
assert_eq!(should_switch_directions(30, 1, 20), true);
assert_eq!(should_switch_directions(30, 1, 21), false);
assert_eq!(should_switch_directions(30, 1, 30), true);
assert_eq!(should_switch_directions(30, 1, 90), true);
assert_eq!(should_switch_directions(30, 1, 91), false);

assert_eq!(should_switch_directions(30, 2, 0), false);
assert_eq!(should_switch_directions(30, 2, 1), false);
assert_eq!(should_switch_directions(30, 2, 20), false);
assert_eq!(should_switch_directions(30, 2, 40), true);
assert_eq!(should_switch_directions(30, 2, 90), false);
assert_eq!(should_switch_directions(30, 2, 100), true);
assert_eq!(should_switch_directions(30, 2, 101), false);
}

#[test]
fn test_bench_tps_bank_client() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = Arc::new(BankClient::new(bank));
let clients = vec![BankClient::new(bank)];

let mut config = Config::default();
config.id = id;
@@ -1149,24 +1136,23 @@ mod tests {
config.duration = Duration::from_secs(5);

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
.unwrap();

do_bench_tps(client, config, keypairs, None);
do_bench_tps(clients, config, keypairs, 0, None);
}

#[test]
fn test_bench_tps_fund_keys() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = Arc::new(BankClient::new(bank));
let client = BankClient::new(bank);
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();

for kp in &keypairs {
assert_eq!(
@@ -1184,16 +1170,23 @@ mod tests {
let fee_calculator = FeeCalculator::new(11, 0);
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = Arc::new(BankClient::new(bank));
let client = BankClient::new(bank);
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();

let max_fee = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
}
}
}
@@ -4,7 +4,7 @@ use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
use std::{net::SocketAddr, process::exit, time::Duration};

const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::SOL_LAMPORTS;

/// Holds the configuration for a single run of the benchmark
pub struct Config {
@@ -6,13 +6,13 @@ use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit};

/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;

fn main() {
solana_logger::setup_with_default("solana=info");
solana_logger::setup_with_filter("solana=info");
solana_metrics::set_panic_hook("bench-tps");

let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
@@ -82,12 +82,12 @@ fn main() {
);
exit(1);
}
Arc::new(client)
client
} else {
Arc::new(get_client(&nodes))
get_client(&nodes)
};

let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
let path = Path::new(&client_ids_and_stake_file);
let file = File::open(path).unwrap();

@@ -117,10 +117,10 @@ fn main() {
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, None)
(keypairs, None, last_balance)
} else {
generate_and_fund_keypairs(
client.clone(),
&client,
Some(*faucet_addr),
&id,
keypair_count,
@@ -133,5 +133,11 @@ fn main() {
})
};

do_bench_tps(client, cli_config, keypairs, move_keypairs);
do_bench_tps(
vec![client],
cli_config,
keypairs,
keypair_balance,
move_keypairs,
);
}
@@ -9,7 +9,7 @@ use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::{mpsc::channel, Arc};
use std::sync::mpsc::channel;
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
@@ -36,10 +36,10 @@ fn test_bench_tps_local_cluster(config: Config) {
100_000_000,
);

let client = Arc::new(create_client(
let client = create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
VALIDATOR_PORT_RANGE,
));
);

let (addr_sender, addr_receiver) = channel();
run_local_faucet(faucet_keypair, addr_sender, None);
@@ -48,8 +48,8 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
client.clone(),
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
Some(faucet_addr),
&config.id,
keypair_count,
@@ -58,7 +58,7 @@ fn test_bench_tps_local_cluster(config: Config) {
)
.unwrap();

let _total = do_bench_tps(client, config, keypairs, move_keypairs);
let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);

#[cfg(not(debug_assertions))]
assert!(_total > 100);
@@ -24,7 +24,7 @@ msc {
... ;
Validator abox Validator [label="\nmax\nlockout\n"];
|||;
Cluster box Cluster [label="credits redeemed (at epoch)"];

StakerX => Cluster [label="StakeState::RedeemCredits()"];
StakerY => Cluster [label="StakeState::RedeemCredits()"] ;

}
@@ -1,19 +0,0 @@
+----------+
| Bank-Hash|
+----------+
^
|
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
: :
: +--------------+ +-------------+ :
: Hash( | Accounts-Hash| + | Block-Merkle| ) :
: +--------------+ +-------------+ :
: ^ :
+~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
|
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
: +---------------+ +---------------+ +---------------+ :
: Hash( | Hash(Account1)| + | Hash(Account2)| + ... + | Hash(AccountN)| ) :
: +---------------+ +---------------+ +---------------+ :
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
18  book/art/spv-bank-merkle.bob  Normal file

@ -0,0 +1,18 @@
                 +------------+
                 | Bank-Merkle|
                 +------------+
                      ^    ^
                     /      \
    +-----------------+    +-------------+
    | Bank-Diff-Merkle|    | Block-Merkle|
    +-----------------+    +-------------+
             ^   ^
            /     \
    +------+     +--------------------------+
    | Hash |     | Previous Bank-Diff-Merkle|
    +------+     +--------------------------+
       ^    ^
      /      \
+---------------+    +---------------+
| Hash(Account1)|    | Hash(Account2)|
+---------------+    +---------------+
@ -5,9 +5,9 @@ cd "$(dirname "$0")"

usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g')

out=${1:-src/cli/usage.md}
out=${1:-src/api-reference/cli.md}

cat src/cli/.usage.md.header > "$out"
cat src/api-reference/.cli.md > "$out"

section() {
declare mark=${2:-"###"}
@ -3,14 +3,4 @@ set -e

cd "$(dirname "$0")"

# md check
find src -name '*.md' -a \! -name SUMMARY.md |
while read -r file; do
  if ! grep -q '('"${file#src/}"')' src/SUMMARY.md; then
    echo "Error: $file missing from SUMMARY.md"
    exit 1
  fi
done


make -j"$(nproc)" test
@ -1,6 +1,6 @@
BOB_SRCS=$(wildcard art/*.bob)
MSC_SRCS=$(wildcard art/*.msc)
MD_SRCS=$(wildcard src/*.md src/*/*.md)
MD_SRCS=$(wildcard src/*.md)

SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg)
@ -1,31 +1,14 @@
# Table of contents

* [Introduction](introduction.md)
* [Using Solana from the Command-line](cli/README.md)
* [Command-line Usage](cli/usage.md)
* [Paper Wallet](paper-wallet/README.md)
* [Installation](paper-wallet/installation.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [Offline Signing](offline-signing/README.md)
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [Developing Applications](apps/README.md)
* [Example: Web Wallet](apps/webwallet.md)
* [Example: Tic-Tac-Toe](apps/tictactoe.md)
* [Drones](apps/drones.md)
* [Anatomy of a Transaction](transaction.md)
* [JSON RPC API](apps/jsonrpc-api.md)
* [JavaScript API](apps/javascript-api.md)
* [Running a Validator](running-validator/README.md)
* [Validator Requirements](running-validator/validator-reqs.md)
* [Choosing a Testnet](running-validator/validator-testnet.md)
* [Installing the Validator Software](running-validator/validator-software.md)
* [Starting a Validator](running-validator/validator-start.md)
* [Staking](running-validator/validator-stake.md)
* [Monitoring a Validator](running-validator/validator-monitor.md)
* [Publishing Validator Info](running-validator/validator-info.md)
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [Running an Archiver](running-archiver.md)
* [Understanding Solana's Architecture](cluster/README.md)
* [Terminology](terminology.md)
* [Getting Started](getting-started/README.md)
* [Testnet Participation](getting-started/testnet-participation.md)
* [Example Client: Web Wallet](getting-started/webwallet.md)
* [Programming Model](programs/README.md)
* [Example: Tic-Tac-Toe](programs/tictactoe.md)
* [Drones](programs/drones.md)
* [A Solana Cluster](cluster/README.md)
* [Synchronization](cluster/synchronization.md)
* [Leader Rotation](cluster/leader-rotation.md)
* [Fork Generation](cluster/fork-generation.md)

@ -37,13 +20,46 @@
* [Performance Metrics](cluster/performance-metrics.md)
* [Anatomy of a Validator](validator/README.md)
* [TPU](validator/tpu.md)
* [TVU](validator/tvu.md)
* [Blockstore](validator/blockstore.md)
* [TVU](validator/tvu/README.md)
* [Blockstore](validator/tvu/blockstore.md)
* [Gossip Service](validator/gossip.md)
* [The Runtime](validator/runtime.md)
* [Building from Source](building-from-source.md)
* [Terminology](terminology.md)
* [Anatomy of a Transaction](transaction.md)
* [Running a Validator](running-validator/README.md)
* [Validator Requirements](running-validator/validator-reqs.md)
* [Choosing a Testnet](running-validator/validator-testnet.md)
* [Installing the Validator Software](running-validator/validator-software.md)
* [Starting a Validator](running-validator/validator-start.md)
* [Staking](running-validator/validator-stake.md)
* [Monitoring a Validator](running-validator/validator-monitor.md)
* [Publishing Validator Info](running-validator/validator-info.md)
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [Running an Archiver](running-archiver.md)
* [Paper Wallet](paper-wallet/README.md)
* [Installation](paper-wallet/installation.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [Offline Signing](offline-signing/README.md)
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [API Reference](api-reference/README.md)
* [Transaction](api-reference/transaction-api.md)
* [Instruction](api-reference/instruction-api.md)
* [Blockstreamer](api-reference/blockstreamer.md)
* [JSON RPC API](api-reference/jsonrpc-api.md)
* [JavaScript API](api-reference/javascript-api.md)
* [solana CLI](api-reference/cli.md)
* [Accepted Design Proposals](proposals/README.md)
* [Ledger Replication](proposals/ledger-replication-to-implement.md)
* [Secure Vote Signing](proposals/vote-signing-to-implement.md)
* [Cluster Test Framework](proposals/cluster-test-framework.md)
* [Validator](proposals/validator-proposal.md)
* [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
* [Cross-Program Invocation](proposals/cross-program-invocation.md)
* [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
* [Snapshot Verification](proposals/snapshot-verification.md)
* [Bankless Leader](proposals/bankless-leader.md)
* [Slashing](proposals/slashing.md)
* [Implemented Design Proposals](implemented-proposals/README.md)
* [Blockstore](implemented-proposals/blockstore.md)
* [Cluster Software Installation and Updates](implemented-proposals/installer.md)
* [Cluster Economics](implemented-proposals/ed_overview/README.md)
* [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)

@ -54,7 +70,6 @@
* [Replication-client Economics](implemented-proposals/ed_overview/ed_replication_client_economics/README.md)
* [Storage-replication Rewards](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md)
* [Replication-client Reward Auto-delegation](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md)
* [Storage Rent Economics](implemented-proposals/ed_overview/ed_storage_rent_economics.md)
* [Economic Sustainability](implemented-proposals/ed_overview/ed_economic_sustainability.md)
* [Attack Vectors](implemented-proposals/ed_overview/ed_attack_vectors.md)
* [Economic Design MVP](implemented-proposals/ed_overview/ed_mvp.md)

@ -73,19 +88,3 @@
* [Rent](implemented-proposals/rent.md)
* [Durable Transaction Nonces](implemented-proposals/durable-tx-nonces.md)
* [Validator Timestamp Oracle](implemented-proposals/validator-timestamp-oracle.md)
* [Commitment](implemented-proposals/commitment.md)
* [Snapshot Verification](implemented-proposals/snapshot-verification.md)
* [Accepted Design Proposals](proposals/README.md)
* [Ledger Replication](proposals/ledger-replication-to-implement.md)
* [Secure Vote Signing](proposals/vote-signing-to-implement.md)
* [Cluster Test Framework](proposals/cluster-test-framework.md)
* [Validator](proposals/validator-proposal.md)
* [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
* [Cross-Program Invocation](proposals/cross-program-invocation.md)
* [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
* [Snapshot Verification](proposals/snapshot-verification.md)
* [Bankless Leader](proposals/bankless-leader.md)
* [Slashing](proposals/slashing.md)
* [Tick Verification](proposals/tick-verification.md)
* [Block Confirmation](proposals/block-confirmation.md)
* [ABI Management](proposals/abi-management.md)
4  book/src/api-reference/README.md  Normal file

@ -0,0 +1,4 @@
# API Reference

The following sections contain API reference material you may find useful when developing applications utilizing a Solana cluster.

28  book/src/api-reference/blockstreamer.md  Normal file

@ -0,0 +1,28 @@
# Blockstreamer

Solana supports a node type called a _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication.

A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node.

To run a blockstreamer, include the argument `no-signer` and \(optionally\) a `blockstream` socket location:

```bash
$ ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
```

The stream will output a series of JSON objects:

* An Entry event JSON object is sent when each ledger entry is processed, with the following fields:
  * `dt`, the system datetime, as RFC3339-formatted string
  * `t`, the event type, always "entry"
  * `s`, the slot height, as unsigned 64-bit integer
  * `h`, the tick height, as unsigned 64-bit integer
  * `entry`, the entry, as JSON object
* A Block event JSON object is sent when a block is complete, with the following fields:
  * `dt`, the system datetime, as RFC3339-formatted string
  * `t`, the event type, always "block"
  * `s`, the slot height, as unsigned 64-bit integer
  * `h`, the tick height, as unsigned 64-bit integer
  * `l`, the slot leader id, as base-58 encoded string
  * `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string
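For consumers of the stream, a minimal reader sketch follows. It is not part of the repository: the socket path, the assumption that the reader binds the socket before the validator connects to it, and the use of the `serde_json` crate are illustrative choices based only on the field list above.

```rust
// Minimal blockstream consumer sketch (assumptions noted above).
// Requires the serde_json crate in Cargo.toml.
use std::io::{BufRead, BufReader};
use std::os::unix::net::UnixListener;

fn main() -> std::io::Result<()> {
    // Assumed socket path; pass the same path as <SOCKET> to --blockstream.
    let listener = UnixListener::bind("/tmp/blockstream.sock")?;
    let (stream, _addr) = listener.accept()?;

    // The stream is assumed to be newline-delimited JSON, one event per line.
    for line in BufReader::new(stream).lines() {
        let line = line?;
        let event: serde_json::Value =
            serde_json::from_str(&line).expect("each line should be one JSON object");
        match event["t"].as_str() {
            Some("entry") => println!("entry at slot {}, tick {}", event["s"], event["h"]),
            Some("block") => println!("block {} by leader {}", event["hash"], event["l"]),
            _ => eprintln!("unrecognized event: {}", line),
        }
    }
    Ok(())
}
```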
File diff suppressed because it is too large

38  book/src/api-reference/instruction-api.md  Normal file
@ -0,0 +1,38 @@
# Instruction

For the purposes of building a [Transaction](../transaction.md), a more verbose instruction format is used:

* **Instruction:**
  * **program\_id:** The pubkey of the on-chain program that executes the instruction
  * **accounts:** An ordered list of accounts that should be passed to the program processing the instruction, including metadata detailing if an account is a signer of the transaction and if it is a credit only account.
  * **data:** A byte array that is passed to the program executing the instruction

A more compact form is actually included in a `Transaction`:

* **CompiledInstruction:**
  * **program\_id\_index:** The index of the `program_id` in the `account_keys` list
  * **accounts:** An ordered list of indices into `account_keys` specifying the accounts that should be passed to the program processing the instruction.
  * **data:** A byte array that is passed to the program executing the instruction
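A minimal Rust sketch of the two shapes described above; the field names follow the lists, but the `Pubkey` and `AccountMeta` placeholders and the example values are illustrative stand-ins, not the SDK definitions.

```rust
#![allow(dead_code)]
// Illustrative only: `Pubkey` stands in for the SDK type, and `AccountMeta`
// collapses the signer / credit-only metadata described above into two booleans.
struct Pubkey([u8; 32]);

struct AccountMeta {
    pubkey: Pubkey,
    is_signer: bool,
    is_credit_only: bool,
}

// Verbose form used while building a transaction.
struct Instruction {
    program_id: Pubkey,
    accounts: Vec<AccountMeta>,
    data: Vec<u8>,
}

// Compact form stored in a Transaction: pubkeys are replaced by
// indices into the transaction-wide `account_keys` list.
#[derive(Debug)]
struct CompiledInstruction {
    program_id_index: u8,
    accounts: Vec<u8>,
    data: Vec<u8>,
}

fn main() {
    let compiled = CompiledInstruction {
        program_id_index: 4,     // position of the program id in account_keys
        accounts: vec![1, 2, 3], // positions of the accounts passed to the program
        data: vec![0, 1, 2],
    };
    println!("{:?}", compiled);
}
```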
@ -25,29 +25,24 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
|
||||
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
|
||||
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
|
||||
* [getInflation](jsonrpc-api.md#getinflation)
|
||||
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
|
||||
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
|
||||
* [getNumBlocksSinceSignatureConfirmation](jsonrpc-api.md#getnumblockssincesignatureconfirmation)
|
||||
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
|
||||
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
|
||||
* [getSignatureConfirmation](jsonrpc-api.md#getsignatureconfirmation)
|
||||
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
|
||||
* [getSlot](jsonrpc-api.md#getslot)
|
||||
* [getSlotLeader](jsonrpc-api.md#getslotleader)
|
||||
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
|
||||
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
|
||||
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
|
||||
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
|
||||
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
|
||||
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
|
||||
* [getVersion](jsonrpc-api.md#getversion)
|
||||
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
|
||||
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
|
||||
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
|
||||
* [sendTransaction](jsonrpc-api.md#sendtransaction)
|
||||
* [setLogFilter](jsonrpc-api.md#setlogfilter)
|
||||
* [validatorExit](jsonrpc-api.md#validatorexit)
|
||||
* [startSubscriptionChannel](jsonrpc-api.md#startsubscriptionchannel)
|
||||
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
|
||||
* [accountSubscribe](jsonrpc-api.md#accountsubscribe)
|
||||
* [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe)
|
||||
@ -55,17 +50,15 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [programUnsubscribe](jsonrpc-api.md#programunsubscribe)
|
||||
* [signatureSubscribe](jsonrpc-api.md#signaturesubscribe)
|
||||
* [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe)
|
||||
* [slotSubscribe](jsonrpc-api.md#slotsubscribe)
|
||||
* [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe)
|
||||
|
||||
## Request Formatting
|
||||
|
||||
To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:
|
||||
|
||||
* `jsonrpc: <string>`, set to `"2.0"`
|
||||
* `id: <number>`, a unique client-generated identifying integer
|
||||
* `method: <string>`, a string containing the method to be invoked
|
||||
* `params: <array>`, a JSON array of ordered parameter values
|
||||
* `jsonrpc`, set to `"2.0"`
|
||||
* `id`, a unique client-generated identifying integer
|
||||
* `method`, a string containing the method to be invoked
|
||||
* `params`, a JSON array of ordered parameter values
|
||||
|
||||
Example using curl:
|
||||
|
||||
@ -75,9 +68,9 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
|
||||
|
||||
The response output will be a JSON object with the following fields:
|
||||
|
||||
* `jsonrpc: <string>`, matching the request specification
|
||||
* `id: <number>`, matching the request identifier
|
||||
* `result: <array|number|object|string>`, requested data or success confirmation
|
||||
* `jsonrpc`, matching the request specification
|
||||
* `id`, matching the request identifier
|
||||
* `result`, requested data or success confirmation
|
||||
|
||||
Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST.
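As a sketch of batching, the snippet below builds an array of two request objects and POSTs it in a single call. The `reqwest` (blocking + json features) and `serde_json` crates and the local RPC URL are assumptions for illustration, not part of this documentation; the two method names are taken from the list above.

```rust
// Batch sketch: two JSON-RPC requests in one POST body.
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let batch = json!([
        { "jsonrpc": "2.0", "id": 1, "method": "getSlot", "params": [] },
        { "jsonrpc": "2.0", "id": 2, "method": "getTransactionCount", "params": [] }
    ]);

    let response: serde_json::Value = reqwest::blocking::Client::new()
        .post("http://localhost:8899") // assumed local RPC endpoint
        .json(&batch)                  // sets Content-Type: application/json
        .send()?
        .json()?;

    // The reply is an array of response objects, one per request id.
    println!("{}", response);
    Ok(())
}
```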
|
||||
|
||||
@ -121,12 +114,12 @@ Returns a transaction receipt
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `RpcResponse<bool>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
|
||||
* `RpcResponse<boolean>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -144,19 +137,18 @@ Returns all information associated with the account of provided Pubkey
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Pubkey of account to query, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Pubkey of account to query, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result value will be an RpcResponse JSON object containing an AccountInfo JSON object.
|
||||
|
||||
* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
|
||||
* `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
* `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
* `data: <string>`, base-58 encoded data associated with the account
|
||||
* `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
* `rentEpoch`: <u64>, the epoch at which this account will next owe rent, as u64
|
||||
* `lamports`, number of lamports assigned to this account, as a u64
|
||||
* `owner`, base-58 encoded pubkey of the program this account has been assigned to
|
||||
* `data`, base-58 encoded data associated with the account
|
||||
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -165,7 +157,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":2}},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"}},"id":1}
|
||||
```
|
||||
|
||||
### getBalance
|
||||
@ -174,12 +166,12 @@ Returns the balance of the account of provided Pubkey
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Pubkey of account to query, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Pubkey of account to query, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `RpcResponse<u64>` - RpcResponse JSON object with `value` field set to the balance
|
||||
* `RpcResponse<u64>` - RpcResponse JSON object with `value` field set to quantity
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -197,15 +189,16 @@ Returns commitment for particular block
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - block, identified by Slot
|
||||
* `u64` - block, identified by Slot
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be a JSON object containing:
|
||||
|
||||
* `commitment` - commitment, comprising either:
|
||||
* `<null>` - Unknown block
|
||||
* `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
|
||||
* `null` - Unknown block
|
||||
* `object` - BlockCommitment
|
||||
* `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
|
||||
* `totalStake` - total active stake, in lamports, of the current epoch
|
||||
|
||||
#### Example:
|
||||
@ -215,7 +208,7 @@ The result field will be a JSON object containing:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
|
||||
{"jsonrpc":"2.0","result":[{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1}
|
||||
```
|
||||
|
||||
### getBlockTime
|
||||
@ -234,12 +227,12 @@ query a node that is built from genesis and retains the entire ledger.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - block, identified by Slot
|
||||
* `u64` - block, identified by Slot
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<null>` - block has not yet been produced
|
||||
* `<i64>` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
|
||||
* `null` - block has not yet been produced
|
||||
* `i64` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -263,10 +256,10 @@ None
|
||||
|
||||
The result field will be an array of JSON objects, each with the following sub fields:
|
||||
|
||||
* `pubkey: <string>` - Node public key, as base-58 encoded string
|
||||
* `gossip: <string>` - Gossip network address for the node
|
||||
* `tpu: <string>` - TPU network address for the node
|
||||
* `rpc: <string>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
* `pubkey` - Node public key, as base-58 encoded string
|
||||
* `gossip` - Gossip network address for the node
|
||||
* `tpu` - TPU network address for the node
|
||||
* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -284,25 +277,25 @@ Returns identity and transaction information about a confirmed block in the ledg
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - slot, as u64 integer
|
||||
* `<string>` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
|
||||
* `integer` - slot, as u64 integer
|
||||
* `string` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
|
||||
* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string
|
||||
* `parentSlot: <u64>` - the slot index of this block's parent
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `status: <object>` - Transaction status:
|
||||
* `blockhash` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash` - the blockhash of this block's parent, as base-58 encoded string
|
||||
* `parentSlot` - the slot index of this block's parent
|
||||
* `transactions` - an array of JSON objects containing:
|
||||
* `transaction` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta` - transaction status metadata object, containing `null` or:
|
||||
* `status` - Transaction status:
|
||||
* `"Ok": null` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* `fee` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances` - array of u64 account balances after the transaction was processed
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -311,13 +304,13 @@ The result field will be an object with the following fields:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[[{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
|
||||
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[["81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ",{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedBlocks
|
||||
@ -326,8 +319,8 @@ Returns a list of confirmed blocks
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - start_slot, as u64 integer
|
||||
* `<u64>` - (optional) end_slot, as u64 integer
|
||||
* `integer` - start_slot, as u64 integer
|
||||
* `integer` - (optional) end_slot, as u64 integer
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -351,16 +344,15 @@ Returns information about the current epoch
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
|
||||
* `absoluteSlot: <u64>`, the current slot
|
||||
* `epoch: <u64>`, the current epoch
|
||||
* `slotIndex: <u64>`, the current slot relative to the start of the current epoch
|
||||
* `slotsInEpoch: <u64>`, the number of slots in this epoch
|
||||
* `epoch`, the current epoch
|
||||
* `slotIndex`, the current slot relative to the start of the current epoch
|
||||
* `slotsInEpoch`, the number of slots in this epoch
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -369,7 +361,7 @@ The result field will be an object with the following fields:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"absoluteSlot":166598,"epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
|
||||
```
|
||||
|
||||
### getEpochSchedule
|
||||
@ -384,11 +376,11 @@ None
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
|
||||
* `slotsPerEpoch: <u64>`, the maximum number of slots in each epoch
|
||||
* `leaderScheduleSlotOffset: <u64>`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
|
||||
* `warmup: <bool>`, whether epochs start short and grow
|
||||
* `firstNormalEpoch: <u64>`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
|
||||
* `firstNormalSlot: <u64>`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1)
|
||||
* `slotsPerEpoch`, the maximum number of slots in each epoch
|
||||
* `leaderScheduleSlotOffset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
|
||||
* `warmup`, whether epochs start short and grow
|
||||
* `firstNormalEpoch`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
|
||||
* `firstNormalSlot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -410,7 +402,7 @@ None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<string>` - a Hash as base-58 encoded string
|
||||
* `string` - a Hash as base-58 encoded string
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -422,43 +414,14 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
|
||||
```
|
||||
|
||||
### getInflation
|
||||
|
||||
Returns the inflation configuration of the cluster
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an Inflation object with the following fields:
|
||||
|
||||
* `initial: <f64>`, the initial inflation percentage from time 0
|
||||
* `terminal: <f64>`, terminal inflation percentage
|
||||
* `taper: <f64>`, rate per year at which inflation is lowered
|
||||
* `foundation: <f64>`, percentage of total inflation allocated to the foundation
|
||||
* `foundationTerm: <f64>`, duration of foundation pool inflation in years
|
||||
* `storage: <f64>`, percentage of total inflation allocated to storage rewards
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflation"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
|
||||
```
|
||||
|
||||
### getLeaderSchedule
|
||||
|
||||
Returns the leader schedule for an epoch
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<u64>` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `slot` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -482,12 +445,12 @@ Returns minimum balance required to make account rent exempt.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<usize>` - account data length
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `u64` - account data length
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - minimum lamports required in account
|
||||
* `u64` - minimum lamports required in account
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -505,12 +468,12 @@ Returns the current number of blocks since signature has been confirmed.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - count, or null if signature not found
|
||||
* `u64` - count
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -528,20 +491,18 @@ Returns all accounts owned by the provided program Pubkey
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Pubkey of program, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Pubkey of program, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an array of JSON objects, which will contain:
|
||||
The result field will be an array of arrays. Each sub array will contain:
|
||||
|
||||
* `pubkey: <string>` - the account Pubkey as base-58 encoded string
|
||||
* `account: <object>` - a JSON object, with the following sub fields:
|
||||
* `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
* `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
* `data: <string>`, base-58 encoded data associated with the account
|
||||
* `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
* `rentEpoch`: <u64>, the epoch at which this account will next owe rent, as u64
|
||||
* `string` - the account Pubkey as base-58 encoded string and a JSON object, with the following sub fields:
|
||||
* `lamports`, number of lamports assigned to this account, as a u64
|
||||
* `owner`, base-58 encoded pubkey of the program this account has been assigned to
|
||||
* `data`, base-58 encoded data associated with the account
|
||||
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -550,7 +511,7 @@ The result field will be an array of JSON objects, which will contain:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":[{"account":{"data":"2R9jLfiAQ9bgdcw6h8s44439","executable":false,"lamports":15298080,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","rentEpoch":28},"pubkey":"CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY"}],"id":1}
|
||||
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":1,"data":"", ["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":10,"data":[]]]},"id":1}
|
||||
```
|
||||
|
||||
### getRecentBlockhash
|
||||
@ -559,15 +520,15 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object.
|
||||
|
||||
* `RpcResponse<object>` - RpcResponse JSON object with `value` field set to a JSON object including:
|
||||
* `blockhash: <string>` - a Hash as base-58 encoded string
|
||||
* `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
|
||||
* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to a JSON object including:
|
||||
* `blockhash` - a Hash as base-58 encoded string
|
||||
* `feeCalculator` - FeeCalculator object, the fee schedule for this block hash
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -576,34 +537,7 @@ An RpcResponse containing a JSON object consisting of a string blockhash and Fee
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureConfirmation
|
||||
|
||||
Returns the status and number of confirmations of a given signature.
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<null>` - Unknown transaction
|
||||
* `<object>` - Transaction confirmations and status:
|
||||
* `confirmations: <u64>` - count of confirmations since transaction was processed
|
||||
* `status: <object>` -
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureConfirmation", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"confirmations":12,"status":{"Ok": null}},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","feeCalculator":{"lamportsPerSignature": 0}}},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureStatus
|
||||
@ -612,14 +546,14 @@ Returns the status of a given signature. This method is similar to [confirmTrans
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `string` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<null>` - Unknown transaction
|
||||
* `<object>` - Transaction status:
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `null` - Unknown transaction
|
||||
* `object` - Transaction status:
|
||||
* `"Ok": null` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
|
||||
#### Example:
|
||||
@ -629,7 +563,7 @@ Returns the status of a given signature. This method is similar to [confirmTrans
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"Ok": null},"id":1}
|
||||
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
|
||||
```
|
||||
|
||||
### getSlot
|
||||
@ -638,11 +572,11 @@ Returns the current slot the node is processing
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Current slot
|
||||
* `u64` - Current slot
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -651,7 +585,7 @@ Returns the current slot the node is processing
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":1234,"id":1}
|
||||
{"jsonrpc":"2.0","result":"1234","id":1}
|
||||
```
|
||||
|
||||
### getSlotLeader
|
||||
@ -660,11 +594,11 @@ Returns the current slot leader
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<string>` - Node identity Pubkey as base-58 encoded string
|
||||
* `string` - Node Id as base-58 encoded string
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -682,11 +616,11 @@ Returns the current storage segment size in terms of slots
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Number of slots in a storage segment
|
||||
* `u64` - Number of slots in a storage segment
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -694,28 +628,7 @@ Returns the current storage segment size in terms of slots
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":1024,"id":1}
|
||||
```
|
||||
|
||||
### getStoragePubkeysForSlot
|
||||
|
||||
Returns the storage Pubkeys for a particular slot
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
An array of Pubkeys, as base-58 encoded strings
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStoragePubkeysForSlot","params":[1]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC"],"id":1}
|
||||
{"jsonrpc":"2.0","result":"1024","id":1}
|
||||
```
|
||||
|
||||
### getStorageTurn
|
||||
@ -730,8 +643,8 @@ None
|
||||
|
||||
A JSON object consisting of
|
||||
|
||||
* `blockhash: <string>` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
|
||||
* `slot: <u64>` - the current storage turn slot
|
||||
* `blockhash` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
|
||||
* `slot` - the current storage turn slot
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -739,7 +652,7 @@ A JSON object consisting of
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": 2048},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": "2048"},"id":1}
|
||||
```
|
||||
|
||||
### getStorageTurnRate
|
||||
@ -752,7 +665,7 @@ None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Number of slots in storage turn
|
||||
* `u64` - Number of slots in storage turn
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -769,11 +682,11 @@ Returns the current Transaction count from the ledger
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - count
|
||||
* `u64` - count
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -787,15 +700,15 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
|
||||
### getTotalSupply
|
||||
|
||||
Returns the current total supply in lamports
|
||||
Returns the current total supply in Lamports
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Total supply
|
||||
* `u64` - Total supply
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -817,7 +730,7 @@ None
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be a JSON object with the following fields:
|
||||
The result field will be a JSON object with the following sub fields:
|
||||
|
||||
* `solana-core`, software version of solana-core
|
||||
|
||||
@ -827,7 +740,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "0.23.2"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "0.17.2"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@ -836,19 +749,18 @@ Returns the account info and associated stake for all the voting accounts in the
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields:
|
||||
|
||||
* `votePubkey: <string>` - Vote account public key, as base-58 encoded string
|
||||
* `nodePubkey: <string>` - Node public key, as base-58 encoded string
|
||||
* `activatedStake: <u64>` - the stake, in lamports, delegated to this vote account and active in this epoch
|
||||
* `epochVoteAccount: <bool>` - bool, whether the vote account is staked for this epoch
|
||||
* `commission: <number>`, percentage (0-100) of rewards payout owed to the vote account
|
||||
* `lastVote: <u64>` - Most recent slot voted on by this vote account
|
||||
* `epochCredits: <array>` - History of how many credits earned by the end of each epoch, as an array of arrays containing: [epoch, credits, previousCredits]
|
||||
* `votePubkey` - Vote account public key, as base-58 encoded string
|
||||
* `nodePubkey` - Node public key, as base-58 encoded string
|
||||
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
|
||||
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
|
||||
* `commission`, percentage (0-100) of rewards payout owed to the vote account
|
||||
* `lastVote` - Most recent slot voted on by this vote account
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -857,30 +769,7 @@ The result field will be a JSON object of `current` and `delinquent` accounts, e
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"epochCredits":[[1,64,0],[2,192,64]],"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"epochCredits":[],"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
|
||||
```
|
||||
|
||||
### minimumLedgerSlot
|
||||
|
||||
Returns the lowest slot that the node has information about in its ledger. This
|
||||
value may increase over time if the node is configured to purge older ledger data
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `u64` - Minimum ledger slot
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"minimumLedgerSlot"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":1234,"id":1}
|
||||
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
|
||||
```
|
||||
|
||||
### requestAirdrop
|
||||
@ -889,13 +778,13 @@ Requests an airdrop of lamports to a Pubkey
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Pubkey of account to receive lamports, as base-58 encoded string
|
||||
* `<integer>` - lamports, as a u64
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success)
|
||||
* `string` - Pubkey of account to receive lamports, as base-58 encoded string
|
||||
* `integer` - lamports, as a u64
|
||||
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<string>` - Transaction Signature of airdrop, as base-58 encoded string
|
||||
* `string` - Transaction Signature of airdrop, as base-58 encoded string
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -913,66 +802,22 @@ Creates new transaction
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<array>` - array of octets containing a fully-signed Transaction
|
||||
* `array` - array of octets containing a fully-signed Transaction
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<string>` - Transaction Signature, as base-58 encoded string
|
||||
* `string` - Transaction Signature, as base-58 encoded string
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":[[61, 98, 55, 49, 15, 187, 41, 215, 176, 49, 234, 229, 228, 77, 129, 221, 239, 88, 145, 227, 81, 158, 223, 123, 14, 229, 235, 247, 191, 115, 199, 71, 121, 17, 32, 67, 63, 209, 239, 160, 161, 2, 94, 105, 48, 159, 235, 235, 93, 98, 172, 97, 63, 197, 160, 164, 192, 20, 92, 111, 57, 145, 251, 6, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 13, 39, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 11, 12, 106, 49, 74, 226, 201, 16, 161, 192, 28, 84, 124, 97, 190, 201, 171, 186, 6, 18, 70, 142, 89, 185, 176, 154, 115, 61, 26, 163, 77, 1, 88, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
|
||||
```
|
||||
|
||||
### setLogFilter
|
||||
|
||||
Sets the log filter on the validator
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - the new log filter to use
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<null>`
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"setLogFilter", "params":["solana_core=debug"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":null,"id":1}
|
||||
```
|
||||
|
||||
### validatorExit
|
||||
|
||||
If a validator boots with RPC exit enabled (`--enable-rpc-exit` parameter), this request causes the validator to exit.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - Whether the validator exit operation was successful
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":true,"id":1}
|
||||
```
|
||||
|
||||
### Subscription Websocket
|
||||
|
||||
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
|
||||
@ -1001,14 +846,14 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - account Pubkey, as base-58 encoded string
|
||||
* `<u64>` - optional, number of confirmed blocks to wait before notification.
|
||||
* `string` - account Pubkey, as base-58 encoded string
|
||||
* `integer` - optional, number of confirmed blocks to wait before notification.
|
||||
|
||||
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<number>` - Subscription id \(needed to unsubscribe\)
|
||||
* `integer` - Subscription id \(needed to unsubscribe\)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -1025,7 +870,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":28},"subscription":0}}
|
||||
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"},"subscription":0}}
|
||||
```
|
||||
|
||||
### accountUnsubscribe
|
||||
@ -1034,11 +879,11 @@ Unsubscribe from account change notifications
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<number>` - id of account Subscription to cancel
|
||||
* `integer` - id of account Subscription to cancel
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - unsubscribe success message
|
||||
* `bool` - unsubscribe success message
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -1056,14 +901,14 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - program\_id Pubkey, as base-58 encoded string
|
||||
* `<u64>` - optional, number of confirmed blocks to wait before notification.
|
||||
* `string` - program\_id Pubkey, as base-58 encoded string
|
||||
* `integer` - optional, number of confirmed blocks to wait before notification.
|
||||
|
||||
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<integer>` - Subscription id \(needed to unsubscribe\)
|
||||
* `integer` - Subscription id \(needed to unsubscribe\)
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -1079,11 +924,11 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
* `<string>` - account Pubkey, as base-58 encoded string
|
||||
* `<object>` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
|
||||
* `string` - account Pubkey, as base-58 encoded string
|
||||
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
|
||||
|
||||
```bash
|
||||
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd","rentEpoch":28}],"subscription":0}}
|
||||
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{"pubkey": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","account":{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd"}},"subscription":0}}
|
||||
```
|
||||
|
||||
### programUnsubscribe
|
||||
@ -1092,11 +937,11 @@ Unsubscribe from program-owned account change notifications
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<integer>` - id of account Subscription to cancel
|
||||
* `integer` - id of account Subscription to cancel
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - unsubscribe success message
|
||||
* `bool` - unsubscribe success message
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -1114,8 +959,8 @@ Subscribe to a transaction signature to receive notification when the transactio
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Transaction Signature, as base-58 encoded string
|
||||
* `<integer>` - optional, number of confirmed blocks to wait before notification.
|
||||
* `string` - Transaction Signature, as base-58 encoded string
|
||||
* `integer` - optional, number of confirmed blocks to wait before notification.
|
||||
|
||||
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
|
||||
|
||||
@ -1147,11 +992,11 @@ Unsubscribe from signature confirmation notification
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<integer>` - subscription id to cancel
|
||||
* `integer` - subscription id to cancel
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - unsubscribe success message
|
||||
* `bool` - unsubscribe success message
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -1162,53 +1007,3 @@ Unsubscribe from signature confirmation notification
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
||||
|
||||
### slotSubscribe
|
||||
|
||||
Subscribe to receive notification anytime a slot is processed by the validator
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `integer` - subscription id \(needed to unsubscribe\)
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"slotSubscribe"}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
```
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "slotNotification", "params": {"result":{"parent":75,"root":44,"slot":76},"subscription":0}}
|
||||
```
|
||||
|
||||
### slotUnsubscribe
|
||||
|
||||
Unsubscribe from slot notifications
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<integer>` - subscription id to cancel
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<bool>` - unsubscribe success message
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"slotUnsubscribe", "params":[0]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
62
book/src/api-reference/transaction-api.md
Normal file
@ -0,0 +1,62 @@
|
||||
# Transaction
|
||||
|
||||
## Components of a `Transaction`
|
||||
|
||||
* **Transaction:**
|
||||
* **message:** Defines the transaction
|
||||
* **header:** Details the account types of and signatures required by
|
||||
|
||||
the transaction
|
||||
|
||||
* **num\_required\_signatures:** The total number of signatures
|
||||
|
||||
required to make the transaction valid.
|
||||
|
||||
* **num\_credit\_only\_signed\_accounts:** The last
|
||||
|
||||
`num_readonly_signed_accounts` signatures refer to signing
|
||||
|
||||
credit only accounts. Credit only accounts can be used concurrently
|
||||
|
||||
by multiple parallel transactions, but their balance may only be
|
||||
|
||||
increased, and their account data is read-only.
|
||||
|
||||
* **num\_credit\_only\_unsigned\_accounts:** The last
|
||||
|
||||
`num_readonly_unsigned_accounts` public keys in `account_keys` refer
|
||||
|
||||
to non-signing credit only accounts
|
||||
|
||||
* **account\_keys:** List of public keys used by the transaction, including
|
||||
|
||||
by the instructions and for signatures. The first
|
||||
|
||||
`num_required_signatures` public keys must sign the transaction.
|
||||
|
||||
* **recent\_blockhash:** The ID of a recent ledger entry. Validators will
|
||||
|
||||
reject transactions with a `recent_blockhash` that is too old.
|
||||
|
||||
* **instructions:** A list of [instructions](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/instruction.md) that are
|
||||
|
||||
run sequentially and committed in one atomic transaction if all
|
||||
|
||||
succeed.
|
||||
* **signatures:** A list of signatures applied to the transaction. The
|
||||
|
||||
list is always of length `num_required_signatures`, and the signature
|
||||
|
||||
at index `i` corresponds to the public key at index `i` in `account_keys`.
|
||||
|
||||
The list is initialized with empty signatures \(i.e. zeros\), and
|
||||
|
||||
populated as signatures are added.
|
||||
|
||||
## Transaction Signing
|
||||
|
||||
A `Transaction` is signed by using an ed25519 keypair to sign the serialization of the `message`. The resulting signature is placed at the index of `signatures` matching the index of the keypair's public key in `account_keys`.
|
||||
|
||||
## Transaction Serialization
|
||||
|
||||
`Transaction`s \(and their `message`s\) are serialized and deserialized using the [bincode](https://crates.io/crates/bincode) crate with a non-standard vector serialization that uses only one byte for the length if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3 bytes if it requires 15 or 16 bits. The vector serialization is defined by Solana's [short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).
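
To make the length prefix concrete, here is a minimal sketch of the 7-bits-per-byte encoding described above; it illustrates the idea only and is not the `short_vec` implementation itself.

```rust
// Sketch of the compact length encoding: emit the length 7 bits at a time,
// least-significant group first, with the high bit of each byte as a
// continuation flag. Illustration only, not solana_sdk::short_vec.

fn encode_len(mut len: u16) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let mut byte = (len & 0x7f) as u8;
        len >>= 7;
        if len != 0 {
            byte |= 0x80; // more bytes follow
        }
        out.push(byte);
        if len == 0 {
            break;
        }
    }
    out
}

fn decode_len(bytes: &[u8]) -> (u16, usize) {
    let mut len: u16 = 0;
    let mut size = 0;
    for &byte in bytes {
        len |= ((byte & 0x7f) as u16) << (7 * size);
        size += 1;
        if byte & 0x80 == 0 {
            break;
        }
    }
    (len, size)
}

fn main() {
    assert_eq!(encode_len(3), vec![0x03]);               // 1 byte for lengths that fit in 7 bits
    assert_eq!(encode_len(300), vec![0xac, 0x02]);       // 2 bytes for lengths that fit in 14 bits
    assert_eq!(encode_len(u16::MAX), vec![0xff, 0xff, 0x03]); // 3 bytes for 15 or 16 bits
    assert_eq!(decode_len(&encode_len(300)), (300, 2));
}
```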
|
@ -1,5 +0,0 @@
|
||||
# Using Solana from the Command-line
|
||||
|
||||
This chapter describes the command-line tools for interacting with Solana. One
|
||||
could use these tools to send payments, stake validators, and check account
|
||||
balances.
|
@ -4,7 +4,7 @@ A Solana cluster is a set of validators working together to serve client transac
|
||||
|
||||
## Creating a Cluster
|
||||
|
||||
Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap validator_. The validator holding the bootstrap validator's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap validator to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster.
|
||||
Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap leader to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster.
|
||||
|
||||
A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy.
|
||||
|
||||
@ -37,4 +37,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only
|
||||
|
||||
Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions.
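
As a rough sketch of the batching arithmetic above (illustration only; the names are made up and the real broadcast path signs and shreds the data before sending it):

```rust
// Toy sketch: instead of sending all 60 transactions to each of 6 peers, the
// leader splits them into 6 batches of 10 and sends one batch per peer, so
// only 60 items go on the wire from the leader.

fn main() {
    let transactions: Vec<u32> = (0..60).collect(); // stand-ins for transactions
    let peers = 6;
    let batch_size = transactions.len() / peers;

    let batches: Vec<&[u32]> = transactions.chunks(batch_size).collect();
    assert_eq!(batches.len(), peers);

    // Each peer then shares its batch with the others and reconstructs all 60.
    for (peer, batch) in batches.iter().enumerate() {
        println!("peer {} receives {} transactions", peer, batch.len());
    }
}
```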
|
||||
|
||||
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](turbine-block-propagation.md).
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique _data plane fanout_; learn more in the [data plane fanout](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/data-plane-fanout.md) section.
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Stake Delegation and Rewards
|
||||
|
||||
Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and send votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../implemented-proposals/staking-rewards.md). This chapter, on the other hand, describes the underlying mechanics of its implementation.
|
||||
Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and send votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../proposals/staking-rewards.md). This chapter, on the other hand, describes the underlying mechanics of its implementation.
|
||||
|
||||
## Basic Design
|
||||
|
||||
@ -94,22 +94,42 @@ The Stakes and the RewardsPool are accounts that are owned by the same `Stake` p
|
||||
|
||||
### StakeInstruction::DelegateStake
|
||||
|
||||
The Stake account is moved from Initialized to StakeState::Stake form, or from a deactivated (i.e. fully cooled-down) StakeState::Stake to activated StakeState::Stake. This is how stakers choose the vote account and validator node to which their stake account lamports are delegated. The transaction must be signed by the stake's `authorized_staker`.
|
||||
The Stake account is moved from Initialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. The transaction must be signed by the stake's `authorized_staker`. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated. Stakes may be re-delegated at any time, and updated stakes are reflected immediately, but only one re-delegation is permitted per epoch.
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX
|
||||
* `account[1]` - R - The VoteState instance.
|
||||
* `account[2]` - R - sysvar::clock account, carries information about current Bank epoch
|
||||
* `account[3]` - R - sysvar::stakehistory account, carries information about stake history
|
||||
* `account[4]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration
* `account[3]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration
|
||||
|
||||
### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\)
|
||||
|
||||
Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be signed by the Stake account's current `authorized_staker` or `authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up custodian must also sign the transaction.
Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be signed by the Stake account's current `authorized_staker` or `authorized_withdrawer`.
|
||||
|
||||
* `account[0]` - RW - The StakeState
|
||||
|
||||
`StakeState::authorized_staker` or `authorized_withdrawer` is set to `Pubkey`.
|
||||
|
||||
### StakeInstruction::RedeemVoteCredits
|
||||
|
||||
The staker or the owner of the Stake account sends a transaction with this instruction to claim rewards.
|
||||
|
||||
The Vote account and the Stake account pair maintain a lifetime counter of total rewards generated and claimed. Rewards are paid according to a point value supplied by the Bank from inflation. A `point` is one credit \* one staked lamport, rewards paid are proportional to the number of lamports staked.
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
|
||||
* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
|
||||
* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request \(picked at random\).
|
||||
* `account[3]` - R - sysvar::rewards account from the Bank that carries point value.
|
||||
* `account[4]` - R - sysvar::stake\_history account from the Bank that carries stake warmup/cooldown history
|
||||
|
||||
Reward is paid out for the difference between `VoteState::credits` and `StakeState::Stake::credits_observed`, multiplied by `sysvar::rewards::Rewards::validator_point_value`. `StakeState::Stake::credits_observed` is updated to `VoteState::credits`. The commission is deposited into the Vote account token balance, and the reward is deposited to the Stake account token balance and the stake account's `stake` is increased by the same amount \(re-invested\).
|
||||
|
||||
```text
|
||||
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
|
||||
stake_state.credits_observed = vote_state.credits;
|
||||
```
|
||||
|
||||
`credits_to_claim` is used to compute the reward and commission, and `StakeState::Stake::credits_observed` is updated to the latest `VoteState::credits` value.
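
A hedged sketch of this redemption arithmetic, using illustrative stand-in types rather than the actual runtime structures, might look like:

```rust
// Illustrative sketch of the RedeemVoteCredits arithmetic described above.
// Field and variable names are stand-ins, not the exact runtime types.

struct StakeSketch {
    stake: u64,            // staked lamports
    credits_observed: u64, // credits already redeemed against
}

struct VoteSketch {
    credits: u64,   // lifetime vote credits earned
    commission: u8, // percentage of the reward kept by the validator
}

/// Returns (staker_reward, validator_commission) in lamports.
fn redeem(stake: &mut StakeSketch, vote: &VoteSketch, point_value: f64) -> (u64, u64) {
    let credits_to_claim = vote.credits - stake.credits_observed;
    // one point == one credit * one staked lamport
    let points = credits_to_claim * stake.stake;
    let total = (points as f64 * point_value) as u64;

    let commission = total * vote.commission as u64 / 100;
    let reward = total - commission;

    stake.credits_observed = vote.credits; // never pay for the same credits twice
    stake.stake += reward;                 // the reward is re-invested into the stake

    (reward, commission)
}

fn main() {
    let mut stake = StakeSketch { stake: 1_000, credits_observed: 10 };
    let vote = VoteSketch { credits: 14, commission: 10 };
    let (reward, commission) = redeem(&mut stake, &vote, 0.01);
    println!("staker reward: {}, commission: {}", reward, commission);
}
```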
|
||||
|
||||
### StakeInstruction::Deactivate
|
||||
|
||||
A staker may wish to withdraw from the network. To do so they must first deactivate their stake and wait for cool-down.
|
||||
@ -142,11 +162,11 @@ Lamports build up over time in a Stake account and any excess over activated sta
|
||||
|
||||
## Staking Rewards
|
||||
|
||||
The specific mechanics and rules of the validator rewards regime are outlined here. Rewards are earned by delegating stake to a validator that is voting correctly. Voting incorrectly exposes that validator's stakes to [slashing](../proposals/slashing.md).
The specific mechanics and rules of the validator rewards regime are outlined here. Rewards are earned by delegating stake to a validator that is voting correctly. Voting incorrectly exposes that validator's stakes to [slashing](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/staking-and-rewards.md).
|
||||
|
||||
### Basics
|
||||
|
||||
The network pays rewards from a portion of network [inflation](../terminology.md#inflation). The number of lamports available to pay rewards for an epoch is fixed and must be evenly divided among all staked nodes according to their relative stake weight and participation. The weighting unit is called a [point](../terminology.md#point).
|
||||
The network pays rewards from a portion of network [inflation](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/inflation.md). The number of lamports available to pay rewards for an epoch is fixed and must be evenly divided among all staked nodes according to their relative stake weight and participation. The weighting unit is called a [point](../terminology.md#point).
|
||||
|
||||
Rewards for an epoch are not available until the end of that epoch.
|
||||
|
||||
@ -208,4 +228,4 @@ Only lamports in excess of effective+activating stake may be withdrawn at any ti
|
||||
|
||||
### Lock-up
|
||||
|
||||
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. Changing the authorized staker or withdrawer is also subject to lock-up, as such an operation is effectively a transfer.
|
||||
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state.
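
A minimal sketch of the lock-up rule described above, with illustrative names; it only models the epoch-height check and the custodian override:

```rust
// Withdrawals are allowed once the network reaches the lock-up epoch, or
// earlier if the designated custodian also signed the transaction.

struct LockupSketch {
    epoch: u64,
    custodian: [u8; 32],
}

fn withdraw_allowed(lockup: &LockupSketch, current_epoch: u64, signers: &[[u8; 32]]) -> bool {
    current_epoch >= lockup.epoch || signers.contains(&lockup.custodian)
}

fn main() {
    let custodian = [7u8; 32];
    let lockup = LockupSketch { epoch: 100, custodian };

    assert!(!withdraw_allowed(&lockup, 50, &[]));         // still locked, no custodian signature
    assert!(withdraw_allowed(&lockup, 50, &[custodian])); // custodian co-signed
    assert!(withdraw_allowed(&lockup, 120, &[]));         // lock-up epoch reached
}
```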
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Building from Source
|
||||
# Getting Started
|
||||
|
||||
The Solana git repository contains all the scripts you might need to spin up your own local testnet. Depending on what you're looking to achieve, you may want to run a different variation, as the full-fledged, performance-enhanced multinode testnet is considerably more complex to set up than a Rust-only, singlenode testnode. If you are looking to develop high-level features, such as experimenting with smart contracts, save yourself some setup headaches and stick to the Rust-only singlenode demo. If you're doing performance optimization of the transaction pipeline, consider the enhanced singlenode demo. If you're doing consensus work, you'll need at least a Rust-only multinode demo. If you want to reproduce our TPS metrics, run the enhanced multinode demo.
|
||||
|
||||
@ -52,12 +52,12 @@ $ NDEBUG=1 ./multinode-demo/faucet.sh
|
||||
|
||||
### Singlenode Testnet
|
||||
|
||||
Before you start a validator, make sure you know the IP address of the machine you want to be the bootstrap validator for the demo, and make sure that udp ports 8000-10000 are open on all the machines you want to test with.
|
||||
Before you start a validator, make sure you know the IP address of the machine you want to be the bootstrap leader for the demo, and make sure that udp ports 8000-10000 are open on all the machines you want to test with.
|
||||
|
||||
Now start the bootstrap validator in a separate shell:
|
||||
Now start the bootstrap leader in a separate shell:
|
||||
|
||||
```bash
|
||||
$ NDEBUG=1 ./multinode-demo/bootstrap-validator.sh
|
||||
$ NDEBUG=1 ./multinode-demo/bootstrap-leader.sh
|
||||
```
|
||||
|
||||
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to receive transactions. The leader will request some tokens from the faucet if it doesn't have any. The faucet does not need to be running for subsequent leader starts.
|
||||
@ -74,7 +74,7 @@ To run a performance-enhanced validator on Linux, [CUDA 10.0](https://developer.
|
||||
|
||||
```bash
|
||||
$ ./fetch-perf-libs.sh
|
||||
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/bootstrap-validator.sh
|
||||
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh
|
||||
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/validator.sh
|
||||
```
|
||||
|
||||
@ -121,34 +121,6 @@ thread apply all bt
|
||||
|
||||
This will dump all the threads stack traces into gdb.txt
|
||||
|
||||
### Blockstreamer
|
||||
|
||||
Solana supports a node type called a _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication.
|
||||
|
||||
A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node.
|
||||
|
||||
To run a blockstreamer, include the argument `no-signer` and \(optional\) `blockstream` socket location:
|
||||
|
||||
```bash
|
||||
$ NDEBUG=1 ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
|
||||
```
|
||||
|
||||
The stream will output a series of JSON objects:
|
||||
|
||||
* An Entry event JSON object is sent when each ledger entry is processed, with the following fields:
|
||||
* `dt`, the system datetime, as RFC3339-formatted string
|
||||
* `t`, the event type, always "entry"
|
||||
* `s`, the slot height, as unsigned 64-bit integer
|
||||
* `h`, the tick height, as unsigned 64-bit integer
|
||||
* `entry`, the entry, as JSON object
|
||||
* A Block event JSON object is sent when a block is complete, with the following fields:
|
||||
* `dt`, the system datetime, as RFC3339-formatted string
|
||||
* `t`, the event type, always "block"
|
||||
* `s`, the slot height, as unsigned 64-bit integer
|
||||
* `h`, the tick height, as unsigned 64-bit integer
|
||||
* `l`, the slot leader id, as base-58 encoded string
|
||||
* `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string
|
||||
|
||||
## Public Testnet
|
||||
|
||||
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
|
7
book/src/getting-started/testnet-participation.md
Normal file
@ -0,0 +1,7 @@
|
||||
# Testnet Participation
|
||||
|
||||
Participate in our testnet:
|
||||
|
||||
* [Running a Validator](../running-validator/)
|
||||
* [Running an Archiver](../running-archiver.md)
|
||||
|
90
book/src/implemented-proposals/blockstore.md
Normal file
@ -0,0 +1,90 @@
|
||||
# Blockstore
|
||||
|
||||
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
|
||||
|
||||
The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
|
||||
|
||||
Shreds are moved to a fork-able key space, the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without choosing a priori which fork to follow, which Entries to persist, or when to persist them.
|
||||
|
||||
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
|
||||
|
||||
## Functionalities of Blockstore
|
||||
|
||||
1. Persistence: the Blockstore lives in the front of the node's verification pipeline, right behind network receive and signature verification. If the shred received is consistent with the leader schedule \(i.e. was signed by the leader for the indicated slot\), it is immediately stored.
2. Repair: repair is the same as window repair above, but able to serve any shred that's been received. Blockstore stores shreds with signatures, preserving the chain of origination.
3. Forks: Blockstore supports random access of shreds, so can support a validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the Blockstore can be replayed by ordered enumeration of entries from slot 0. The logic of the replay stage \(i.e. dealing with forks\) will have to be used for the most recent entries in the Blockstore.
|
||||
|
||||
## Blockstore Design
|
||||
|
||||
1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
|
||||
2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
|
||||
* `slot_index` - The index of this slot
|
||||
* `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
|
||||
* `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
|
||||
* `received` - The highest received shred index for the slot
|
||||
* `next_slots` - A list of future slots this slot could chain to. Used when rebuilding
|
||||
|
||||
the ledger to find possible fork points.
|
||||
|
||||
* `last_index` - The index of the shred that is flagged as the last shred for this slot. This flag on a shred will be set by the leader for a slot when they are transmitting the last shred for a slot.
|
||||
* `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is\_rooted for each slot with the following rules. Let slot\(n\) be the slot with index `n`, and slot\(n\).is\_full\(\) is true if the slot with index `n` has all the ticks expected for that slot. Let is\_rooted\(n\) be the statement that "the slot\(n\).is\_rooted is true". Then:
|
||||
|
||||
is\_rooted\(0\), and is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
|
||||
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
|
||||
4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
|
||||
5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
|
||||
|
||||
## Blockstore APIs
|
||||
|
||||
The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows:

1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
2. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.
|
||||
|
||||
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
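
As an illustration of how a ReplayStage-like consumer might drive these two APIs, here is a sketch built on stub types; the `SlotMeta` fields mirror the description above, but this is not the real Blockstore interface.

```rust
// Stub types standing in for Blockstore, SlotMeta and Entry, used only to
// show the subscribe-then-fetch pattern described in this section.

#[derive(Clone)]
struct SlotMeta {
    slot_index: u64,
    consumed: u64,        // highest consecutive shred index seen for the slot
    next_slots: Vec<u64>, // slots that may chain off this slot (possible forks)
}

#[derive(Clone)]
struct Entry; // placeholder for a ledger entry

struct Blockstore;

impl Blockstore {
    // Returns new slots connecting to any element of `slot_indexes`.
    fn get_slots_since(&self, slot_indexes: &[u64]) -> Vec<SlotMeta> {
        slot_indexes
            .iter()
            .map(|s| SlotMeta { slot_index: s + 1, consumed: 0, next_slots: vec![s + 2] })
            .collect()
    }

    // Returns entries for `slot_index` starting at `entry_start_index`, up to `max_entries`.
    fn get_slot_entries(&self, _slot_index: u64, _entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry> {
        vec![Entry; max_entries.unwrap_or(4) as usize]
    }
}

fn main() {
    let blockstore = Blockstore;
    let mut frontier = vec![0u64]; // slots already replayed

    for _ in 0..3 {
        // Which new slots chain onto the ones we already know about?
        let new_slots = blockstore.get_slots_since(&frontier);
        for meta in &new_slots {
            // Pull that slot's entries, resuming past what was already consumed.
            let entries = blockstore.get_slot_entries(meta.slot_index, meta.consumed as usize, Some(8));
            println!(
                "slot {}: replaying {} entries ({} possible children)",
                meta.slot_index, entries.len(), meta.next_slots.len()
            );
        }
        frontier = new_slots.iter().map(|m| m.slot_index).collect();
    }
}
```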
|
||||
|
||||
## Interfacing with Bank
|
||||
|
||||
The bank exposes to replay stage:
|
||||
|
||||
1. `prev_hash`: which PoH chain it's working on as indicated by the hash of the last
|
||||
|
||||
entry it processed
|
||||
|
||||
2. `tick_height`: the ticks in the PoH chain currently being verified by this
|
||||
|
||||
bank
|
||||
|
||||
3. `votes`: a stack of records that contain:
   1. `prev_hashes`: what anything after this vote must chain to in PoH
   2. `tick_height`: the tick height at which this vote was cast
   3. `lockout period`: how long a chain must be observed to be in the ledger to be able to be chained below this vote
|
||||
|
||||
Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
|
||||
|
||||
## Pruning Blockstore
|
||||
|
||||
Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned or expunged.
|
||||
|
||||
Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.
|
||||
|
@ -28,7 +28,7 @@ lockout on a bank `b`.
|
||||
|
||||
This computation is performed on a votable candidate bank `b` as follows.
|
||||
|
||||
```text
|
||||
```
|
||||
let output: HashMap<b, StakeLockout> = HashMap::new();
|
||||
for vote_account in b.vote_accounts {
|
||||
for v in vote_account.vote_stack {
|
||||
@ -62,7 +62,7 @@ votes > v as the number of confirmations will be lower).
|
||||
|
||||
Now more specifically, we augment the above computation to:
|
||||
|
||||
```text
|
||||
```
|
||||
let output: HashMap<b, StakeLockout> = HashMap::new();
|
||||
let fork_commitment_cache = ForkCommitmentCache::default();
|
||||
for vote_account in b.vote_accounts {
|
||||
@ -76,7 +76,7 @@ Now more specifically, we augment the above computation to:
|
||||
```
|
||||
|
||||
where `f'` is defined as:
|
||||
```text
|
||||
```
|
||||
fn f'(
|
||||
stake_lockout: &mut StakeLockout,
|
||||
some_ancestor: &mut BlockCommitment,
|
||||
|
@ -26,7 +26,7 @@ account data. A transaction is now constructed in the normal way, but with the
|
||||
following additional requirements:
|
||||
|
||||
1) The durable nonce value is used in the `recent_blockhash` field
|
||||
2) An `AdvanceNonceAccount` instruction is the first issued in the transaction
|
||||
2) A `NonceAdvance` instruction is the first issued in the transaction
|
||||
|
||||
### Contract Mechanics
|
||||
|
||||
@ -67,7 +67,7 @@ A client wishing to use this feature starts by creating a nonce account under
|
||||
the system program. This account will be in the `Uninitialized` state with no
|
||||
stored hash, and thus unusable.
|
||||
|
||||
To initialize a newly created account, an `InitializeNonceAccount` instruction must be
|
||||
To initialize a newly created account, a `NonceInitialize` instruction must be
|
||||
issued. This instruction takes one parameter, the `Pubkey` of the account's
|
||||
[authority](../offline-signing/durable-nonce.md#nonce-authority). Nonce accounts
|
||||
must be [rent-exempt](rent.md#two-tiered-rent-regime) to meet the data-persistence
|
||||
@ -76,27 +76,27 @@ deposited before they can be initialized. Upon successful initialization, the
|
||||
cluster's most recent blockhash is stored along with specified nonce authority
|
||||
`Pubkey`.
|
||||
|
||||
The `AdvanceNonceAccount` instruction is used to manage the account's stored nonce
|
||||
The `NonceAdvance` instruction is used to manage the account's stored nonce
|
||||
value. It stores the cluster's most recent blockhash in the account's state data,
|
||||
failing if that matches the value already stored there. This check prevents
|
||||
replaying transactions within the same block.
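
A minimal sketch of that advance rule, with illustrative types rather than the actual system-program implementation:

```rust
// Storing the cluster's most recent blockhash into the nonce account fails if
// it matches the value already stored, which blocks replay within the same block.

#[derive(Debug, PartialEq)]
enum NonceError {
    NotExpired,
}

struct NonceAccountSketch {
    stored_hash: [u8; 32],
}

impl NonceAccountSketch {
    fn advance(&mut self, recent_blockhash: [u8; 32]) -> Result<(), NonceError> {
        if self.stored_hash == recent_blockhash {
            // Still in the same block: advancing would permit replay.
            return Err(NonceError::NotExpired);
        }
        self.stored_hash = recent_blockhash;
        Ok(())
    }
}

fn main() {
    let mut nonce = NonceAccountSketch { stored_hash: [0; 32] };
    assert_eq!(nonce.advance([1; 32]), Ok(()));                      // new blockhash: advances
    assert_eq!(nonce.advance([1; 32]), Err(NonceError::NotExpired)); // same blockhash: rejected
}
```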
|
||||
|
||||
Due to nonce accounts' [rent-exempt](rent.md#two-tiered-rent-regime) requirement,
|
||||
a custom withdraw instruction is used to move funds out of the account.
|
||||
The `WithdrawNonceAccount` instruction takes a single argument, lamports to withdraw,
|
||||
The `NonceWithdraw` instruction takes a single argument, lamports to withdraw,
|
||||
and enforces rent-exemption by preventing the account's balance from falling
|
||||
below the rent-exempt minimum. An exception to this check is if the final balance
|
||||
would be zero lamports, which makes the account eligible for deletion. This
|
||||
account closure detail has an additional requirement that the stored nonce value
|
||||
must not match the cluster's most recent blockhash, as per `AdvanceNonceAccount`.
|
||||
must not match the cluster's most recent blockhash, as per `NonceAdvance`.
|
||||
|
||||
The account's [nonce authority](../offline-signing/durable-nonce.md#nonce-authority)
|
||||
can be changed using the `AuthorizeNonceAccount` instruction. It takes one parameter,
|
||||
can be changed using the `NonceAuthorize` instruction. It takes one parameter,
|
||||
the `Pubkey` of the new authority. Executing this instruction grants full
|
||||
control over the account and its balance to the new authority.
|
||||
|
||||
{% hint style="info" %}
|
||||
`AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current
|
||||
`NonceAdvance`, `NonceWithdraw` and `NonceAuthorize` all require the current
|
||||
[nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the
|
||||
account to sign the transaction.
|
||||
{% endhint %}
|
||||
@ -108,7 +108,7 @@ an extant `recent_blockhash` on the transaction and prevent fee theft via
|
||||
failed transaction replay, runtime modifications are necessary.
|
||||
|
||||
Any transaction failing the usual `check_hash_age` validation will be tested
|
||||
for a Durable Transaction Nonce. This is signaled by including a `AdvanceNonceAccount`
|
||||
for a Durable Transaction Nonce. This is signaled by including a `NonceAdvance`
|
||||
instruction as the first instruction in the transaction.
|
||||
|
||||
If the runtime determines that a Durable Transaction Nonce is in use, it will
|
||||
@ -124,10 +124,10 @@ If all three of the above checks succeed, the transaction is allowed to continue
|
||||
validation.
|
||||
|
||||
Since transactions that fail with an `InstructionError` are charged a fee and
|
||||
changes to their state rolled back, there is an opportunity for fee theft if an
|
||||
`AdvanceNonceAccount` instruction is reverted. A malicious validator could replay the
|
||||
changes to their state rolled back, there is an opportunity for fee theft if a
|
||||
`NonceAdvance` instruction is reverted. A malicious validator could replay the
|
||||
failed transaction until the stored nonce is successfully advanced. Runtime
|
||||
changes prevent this behavior. When a durable nonce transaction fails with an
|
||||
`InstructionError` aside from the `AdvanceNonceAccount` instruction, the nonce account
|
||||
`InstructionError` aside from the `NonceAdvance` instruction, the nonce account
|
||||
is rolled back to its pre-execution state as usual. Then the runtime advances
|
||||
its nonce value and the advanced nonce account stored as if it succeeded.
|
||||
|
@ -10,6 +10,7 @@ These protocol-based rewards, to be distributed to participating validation and
|
||||
|
||||
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below.
|
||||
|
||||
A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunties and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aide participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
|
||||
A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunties and marketplace. Additionally, in [Storage Rent Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aide participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
|
||||
|
||||
**Figure 1**: Schematic overview of Solana economic incentive design.
|
||||
|
||||
|
@ -8,4 +8,5 @@ While replication-clients are incentivized and rewarded through protocol-based r
|
||||
|
||||
The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detail in the [Economic Sustainability](../ed_economic_sustainability.md) chapter\), thus the transaction fees are expected to be proportionally higher.
|
||||
|
||||
There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](../ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archivers challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
|
||||
There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archivers challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
|
||||
|
||||
|
@ -11,7 +11,7 @@ Validator-client rewards for these services are to be distributed at the end of
|
||||
|
||||
The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of:
|
||||
|
||||
* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](.)\)
|
||||
* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_validartion_client_economics.md)\)
|
||||
* the fraction of staked SOLs out of the current total circulating supply,
|
||||
* the up-time/participation \[% of available slots that validator had opportunity to vote on\] of a given validator over the previous epoch.
|
||||
|
||||
|
@ -13,6 +13,7 @@ Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-b
|
||||
|
||||
Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](../../transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilisation. By monitoring a protocol specified gas usage with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level.
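
A toy sketch of such an adjustment loop is shown below; the step size and the numbers are purely illustrative, not protocol parameters.

```rust
// Nudge the minimum fee up when recent usage exceeds the target and down when
// it falls below, steering hardware usage toward the target level.

fn adjust_min_fee(min_fee: f64, recent_usage: f64, target_usage: f64) -> f64 {
    let step = 0.10; // illustrative adjustment step
    if recent_usage > target_usage {
        min_fee * (1.0 + step) // demand too high: raise the fee floor
    } else if recent_usage < target_usage {
        (min_fee * (1.0 - step)).max(0.000001) // demand too low: lower the fee floor
    } else {
        min_fee
    }
}

fn main() {
    let mut fee = 0.001;
    let target = 0.75; // desired fraction of hardware capacity in use
    for usage in [0.90, 0.85, 0.70, 0.75] {
        fee = adjust_min_fee(fee, usage, target);
        println!("usage {:.2} -> new minimum fee {:.6}", usage, fee);
    }
}
```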
|
||||
|
||||
As mentioned, a fixed proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation-limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\)[1](../ed_references.md).
As mentioned, a fixed proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation-limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\)[1](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_referenced.md).
|
||||
|
||||
Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place.
|
||||
|
||||
|
@ -18,9 +18,9 @@ Accounts whose balance is insufficient to satisfy the rent that would be due sim
|
||||
|
||||
A percentage of the rent collected is destroyed. The rest is distributed to validator accounts by stake weight, a la transaction fees, at the end of every slot.
|
||||
|
||||
## Read-only accounts
|
||||
## Credit only
|
||||
|
||||
Read-only accounts are not being charged rent in current implementation.
|
||||
Credit only accounts are treated as a special case. They are loaded as if rent were due, but updates to their state may be delayed until the end of the slot, when credits are paid.
|
||||
|
||||
## Design considerations, others considered
|
||||
|
||||
|
@ -12,7 +12,7 @@ For brevity this design assumes that a single voter with a stake is deployed as
|
||||
|
||||
## Time
|
||||
|
||||
The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](../cluster/synchronization.md).
|
||||
The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/synchronization.md).
|
||||
|
||||
Proof of History is used to create a deterministic round robin schedule for all the active leaders. At any given time only 1 leader, which can be computed from the ledger itself, can propose a fork. For more details, see [fork generation](../cluster/fork-generation.md) and [leader rotation](../cluster/leader-rotation.md).
|
||||
|
||||
@ -109,7 +109,7 @@ When evaluating multiple forks, each validator should use the following rules:
|
||||
3. Pick the fork that has the greatest amount of cluster transaction fees.
|
||||
4. Pick the latest fork in terms of PoH.
|
||||
|
||||
Cluster transaction fees are fees that are deposited to the mining pool as described in the [Staking Rewards](staking-rewards.md) section.
|
||||
Cluster transaction fees are fees that are deposited to the mining pool as described in the [Staking Rewards](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/staking-rewards.md) section.
|
||||
|
||||
## PoH ASIC Resistance
|
||||
|
||||
@ -134,3 +134,4 @@ An attacker generates a concurrent fork from an older block to try to rollback t
|
||||
* 3 votes have a lockout of 8 slots. Concurrent fork must be at least 8 slots ahead and produced in 3 slots. Therefore requires an ASIC 2.6x faster.
|
||||
* 10 votes have a lockout of 1024 slots. 1024/10, or 102.4x faster ASIC.
|
||||
* 20 votes have a lockout of 2^20 slots. 2^20/20, or 52,428.8x faster ASIC.
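
The arithmetic behind these figures can be checked with a few lines: `2^votes` lockout slots divided by the number of votes gives the required ASIC speedup.

```rust
// Each additional vote doubles the lockout (2^votes slots), so the speedup an
// attacker's ASIC would need is lockout / votes.

fn main() {
    for votes in [3u32, 10, 20] {
        let lockout: u64 = 1u64 << votes; // 2^votes slots
        let speedup = lockout as f64 / votes as f64;
        println!("{:2} votes -> lockout {:>9} slots -> ~{:.1}x faster ASIC", votes, lockout, speedup);
    }
}
```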
|
||||
|
||||
|
@ -64,7 +64,7 @@ presently stored nonce value with
|
||||
- Command
|
||||
|
||||
```bash
|
||||
solana nonce nonce-keypair.json
|
||||
solana get-nonce nonce-keypair.json
|
||||
```
|
||||
|
||||
- Output
|
||||
@ -105,7 +105,7 @@ Inspect a nonce account in a more human friendly format with
|
||||
- Command
|
||||
|
||||
```bash
|
||||
solana nonce-account nonce-keypair.json
|
||||
solana show-nonce-account nonce-keypair.json
|
||||
```
|
||||
|
||||
- Output
|
||||
@ -117,7 +117,7 @@ nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS
|
||||
```
|
||||
|
||||
{% hint style="info" %}
|
||||
[Full usage documentation](../api-reference/cli.md#solana-nonce-account)
|
||||
[Full usage documentation](../api-reference/cli.md#solana-show-nonce-account)
|
||||
{% endhint %}
|
||||
|
||||
### Withdraw Funds from a Nonce Account
|
||||
@ -236,7 +236,7 @@ Remember, `alice.json` is the [nonce authority](#nonce-authority) in this exampl
|
||||
{% endhint %}
|
||||
|
||||
```bash
|
||||
$ solana nonce-account nonce.json
|
||||
$ solana show-nonce-account nonce.json
|
||||
balance: 1 SOL
|
||||
minimum balance required: 0.00136416 SOL
|
||||
nonce: F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7
|
||||
@ -256,7 +256,7 @@ $ solana balance -k bob.json
|
||||
1 SOL
|
||||
```
|
||||
```bash
|
||||
$ solana nonce-account nonce.json
|
||||
$ solana show-nonce-account nonce.json
|
||||
balance: 1 SOL
|
||||
minimum balance required: 0.00136416 SOL
|
||||
nonce: 6bjroqDcZgTv6Vavhqf81oBHTv3aMnX19UTB51YhAZnN
|
||||
|
@ -2,7 +2,7 @@
|
||||
Follow this guide to setup Solana's key generation tool called `solana-keygen`
|
||||
|
||||
{% hint style="warn" %}
|
||||
After installation, ensure your version is `0.23.1` or higher by running `solana-keygen -V`
|
||||
After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
|
||||
{% endhint %}
|
||||
|
||||
## Download
|
||||
|
@ -102,7 +102,7 @@ networked machine.
|
||||
Next, configure the `solana` CLI tool to connect to a particular cluster:
|
||||
|
||||
```bash
|
||||
solana config set --url <CLUSTER URL> # (i.e. http://testnet.solana.com:8899)
|
||||
solana set --url <CLUSTER URL> # (i.e. http://testnet.solana.com:8899)
|
||||
```
|
||||
|
||||
Finally, to check the balance, run the following command:
|
||||
|
@ -1,20 +1,6 @@
|
||||
# Programming Model
|
||||
|
||||
An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed sequentially and atomically. If any instruction is invalid, any changes made within the transaction are discarded.
|
||||
|
||||
### Accounts and Signatures
|
||||
|
||||
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.
|
||||
|
||||
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.
|
||||
|
||||
### Recent Blockhash
|
||||
|
||||
A Transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
|
||||
|
||||
### Instructions
|
||||
|
||||
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
|
||||
A client _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed atomically. If any instruction is invalid, any changes made within the transaction are discarded.
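To make the instruction/account relationship concrete, here is a minimal sketch that builds one instruction with the `solana-sdk` types of this era (`Instruction`, `AccountMeta`). The program id, account keys, and data bytes are placeholders, and a real client would still compile this into a signed transaction before submitting it; consult the SDK for the exact helpers in your version.

```rust
use solana_sdk::{
    instruction::{AccountMeta, Instruction},
    pubkey::Pubkey,
};

/// Build a single instruction for a hypothetical transfer-like program.
fn build_example_instruction(program_id: Pubkey, from: Pubkey, to: Pubkey) -> Instruction {
    Instruction {
        // The program account that will interpret `data`; it must be executable.
        program_id,
        // Accounts passed to the program: `from` must sign (authorizes the
        // debit), `to` is written to but needs no signature.
        accounts: vec![
            AccountMeta::new(from, true),
            AccountMeta::new(to, false),
        ],
        // Opaque bytes; only the program defines how to decode them.
        data: vec![1, 0, 0, 0],
    }
}
```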
## Deploying Programs to a Cluster
|
||||
|
@ -16,8 +16,6 @@ Creator of on-chain game tic-tac-toe hosts a drone that responds to airdrop requ
|
||||
|
||||
Creator of a new on-chain token \(ERC-20 interface\) may wish to do a worldwide airdrop to distribute its tokens to millions of users over just a few seconds. That drone cannot spend resources interacting with the Solana cluster. Instead, the drone should only verify the client is unique and human, and then return the signature. It may also want to listen to the Solana cluster for recent entry IDs to support client retries and to ensure the airdrop is targeting the desired cluster.
|
||||
|
||||
Note: the Solana cluster will not parallelize transactions funded by the same fee-paying account. This means that the max throughput of a single fee-paying account is limited to the number of _ticks_ processed per second by the current leader. Add additional fee-paying accounts to improve throughput.
|
||||
|
||||
## Attack vectors
|
||||
|
||||
### Invalid recent\_blockhash
|
@ -1,90 +0,0 @@
|
||||
# Solana ABI management process
|
||||
|
||||
This document proposes the Solana ABI management process. The ABI management
|
||||
process is an engineering practice and a supporting technical framework to avoid
|
||||
introducing unintended incompatible ABI changes.
|
||||
|
||||
# Problem
|
||||
|
||||
The Solana ABI (binary interface to the cluster) is currently only defined
|
||||
implicitly by the implementation and requires a very careful eye to notice
|
||||
breaking changes. This makes it extremely difficult to upgrade the software
|
||||
on an existing cluster without rebooting the ledger.
|
||||
|
||||
# Requirements and objectives
|
||||
|
||||
- Unintended ABI changes can be detected as CI failures mechanically.
|
||||
- Newer implementation must be able to process the oldest data (since genesis)
|
||||
once we go mainnet.
|
||||
- The objective of this proposal is to protect the ABI while sustaining rather
|
||||
rapid development by opting for a mechanical process rather than a very long
|
||||
human-driven auditing process.
|
||||
- Once signed cryptographically, data blob must be identical, so no
|
||||
in-place data format update is possible regardless of inbound and outbound of
|
||||
the online system. Also, considering the sheer volume of transactions we're
|
||||
aiming to handle, retrospective in-place update is undesirable at best.
|
||||
|
||||
# Solution
|
||||
|
||||
Instead of relying on human due diligence, which should be assumed to fail regularly, we need a systematic assurance that changing the source code does not break the cluster.
|
||||
|
||||
For that purpose, we introduce a mechanism of marking every ABI-related item in the source code (`struct`s, `enum`s) with the new `#[frozen_abi]` attribute. It takes a hard-coded digest value derived from the types of its fields via `ser::Serialize`, and it automatically generates a unit test that tries to detect any unsanctioned changes to the marked ABI-related items.
|
||||
|
||||
However, the detection cannot be complete; no matter how hard we statically analyze the source code, it is still possible to break the ABI. Examples include hand-written `ser::Serialize` implementations that are not `derive`d, implementation changes in underlying libraries (for example `bincode`), and CPU architecture differences. Detecting these possible ABI incompatibilities is out of scope for this ABI management.
|
||||
|
||||
# Definitions
|
||||
|
||||
ABI item/type: the various types used for serialization, which collectively comprise the whole ABI for any system component. For example, those types include `struct`s and `enum`s.
|
||||
|
||||
ABI item digest: a fixed hash derived from the type information of an ABI item's fields.
|
||||
|
||||
# Example
|
||||
|
||||
```patch
|
||||
+#[frozen_abi(digest="1c6a53e9")]
|
||||
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Vote {
|
||||
/// A stack of votes starting with the oldest vote
|
||||
pub slots: Vec<Slot>,
|
||||
/// signature of the bank's state at the last slot
|
||||
pub hash: Hash,
|
||||
}
|
||||
```
|
||||
|
||||
# Developer's workflow
|
||||
|
||||
To learn the digest for a new ABI item, developers can add `frozen_abi` with a random digest value, run the unit tests, and replace it with the correct digest taken from the assertion failure message.
|
||||
|
||||
In general, once we add `frozen_abi` and the change is published in the stable release channel, its digest should never change. If such a change is needed, we should opt for defining a new struct like `FooV1`, and a special release flow such as a hard fork should be considered.
|
||||
|
||||
# Implementation remarks
|
||||
|
||||
We use some degree of macro machinery to automatically generate unit tests and calculate a digest from ABI items. This is doable by clever use of `serde::Serialize` ([1]) and `std::any::type_name` ([2]). As a precedent for a similar implementation, `ink` from Parity Technologies [3] may be informative.
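The digest idea can be illustrated with a toy sketch that is *not* the real `frozen_abi` machinery: hash each field's name and Rust type name, in declaration order, and treat the result as the ABI digest. The real framework derives its information through `serde` instead, but the failure mode is the same: renaming a field or changing its type changes the digest, which is exactly the signal the generated unit test asserts on.

```rust
use std::any::type_name;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Toy digest: hash each (field name, Rust type name) pair, in order.
fn toy_abi_digest(fields: &[(&str, &str)]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for (name, ty) in fields {
        name.hash(&mut hasher);
        ty.hash(&mut hasher);
    }
    hasher.finish()
}

fn main() {
    // Field layout of the `Vote` example above, treating Slot as u64 and
    // Hash as a 32-byte array.
    let digest = toy_abi_digest(&[
        ("slots", type_name::<Vec<u64>>()),
        ("hash", type_name::<[u8; 32]>()),
    ]);
    println!("toy Vote digest: {:x}", digest);
}
```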
# References
|
||||
|
||||
1. [(De)Serialization with type info · Issue #1095 · serde-rs/serde](https://github.com/serde-rs/serde/issues/1095#issuecomment-345483479)
|
||||
2. [`std::any::type_name` - Rust](https://doc.rust-lang.org/std/any/fn.type_name.html)
|
||||
3. [Parity's ink to write smart contracts](https://github.com/paritytech/ink)
|
@ -10,7 +10,7 @@ When replay stage starts processing the same transactions, it can assume that Po
|
||||
|
||||
## Fee Account
|
||||
|
||||
The [fee account](../terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee.
|
||||
The [fee account](https://github.com/solana-labs/solana/tree/b5f7a4bff9953415b1f3d385bd59bc65c1ec11a4/book/src/proposals/terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee.
|
||||
|
||||
## Balance Cache
|
||||
|
||||
@ -53,3 +53,4 @@ The same fee account can be reused many times in the same block until it is used
|
||||
Clients that transmit a large number of transactions per second should use a dedicated fee account that is not used as Credit-Debit in any instruction.
|
||||
|
||||
Once a fee account is used as Credit-Debit, it will fail the balance check until the balance cache is reset.
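A minimal sketch of that rule, with invented types rather than the actual leader implementation: the cache holds a provisional balance per fee account, fee-only use keeps debiting the cached value, and any Credit-Debit use invalidates the entry until the cache is reset.

```rust
use std::collections::HashMap;

type Pubkey = [u8; 32];

/// Provisional fee balances tracked while building a block.
struct BalanceCache {
    balances: HashMap<Pubkey, u64>,
}

impl BalanceCache {
    /// Try to charge `fee` against the cached balance of `fee_account`.
    /// Returns false if the entry was invalidated or cannot cover the fee.
    fn charge_fee(&mut self, fee_account: Pubkey, fee: u64) -> bool {
        match self.balances.get_mut(&fee_account) {
            Some(balance) if *balance >= fee => {
                *balance -= fee;
                true
            }
            _ => false,
        }
    }

    /// Any Credit-Debit use of the account makes the cached value stale, so
    /// it fails the balance check until the cache is reset.
    fn mark_credit_debit(&mut self, account: &Pubkey) {
        self.balances.remove(account);
    }

    /// Reset at the cache boundary (e.g. the start of a new block).
    fn reset(&mut self, fresh: HashMap<Pubkey, u64>) {
        self.balances = fresh;
    }
}
```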
@ -1,108 +1,74 @@
|
||||
# Simple Payment and State Verification
|
||||
|
||||
It is often useful to allow low resourced clients to participate in a Solana
|
||||
cluster. Be this participation economic or contract execution, verification
|
||||
that a client's activity has been accepted by the network is typically
|
||||
expensive. This proposal lays out a mechanism for such clients to confirm that
|
||||
their actions have been committed to the ledger state with minimal resource
|
||||
expenditure and third-party trust.
|
||||
It is often useful to allow low resourced clients to participate in a Solana cluster. Be this participation economic or contract execution, verification that a client's activity has been accepted by the network is typically expensive. This proposal lays out a mechanism for such clients to confirm that their actions have been committed to the ledger state with minimal resource expenditure and third-party trust.
|
||||
|
||||
## A Naive Approach
|
||||
|
||||
Validators store the signatures of recently confirmed transactions for a short
|
||||
period of time to ensure that they are not processed more than once. Validators
|
||||
provide a JSON RPC endpoint, which clients can use to query the cluster if a
|
||||
transaction has been recently processed. Validators also provide a PubSub
|
||||
notification, whereby a client registers to be notified when a given signature
|
||||
is observed by the validator. While these two mechanisms allow a client to
|
||||
verify a payment, they are not a proof and rely on completely trusting a
|
||||
validator.
|
||||
Validators store the signatures of recently confirmed transactions for a short period of time to ensure that they are not processed more than once. Validators provide a JSON RPC endpoint, which clients can use to query the cluster if a transaction has been recently processed. Validators also provide a PubSub notification, whereby a client registers to be notified when a given signature is observed by the validator. While these two mechanisms allow a client to verify a payment, they are not a proof and rely on completely trusting a validator.
|
||||
|
||||
We will describe a way to minimize this trust using Merkle Proofs to anchor the
|
||||
validator's response in the ledger, allowing the client to confirm on their own
|
||||
that a sufficient number of their preferred validators have confirmed a
|
||||
transaction. Requiring multiple validator attestations further reduces trust in
|
||||
the validator, as it increases both the technical and economic difficulty of
|
||||
compromising several other network participants.
|
||||
We will describe a way to minimize this trust using Merkle Proofs to anchor the validator's response in the ledger, allowing the client to confirm on their own that a sufficient number of their preferred validators have confirmed a transaction. Requiring multiple validator attestations further reduces trust in the validator, as it increases both the technical and economic difficulty of compromising several other network participants.
|
||||
|
||||
## Light Clients
|
||||
|
||||
A 'light client' is a cluster participant that does not itself run a validator.
|
||||
This light client would provide a level of security greater than trusting a
|
||||
remote validator, without requiring the light client to spend a lot of resources
|
||||
verifying the ledger.
|
||||
A 'light client' is a cluster participant that does not itself run a validator. This light client would provide a level of security greater than trusting a remote validator, without requiring the light client to spend a lot of resources verifying the ledger.
|
||||
|
||||
Rather than providing transaction signatures directly to a light client, the
|
||||
validator instead generates a Merkle Proof from the transaction of interest to
|
||||
the root of a Merkle Tree of all transactions in the including block. This
|
||||
Merkle Root is stored in a ledger entry which is voted on by validators,
|
||||
providing it consensus legitimacy. The additional level of security for a light
|
||||
client depends on an initial canonical set of validators the light client
|
||||
considers to be the stakeholders of the cluster. As that set is changed, the
|
||||
client can update its internal set of known validators with
|
||||
[receipts](simple-payment-and-state-verification.md#receipts). This may become
|
||||
challenging with a large number of delegated stakes.
|
||||
Rather than providing transaction signatures directly to a light client, the validator instead generates a Merkle Proof from the transaction of interest to the root of a Merkle Tree of all transactions in the including block. This Merkle Root is stored in a ledger entry which is voted on by validators, providing it consensus legitimacy. The additional level of security for a light client depends on an initial canonical set of validators the light client considers to be the stakeholders of the cluster. As that set is changed, the client can update its internal set of known validators with [receipts](simple-payment-and-state-verification.md#receipts). This may become challenging with a large number of delegated stakes.
|
||||
|
||||
Validators themselves may want to use light client APIs for performance reasons.
|
||||
For example, during the initial launch of a validator, the validator may use a
|
||||
cluster provided checkpoint of the state and verify it with a receipt.
|
||||
Validators themselves may want to use light client APIs for performance reasons. For example, during the initial launch of a validator, the validator may use a cluster provided checkpoint of the state and verify it with a receipt.
|
||||
|
||||
## Receipts
|
||||
|
||||
A receipt is a minimal proof that: a transaction has been included in a block, that the block has been voted on by the client's preferred set of validators, and that the votes have reached the desired confirmation depth.
|
||||
A receipt is a minimal proof that: a transaction has been included in a block, that the block has been voted on by the client's preferred set of validators, and that the votes have reached the desired confirmation depth.
|
||||
|
||||
### Transaction Inclusion Proof
|
||||
The receipts for both state and payments start with a Merkle Path from the value into a Bank-Merkle that has been voted on and included in the ledger. A chain of PoH Entries containing subsequent validator votes, deriving from the Bank-Merkle, is the confirmation proof.
|
||||
|
||||
A transaction inclusion proof is a data structure that contains a Merkle Path
|
||||
from a transaction, through an Entry-Merkle to a Block-Merkle, which is included
|
||||
in a Bank-Hash with the required set of validator votes. A chain of PoH Entries
|
||||
containing subsequent validator votes, deriving from the Bank-Hash, is the proof
|
||||
of confirmation. Clients can examine this ledger data and compute finality using
|
||||
Solana's fork selection rules.
|
||||
Clients can examine this ledger data and compute the finality using Solana's fork selection rules.
|
||||
|
||||
An Entry-Merkle is a Merkle Root including all transactions in a given entry,
|
||||
sorted by signature.
|
||||
### Payment Merkle Path
|
||||
|
||||
A Block-Merkle is the Merkle Root of all the Entry-Merkles sequenced in the block.
|
||||
A payment receipt is a data structure that contains a Merkle Path from a transaction to the required set of validator votes.
|
||||
|
||||
An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted by signature.
|
||||
|
||||

|
||||
|
||||
A Bank-Hash is the hash of the concatenation of the Block-Merkle and Accounts-Hash
|
||||
A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block. Transaction status is necessary for the receipt because the state receipt is constructed for the block. Two transactions over the same state can appear in the block, and therefore there is no way to infer from just the state whether a transaction that is committed to the ledger has succeeded or failed in modifying the intended state. It may not be necessary to encode the full status code; a single status bit indicating the transaction's success may suffice.
|
||||
|
||||
<img alt="Bank Hash Diagram" src="img/spv-bank-hash.svg" class="center"/>
|
||||
### State Merkle Path
|
||||
|
||||
An Accounts-Hash is the hash of the concatenation of the state hashes of each account modified during the current slot.
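The receipt check can be sketched generically. This is not Solana's actual hashing scheme (leaf encoding, domain separation, concatenation order, and the use of the `sha2` crate here are all assumptions); it only shows the shape of the verification: fold the transaction hash up through the Entry-Merkle and Block-Merkle paths, then recompute the Bank-Hash from the Block-Merkle and Accounts-Hash and compare.

```rust
use sha2::{Digest, Sha256};

fn hash_pair(left: &[u8], right: &[u8]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

/// Fold a leaf hash up a Merkle path. Each step supplies the sibling hash
/// and whether that sibling sits on the left.
fn fold_merkle_path(leaf: [u8; 32], path: &[([u8; 32], bool)]) -> [u8; 32] {
    let mut node = leaf;
    for (sibling, sibling_is_left) in path {
        node = if *sibling_is_left {
            hash_pair(sibling, &node)
        } else {
            hash_pair(&node, sibling)
        };
    }
    node
}

/// Receipt check sketched above: transaction -> Entry-Merkle -> Block-Merkle,
/// then Bank-Hash = hash(Block-Merkle || Accounts-Hash).
fn verify_inclusion(
    tx_hash: [u8; 32],
    entry_path: &[([u8; 32], bool)],
    block_path: &[([u8; 32], bool)],
    accounts_hash: [u8; 32],
    expected_bank_hash: [u8; 32],
) -> bool {
    let entry_merkle = fold_merkle_path(tx_hash, entry_path);
    let block_merkle = fold_merkle_path(entry_merkle, block_path);
    hash_pair(&block_merkle, &accounts_hash) == expected_bank_hash
}
```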
A state receipt provides a confirmation that a specific state is committed at the end of the block. Inter-block state transitions do not generate a receipt.
|
||||
|
||||
Transaction status is necessary for the receipt because the state receipt is constructed for the block. Two transactions over the same state can appear in the block, and therefore there is no way to infer from just the state whether a transaction that is committed to the ledger has succeeded or failed in modifying the intended state. It may not be necessary to encode the full status code; a single status bit indicating the transaction's success may suffice.
|
||||
For example:
|
||||
|
||||
### Account State Verification
|
||||
* A sends 5 Lamports to B
|
||||
* B spends 5 Lamports
|
||||
* C sends 5 Lamports to A
|
||||
|
||||
An account's state (balance or other data) can be verified by submitting a transaction with a ___TBD___ Instruction to the cluster. The client can then use a [Transaction Inclusion Proof](#transaction-inclusion-proof) to verify whether the cluster agrees that the account has reached the expected state.
|
||||
At the end of the block, A and B are in the exact same starting state, and any state receipt would point to the same value for A or B.
|
||||
|
||||
The Bank-Merkle is computed from the Merkle Tree of the new state changes, along with the Previous Bank-Merkle, and the Block-Merkle.
|
||||
|
||||

|
||||
|
||||
A state receipt contains only the state changes occurring in the block. A direct Merkle Path to the current Bank-Merkle guarantees the state value at that bank hash, but it cannot be used to generate a “current” receipt to the latest state if the state modification occurred in some previous block. There is no guarantee that the path provided by the validator is the latest one available out of all the previous Bank-Merkles.
|
||||
|
||||
Clients that want to query the chain for a receipt of the "latest" state would need to create a transaction that would update the Merkle Path for that account, such as a credit of 0 Lamports.
|
||||
|
||||
### Validator Votes
|
||||
|
||||
Leaders should coalesce the validator votes by stake weight into a single entry.
|
||||
This will reduce the number of entries necessary to create a receipt.
|
||||
Leaders should coalesce the validator votes by stake weight into a single entry. This will reduce the number of entries necessary to create a receipt.
|
||||
|
||||
### Chain of Entries
|
||||
|
||||
A receipt has a PoH link from the payment or state Merkle Path root to a list
|
||||
of consecutive validation votes.
|
||||
A receipt has a PoH link from the payment or state Merkle Path root to a list of consecutive validation votes.
|
||||
|
||||
It contains the following:
|
||||
|
||||
* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Hash
|
||||
* State -> Bank-Merkle
|
||||
|
||||
or
|
||||
|
||||
* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Merkle
|
||||
|
||||
And a vector of PoH entries:
|
||||
|
||||
@ -123,33 +89,21 @@ LightEntry {
|
||||
}
|
||||
```
|
||||
|
||||
The light entries are reconstructed from Entries and simply show the entry
|
||||
Merkle Root that was mixed in to the PoH hash, instead of the full transaction
|
||||
set.
|
||||
The light entries are reconstructed from Entries and simply show the entry Merkle Root that was mixed in to the PoH hash, instead of the full transaction set.
|
||||
|
||||
Clients do not need the starting vote state. The
|
||||
[fork selection](../implemented-proposals/tower-bft.md) algorithm is defined
|
||||
such that only votes that appear after the transaction provide finality for the
|
||||
transaction, and finality is independent of the starting state.
|
||||
Clients do not need the starting vote state. The [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) algorithm is defined such that only votes that appear after the transaction provide finality for the transaction, and finality is independent of the starting state.
|
||||
|
||||
### Verification
|
||||
|
||||
A light client that is aware of the supermajority set validators can verify a
|
||||
receipt by following the Merkle Path to the PoH chain. The Block-Merkle is the
|
||||
Merkle Root and will appear in votes included in an Entry. The light client can
|
||||
simulate [fork selection](../implemented-proposals/tower-bft.md) for the
|
||||
consecutive votes and verify that the receipt is confirmed at the desired
|
||||
lockout threshold.
|
||||
A light client that is aware of the supermajority set validators can verify a receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the Merkle Root and will appear in votes included in an Entry. The light client can simulate [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) for the consecutive votes and verify that the receipt is confirmed at the desired lockout threshold.
|
||||
|
||||
### Synthetic State
|
||||
|
||||
Synthetic state should be computed into the Bank-Hash along with the bank
|
||||
generated state.
|
||||
Synthetic state should be computed into the Bank-Merkle along with the bank generated state.
|
||||
|
||||
For example:
|
||||
|
||||
* Epoch validator accounts and their stakes and weights.
|
||||
* Computed fee rates
|
||||
|
||||
These values should have an entry in the Bank-Hash. They should live under known
|
||||
accounts, and therefore have an index into the hash concatenation.
|
||||
These values should have an entry in the Bank-Merkle. They should live under known accounts, and therefore have an exact address in the Merkle Path.
|
||||
|
@ -2,44 +2,24 @@
|
||||
|
||||
## History
|
||||
|
||||
When we first started Solana, the goal was to de-risk our TPS claims. We knew that between optimistic concurrency control and sufficiently long leader slots, PoS consensus was not the biggest risk to TPS. It was GPU-based signature
|
||||
verification, software pipelining and concurrent banking. Thus, the TPU was
|
||||
born. After topping 100k TPS, we split the team into one group working toward
|
||||
710k TPS and another to flesh out the validator pipeline. Hence, the TVU was
|
||||
born. The current architecture is a consequence of incremental development with
|
||||
that ordering and project priorities. It is not a reflection of what we ever
|
||||
believed was the most technically elegant cross-section of those technologies.
|
||||
In the context of leader rotation, the strong distinction between leading and
|
||||
validating is blurred.
|
||||
When we first started Solana, the goal was to de-risk our TPS claims. We knew that between optimistic concurrency control and sufficiently long leader slots, PoS consensus was not the biggest risk to TPS. It was GPU-based signature verification, software pipelining and concurrent banking. Thus, the TPU was born. After topping 100k TPS, we split the team into one group working toward 710k TPS and another to flesh out the validator pipeline. Hence, the TVU was born. The current architecture is a consequence of incremental development with that ordering and project priorities. It is not a reflection of what we ever believed was the most technically elegant cross-section of those technologies. In the context of leader rotation, the strong distinction between leading and validating is blurred.
|
||||
|
||||
## Difference between validating and leading
|
||||
|
||||
The fundamental difference between the pipelines is when the PoH is present. In
|
||||
a leader, we process transactions, removing bad ones, and then tag the result
|
||||
with a PoH hash. In the validator, we verify that hash, peel it off, and
|
||||
process the transactions in exactly the same way. The only difference is that
|
||||
if a validator sees a bad transaction, it can't simply remove it like the
|
||||
leader does, because that would cause the PoH hash to change. Instead, it
|
||||
rejects the whole block. The other difference between the pipelines is what
|
||||
happens _after_ banking. The leader broadcasts entries to downstream validators
|
||||
whereas the validator will have already done that in RetransmitStage, which is
|
||||
a confirmation time optimization. The validation pipeline, on the other hand,
|
||||
has one last step. Any time it finishes processing a block, it needs to weigh
|
||||
any forks it's observing, possibly cast a vote, and if so, reset its PoH hash
|
||||
to the block hash it just voted on.
|
||||
The fundamental difference between the pipelines is when the PoH is present. In a leader, we process transactions, removing bad ones, and then tag the result with a PoH hash. In the validator, we verify that hash, peel it off, and process the transactions in exactly the same way. The only difference is that if a validator sees a bad transaction, it can't simply remove it like the leader does, because that would cause the PoH hash to change. Instead, it rejects the whole block. The other difference between the pipelines is what happens _after_ banking. The leader broadcasts entries to downstream validators whereas the validator will have already done that in RetransmitStage, which is a confirmation time optimization. The validation pipeline, on the other hand, has one last step. Any time it finishes processing a block, it needs to weigh any forks it's observing, possibly cast a vote, and if so, reset its PoH hash to the block hash it just voted on.
|
||||
|
||||
## Proposed Design
|
||||
|
||||
We unwrap the many abstraction layers and build a single pipeline that can
|
||||
toggle leader mode on whenever the validator's ID shows up in the leader
|
||||
schedule.
|
||||
We unwrap the many abstraction layers and build a single pipeline that can toggle leader mode on whenever the validator's ID shows up in the leader schedule.
|
||||
|
||||

|
||||
|
||||
## Notable changes
|
||||
|
||||
* No threads are shut down to switch out of leader mode. Instead, FetchStage
|
||||
|
||||
should forward transactions to the next leader.
|
||||
|
||||
* Hoist FetchStage and BroadcastStage out of TPU
|
||||
* BankForks renamed to Banktree
|
||||
* TPU moves to new socket-free crate called solana-tpu.
|
||||
@ -47,6 +27,8 @@ schedule.
|
||||
* TVU goes away
|
||||
* New RepairStage absorbs Shred Fetch Stage and repair requests
|
||||
* JSON RPC Service is optional - used for debugging. It should instead be part
|
||||
|
||||
of a separate `solana-blockstreamer` executable.
|
||||
|
||||
* New MulticastStage absorbs retransmit part of RetransmitStage
|
||||
* MulticastStage downstream of Blockstore
|
||||
|
@ -149,8 +149,8 @@ From another console, confirm the IP address and **identity pubkey** of your arc
|
||||
solana-gossip spy --entrypoint testnet.solana.com:8001
|
||||
```
|
||||
|
||||
Provide the **storage account pubkey** to the `solana storage-account` command to view the recent mining activity from your archiver:
|
||||
Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:
|
||||
|
||||
```bash
|
||||
solana --keypair storage-keypair.json storage-account $STORAGE_IDENTITY
|
||||
solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
|
||||
```
|
||||
|
@ -21,11 +21,11 @@ solana balance --lamports
|
||||
|
||||
## Check Vote Activity
|
||||
|
||||
The `solana vote-account` command displays the recent voting activity from
|
||||
The `solana show-vote-account` command displays the recent voting activity from
|
||||
your validator:
|
||||
|
||||
```bash
|
||||
solana vote-account ~/validator-vote-keypair.json
|
||||
solana show-vote-account ~/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
## Get Cluster Info
|
||||
|
@ -1,14 +1,14 @@
|
||||
# Installing the Validator Software
|
||||
|
||||
Install the Solana release
|
||||
[v0.23.1](https://github.com/solana-labs/solana/releases/tag/v0.23.1) on your
|
||||
[v0.21.0](https://github.com/solana-labs/solana/releases/tag/v0.21.0) on your
|
||||
machine by running:
|
||||
|
||||
```bash
|
||||
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.23.1/install/solana-install-init.sh | sh -s - 0.23.1
|
||||
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.21.0/install/solana-install-init.sh | sh -s - 0.21.0
|
||||
```
|
||||
|
||||
If you are connecting to a different testnet, you can replace `0.23.1` with the
|
||||
If you are connecting to a different testnet, you can replace `0.21.0` with the
|
||||
release tag matching the software version of your desired testnet, or replace it
|
||||
with the named channel `stable`, `beta`, or `edge`.
|
||||
|
||||
@ -16,11 +16,11 @@ The following output indicates a successful update:
|
||||
|
||||
```text
|
||||
looking for latest release
|
||||
downloading v0.23.1 installer
|
||||
downloading v0.21.0 installer
|
||||
Configuration: /home/solana/.config/solana/install/config.yml
|
||||
Active release directory: /home/solana/.local/share/solana/install/active_release
|
||||
* Release version: 0.23.1
|
||||
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.23.1/solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
||||
* Release version: 0.21.0
|
||||
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.21.0/solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
||||
Update successful
|
||||
```
|
||||
|
||||
|
@ -54,7 +54,11 @@ solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote
|
||||
```
|
||||
|
||||
Assuming the node is voting, now you're up and running and generating validator
|
||||
rewards. Rewards are paid automatically on epoch boundaries.
|
||||
rewards. You'll want to periodically redeem/claim your rewards:
|
||||
|
||||
```bash
|
||||
solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
The rewards lamports earned are split between your stake account and the vote
|
||||
account according to the commission rate set in the vote account. Rewards can
|
||||
@ -81,10 +85,11 @@ so it can take an hour or more for stake to come fully online.
|
||||
|
||||
To monitor your validator during its warmup period:
|
||||
|
||||
* View your vote account:`solana vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
|
||||
* `solana validators` displays the current active stake of all validators, including yours
|
||||
* `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs
|
||||
* View your vote account:`solana show-vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your stake account, the delegation preference and details of your stake:`solana show-stake-account ~/validator-stake-keypair.json`
|
||||
* `solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs
|
||||
* `solana show-validators` displays the current active stake of all validators, including yours
|
||||
* `solana show-stake-history ` shows the history of stake warming up and cooling down over recent epochs
|
||||
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`
|
||||
* Once your stake is warmed up, you will see a stake balance listed for your validator on the [Solana Network Explorer](http://explorer.solana.com/validators)
|
||||
|
||||
@ -127,3 +132,6 @@ depending on active stake and the size of your stake.
|
||||
|
||||
Note that a stake account may only be used once, so after deactivation, use the
|
||||
cli's `withdraw-stake` command to recover the previously staked lamports.
|
||||
|
||||
Be sure to redeem your credits before withdrawing all your lamports. Once the account is fully withdrawn, the account is destroyed.
|
||||
|
@ -6,7 +6,7 @@ The solana cli includes `get` and `set` configuration commands to automatically
|
||||
set the `--url` argument for cli commands. For example:
|
||||
|
||||
```bash
|
||||
solana config set --url http://testnet.solana.com:8899
|
||||
solana set --url http://testnet.solana.com:8899
|
||||
```
|
||||
|
||||
\(You can always override the set configuration by explicitly passing the
|
||||
@ -18,7 +18,7 @@ Before attaching a validator node, sanity check that the cluster is accessible
|
||||
to your machine by fetching the transaction count:
|
||||
|
||||
```bash
|
||||
solana transaction-count
|
||||
solana get-transaction-count
|
||||
```
|
||||
|
||||
Inspect the network explorer at
|
||||
@ -100,7 +100,7 @@ Now that you have a keypair, set the solana configuration to use your validator
|
||||
keypair for all following commands:
|
||||
|
||||
```bash
|
||||
solana config set --keypair ~/validator-keypair.json
|
||||
solana set --keypair ~/validator-keypair.json
|
||||
```
|
||||
|
||||
You should see the following output:
|
||||
|
@ -26,7 +26,7 @@ A preimage resistant [hash](terminology.md#hash) of the [ledger](terminology.md#
|
||||
|
||||
The number of [blocks](terminology.md#block) beneath the current block. The first block after the [genesis block](terminology.md#genesis-block) has height one.
|
||||
|
||||
## bootstrap validator
|
||||
## bootstrap leader
|
||||
|
||||
The first [validator](terminology.md#validator) to produce a [block](terminology.md#block).
|
||||
|
||||
@ -112,10 +112,6 @@ The configuration file that prepares the [ledger](terminology.md#ledger) for the
|
||||
|
||||
A digital fingerprint of a sequence of bytes.
|
||||
|
||||
## inflation
|
||||
|
||||
An increase in token supply over time used to fund rewards for validation and replication and to fund continued development of Solana.
|
||||
|
||||
## instruction
|
||||
|
||||
The smallest unit of a [program](terminology.md#program) that a [client](terminology.md#client) can include in a [transaction](terminology.md#transaction).
|
||||
|
@ -1,79 +1,17 @@
|
||||
# Anatomy of a Transaction
|
||||
|
||||
This chapter documents the binary format of a transaction.
|
||||
Transactions encode lists of instructions that are executed sequentially, and only committed if all the instructions complete successfully. All account updates are reverted upon the failure of a transaction. Each transaction details the accounts used, including which must sign and which are read only, a recent blockhash, the instructions, and any signatures.
|
||||
|
||||
## Transaction Format
|
||||
## Accounts and Signatures
|
||||
|
||||
A transaction contains a [compact-array](#compact-array-format) of signatures,
|
||||
followed by a [message](#message-format). Each item in the signatures array is
|
||||
a [digital signature](#signature-format) of the given message. The Solana
|
||||
runtime verifies that the number of signatures matches the number in the first
|
||||
8 bits of the [message header](#message-header-format). It also verifies that
|
||||
each signature was signed by the private key corresponding to the public key at
|
||||
the same index in the message's account addresses array.
|
||||
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.
|
||||
|
||||
### Signature Format
|
||||
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.
|
||||
|
||||
Each digital signature is in the ed25519 binary format and consumes 64 bytes.
|
||||
## Recent Blockhash
|
||||
|
||||
A Transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
|
||||
|
||||
## Message Format
|
||||
## Instructions
|
||||
|
||||
A message contains a [header](#message-header-format), followed by a
|
||||
compact-array of [account addresses](#account-addresses-format), followed by a
|
||||
recent [blockhash](#blockhash-format), followed by a compact-array of
|
||||
[instructions](#instruction-format).
|
||||
|
||||
### Message Header Format
|
||||
|
||||
The message header contains three unsigned 8-bit values. The first value is the
|
||||
number of required signatures in the containing transaction. The second value
|
||||
is the number of those corresponding account addresses that are read-only. The
|
||||
third value in the message header is the number of read-only account addresses
|
||||
not requiring signatures.
|
||||
|
||||
### Account Addresses Format
|
||||
|
||||
The addresses that require signatures appear at the beginning of the account
|
||||
address array, with addresses requesting write access first and read-only
|
||||
accounts following. The addresses that do not require signatures follow the
|
||||
addresses that do, again with read-write accounts first and read-only accounts
|
||||
following.
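That ordering rule is easy to express directly; the sketch below uses an illustrative `AccountKey` type rather than an SDK type:

```rust
/// Illustrative stand-in for one entry in the account address array.
struct AccountKey {
    pubkey: [u8; 32],
    is_signer: bool,
    is_writable: bool,
}

/// Order keys the way the message expects: signers before non-signers, and
/// within each group writable keys before read-only keys.
fn order_account_keys(mut keys: Vec<AccountKey>) -> Vec<AccountKey> {
    // `false` sorts before `true`, so negate the flags to put signers and
    // writable accounts first.
    keys.sort_by_key(|k| (!k.is_signer, !k.is_writable));
    keys
}
```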
### Blockhash Format
|
||||
|
||||
A blockhash contains a 32-byte SHA-256 hash. It is used to indicate when a
|
||||
client last observed the ledger. Validators will reject transactions when the
|
||||
blockhash is too old.
|
||||
|
||||
|
||||
## Instruction Format
|
||||
|
||||
An instruction contains a program ID index, followed by a compact-array of
|
||||
account address indexes, followed by a compact-array of opaque 8-bit data. The
|
||||
program ID index is used to identify an on-chain program that can interpret the
|
||||
opaque data. The program ID index is an unsigned 8-bit index to an account
|
||||
address in the message's array of account addresses. The account address
|
||||
indexes are each an unsigned 8-bit index into that same array.
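Summarizing the header and instruction layouts described above as plain Rust types; the field names are illustrative and may not match the SDK's own definitions exactly:

```rust
/// The three unsigned 8-bit values at the start of every message.
struct MessageHeader {
    num_required_signatures: u8,
    num_readonly_signed_accounts: u8,
    num_readonly_unsigned_accounts: u8,
}

/// One instruction inside a message. Each index is an unsigned 8-bit offset
/// into the message's account address array; `data` is opaque to the runtime.
struct CompiledInstruction {
    program_id_index: u8,
    account_indexes: Vec<u8>, // serialized as a compact-array
    data: Vec<u8>,            // serialized as a compact-array
}
```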
## Compact-Array Format
|
||||
|
||||
A compact-array is serialized as the array length, followed by each array item.
|
||||
The array length is a special multi-byte encoding called compact-u16.
|
||||
|
||||
### Compact-u16 Format
|
||||
|
||||
A compact-u16 is a multi-byte encoding of 16 bits. The first byte contains the
|
||||
lower 7 bits of the value in its lower 7 bits. If the value is above 0x7f, the
|
||||
high bit is set and the next 7 bits of the value are placed into the lower 7
|
||||
bits of a second byte. If the value is above 0x3fff, the high bit is set and
|
||||
the remaining 2 bits of the value are placed into the lower 2 bits of a third
|
||||
byte.
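A small sanity check of that encoding rule, written from the description above rather than taken from the SDK source:

```rust
/// Encode a u16 as a compact-u16: 7 value bits per byte, low bits first,
/// with the high bit of each byte marking that another byte follows.
fn encode_compact_u16(mut value: u16) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            break;
        }
        out.push(byte | 0x80);
    }
    out
}

fn main() {
    assert_eq!(encode_compact_u16(0x0000), [0x00u8]);
    assert_eq!(encode_compact_u16(0x007f), [0x7fu8]);             // fits in one byte
    assert_eq!(encode_compact_u16(0x0080), [0x80u8, 0x01]);       // needs a second byte
    assert_eq!(encode_compact_u16(0x3fff), [0xffu8, 0x7f]);
    assert_eq!(encode_compact_u16(0x4000), [0x80u8, 0x80, 0x01]); // needs a third byte
}
```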
## Account Address Format
|
||||
|
||||
An account address is 32-bytes of arbitrary data. When the address requires a
|
||||
digital signature, the runtime interprets it as the public key of an ed25519
|
||||
keypair.
|
||||
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
|
||||
|
@ -1,24 +0,0 @@
|
||||
[package]
|
||||
name = "solana-chacha-cuda"
|
||||
version = "0.23.2"
|
||||
description = "Solana Chacha Cuda APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.2" }
|
||||
solana-chacha = { path = "../chacha", version = "0.23.2" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.2" }
|
||||
solana-logger = { path = "../logger", version = "0.23.2" }
|
||||
solana-perf = { path = "../perf", version = "0.23.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
||||
[lib]
|
||||
name = "solana_chacha_cuda"
|
@ -1,8 +0,0 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate hex_literal;
|
||||
|
||||
pub mod chacha_cuda;
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-sys"
|
||||
version = "0.23.2"
|
||||
version = "0.22.3"
|
||||
description = "Solana chacha-sys"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -9,4 +9,4 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[build-dependencies]
|
||||
cc = "1.0.49"
|
||||
cc = "1.0.48"
|
||||
|
1
chacha/.gitignore
vendored
@ -1 +0,0 @@
|
||||
/farf/
|
@ -1,25 +0,0 @@
|
||||
[package]
|
||||
name = "solana-chacha"
|
||||
version = "0.23.2"
|
||||
description = "Solana Chacha APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.2" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.2" }
|
||||
solana-logger = { path = "../logger", version = "0.23.2" }
|
||||
solana-perf = { path = "../perf", version = "0.23.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.1"
|
||||
|
||||
[lib]
|
||||
name = "solana_chacha"
|
@ -1,8 +0,0 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate hex_literal;
|
||||
|
||||
pub mod chacha;
|
@ -29,7 +29,7 @@ Start a local cluster and run sanity on it
|
||||
-x - Add an extra validator (may be supplied multiple times)
|
||||
-r - Select the RPC endpoint hosted by a node that starts as
|
||||
a validator node. If unspecified the RPC endpoint hosted by
|
||||
the bootstrap validator will be used.
|
||||
the bootstrap leader will be used.
|
||||
-c - Reuse existing node/ledger configuration from a previous sanity
|
||||
run
|
||||
|
||||
@ -74,7 +74,7 @@ source multinode-demo/common.sh
|
||||
|
||||
nodes=(
|
||||
"multinode-demo/faucet.sh"
|
||||
"multinode-demo/bootstrap-validator.sh \
|
||||
"multinode-demo/bootstrap-leader.sh \
|
||||
--no-restart \
|
||||
--init-complete-file init-complete-node1.log \
|
||||
--dynamic-port-range 8000-8050"
|
||||
@ -170,7 +170,7 @@ startNodes() {
|
||||
logs+=("$(getNodeLogFile "$i" "$cmd")")
|
||||
fi
|
||||
|
||||
# 1 == bootstrap validator, wait until it boots before starting
|
||||
# 1 == bootstrap leader, wait until it boots before starting
|
||||
# other validators
|
||||
if [[ "$i" -eq 1 ]]; then
|
||||
SECONDS=
|
||||
@ -178,8 +178,8 @@ startNodes() {
|
||||
|
||||
(
|
||||
set -x
|
||||
$solana_cli --keypair config/bootstrap-validator/identity-keypair.json \
|
||||
--url http://127.0.0.1:8899 genesis-hash
|
||||
$solana_cli --keypair config/bootstrap-leader/identity-keypair.json \
|
||||
--url http://127.0.0.1:8899 get-genesis-hash
|
||||
) | tee genesis-hash.log
|
||||
maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"
|
||||
fi
|
||||
@ -277,7 +277,7 @@ rollingNodeRestart() {
|
||||
}
|
||||
|
||||
verifyLedger() {
|
||||
for ledger in bootstrap-validator validator; do
|
||||
for ledger in bootstrap-leader validator; do
|
||||
echo "--- $ledger ledger verification"
|
||||
(
|
||||
set -x
|
||||
@ -331,7 +331,7 @@ while [[ $iteration -le $iterations ]]; do
|
||||
rm -rf $client_keypair
|
||||
) || flag_error
|
||||
|
||||
echo "--- RPC API: bootstrap-validator getTransactionCount ($iteration)"
|
||||
echo "--- RPC API: bootstrap-leader getTransactionCount ($iteration)"
|
||||
(
|
||||
set -x
|
||||
curl --retry 5 --retry-delay 2 --retry-connrefused \
|
||||
@ -351,7 +351,7 @@ while [[ $iteration -le $iterations ]]; do
|
||||
http://localhost:18899
|
||||
) || flag_error
|
||||
|
||||
# Verify transaction count as reported by the bootstrap-validator node is advancing
|
||||
# Verify transaction count as reported by the bootstrap-leader node is advancing
|
||||
transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
|
||||
if [[ -n $lastTransactionCount ]]; then
|
||||
echo "--- Transaction count check: $lastTransactionCount < $transactionCount"
|
||||
|
@ -20,7 +20,6 @@ declare prints=(
|
||||
declare print_free_tree=(
|
||||
'core/src'
|
||||
'faucet/src'
|
||||
'ledger/src'
|
||||
'metrics/src'
|
||||
'net-utils/src'
|
||||
'runtime/src'
|
||||
|
@ -8,6 +8,7 @@ me=$(basename "$0")
|
||||
echo --- update gitbook-cage
|
||||
if [[ -n $CI_BRANCH ]]; then
|
||||
(
|
||||
|
||||
set -x
|
||||
(
|
||||
. ci/rust-version.sh stable
|
||||
@ -24,8 +25,74 @@ if [[ -n $CI_BRANCH ]]; then
|
||||
git reset --hard HEAD~
|
||||
fi
|
||||
)
|
||||
else
|
||||
echo CI_BRANCH not set
|
||||
fi
|
||||
|
||||
|
||||
source ci/rust-version.sh stable
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
if [[ -n $PUBLISH_BOOK_TAG ]]; then
|
||||
CURRENT_TAG="$(git describe --tags)"
|
||||
COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"
|
||||
|
||||
# book is manually published at a specified release tag
|
||||
if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
|
||||
(
|
||||
cat <<EOF
|
||||
steps:
|
||||
- trigger: "$BUILDKITE_PIPELINE_SLUG"
|
||||
async: true
|
||||
build:
|
||||
message: "$BUILDKITE_MESSAGE"
|
||||
commit: "$COMMIT_TO_PUBLISH"
|
||||
env:
|
||||
PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
|
||||
EOF
|
||||
) | buildkite-agent pipeline upload
|
||||
exit 0
|
||||
fi
|
||||
repo=git@github.com:solana-labs/book.git
|
||||
BOOK="book"
|
||||
else
|
||||
# book-edge and book-beta are published automatically on the tip of the branch
|
||||
case $CHANNEL in
|
||||
edge)
|
||||
repo=git@github.com:solana-labs/book-edge.git
|
||||
;;
|
||||
beta)
|
||||
repo=git@github.com:solana-labs/book-beta.git
|
||||
;;
|
||||
*)
|
||||
echo "--- publish skipped"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
BOOK=$CHANNEL
|
||||
fi
|
||||
|
||||
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"
|
||||
|
||||
echo --- create book repo
|
||||
(
|
||||
set -x
|
||||
cd book/html/
|
||||
git init .
|
||||
git add ./* ./.nojekyll
|
||||
git config user.email maintainers@solana.com
|
||||
git config user.name "$me"
|
||||
git commit -m "${CI_COMMIT:-local}"
|
||||
)
|
||||
|
||||
echo "--- publish $BOOK"
|
||||
(
|
||||
cd book/html/
|
||||
git remote add origin $repo
|
||||
git fetch origin master
|
||||
if ! git diff HEAD origin/master --quiet; then
|
||||
git push -f origin HEAD:master
|
||||
else
|
||||
echo "Content unchanged, publish skipped"
|
||||
fi
|
||||
)
|
||||
|
||||
exit 0
|
||||
|
@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
# Look for failed mergify.io backports
|
||||
_ git show HEAD --check --oneline
|
||||
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
|
||||
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
|
||||
@ -22,7 +19,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
|
||||
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
|
||||
|
||||
_ cargo +"$rust_stable" audit --version
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031
|
||||
_ ci/nits.sh
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ book/build.sh
|
||||
@ -30,7 +27,7 @@ _ ci/check-ssh-keys.sh
|
||||
|
||||
{
|
||||
cd programs/bpf
|
||||
_ cargo +"$rust_stable" audit
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0031
|
||||
for project in rust/*/ ; do
|
||||
echo "+++ do_bpf_checks $project"
|
||||
(
|
||||
|
@ -86,7 +86,7 @@ test-stable-perf)
|
||||
fi
|
||||
|
||||
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
|
||||
_ cargo +"$rust_stable" test --package solana-chacha-cuda --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
|
||||
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
|
||||
;;
|
||||
test-move)
|
||||
ci/affects-files.sh \
|
||||
|
@ -142,7 +142,6 @@ testnet-beta|testnet-beta-perf)
|
||||
testnet)
|
||||
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
|
||||
CHANNEL_BRANCH=$STABLE_CHANNEL
|
||||
export CLOUDSDK_CORE_PROJECT=testnet-solana-com
|
||||
;;
|
||||
testnet-perf)
|
||||
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
|
||||
@ -158,7 +157,6 @@ tds)
|
||||
: "${TDS_CHANNEL_OR_TAG:=edge}"
|
||||
CHANNEL_OR_TAG="$TDS_CHANNEL_OR_TAG"
|
||||
CHANNEL_BRANCH="$CI_BRANCH"
|
||||
export CLOUDSDK_CORE_PROJECT=tour-de-sol
|
||||
;;
|
||||
*)
|
||||
echo "Error: Invalid TESTNET=$TESTNET"
|
||||
@ -206,7 +204,6 @@ steps:
|
||||
TESTNET_DB_HOST: "$TESTNET_DB_HOST"
|
||||
GCE_NODE_COUNT: "$GCE_NODE_COUNT"
|
||||
GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
|
||||
RUST_LOG: "$RUST_LOG"
|
||||
EOF
|
||||
) | buildkite-agent pipeline upload
|
||||
exit 0
|
||||
@ -378,7 +375,7 @@ deploy() {
|
||||
(
|
||||
set -x
|
||||
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
|
||||
-t "$CHANNEL_OR_TAG" -n 0 -c 0 -u -P \
|
||||
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
|
||||
-a testnet-solana-com --letsencrypt testnet.solana.com \
|
||||
--limit-ledger-size \
|
||||
${skipCreate:+-e} \
|
||||
@ -389,7 +386,7 @@ deploy() {
|
||||
(
|
||||
echo "--- net.sh update"
|
||||
set -x
|
||||
time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx #--platform windows
|
||||
time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx --platform windows
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
@ -455,10 +452,6 @@ deploy() {
|
||||
TDS_CLIENT_COUNT="1"
|
||||
fi
|
||||
|
||||
if [[ -n $TDS_SLOTS_PER_EPOCH ]]; then
|
||||
maybeSlotsPerEpoch=(--slots-per-epoch "$TDS_SLOTS_PER_EPOCH")
|
||||
fi
|
||||
|
||||
if [[ -z $ENABLE_GPU ]]; then
|
||||
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
|
||||
elif [[ $ENABLE_GPU == skip ]]; then
|
||||
@ -544,7 +537,7 @@ deploy() {
|
||||
${maybeInternalNodesLamports} \
|
||||
${maybeExternalAccountsFile} \
|
||||
--target-lamports-per-signature 0 \
|
||||
"${maybeSlotsPerEpoch[@]}" \
|
||||
--slots-per-epoch 4096 \
|
||||
${maybeAdditionalDisk}
|
||||
)
|
||||
;;
|
||||
|
@ -60,7 +60,7 @@ trap shutdown EXIT INT
|
||||
set -x
|
||||
for zone in "$@"; do
|
||||
echo "--- $cloudProvider config [$zone]"
|
||||
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -n 1 -p "$netName" -z "$zone"
|
||||
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -p "$netName" -z "$zone"
|
||||
net/init-metrics.sh -e
|
||||
echo "+++ $cloudProvider.sh info"
|
||||
net/"$cloudProvider".sh info
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "0.23.2"
|
||||
version = "0.22.3"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,8 +12,8 @@ edition = "2018"
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
semver = "0.9.0"
|
||||
solana-sdk = { path = "../sdk", version = "0.23.2" }
|
||||
tiny-bip39 = "0.7.0"
|
||||
solana-sdk = { path = "../sdk", version = "0.22.3" }
|
||||
tiny-bip39 = "0.6.2"
|
||||
url = "2.1.0"
|
||||
chrono = "0.4"
|
||||
|
||||
|
@ -64,20 +64,6 @@ pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
|
||||
value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey()))
|
||||
}
|
||||
|
||||
pub fn pubkeys_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Pubkey>> {
|
||||
matches.values_of(name).map(|values| {
|
||||
values
|
||||
.map(|value| {
|
||||
value.parse::<Pubkey>().unwrap_or_else(|_| {
|
||||
read_keypair_file(value)
|
||||
.expect("read_keypair_file failed")
|
||||
.pubkey()
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
// Return pubkey/signature pairs for a string of the form pubkey=signature
|
||||
pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> {
|
||||
matches.values_of(name).map(|values| {
|
||||
@ -168,7 +154,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_keypair_of() {
|
||||
let keypair = Keypair::new();
|
||||
let outfile = tmp_file_path("test_keypair_of.json", &keypair.pubkey());
|
||||
let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey());
|
||||
let _ = write_keypair_file(&keypair, &outfile).unwrap();
|
||||
|
||||
let matches = app()
|
||||
@ -192,7 +178,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_pubkey_of() {
|
||||
let keypair = Keypair::new();
|
||||
let outfile = tmp_file_path("test_pubkey_of.json", &keypair.pubkey());
|
||||
let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey());
|
||||
let _ = write_keypair_file(&keypair, &outfile).unwrap();
|
||||
|
||||
let matches = app()
|
||||
@ -216,26 +202,6 @@ mod tests {
|
||||
fs::remove_file(&outfile).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pubkeys_of() {
|
||||
let keypair = Keypair::new();
|
||||
let outfile = tmp_file_path("test_pubkeys_of.json", &keypair.pubkey());
|
||||
let _ = write_keypair_file(&keypair, &outfile).unwrap();
|
||||
|
||||
let matches = app().clone().get_matches_from(vec![
|
||||
"test",
|
||||
"--multiple",
|
||||
&keypair.pubkey().to_string(),
|
||||
"--multiple",
|
||||
&outfile,
|
||||
]);
|
||||
assert_eq!(
|
||||
pubkeys_of(&matches, "multiple"),
|
||||
Some(vec![keypair.pubkey(), keypair.pubkey()])
|
||||
);
|
||||
fs::remove_file(&outfile).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pubkeys_sigs_of() {
|
||||
let key1 = Pubkey::new_rand();
|
||||
|
@@ -43,11 +43,6 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
    is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}

// Return an error if string cannot be parsed as pubkey or keypair file or keypair ask keyword
pub fn is_pubkey_or_keypair_or_ask_keyword(string: String) -> Result<(), String> {
    is_pubkey(string.clone()).or_else(|_| is_keypair_or_ask_keyword(string))
}

// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
    let mut signer = string.split('=');
@@ -21,7 +21,7 @@ pub const ASK_KEYWORD: &str = "ASK";
pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant {
    long: "ask-seed-phrase",
    name: "ask_seed_phrase",
    help: "Recover a keypair using a seed phrase and optional passphrase",
    help: "Securely recover a keypair using a seed phrase and optional passphrase",
};

pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
@@ -80,7 +80,7 @@ pub fn keypair_from_seed_phrase(
        keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
    } else {
        let sanitized = sanitize_seed_phrase(seed_phrase);
        let mnemonic = Mnemonic::from_phrase(&sanitized, Language::English)?;
        let mnemonic = Mnemonic::from_phrase(sanitized, Language::English)?;
        let passphrase = prompt_passphrase(&passphrase_prompt)?;
        let seed = Seed::new(&mnemonic, &passphrase);
        keypair_from_seed(seed.as_bytes())?
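The keypair.rs hunk above is the seed-phrase recovery path: the phrase is sanitized, parsed as an English BIP-39 mnemonic, stretched into a seed together with an optional passphrase, and the seed bytes are handed to the ed25519 keypair constructor. A condensed sketch under those assumptions, using the `tiny-bip39` (imported as `bip39`) and `solana-sdk` crates referenced in the Cargo.toml diffs; error handling is simplified and the function name is illustrative:

```rust
use bip39::{Language, Mnemonic, Seed};
use solana_sdk::signature::{keypair_from_seed, Keypair};

/// Recover a keypair from a BIP-39 seed phrase and an (optionally empty)
/// passphrase, mirroring the flow in the hunk above.
fn keypair_from_phrase_sketch(
    seed_phrase: &str,
    passphrase: &str,
) -> Result<Keypair, Box<dyn std::error::Error>> {
    // Parse the phrase as an English mnemonic (word count and checksum are
    // validated here).
    let mnemonic = Mnemonic::from_phrase(seed_phrase, Language::English)
        .map_err(|err| format!("invalid seed phrase: {}", err))?;
    // Stretch mnemonic + passphrase into a 64-byte seed (PBKDF2, per BIP-39).
    let seed = Seed::new(&mnemonic, passphrase);
    // solana-sdk uses the leading bytes of the seed as ed25519 key material.
    let keypair = keypair_from_seed(seed.as_bytes())?;
    Ok(keypair)
}
```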
@@ -1,4 +0,0 @@
#[macro_use]
extern crate lazy_static;

pub mod config;
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.2"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,35 +17,35 @@ criterion-stats = "0.3.0"
ctrlc = { version = "3.1.3", features = ["termination"] }
console = "0.9.1"
dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8"
indicatif = "0.13.0"
humantime = "2.0.0"
humantime = "1.3.0"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tls"] }
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.23.2" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.2" }
solana-cli-config = { path = "../cli-config", version = "0.23.2" }
solana-client = { path = "../client", version = "0.23.2" }
solana-config-program = { path = "../programs/config", version = "0.23.2" }
solana-faucet = { path = "../faucet", version = "0.23.2" }
solana-logger = { path = "../logger", version = "0.23.2" }
solana-net-utils = { path = "../net-utils", version = "0.23.2" }
solana-runtime = { path = "../runtime", version = "0.23.2" }
solana-sdk = { path = "../sdk", version = "0.23.2" }
solana-stake-program = { path = "../programs/stake", version = "0.23.2" }
solana-storage-program = { path = "../programs/storage", version = "0.23.2" }
solana-vote-program = { path = "../programs/vote", version = "0.23.2" }
solana-vote-signer = { path = "../vote-signer", version = "0.23.2" }
url = "2.1.1"
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.3" }
url = "2.1.0"

[dev-dependencies]
solana-core = { path = "../core", version = "0.23.2" }
solana-budget-program = { path = "../programs/budget", version = "0.23.2" }
solana-core = { path = "../core", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
tempfile = "3.1.0"

[[bin]]
491 cli/src/cli.rs
@ -1,8 +1,7 @@
|
||||
use crate::{
|
||||
cluster_query::*,
|
||||
display::{println_name_value, println_signers},
|
||||
nonce::{self, *},
|
||||
offline::*,
|
||||
nonce::*,
|
||||
stake::*,
|
||||
storage::*,
|
||||
validator_info::*,
|
||||
@ -32,7 +31,7 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, KeypairUtil, Signature},
|
||||
signature::{Keypair, KeypairUtil, Signature},
|
||||
system_instruction::{create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN},
|
||||
system_transaction,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@ -73,89 +72,6 @@ impl std::ops::Deref for KeypairEq {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SigningAuthority {
|
||||
Online(Keypair),
|
||||
// We hold a random keypair alongside our legit pubkey in order
|
||||
// to generate a placeholder signature in the transaction
|
||||
Offline(Pubkey, Keypair),
|
||||
}
|
||||
|
||||
impl SigningAuthority {
|
||||
pub fn new_from_matches(
|
||||
matches: &ArgMatches<'_>,
|
||||
name: &str,
|
||||
signers: Option<&[(Pubkey, Signature)]>,
|
||||
) -> Result<Option<Self>, CliError> {
|
||||
if matches.is_present(name) {
|
||||
keypair_of(matches, name)
|
||||
.map(|keypair| keypair.into())
|
||||
.or_else(|| {
|
||||
pubkey_of(matches, name)
|
||||
.filter(|pubkey| {
|
||||
signers
|
||||
.and_then(|signers| {
|
||||
signers.iter().find(|(signer, _sig)| *signer == *pubkey)
|
||||
})
|
||||
.is_some()
|
||||
})
|
||||
.map(|pubkey| pubkey.into())
|
||||
})
|
||||
.ok_or_else(|| CliError::BadParameter("Invalid authority".to_string()))
|
||||
.map(Some)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn keypair(&self) -> &Keypair {
|
||||
match self {
|
||||
SigningAuthority::Online(keypair) => keypair,
|
||||
SigningAuthority::Offline(_pubkey, keypair) => keypair,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
match self {
|
||||
SigningAuthority::Online(keypair) => keypair.pubkey(),
|
||||
SigningAuthority::Offline(pubkey, _keypair) => *pubkey,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Keypair> for SigningAuthority {
|
||||
fn from(keypair: Keypair) -> Self {
|
||||
SigningAuthority::Online(keypair)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Pubkey> for SigningAuthority {
|
||||
fn from(pubkey: Pubkey) -> Self {
|
||||
SigningAuthority::Offline(pubkey, keypair_from_seed(pubkey.as_ref()).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for SigningAuthority {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
match (self, other) {
|
||||
(SigningAuthority::Online(keypair1), SigningAuthority::Online(keypair2)) => {
|
||||
keypair1.pubkey() == keypair2.pubkey()
|
||||
}
|
||||
(SigningAuthority::Online(keypair), SigningAuthority::Offline(pubkey, _))
|
||||
| (SigningAuthority::Offline(pubkey, _), SigningAuthority::Online(keypair)) => {
|
||||
keypair.pubkey() == *pubkey
|
||||
}
|
||||
(SigningAuthority::Offline(pubkey1, _), SigningAuthority::Offline(pubkey2, _)) => {
|
||||
pubkey1 == pubkey2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
nonce::nonce_authority_arg().requires(NONCE_ARG.name)
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq)]
|
||||
pub struct PayCommand {
|
||||
pub lamports: u64,
|
||||
@ -166,9 +82,9 @@ pub struct PayCommand {
|
||||
pub cancelable: bool,
|
||||
pub sign_only: bool,
|
||||
pub signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
pub blockhash_query: BlockhashQuery,
|
||||
pub blockhash: Option<Hash>,
|
||||
pub nonce_account: Option<Pubkey>,
|
||||
pub nonce_authority: Option<SigningAuthority>,
|
||||
pub nonce_authority: Option<KeypairEq>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@ -198,7 +114,6 @@ pub enum CliCommand {
|
||||
GetTransactionCount {
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
LeaderSchedule,
|
||||
Ping {
|
||||
lamports: u64,
|
||||
interval: Duration,
|
||||
@ -211,29 +126,24 @@ pub enum CliCommand {
|
||||
slot_limit: Option<u64>,
|
||||
},
|
||||
ShowGossip,
|
||||
ShowStakes {
|
||||
use_lamports_unit: bool,
|
||||
vote_account_pubkeys: Option<Vec<Pubkey>>,
|
||||
},
|
||||
ShowValidators {
|
||||
use_lamports_unit: bool,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
nonce_account: Pubkey,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
nonce_authority: Option<KeypairEq>,
|
||||
new_authority: Pubkey,
|
||||
},
|
||||
CreateNonceAccount {
|
||||
nonce_account: KeypairEq,
|
||||
seed: Option<String>,
|
||||
nonce_authority: Option<Pubkey>,
|
||||
lamports: u64,
|
||||
},
|
||||
GetNonce(Pubkey),
|
||||
NewNonce {
|
||||
nonce_account: Pubkey,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
nonce_authority: Option<KeypairEq>,
|
||||
},
|
||||
ShowNonceAccount {
|
||||
nonce_account_pubkey: Pubkey,
|
||||
@ -241,7 +151,7 @@ pub enum CliCommand {
|
||||
},
|
||||
WithdrawFromNonceAccount {
|
||||
nonce_account: Pubkey,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
nonce_authority: Option<KeypairEq>,
|
||||
destination_account_pubkey: Pubkey,
|
||||
lamports: u64,
|
||||
},
|
||||
@ -250,7 +160,6 @@ pub enum CliCommand {
|
||||
// Stake Commands
|
||||
CreateStakeAccount {
|
||||
stake_account: KeypairEq,
|
||||
seed: Option<String>,
|
||||
staker: Option<Pubkey>,
|
||||
withdrawer: Option<Pubkey>,
|
||||
lockup: Lockup,
|
||||
@ -258,36 +167,25 @@ pub enum CliCommand {
|
||||
},
|
||||
DeactivateStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
stake_authority: Option<SigningAuthority>,
|
||||
stake_authority: Option<KeypairEq>,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash_query: BlockhashQuery,
|
||||
blockhash: Option<Hash>,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
nonce_authority: Option<KeypairEq>,
|
||||
},
|
||||
DelegateStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
vote_account_pubkey: Pubkey,
|
||||
stake_authority: Option<SigningAuthority>,
|
||||
stake_authority: Option<KeypairEq>,
|
||||
force: bool,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash_query: BlockhashQuery,
|
||||
blockhash: Option<Hash>,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
},
|
||||
SplitStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
stake_authority: Option<SigningAuthority>,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
split_stake_account: KeypairEq,
|
||||
seed: Option<String>,
|
||||
lamports: u64,
|
||||
nonce_authority: Option<KeypairEq>,
|
||||
},
|
||||
RedeemVoteCredits(Pubkey, Pubkey),
|
||||
ShowStakeHistory {
|
||||
use_lamports_unit: bool,
|
||||
},
|
||||
@ -299,18 +197,13 @@ pub enum CliCommand {
|
||||
stake_account_pubkey: Pubkey,
|
||||
new_authorized_pubkey: Pubkey,
|
||||
stake_authorize: StakeAuthorize,
|
||||
authority: Option<SigningAuthority>,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
authority: Option<KeypairEq>,
|
||||
},
|
||||
WithdrawStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
destination_account_pubkey: Pubkey,
|
||||
lamports: u64,
|
||||
withdraw_authority: Option<SigningAuthority>,
|
||||
withdraw_authority: Option<KeypairEq>,
|
||||
},
|
||||
// Storage Commands
|
||||
CreateStorageAccount {
|
||||
@ -333,7 +226,6 @@ pub enum CliCommand {
|
||||
// Vote Commands
|
||||
CreateVoteAccount {
|
||||
vote_account: KeypairEq,
|
||||
seed: Option<String>,
|
||||
node_pubkey: Pubkey,
|
||||
authorized_voter: Option<Pubkey>,
|
||||
authorized_withdrawer: Option<Pubkey>,
|
||||
@ -343,6 +235,11 @@ pub enum CliCommand {
|
||||
pubkey: Pubkey,
|
||||
use_lamports_unit: bool,
|
||||
},
|
||||
Uptime {
|
||||
pubkey: Pubkey,
|
||||
aggregate: bool,
|
||||
span: Option<u64>,
|
||||
},
|
||||
VoteAuthorize {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_authorized_pubkey: Pubkey,
|
||||
@ -461,32 +358,27 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
command: CliCommand::Fees,
|
||||
require_keypair: false,
|
||||
}),
|
||||
("block-time", Some(matches)) => parse_get_block_time(matches),
|
||||
("epoch-info", Some(matches)) => parse_get_epoch_info(matches),
|
||||
("genesis-hash", Some(_matches)) => Ok(CliCommandInfo {
|
||||
("get-block-time", Some(matches)) => parse_get_block_time(matches),
|
||||
("get-epoch-info", Some(matches)) => parse_get_epoch_info(matches),
|
||||
("get-genesis-hash", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::GetGenesisHash,
|
||||
require_keypair: false,
|
||||
}),
|
||||
("slot", Some(matches)) => parse_get_slot(matches),
|
||||
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::LeaderSchedule,
|
||||
require_keypair: false,
|
||||
}),
|
||||
("get-slot", Some(matches)) => parse_get_slot(matches),
|
||||
("get-transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
("ping", Some(matches)) => parse_cluster_ping(matches),
|
||||
("block-production", Some(matches)) => parse_show_block_production(matches),
|
||||
("gossip", Some(_matches)) => Ok(CliCommandInfo {
|
||||
("show-block-production", Some(matches)) => parse_show_block_production(matches),
|
||||
("show-gossip", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowGossip,
|
||||
require_keypair: false,
|
||||
}),
|
||||
("stakes", Some(matches)) => parse_show_stakes(matches),
|
||||
("validators", Some(matches)) => parse_show_validators(matches),
|
||||
("show-validators", Some(matches)) => parse_show_validators(matches),
|
||||
// Nonce Commands
|
||||
("authorize-nonce-account", Some(matches)) => parse_authorize_nonce_account(matches),
|
||||
("create-nonce-account", Some(matches)) => parse_nonce_create_account(matches),
|
||||
("nonce", Some(matches)) => parse_get_nonce(matches),
|
||||
("get-nonce", Some(matches)) => parse_get_nonce(matches),
|
||||
("new-nonce", Some(matches)) => parse_new_nonce(matches),
|
||||
("nonce-account", Some(matches)) => parse_show_nonce_account(matches),
|
||||
("show-nonce-account", Some(matches)) => parse_show_nonce_account(matches),
|
||||
("withdraw-from-nonce-account", Some(matches)) => {
|
||||
parse_withdraw_from_nonce_account(matches)
|
||||
}
|
||||
@ -500,15 +392,15 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
("delegate-stake", Some(matches)) => parse_stake_delegate_stake(matches),
|
||||
("withdraw-stake", Some(matches)) => parse_stake_withdraw_stake(matches),
|
||||
("deactivate-stake", Some(matches)) => parse_stake_deactivate_stake(matches),
|
||||
("split-stake", Some(matches)) => parse_split_stake(matches),
|
||||
("stake-authorize-staker", Some(matches)) => {
|
||||
parse_stake_authorize(matches, StakeAuthorize::Staker)
|
||||
}
|
||||
("stake-authorize-withdrawer", Some(matches)) => {
|
||||
parse_stake_authorize(matches, StakeAuthorize::Withdrawer)
|
||||
}
|
||||
("stake-account", Some(matches)) => parse_show_stake_account(matches),
|
||||
("stake-history", Some(matches)) => parse_show_stake_history(matches),
|
||||
("redeem-vote-credits", Some(matches)) => parse_redeem_vote_credits(matches),
|
||||
("show-stake-account", Some(matches)) => parse_show_stake_account(matches),
|
||||
("show-stake-history", Some(matches)) => parse_show_stake_history(matches),
|
||||
// Storage Commands
|
||||
("create-archiver-storage-account", Some(matches)) => {
|
||||
parse_storage_create_archiver_account(matches)
|
||||
@ -517,11 +409,17 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
parse_storage_create_validator_account(matches)
|
||||
}
|
||||
("claim-storage-reward", Some(matches)) => parse_storage_claim_reward(matches),
|
||||
("storage-account", Some(matches)) => parse_storage_get_account_command(matches),
|
||||
("show-storage-account", Some(matches)) => parse_storage_get_account_command(matches),
|
||||
// Validator Info Commands
|
||||
("validator-info", Some(matches)) => match matches.subcommand() {
|
||||
("publish", Some(matches)) => parse_validator_info_command(matches),
|
||||
("get", Some(matches)) => parse_get_validator_info_command(matches),
|
||||
("", None) => {
|
||||
eprintln!("{}", matches.usage());
|
||||
Err(CliError::CommandNotRecognized(
|
||||
"no validator-info subcommand given".to_string(),
|
||||
))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
},
|
||||
// Vote Commands
|
||||
@ -533,7 +431,8 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
("vote-authorize-withdrawer", Some(matches)) => {
|
||||
parse_vote_authorize(matches, VoteAuthorize::Withdrawer)
|
||||
}
|
||||
("vote-account", Some(matches)) => parse_vote_get_account_command(matches),
|
||||
("show-vote-account", Some(matches)) => parse_vote_get_account_command(matches),
|
||||
("uptime", Some(matches)) => parse_vote_uptime_command(matches),
|
||||
// Wallet Commands
|
||||
("address", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::Address,
|
||||
@ -617,15 +516,19 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
let timestamp_pubkey = value_of(&matches, "timestamp_pubkey");
|
||||
let witnesses = values_of(&matches, "witness");
|
||||
let cancelable = matches.is_present("cancelable");
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
|
||||
let blockhash_query = BlockhashQuery::new_from_matches(&matches);
|
||||
let sign_only = matches.is_present("sign_only");
|
||||
let signers = pubkeys_sigs_of(&matches, "signer");
|
||||
let blockhash = value_of(&matches, "blockhash");
|
||||
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
|
||||
let nonce_authority = SigningAuthority::new_from_matches(
|
||||
&matches,
|
||||
NONCE_AUTHORITY_ARG.name,
|
||||
signers.as_deref(),
|
||||
)?;
|
||||
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
|
||||
let authority =
|
||||
keypair_of(&matches, NONCE_AUTHORITY_ARG.name).ok_or_else(|| {
|
||||
CliError::BadParameter("Invalid keypair for nonce-authority".into())
|
||||
})?;
|
||||
Some(authority.into())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
@ -637,14 +540,14 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
cancelable,
|
||||
sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
blockhash,
|
||||
nonce_account,
|
||||
nonce_authority,
|
||||
}),
|
||||
require_keypair: true,
|
||||
})
|
||||
}
|
||||
("account", Some(matches)) => {
|
||||
("show-account", Some(matches)) => {
|
||||
let account_pubkey = pubkey_of(matches, "account_pubkey").unwrap();
|
||||
let output_file = matches.value_of("output_file");
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
@ -1016,16 +919,17 @@ fn process_pay(
|
||||
cancelable: bool,
|
||||
sign_only: bool,
|
||||
signers: &Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash_query: &BlockhashQuery,
|
||||
blockhash: Option<Hash>,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<&SigningAuthority>,
|
||||
nonce_authority: Option<&Keypair>,
|
||||
) -> ProcessResult {
|
||||
check_unique_pubkeys(
|
||||
(&config.keypair.pubkey(), "cli keypair".to_string()),
|
||||
(to, "to".to_string()),
|
||||
)?;
|
||||
|
||||
let (blockhash, fee_calculator) = blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
|
||||
let (blockhash, fee_calculator) =
|
||||
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
|
||||
|
||||
let cancelable = if cancelable {
|
||||
Some(config.keypair.pubkey())
|
||||
@ -1035,9 +939,7 @@ fn process_pay(
|
||||
|
||||
if timestamp == None && *witnesses == None {
|
||||
let mut tx = if let Some(nonce_account) = &nonce_account {
|
||||
let nonce_authority: &Keypair = nonce_authority
|
||||
.map(|authority| authority.keypair())
|
||||
.unwrap_or(&config.keypair);
|
||||
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
|
||||
system_transaction::nonced_transfer(
|
||||
&config.keypair,
|
||||
to,
|
||||
@ -1058,11 +960,9 @@ fn process_pay(
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
if let Some(nonce_account) = &nonce_account {
|
||||
let nonce_authority: Pubkey = nonce_authority
|
||||
.map(|authority| authority.pubkey())
|
||||
.unwrap_or_else(|| config.keypair.pubkey());
|
||||
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
|
||||
let nonce_account = rpc_client.get_account(nonce_account)?;
|
||||
check_nonce_account(&nonce_account, &nonce_authority, &blockhash)?;
|
||||
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &blockhash)?;
|
||||
}
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
@ -1273,7 +1173,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::GetTransactionCount { commitment_config } => {
|
||||
process_get_transaction_count(&rpc_client, commitment_config)
|
||||
}
|
||||
CliCommand::LeaderSchedule => process_leader_schedule(&rpc_client),
|
||||
CliCommand::Ping {
|
||||
lamports,
|
||||
interval,
|
||||
@ -1293,14 +1192,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
process_show_block_production(&rpc_client, config, *epoch, *slot_limit)
|
||||
}
|
||||
CliCommand::ShowGossip => process_show_gossip(&rpc_client),
|
||||
CliCommand::ShowStakes {
|
||||
use_lamports_unit,
|
||||
vote_account_pubkeys,
|
||||
} => process_show_stakes(
|
||||
&rpc_client,
|
||||
*use_lamports_unit,
|
||||
vote_account_pubkeys.as_deref(),
|
||||
),
|
||||
CliCommand::ShowValidators { use_lamports_unit } => {
|
||||
process_show_validators(&rpc_client, *use_lamports_unit)
|
||||
}
|
||||
@ -1316,20 +1207,18 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&rpc_client,
|
||||
config,
|
||||
nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
nonce_authority.as_deref(),
|
||||
new_authority,
|
||||
),
|
||||
// Create nonce account
|
||||
CliCommand::CreateNonceAccount {
|
||||
nonce_account,
|
||||
seed,
|
||||
nonce_authority,
|
||||
lamports,
|
||||
} => process_create_nonce_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
nonce_account,
|
||||
seed.clone(),
|
||||
*nonce_authority,
|
||||
*lamports,
|
||||
),
|
||||
@ -1341,7 +1230,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::NewNonce {
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
} => process_new_nonce(&rpc_client, config, nonce_account, nonce_authority.as_ref()),
|
||||
} => process_new_nonce(
|
||||
&rpc_client,
|
||||
config,
|
||||
nonce_account,
|
||||
nonce_authority.as_deref(),
|
||||
),
|
||||
// Show the contents of a nonce account
|
||||
CliCommand::ShowNonceAccount {
|
||||
nonce_account_pubkey,
|
||||
@ -1357,7 +1251,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&rpc_client,
|
||||
config,
|
||||
&nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
nonce_authority.as_deref(),
|
||||
&destination_account_pubkey,
|
||||
*lamports,
|
||||
),
|
||||
@ -1374,7 +1268,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
// Create stake account
|
||||
CliCommand::CreateStakeAccount {
|
||||
stake_account,
|
||||
seed,
|
||||
staker,
|
||||
withdrawer,
|
||||
lockup,
|
||||
@ -1383,30 +1276,30 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&rpc_client,
|
||||
config,
|
||||
stake_account,
|
||||
seed,
|
||||
staker,
|
||||
withdrawer,
|
||||
lockup,
|
||||
*lamports,
|
||||
),
|
||||
// Deactivate stake account
|
||||
CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
ref stake_authority,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash_query,
|
||||
blockhash,
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
} => process_deactivate_stake_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
stake_authority.as_ref(),
|
||||
stake_authority.as_deref(),
|
||||
*sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
*blockhash,
|
||||
*nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
nonce_authority.as_deref(),
|
||||
),
|
||||
CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@ -1415,7 +1308,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
force,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash_query,
|
||||
blockhash,
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
} => process_delegate_stake(
|
||||
@ -1423,39 +1316,22 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&vote_account_pubkey,
|
||||
stake_authority.as_ref(),
|
||||
stake_authority.as_deref(),
|
||||
*force,
|
||||
*sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
*blockhash,
|
||||
*nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
),
|
||||
CliCommand::SplitStake {
|
||||
stake_account_pubkey,
|
||||
ref stake_authority,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
split_stake_account,
|
||||
seed,
|
||||
lamports,
|
||||
} => process_split_stake(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
stake_authority.as_ref(),
|
||||
*sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
split_stake_account,
|
||||
seed,
|
||||
*lamports,
|
||||
nonce_authority.as_deref(),
|
||||
),
|
||||
CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => {
|
||||
process_redeem_vote_credits(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&vote_account_pubkey,
|
||||
)
|
||||
}
|
||||
CliCommand::ShowStakeAccount {
|
||||
pubkey: stake_account_pubkey,
|
||||
use_lamports_unit,
|
||||
@ -1473,23 +1349,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
new_authorized_pubkey,
|
||||
stake_authorize,
|
||||
ref authority,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
} => process_stake_authorize(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&new_authorized_pubkey,
|
||||
*stake_authorize,
|
||||
authority.as_ref(),
|
||||
*sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
*nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
authority.as_deref(),
|
||||
),
|
||||
|
||||
CliCommand::WithdrawStake {
|
||||
@ -1503,7 +1369,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&stake_account_pubkey,
|
||||
&destination_account_pubkey,
|
||||
*lamports,
|
||||
withdraw_authority.as_ref(),
|
||||
withdraw_authority.as_deref(),
|
||||
),
|
||||
|
||||
// Storage Commands
|
||||
@ -1557,7 +1423,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
// Create vote account
|
||||
CliCommand::CreateVoteAccount {
|
||||
vote_account,
|
||||
seed,
|
||||
node_pubkey,
|
||||
authorized_voter,
|
||||
authorized_withdrawer,
|
||||
@ -1566,7 +1431,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&rpc_client,
|
||||
config,
|
||||
vote_account,
|
||||
seed,
|
||||
&node_pubkey,
|
||||
authorized_voter,
|
||||
authorized_withdrawer,
|
||||
@ -1603,6 +1467,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&new_identity_pubkey,
|
||||
authorized_voter,
|
||||
),
|
||||
CliCommand::Uptime {
|
||||
pubkey: vote_account_pubkey,
|
||||
aggregate,
|
||||
span,
|
||||
} => process_uptime(&rpc_client, config, &vote_account_pubkey, *aggregate, *span),
|
||||
|
||||
// Wallet Commands
|
||||
|
||||
@ -1654,7 +1523,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
cancelable,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash_query,
|
||||
blockhash,
|
||||
nonce_account,
|
||||
ref nonce_authority,
|
||||
}) => process_pay(
|
||||
@ -1668,9 +1537,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*cancelable,
|
||||
*sign_only,
|
||||
signers,
|
||||
blockhash_query,
|
||||
*blockhash,
|
||||
*nonce_account,
|
||||
nonce_authority.as_ref(),
|
||||
nonce_authority.as_deref(),
|
||||
),
|
||||
CliCommand::ShowAccount {
|
||||
pubkey,
|
||||
@ -1904,7 +1773,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("create-address-with-seed")
|
||||
.about("Generate a derived account address with a seed")
|
||||
.about("Generate a dervied account address with a seed")
|
||||
.arg(
|
||||
Arg::with_name("seed")
|
||||
.index(1)
|
||||
@ -2006,9 +1875,46 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.long("cancelable")
|
||||
.takes_value(false),
|
||||
)
|
||||
.offline_args()
|
||||
.arg(nonce_arg())
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(
|
||||
Arg::with_name("sign_only")
|
||||
.long("sign-only")
|
||||
.takes_value(false)
|
||||
.help("Sign the transaction offline"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(NONCE_ARG.name)
|
||||
.long(NONCE_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("PUBKEY")
|
||||
.requires("blockhash")
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help(NONCE_ARG.help),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(NONCE_AUTHORITY_ARG.name)
|
||||
.long(NONCE_AUTHORITY_ARG.long)
|
||||
.takes_value(true)
|
||||
.requires(NONCE_ARG.name)
|
||||
.validator(is_keypair_or_ask_keyword)
|
||||
.help(NONCE_AUTHORITY_ARG.help),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("signer")
|
||||
.long("signer")
|
||||
.value_name("PUBKEY=BASE58_SIG")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_sig)
|
||||
.multiple(true)
|
||||
.help("Provide a public-key/signature pair for the transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("blockhash")
|
||||
.long("blockhash")
|
||||
.value_name("BLOCKHASH")
|
||||
.takes_value(true)
|
||||
.validator(is_hash)
|
||||
.help("Use the supplied blockhash"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("send-signature")
|
||||
@ -2060,9 +1966,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("account")
|
||||
SubCommand::with_name("show-account")
|
||||
.about("Show the contents of an account")
|
||||
.alias("account")
|
||||
.arg(
|
||||
Arg::with_name("account_pubkey")
|
||||
.index(1)
|
||||
@ -2103,7 +2008,6 @@ mod tests {
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
nonce_state::{Meta as NonceMeta, NonceState},
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, write_keypair_file},
|
||||
system_program,
|
||||
transaction::TransactionError,
|
||||
@ -2124,13 +2028,6 @@ mod tests {
|
||||
path
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signing_authority_dummy_keypairs() {
|
||||
let signing_authority: SigningAuthority = Pubkey::new(&[1u8; 32]).into();
|
||||
assert_eq!(signing_authority, Pubkey::new(&[1u8; 32]).into());
|
||||
assert_ne!(signing_authority, Pubkey::new(&[2u8; 32]).into());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cli_parse_command() {
|
||||
let test_commands = app("test", "desc", "version");
|
||||
@ -2394,16 +2291,12 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ sign-only
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
@ -2412,7 +2305,6 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
sign_only: true,
|
||||
..PayCommand::default()
|
||||
}),
|
||||
@ -2430,8 +2322,6 @@ mod tests {
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
]);
|
||||
@ -2441,7 +2331,6 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
..PayCommand::default()
|
||||
}),
|
||||
@ -2459,8 +2348,6 @@ mod tests {
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
"--signer",
|
||||
@ -2472,7 +2359,6 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
..PayCommand::default()
|
||||
}),
|
||||
@ -2481,6 +2367,8 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ Blockhash
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
@ -2496,7 +2384,7 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash: Some(blockhash),
|
||||
..PayCommand::default()
|
||||
}),
|
||||
require_keypair: true
|
||||
@ -2523,7 +2411,7 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash: Some(blockhash),
|
||||
nonce_account: Some(pubkey),
|
||||
..PayCommand::default()
|
||||
}),
|
||||
@ -2554,7 +2442,7 @@ mod tests {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash: Some(blockhash),
|
||||
nonce_account: Some(pubkey),
|
||||
nonce_authority: Some(keypair.into()),
|
||||
..PayCommand::default()
|
||||
@ -2563,67 +2451,6 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ Nonce and Offline Nonce Authority
|
||||
let keypair = read_keypair_file(&keypair_file).unwrap();
|
||||
let authority_pubkey = keypair.pubkey();
|
||||
let authority_pubkey_string = format!("{}", authority_pubkey);
|
||||
let sig = keypair.sign_message(&[0u8]);
|
||||
let signer_arg = format!("{}={}", authority_pubkey, sig);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--nonce",
|
||||
&pubkey_string,
|
||||
"--nonce-authority",
|
||||
&authority_pubkey_string,
|
||||
"--signer",
|
||||
&signer_arg,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: Some(pubkey),
|
||||
nonce_authority: Some(authority_pubkey.into()),
|
||||
signers: Some(vec![(authority_pubkey, sig)]),
|
||||
..PayCommand::default()
|
||||
}),
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ Nonce and Offline Nonce Authority
|
||||
// authority pubkey not in signers fails
|
||||
let keypair = read_keypair_file(&keypair_file).unwrap();
|
||||
let authority_pubkey = keypair.pubkey();
|
||||
let authority_pubkey_string = format!("{}", authority_pubkey);
|
||||
let sig = keypair.sign_message(&[0u8]);
|
||||
let signer_arg = format!("{}={}", Pubkey::new_rand(), sig);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
"--nonce",
|
||||
&pubkey_string,
|
||||
"--nonce-authority",
|
||||
&authority_pubkey_string,
|
||||
"--signer",
|
||||
&signer_arg,
|
||||
]);
|
||||
assert!(parse_command(&test_pay).is_err());
|
||||
|
||||
// Test Send-Signature Subcommand
|
||||
let test_send_signature = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
@ -2732,7 +2559,6 @@ mod tests {
|
||||
let node_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: bob_keypair.into(),
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
authorized_withdrawer: Some(bob_pubkey),
|
||||
@ -2764,7 +2590,6 @@ mod tests {
|
||||
let custodian = Pubkey::new_rand();
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: bob_keypair.into(),
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
lockup: Lockup {
|
||||
@ -2794,30 +2619,13 @@ mod tests {
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let split_stake_account = Keypair::new();
|
||||
config.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
split_stake_account: split_stake_account.into(),
|
||||
seed: None,
|
||||
lamports: 1234,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
config.command = CliCommand::GetSlot {
|
||||
commitment_config: CommitmentConfig::default(),
|
||||
};
|
||||
@ -2897,7 +2705,7 @@ mod tests {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
nonce_account: Some(bob_pubkey),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash: Some(blockhash),
|
||||
..PayCommand::default()
|
||||
});
|
||||
let signature = process_command(&config);
|
||||
@ -2924,7 +2732,7 @@ mod tests {
|
||||
config.command = CliCommand::Pay(PayCommand {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
blockhash: Some(blockhash),
|
||||
nonce_account: Some(bob_pubkey),
|
||||
nonce_authority: Some(bob_keypair.into()),
|
||||
..PayCommand::default()
|
||||
@ -2999,7 +2807,6 @@ mod tests {
|
||||
let bob_keypair = Keypair::new();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: bob_keypair.into(),
|
||||
seed: None,
|
||||
node_pubkey,
|
||||
authorized_voter: Some(bob_pubkey),
|
||||
authorized_withdrawer: Some(bob_pubkey),
|
||||
|
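The bulk of the cli.rs diff above removes the `SigningAuthority` type and the `BlockhashQuery`/offline-signing plumbing built on it, falling back to plain `Keypair` authorities and an explicit `--blockhash` value. For context, a condensed sketch of the removed abstraction, reassembled from the hunks above (not a drop-in replacement for either branch):

```rust
use solana_sdk::{
    pubkey::Pubkey,
    signature::{keypair_from_seed, Keypair, KeypairUtil},
};

/// A signer that is either a locally held keypair ("online") or a bare
/// pubkey whose signature is supplied out of band ("offline"). The offline
/// variant carries a deterministic placeholder keypair so a transaction can
/// still be assembled with a signature slot of the right shape before the
/// real signature arrives via `--signer PUBKEY=SIGNATURE`.
pub enum SigningAuthority {
    Online(Keypair),
    Offline(Pubkey, Keypair),
}

impl SigningAuthority {
    pub fn keypair(&self) -> &Keypair {
        match self {
            SigningAuthority::Online(keypair) => keypair,
            SigningAuthority::Offline(_pubkey, placeholder) => placeholder,
        }
    }

    pub fn pubkey(&self) -> Pubkey {
        match self {
            SigningAuthority::Online(keypair) => keypair.pubkey(),
            SigningAuthority::Offline(pubkey, _placeholder) => *pubkey,
        }
    }
}

impl From<Pubkey> for SigningAuthority {
    fn from(pubkey: Pubkey) -> Self {
        // Seeding the placeholder from the pubkey keeps it reproducible.
        SigningAuthority::Offline(pubkey, keypair_from_seed(pubkey.as_ref()).unwrap())
    }
}
```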
@ -11,10 +11,9 @@ use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_client::{rpc_client::RpcClient, rpc_response::RpcVoteAccountInfo};
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
clock::{self, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::Epoch,
|
||||
epoch_schedule::{Epoch, EpochSchedule},
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
@ -55,9 +54,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.about("Get the version of the cluster entrypoint"),
|
||||
)
|
||||
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
|
||||
.subcommand(SubCommand::with_name("block-time")
|
||||
.subcommand(SubCommand::with_name("get-block-time")
|
||||
.about("Get estimated production time of a block")
|
||||
.alias("get-block-time")
|
||||
.arg(
|
||||
Arg::with_name("slot")
|
||||
.index(1)
|
||||
@ -67,11 +65,9 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Slot number of the block to query")
|
||||
)
|
||||
)
|
||||
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch-info")
|
||||
SubCommand::with_name("get-epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
.alias("get-epoch-info")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
@ -82,13 +78,10 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("genesis-hash")
|
||||
.about("Get the genesis hash")
|
||||
.alias("get-genesis-hash")
|
||||
SubCommand::with_name("get-genesis-hash").about("Get the genesis hash"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("slot").about("Get current slot")
|
||||
.alias("get-slot")
|
||||
SubCommand::with_name("get-slot").about("Get current slot")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
@ -99,8 +92,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-count").about("Get current transaction count")
|
||||
.alias("get-transaction-count")
|
||||
SubCommand::with_name("get-transaction-count").about("Get current transaction count")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
@ -158,9 +150,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("block-production")
|
||||
SubCommand::with_name("show-block-production")
|
||||
.about("Show information about block production")
|
||||
.alias("show-block-production")
|
||||
.arg(
|
||||
Arg::with_name("epoch")
|
||||
.long("epoch")
|
||||
@ -175,33 +166,12 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("gossip")
|
||||
.about("Show the current gossip network nodes")
|
||||
.alias("show-gossip")
|
||||
SubCommand::with_name("show-gossip")
|
||||
.about("Show the current gossip network nodes"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stakes")
|
||||
.about("Show stake account information")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkeys")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEYS")
|
||||
.takes_value(true)
|
||||
.multiple(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Only show stake accounts delegated to the provided vote accounts"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
.takes_value(false)
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("validators")
|
||||
.about("Show summary information about the current validators")
|
||||
.alias("show-validators")
|
||||
SubCommand::with_name("show-validators")
|
||||
.about("Show information about the current validators")
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
@ -290,19 +260,6 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_show_stakes(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let vote_account_pubkeys = pubkeys_of(matches, "vote_account_pubkeys");
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowStakes {
|
||||
use_lamports_unit,
|
||||
vote_account_pubkeys,
|
||||
},
|
||||
require_keypair: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
|
||||
@ -321,6 +278,20 @@ fn new_spinner_progress_bar() -> ProgressBar {
|
||||
progress_bar
|
||||
}
|
||||
|
||||
/// Aggregate epoch credit stats and return (total credits, total slots, total epochs)
|
||||
pub fn aggregate_epoch_credits(
|
||||
epoch_credits: &[(Epoch, u64, u64)],
|
||||
epoch_schedule: &EpochSchedule,
|
||||
) -> (u64, u64, u64) {
|
||||
epoch_credits
|
||||
.iter()
|
||||
.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
|
||||
let credits_earned = credits - prev_credits;
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
|
||||
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
|
||||
@ -393,90 +364,35 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
|
||||
))
|
||||
}
|
||||
|
||||
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info()?;
|
||||
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
|
||||
|
||||
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
|
||||
if leader_schedule.is_none() {
|
||||
return Err(format!(
|
||||
"Unable to fetch leader schedule for slot {}",
|
||||
first_slot_in_epoch
|
||||
)
|
||||
.into());
|
||||
}
|
||||
let leader_schedule = leader_schedule.unwrap();
|
||||
|
||||
let mut leader_per_slot_index = Vec::new();
|
||||
for (pubkey, leader_slots) in leader_schedule.iter() {
|
||||
for slot_index in leader_slots.iter() {
|
||||
if *slot_index >= leader_per_slot_index.len() {
|
||||
leader_per_slot_index.resize(*slot_index + 1, "?");
|
||||
}
|
||||
leader_per_slot_index[*slot_index] = pubkey;
|
||||
}
|
||||
}
|
||||
|
||||
for (slot_index, leader) in leader_per_slot_index.iter().enumerate() {
|
||||
println!(
|
||||
" {:<15} {:<44}",
|
||||
first_slot_in_epoch + slot_index as u64,
|
||||
leader
|
||||
);
|
||||
}
|
||||
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
|
||||
let timestamp = rpc_client.get_block_time(slot)?;
|
||||
Ok(timestamp.to_string())
|
||||
}
|
||||
|
||||
fn slot_to_human_time(slot: Slot) -> String {
|
||||
humantime::format_duration(Duration::from_secs(
|
||||
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
|
||||
))
|
||||
.to_string()
|
||||
}
|
||||
|
||||
pub fn process_get_epoch_info(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: &CommitmentConfig,
|
||||
) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
|
||||
println!();
|
||||
println_name_value("Slot:", &epoch_info.absolute_slot.to_string());
|
||||
println_name_value("Epoch:", &epoch_info.epoch.to_string());
|
||||
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
|
||||
let end_slot = start_slot + epoch_info.slots_in_epoch;
|
||||
println_name_value("Current epoch:", &epoch_info.epoch.to_string());
|
||||
println_name_value("Current slot:", &epoch_info.absolute_slot.to_string());
|
||||
println_name_value(
|
||||
"Epoch slot range:",
|
||||
&format!("[{}..{})", start_slot, end_slot),
|
||||
);
|
||||
println_name_value(
|
||||
"Epoch completed percent:",
|
||||
&format!(
|
||||
"{:>3.3}%",
|
||||
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
|
||||
),
|
||||
"Total slots in current epoch:",
|
||||
&epoch_info.slots_in_epoch.to_string(),
|
||||
);
|
||||
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
|
||||
println_name_value(
|
||||
"Epoch completed slots:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
|
||||
),
|
||||
"Remaining slots in current epoch:",
|
||||
&remaining_slots_in_epoch.to_string(),
|
||||
);
|
||||
|
||||
let remaining_time_in_epoch = Duration::from_secs(
|
||||
remaining_slots_in_epoch * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
|
||||
);
|
||||
println_name_value(
|
||||
"Epoch completed time:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
slot_to_human_time(epoch_info.slot_index),
|
||||
slot_to_human_time(epoch_info.slots_in_epoch),
|
||||
slot_to_human_time(remaining_slots_in_epoch)
|
||||
),
|
||||
"Time remaining in current epoch:",
|
||||
&humantime::format_duration(remaining_time_in_epoch).to_string(),
|
||||
);
|
||||
Ok("".to_string())
|
||||
}
|
||||
@ -518,39 +434,19 @@ pub fn process_show_block_production(
|
||||
return Err(format!("Epoch {} is in the future", epoch).into());
|
||||
}
|
||||
|
||||
let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?;
|
||||
|
||||
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
|
||||
let end_slot = std::cmp::min(
|
||||
epoch_info.absolute_slot,
|
||||
epoch_schedule.get_last_slot_in_epoch(epoch),
|
||||
);
|
||||
|
||||
let mut start_slot = if let Some(slot_limit) = slot_limit {
|
||||
let start_slot = if let Some(slot_limit) = slot_limit {
|
||||
std::cmp::max(end_slot.saturating_sub(slot_limit), first_slot_in_epoch)
|
||||
} else {
|
||||
first_slot_in_epoch
|
||||
};
|
||||
|
||||
if minimum_ledger_slot > end_slot {
|
||||
return Err(format!(
|
||||
"Ledger data not available for slots {} to {} (minimum ledger slot is {})",
|
||||
start_slot, end_slot, minimum_ledger_slot
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
if minimum_ledger_slot > start_slot {
|
||||
println!(
|
||||
"\n{}",
|
||||
style(format!(
|
||||
"Note: Requested start slot was {} but minimum ledger slot is {}",
|
||||
start_slot, minimum_ledger_slot
|
||||
))
|
||||
.italic(),
|
||||
);
|
||||
start_slot = minimum_ledger_slot;
|
||||
}
|
||||
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
|
||||
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message(&format!(
|
||||
@ -559,8 +455,6 @@ pub fn process_show_block_production(
|
||||
));
|
||||
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
|
||||
|
||||
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
|
||||
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
|
||||
let total_slots = end_slot_index - start_slot_index + 1;
|
||||
let total_blocks = confirmed_blocks.len();
|
||||
assert!(total_blocks <= total_slots);
|
||||
@ -600,7 +494,7 @@ pub fn process_show_block_production(
|
||||
let skipped_slots = leader_skipped_slots.entry(leader).or_insert(0);
|
||||
|
||||
loop {
|
||||
if confirmed_blocks_index < confirmed_blocks.len() {
|
||||
if !confirmed_blocks.is_empty() {
|
||||
let slot_of_next_confirmed_block = confirmed_blocks[confirmed_blocks_index];
|
||||
if slot_of_next_confirmed_block < slot {
|
||||
confirmed_blocks_index += 1;
|
||||
@ -846,47 +740,8 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
|
||||
))
|
||||
}
|
||||
|
||||
pub fn process_show_stakes(
|
||||
rpc_client: &RpcClient,
|
||||
use_lamports_unit: bool,
|
||||
vote_account_pubkeys: Option<&[Pubkey]>,
|
||||
) -> ProcessResult {
|
||||
use crate::stake::print_stake_state;
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Fetching stake accounts...");
|
||||
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
|
||||
progress_bar.finish_and_clear();
|
||||
|
||||
for (stake_pubkey, stake_account) in all_stake_accounts {
if let Ok(stake_state) = stake_account.state() {
match stake_state {
StakeState::Initialized(_) => {
if vote_account_pubkeys.is_none() {
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
StakeState::Stake(_, stake) => {
if vote_account_pubkeys.is_none()
|| vote_account_pubkeys
.unwrap()
.contains(&stake.delegation.voter_pubkey)
{
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
_ => {}
}
}
}
Ok("".to_string())
}

pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake = vote_accounts
.current
@@ -935,7 +790,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
"Commission",
"Last Vote",
"Root Block",
"Credits",
"Uptime",
"Active Stake",
))
.bold()
@@ -943,7 +798,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)

fn print_vote_account(
vote_account: RpcVoteAccountInfo,
current_epoch: Epoch,
epoch_schedule: &EpochSchedule,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
@@ -956,6 +811,17 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
}
}

fn uptime(epoch_credits: Vec<(Epoch, u64, u64)>, epoch_schedule: &EpochSchedule) -> String {
let (total_credits, total_slots, _) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
format!("{:.2}%", total_uptime)
} else {
"-".into()
}
}

println!(
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
@@ -968,15 +834,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
Some(*credits)
} else {
None
})
.unwrap_or(0),
uptime(vote_account.epoch_credits, epoch_schedule),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
@@ -992,7 +850,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
&epoch_schedule,
total_active_stake,
use_lamports_unit,
false,
@@ -1001,7 +859,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
&epoch_schedule,
total_active_stake,
use_lamports_unit,
true,
@@ -1041,10 +899,11 @@ mod tests {
);

let slot = 100;
let test_get_block_time =
test_commands
.clone()
.get_matches_from(vec!["test", "block-time", &slot.to_string()]);
let test_get_block_time = test_commands.clone().get_matches_from(vec![
"test",
"get-block-time",
&slot.to_string(),
]);
assert_eq!(
parse_command(&test_get_block_time).unwrap(),
CliCommandInfo {
@@ -1055,7 +914,7 @@ mod tests {

let test_get_epoch_info = test_commands
.clone()
.get_matches_from(vec!["test", "epoch-info"]);
.get_matches_from(vec!["test", "get-epoch-info"]);
assert_eq!(
parse_command(&test_get_epoch_info).unwrap(),
CliCommandInfo {
@@ -1068,7 +927,7 @@ mod tests {

let test_get_genesis_hash = test_commands
.clone()
.get_matches_from(vec!["test", "genesis-hash"]);
.get_matches_from(vec!["test", "get-genesis-hash"]);
assert_eq!(
parse_command(&test_get_genesis_hash).unwrap(),
CliCommandInfo {
@@ -1077,7 +936,9 @@ mod tests {
}
);

let test_get_slot = test_commands.clone().get_matches_from(vec!["test", "slot"]);
let test_get_slot = test_commands
.clone()
.get_matches_from(vec!["test", "get-slot"]);
assert_eq!(
parse_command(&test_get_slot).unwrap(),
CliCommandInfo {
@@ -1090,7 +951,7 @@ mod tests {

let test_transaction_count = test_commands
.clone()
.get_matches_from(vec!["test", "transaction-count"]);
.get_matches_from(vec!["test", "get-transaction-count"]);
assert_eq!(
parse_command(&test_transaction_count).unwrap(),
CliCommandInfo {
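The `uptime()` helper in the hunks above turns accumulated vote credits into a percentage of slots. Below is a minimal standalone sketch of that arithmetic; `aggregate_epoch_credits` is not shown in this diff, so the per-epoch aggregation and the fixed slots-per-epoch value here are illustrative assumptions, not the solana-cli implementation.

// Standalone sketch of the credits-to-uptime arithmetic used by `uptime()` above.
// The aggregation (sum credits earned per epoch, assume a fixed slot count per
// epoch) is an assumption for illustration only.
fn uptime_percent(epoch_credits: &[(u64, u64, u64)], slots_per_epoch: u64) -> String {
    // Each entry is (epoch, credits, prev_credits); credits earned in the
    // epoch is the difference between the running totals.
    let total_credits: u64 = epoch_credits
        .iter()
        .map(|(_epoch, credits, prev_credits)| credits.saturating_sub(*prev_credits))
        .sum();
    let total_slots = slots_per_epoch * epoch_credits.len() as u64;
    if total_slots > 0 {
        format!("{:.2}%", 100_f64 * total_credits as f64 / total_slots as f64)
    } else {
        "-".into()
    }
}

fn main() {
    // Two epochs of 8192 slots each, with 8000 and 7500 credits earned.
    let credits = [(0, 8000, 0), (1, 15500, 8000)];
    println!("{}", uptime_percent(&credits, 8192)); // prints 94.60%
}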
@@ -1,10 +1,8 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};
use std::fs::{create_dir_all, File};
use std::io::{self, Write};
use std::path::Path;

lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
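The hunk above shows only the opening of the `CONFIG_FILE` lazy_static in the CLI's config module. As a rough illustration of the pattern only (not the actual solana-cli initializer; the default path below is hypothetical, and the lazy_static crate is assumed as a dependency):

#[macro_use]
extern crate lazy_static;

use std::env;

lazy_static! {
    // Hypothetical default path, for illustration only; the real CONFIG_FILE
    // initializer is not shown in the hunk above.
    pub static ref CONFIG_FILE: Option<String> = {
        env::var("HOME")
            .ok()
            .map(|home| format!("{}/.config/solana/cli/config.yml", home))
    };
}

fn main() {
    match &*CONFIG_FILE {
        Some(path) => println!("default config file: {}", path),
        None => println!("no default config file (HOME not set)"),
    }
}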
@@ -1,8 +1,11 @@
#[macro_use]
extern crate lazy_static;

pub mod cli;
pub mod cluster_query;
pub mod config;
pub mod display;
pub mod nonce;
pub mod offline;
pub mod stake;
pub mod storage;
pub mod validator_info;
145 cli/src/main.rs
@@ -1,4 +1,4 @@
use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches, SubCommand};
use clap::{crate_description, crate_name, Arg, ArgGroup, ArgMatches, SubCommand};
use console::style;

use solana_clap_utils::{
@@ -10,70 +10,67 @@ use solana_clap_utils::{
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliError},
config::{self, Config},
display::{println_name_value, println_name_value_or},
};
use solana_cli_config::config::{Config, CONFIG_FILE};
use solana_sdk::signature::read_keypair_file;

use std::error;

fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
let parse_args = match matches.subcommand() {
("config", Some(matches)) => match matches.subcommand() {
("get", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") {
let config = Config::load(config_file).unwrap_or_default();
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (value, default_value) = match field {
"url" => (config.url, CliConfig::default_json_rpc_url()),
"keypair" => (config.keypair_path, CliConfig::default_keypair_path()),
_ => unreachable!(),
};
println_name_value_or(&format!("* {}:", field), &value, &default_value);
} else {
println_name_value("Wallet Config:", config_file);
println_name_value_or(
"* url:",
&config.url,
&CliConfig::default_json_rpc_url(),
);
println_name_value_or(
"* keypair:",
&config.keypair_path,
&CliConfig::default_keypair_path(),
);
}
("get", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") {
let config = Config::load(config_file).unwrap_or_default();
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (value, default_value) = match field {
"url" => (config.url, CliConfig::default_json_rpc_url()),
"keypair" => (config.keypair_path, CliConfig::default_keypair_path()),
_ => unreachable!(),
};
println_name_value_or(&format!("* {}:", field), &value, &default_value);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
style("No config file found.").bold()
println_name_value("Wallet Config:", config_file);
println_name_value_or(
"* url:",
&config.url,
&CliConfig::default_json_rpc_url(),
);
println_name_value_or(
"* keypair:",
&config.keypair_path,
&CliConfig::default_keypair_path(),
);
}
false
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
style("No config file found.").bold()
);
}
("set", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") {
let mut config = Config::load(config_file).unwrap_or_default();
if let Some(url) = subcommand_matches.value_of("json_rpc_url") {
config.url = url.to_string();
}
if let Some(keypair) = subcommand_matches.value_of("keypair") {
config.keypair_path = keypair.to_string();
}
config.save(config_file)?;
println_name_value("Wallet Config Updated:", config_file);
println_name_value("* url:", &config.url);
println_name_value("* keypair:", &config.keypair_path);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
style("No config file found.").bold()
);
false
}
("set", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") {
let mut config = Config::load(config_file).unwrap_or_default();
if let Some(url) = subcommand_matches.value_of("json_rpc_url") {
config.url = url.to_string();
}
false
if let Some(keypair) = subcommand_matches.value_of("keypair") {
config.keypair_path = keypair.to_string();
}
config.save(config_file)?;
println_name_value("Wallet Config Updated:", config_file);
println_name_value("* url:", &config.url);
println_name_value("* keypair:", &config.keypair_path);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
style("No config file found.").bold()
);
}
_ => unreachable!(),
},
false
}
_ => true,
};
Ok(parse_args)
@@ -162,7 +159,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.takes_value(true)
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *CONFIG_FILE {
if let Some(ref config_file) = *config::CONFIG_FILE {
arg.default_value(&config_file)
} else {
arg
@@ -210,31 +207,25 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
)
.subcommand(
SubCommand::with_name("config")
.about("Solana command-line tool configuration settings")
.aliases(&["get", "set"])
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(
SubCommand::with_name("get")
.about("Get current config settings")
.arg(
Arg::with_name("specific_setting")
.index(1)
.value_name("CONFIG_FIELD")
.takes_value(true)
.possible_values(&["url", "keypair"])
.help("Return a specific config setting"),
),
)
.subcommand(
SubCommand::with_name("set")
.about("Set a config setting")
.group(
ArgGroup::with_name("config_settings")
.args(&["json_rpc_url", "keypair"])
.multiple(true)
.required(true),
),
SubCommand::with_name("get")
.about("Get cli config settings")
.arg(
Arg::with_name("specific_setting")
.index(1)
.value_name("CONFIG_FIELD")
.takes_value(true)
.possible_values(&["url", "keypair"])
.help("Return a specific config setting"),
),
)
.subcommand(
SubCommand::with_name("set")
.about("Set a cli config setting")
.group(
ArgGroup::with_name("config_settings")
.args(&["json_rpc_url", "keypair"])
.multiple(true)
.required(true),
),
)
.get_matches();
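One side of the main.rs diff above nests `get` and `set` under a single `config` subcommand and dispatches with a two-level `matches.subcommand()` match. A minimal, self-contained sketch of that shape follows, using the clap 2-style calls that appear in the diff; it is an illustration, not the actual solana-cli code.

// Sketch of a nested `config get|set` subcommand and its two-level dispatch.
use clap::{App, AppSettings, Arg, SubCommand};

fn main() {
    let matches = App::new("sketch")
        .subcommand(
            SubCommand::with_name("config")
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("get").arg(
                        Arg::with_name("specific_setting")
                            .index(1)
                            .possible_values(&["url", "keypair"]),
                    ),
                )
                .subcommand(SubCommand::with_name("set")),
        )
        .get_matches_from(vec!["sketch", "config", "get", "url"]);

    // The two-level match mirrors the shape of `parse_settings` in the diff above.
    match matches.subcommand() {
        ("config", Some(config_matches)) => match config_matches.subcommand() {
            ("get", Some(get_matches)) => {
                println!("get {:?}", get_matches.value_of("specific_setting"));
            }
            ("set", Some(_)) => println!("set"),
            _ => unreachable!(),
        },
        _ => println!("no config subcommand"),
    }
}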
160 cli/src/nonce.rs
@@ -1,22 +1,20 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult, SigningAuthority,
CliError, ProcessResult,
};
use crate::offline::BLOCKHASH_ARG;
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
account::Account,
account_utils::StateMut,
account_utils::State,
hash::Hash,
nonce_state::{Meta, NonceState},
nonce_state::NonceState,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_instruction::{
advance_nonce_account, authorize_nonce_account, create_address_with_seed,
create_nonce_account, create_nonce_account_with_seed, withdraw_nonce_account, NonceError,
create_nonce_account, nonce_advance, nonce_authorize, nonce_withdraw, NonceError,
SystemError,
},
system_program,
@@ -51,23 +49,13 @@ pub trait NonceSubCommands {
fn nonce_subcommands(self) -> Self;
}

pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("nonce_authority")
.long("nonce-authority")
.takes_value(true)
.value_name("PUBKEY")
.requires(BLOCKHASH_ARG.name)
.validator(is_pubkey)
.help(NONCE_ARG.help)
}

pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR or PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help)
.value_name("KEYPAIR")
.validator(is_keypair_or_ask_keyword)
.help("Specify nonce authority if different from account")
}

impl NonceSubCommands for App<'_, '_> {
@@ -93,13 +81,6 @@ impl NonceSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair)
.help("Account to be granted authority of the nonce account"),
)
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("SEED STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
)
.arg(nonce_authority_arg()),
)
.subcommand(
@@ -132,8 +113,8 @@ impl NonceSubCommands for App<'_, '_> {
.help("Specify unit to use for request"),
)
.arg(
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
Arg::with_name("nonce_authority")
.long("nonce-authority")
.takes_value(true)
.value_name("BASE58_PUBKEY")
.validator(is_pubkey_or_keypair)
@@ -141,9 +122,8 @@ impl NonceSubCommands for App<'_, '_> {
),
)
.subcommand(
SubCommand::with_name("nonce")
SubCommand::with_name("get-nonce")
.about("Get the current nonce value")
.alias("get-nonce")
.arg(
Arg::with_name("nonce_account_pubkey")
.index(1)
@@ -169,9 +149,8 @@ impl NonceSubCommands for App<'_, '_> {
.arg(nonce_authority_arg()),
)
.subcommand(
SubCommand::with_name("nonce-account")
SubCommand::with_name("show-nonce-account")
.about("Show the contents of a nonce account")
.alias("show-nonce-account")
.arg(
Arg::with_name("nonce_account_pubkey")
.index(1)
@@ -234,8 +213,7 @@ impl NonceSubCommands for App<'_, '_> {
pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
@@ -249,14 +227,12 @@ pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliComm

pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap();
let seed = matches.value_of("seed").map(|s| s.to_string());
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
let nonce_authority = pubkey_of(matches, "nonce_authority");

Ok(CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: nonce_account.into(),
seed,
nonce_authority,
lamports,
},
@@ -275,8 +251,7 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr

pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

Ok(CliCommandInfo {
command: CliCommand::NewNonce {
@@ -306,8 +281,7 @@ pub fn parse_withdraw_from_nonce_account(
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
@@ -352,15 +326,13 @@ pub fn process_authorize_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&SigningAuthority>,
nonce_authority: Option<&Keypair>,
new_authority: &Pubkey,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;

let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let ix = nonce_authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
let mut tx = Transaction::new_signed_with_payer(
vec![ix],
Some(&config.keypair.pubkey()),
@@ -382,31 +354,24 @@ pub fn process_create_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Keypair,
seed: Option<String>,
nonce_authority: Option<Pubkey>,
lamports: u64,
) -> ProcessResult {
let nonce_account_pubkey = nonce_account.pubkey();
let nonce_account_address = if let Some(seed) = seed.clone() {
create_address_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
} else {
nonce_account_pubkey
};

check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
(&nonce_account_address, "nonce_account".to_string()),
(&nonce_account_pubkey, "nonce_account_pubkey".to_string()),
)?;

if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_address) {
if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_pubkey) {
let err_msg = if nonce_account.owner == system_program::id()
&& StateMut::<NonceState>::state(&nonce_account).is_ok()
&& State::<NonceState>::state(&nonce_account).is_ok()
{
format!("Nonce account {} already exists", nonce_account_address)
format!("Nonce account {} already exists", nonce_account_pubkey)
} else {
format!(
"Account {} already exists and is not a nonce account",
nonce_account_address
nonce_account_pubkey
)
};
return Err(CliError::BadParameter(err_msg).into());
@@ -422,37 +387,17 @@ pub fn process_create_nonce_account(
}

let nonce_authority = nonce_authority.unwrap_or_else(|| config.keypair.pubkey());

let ixs = if let Some(seed) = seed {
create_nonce_account_with_seed(
&config.keypair.pubkey(), // from
&nonce_account_address, // to
&nonce_account_pubkey, // base
&seed, // seed
&nonce_authority,
lamports,
)
} else {
create_nonce_account(
&config.keypair.pubkey(),
&nonce_account_pubkey,
&nonce_authority,
lamports,
)
};

let ixs = create_nonce_account(
&config.keypair.pubkey(),
&nonce_account_pubkey,
&nonce_authority,
lamports,
);
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;

let signers = if nonce_account_pubkey != config.keypair.pubkey() {
vec![&config.keypair, nonce_account] // both must sign if `from` and `to` differ
} else {
vec![&config.keypair] // when stake_account == config.keypair and there's a seed, we only need one signature
};

let mut tx = Transaction::new_signed_with_payer(
ixs,
Some(&config.keypair.pubkey()),
&signers,
&[&config.keypair, nonce_account],
recent_blockhash,
);
check_account_for_fee(
@@ -461,7 +406,8 @@ pub fn process_create_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &signers);
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, nonce_account]);
log_instruction_custom_error::<SystemError>(result)
}

@@ -489,7 +435,7 @@ pub fn process_new_nonce(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&SigningAuthority>,
nonce_authority: Option<&Keypair>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@@ -503,10 +449,8 @@ pub fn process_new_nonce(
.into());
}

let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let ix = nonce_advance(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_signed_with_payer(
vec![ix],
@@ -538,7 +482,7 @@ pub fn process_show_nonce_account(
))
.into());
}
let print_account = |data: Option<(Meta, Hash)>| {
let print_account = |hash: Option<Hash>| {
println!(
"balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
@@ -551,21 +495,15 @@ pub fn process_show_nonce_account(
true
)
);
match data {
Some((meta, hash)) => {
println!("nonce: {}", hash);
println!("authority: {}", meta.nonce_authority);
}
None => {
println!("nonce: uninitialized");
println!("authority: uninitialized");
}
match hash {
Some(hash) => println!("nonce: {}", hash),
None => println!("nonce: uninitialized"),
}
Ok("".to_string())
};
match nonce_account.state() {
Ok(NonceState::Uninitialized) => print_account(None),
Ok(NonceState::Initialized(meta, hash)) => print_account(Some((meta, hash))),
Ok(NonceState::Initialized(_, hash)) => print_account(Some(hash)),
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to nonce state: {:?}",
err
@@ -578,16 +516,14 @@ pub fn process_withdraw_from_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&SigningAuthority>,
nonce_authority: Option<&Keypair>,
destination_account_pubkey: &Pubkey,
lamports: u64,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;

let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = withdraw_nonce_account(
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let ix = nonce_withdraw(
nonce_account,
&nonce_authority.pubkey(),
destination_account_pubkey,
@@ -696,7 +632,6 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
seed: None,
nonce_authority: None,
lamports: 50,
},
@@ -719,7 +654,6 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
seed: None,
nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().pubkey()
),
@@ -785,7 +719,7 @@ mod tests {
// Test ShowNonceAccount Subcommand
let test_show_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"nonce-account",
"show-nonce-account",
&nonce_account_string,
]);
assert_eq!(
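Several of the nonce.rs changes above reduce the nonce authority to an optional `Keypair` that falls back to the CLI's default keypair when no `--nonce-authority` is given. A tiny sketch of that fallback follows; `Keypair` here is a stand-in struct for illustration, not the solana_sdk type.

// Sketch of the nonce-authority fallback shape used throughout the nonce.rs
// code above: an explicit authority keypair wins, otherwise the CLI's default
// keypair signs. `Keypair` is a stand-in type, not solana_sdk::signature::Keypair.
struct Keypair {
    label: &'static str,
}

fn resolve_nonce_authority<'a>(
    nonce_authority: Option<&'a Keypair>,
    default_keypair: &'a Keypair,
) -> &'a Keypair {
    // Same shape as `nonce_authority.unwrap_or(&config.keypair)` in the diff.
    nonce_authority.unwrap_or(default_keypair)
}

fn main() {
    let cli_keypair = Keypair { label: "cli keypair" };
    let authority = Keypair { label: "separate nonce authority" };

    assert_eq!(resolve_nonce_authority(None, &cli_keypair).label, "cli keypair");
    assert_eq!(
        resolve_nonce_authority(Some(&authority), &cli_keypair).label,
        "separate nonce authority"
    );
    println!("nonce authority fallback behaves as expected");
}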
@@ -1,271 +0,0 @@
use clap::{App, Arg, ArgMatches};
use serde_json::Value;
use solana_clap_utils::{
input_parsers::value_of,
input_validators::{is_hash, is_pubkey_sig},
ArgConstant,
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature};
use std::str::FromStr;

pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",
long: "blockhash",
help: "Use the supplied blockhash",
};

pub const SIGN_ONLY_ARG: ArgConstant<'static> = ArgConstant {
name: "sign_only",
long: "sign-only",
help: "Sign the transaction offline",
};

pub const SIGNER_ARG: ArgConstant<'static> = ArgConstant {
name: "signer",
long: "signer",
help: "Provide a public-key/signature pair for the transaction",
};

#[derive(Clone, Debug, PartialEq)]
pub enum BlockhashQuery {
None(Hash, FeeCalculator),
FeeCalculator(Hash),
All,
}

impl BlockhashQuery {
pub fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
match blockhash {
Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
Some(hash) if !sign_only => Self::FeeCalculator(hash),
None if !sign_only => Self::All,
_ => panic!("Cannot resolve blockhash"),
}
}

pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
BlockhashQuery::new(blockhash, sign_only)
}

pub fn get_blockhash_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
let (hash, fee_calc) = match self {
BlockhashQuery::None(hash, fee_calc) => (Some(hash), Some(fee_calc)),
BlockhashQuery::FeeCalculator(hash) => (Some(hash), None),
BlockhashQuery::All => (None, None),
};
if None == fee_calc {
let (cluster_hash, fee_calc) = rpc_client.get_recent_blockhash()?;
Ok((*hash.unwrap_or(&cluster_hash), fee_calc))
} else {
Ok((*hash.unwrap(), fee_calc.unwrap().clone()))
}
}
}

impl Default for BlockhashQuery {
fn default() -> Self {
BlockhashQuery::All
}
}

fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}

fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}

fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY=BASE58_SIG")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}

pub trait OfflineArgs {
fn offline_args(self) -> Self;
}

impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}

pub fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}

#[cfg(test)]
mod tests {
use super::*;
use clap::App;
use serde_json::{self, json, Value};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use solana_sdk::{fee_calculator::FeeCalculator, hash::hash};
use std::collections::HashMap;

#[test]
fn test_blockhashspec_new_ok() {
let blockhash = hash(&[1u8]);

assert_eq!(
BlockhashQuery::new(Some(blockhash), true),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false),
BlockhashQuery::FeeCalculator(blockhash),
);
assert_eq!(BlockhashQuery::new(None, false), BlockhashQuery::All,);
}

#[test]
#[should_panic]
fn test_blockhashspec_new_fail() {
BlockhashQuery::new(None, true);
}

#[test]
fn test_blockhashspec_new_from_matches_ok() {
let test_commands = App::new("blockhashspec_test").offline_args();
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();

let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);

let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(blockhash),
);

let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test"]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::All,
);
}

#[test]
#[should_panic]
fn test_blockhashspec_new_from_matches_fail() {
let test_commands = App::new("blockhashspec_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));

let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test", "--sign-only"]);
BlockhashQuery::new_from_matches(&matches);
}

#[test]
fn test_blockhashspec_get_blockhash_fee_calc() {
let test_blockhash = hash(&[0u8]);
let rpc_blockhash = hash(&[1u8]);
let rpc_fee_calc = FeeCalculator::new(42, 42);
let get_recent_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!((
Value::String(rpc_blockhash.to_string()),
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
)),
});
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(test_blockhash)
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(test_blockhash, FeeCalculator::default())
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.is_err());
}
}
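The deleted offline.rs above centers on `BlockhashQuery`, which decides whether a transaction uses a caller-supplied blockhash (and, for sign-only flows, a default fee calculator) or queries the cluster for both. A self-contained sketch of that resolution logic follows; `Hash`, `FeeCalculator`, and the cluster lookup are stand-ins for illustration, not solana_sdk types or the RpcClient API.

// Sketch of the offline-signing blockhash/fee resolution modeled on the
// deleted BlockhashQuery above. All types here are local stand-ins.
#[derive(Clone, Debug, Default, PartialEq)]
struct FeeCalculator {
    lamports_per_signature: u64,
}

type Hash = [u8; 32];

#[derive(Debug, PartialEq)]
enum BlockhashQuery {
    None(Hash, FeeCalculator),
    FeeCalculator(Hash),
    All,
}

impl BlockhashQuery {
    fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
        match blockhash {
            // Sign-only: trust the supplied hash and use a default fee calculator.
            Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
            // Online with a supplied hash: still fetch the fee calculator.
            Some(hash) => Self::FeeCalculator(hash),
            None if !sign_only => Self::All,
            None => panic!("sign-only mode requires a --blockhash"),
        }
    }

    // `fetch` stands in for an RPC call that returns (recent blockhash, fee calculator).
    fn resolve(&self, fetch: impl Fn() -> (Hash, FeeCalculator)) -> (Hash, FeeCalculator) {
        match self {
            Self::None(hash, fee_calc) => (*hash, fee_calc.clone()),
            Self::FeeCalculator(hash) => (*hash, fetch().1),
            Self::All => fetch(),
        }
    }
}

fn main() {
    let cluster = || ([7u8; 32], FeeCalculator { lamports_per_signature: 5000 });
    let offline = BlockhashQuery::new(Some([1u8; 32]), true).resolve(&cluster);
    let online = BlockhashQuery::new(None, false).resolve(&cluster);
    assert_eq!(offline.1, FeeCalculator::default());
    assert_eq!(online.0, [7u8; 32]);
    println!("offline hash byte: {:?}, online fee: {:?}", offline.0[0], online.1);
}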
926 cli/src/stake.rs
File diff suppressed because it is too large