Compare commits

...

140 Commits

SHA1 Message Date
b6864d8524 Factor out creating genesis with vote accounts into a utility function (bp #8315) (#8316)
automerge
2020-02-18 13:21:05 -08:00
5ddc0d5485 Bump version to 0.22.10 2020-02-14 23:29:27 -07:00
3534a7cbc7 Datapoints overwhelm the metrics queue and blow up ram usage. (#8272) (#8282)
automerge
2020-02-14 21:49:26 -08:00
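
The patch itself is not shown in this view. As an illustration only (a generic remedy, not necessarily what #8272 did), the usual way to keep a metrics queue from blowing up RAM is a bounded channel that drops datapoints under backpressure:

```rust
use std::sync::mpsc::sync_channel;

fn main() {
    // Bounded channel: the queue holds at most 1024 datapoints.
    let (sender, receiver) = sync_channel::<String>(1024);

    // try_send fails instead of buffering when the queue is full, so a slow
    // metrics writer can no longer cause unbounded memory growth.
    if sender.try_send("datapoint".to_string()).is_err() {
        eprintln!("metrics queue full, dropping datapoint");
    }

    println!("queued: {:?}", receiver.try_recv());
}
```
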
e2f5a63316 Cargo.lock 2020-02-14 21:24:05 -07:00
6475314ede Bump version to 0.22.9 2020-02-14 19:58:16 -07:00
535ee281e8 Filter old CrdsValues received via Pull Responses in Gossip (#8150) (#8277)
automerge
2020-02-14 10:21:26 -08:00
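
A minimal sketch of the filtering idea, with a simplified stand-in for the real CrdsValue type: values carried in pull responses are dropped when their wallclock is older than the allowed timeout, so stale entries from long-offline nodes cannot repopulate the gossip table.

```rust
// Simplified stand-in; the real CrdsValue carries much more than a wallclock.
struct CrdsValue {
    wallclock: u64, // milliseconds
}

/// Keep only pull-response values that are fresh enough.
fn filter_pull_responses(values: Vec<CrdsValue>, now: u64, timeout: u64) -> Vec<CrdsValue> {
    values
        .into_iter()
        .filter(|value| now.saturating_sub(value.wallclock) <= timeout)
        .collect()
}
```
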
da843a7ace Fix larger than necessary allocations in streamer (#8187) (#8191)
automerge
2020-02-10 12:52:16 -08:00
772cf8288c Bump version to 0.22.8 2020-02-03 21:08:54 -07:00
e81a40ba55 Lock snapshot version to 0.22.6 2020-02-03 17:06:29 -07:00
a52359a6be Cargo.lock 2020-02-03 17:05:35 -07:00
2fe0853fba Fix consensus threshold when new root is created (#8093)
When a new root is created, the oldest slot is popped off
but when the logic checks for identical slots, it assumes
that any difference means a slot was popped off the front.
2020-02-03 16:47:02 -07:00
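
A minimal sketch of the corrected comparison, using hypothetical types rather than the real Tower code: align the two lockout stacks by slot, skipping entries that were rooted away, instead of assuming any length difference came from the front.

```rust
#[derive(Clone, Copy)]
struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

/// Compare lockout stacks slot-by-slot. Entries missing from `theirs` are
/// treated as rooted (popped), not as evidence of a mismatch.
fn lockouts_increased(ours: &[Lockout], theirs: &[Lockout]) -> bool {
    let mut theirs = theirs.iter().peekable();
    for our in ours {
        // Skip slots that were popped off the front when a new root was made.
        while theirs.peek().map_or(false, |t| t.slot < our.slot) {
            theirs.next();
        }
        if let Some(t) = theirs.peek() {
            if t.slot == our.slot && t.confirmation_count < our.confirmation_count {
                return false; // this lockout was not increased
            }
        }
    }
    true
}
```
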
de3630f76c Filter repairman peers based on shred_version (#8069)
(cherry picked from commit b9988b62e4)
2020-02-01 08:58:26 -07:00
ff9e388843 Fix stale gossip entrypoint (#8053)
(cherry picked from commit fd207b6907)
2020-01-31 00:34:29 -07:00
67a7995f04 Bump version to 0.22.7 2020-01-30 15:47:22 -07:00
f9d793023c Only error if --expected-shred-version was not provided 2020-01-30 13:25:25 -07:00
99b2504b38 Rename rpc_peers() to all_rpc_peers() for consistency 2020-01-30 13:21:04 -07:00
3f3aec29d1 Add different shred test to test_tvu_peers_and_stakes
(cherry picked from commit 0c55b37976)
2020-01-30 11:28:18 -07:00
7be8124b9e Ignore slow archiver tests (#8032)
automerge

(cherry picked from commit 400412d76c)
2020-01-30 09:38:53 -07:00
81259daa3f Add shred version filters to Crds Accessors (#8027)
* Add shred version filters to Crds Accessors

* Adopt entrypoint shred_version if one isn't provided

(cherry picked from commit 64c42e28dc)
2020-01-30 08:59:00 -07:00
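
A sketch of what such an accessor filter looks like, with a simplified ContactInfo; treating a shred version of zero as "unknown" is an assumption made for illustration:

```rust
// Simplified stand-in for gossip's ContactInfo.
#[derive(Clone)]
struct ContactInfo {
    shred_version: u16,
}

/// Keep only peers on our shred version. Zero passes the filter as
/// "unknown", which is why a node started without --expected-shred-version
/// adopts the entrypoint's value before filtering.
fn filter_by_shred_version(peers: &[ContactInfo], self_shred_version: u16) -> Vec<ContactInfo> {
    peers
        .iter()
        .filter(|peer| {
            self_shred_version == 0
                || peer.shred_version == 0
                || peer.shred_version == self_shred_version
        })
        .cloned()
        .collect()
}
```
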
136fa5b561 Add leader-schedule subcommand 2020-01-29 20:08:32 -07:00
63ca6118fa Add --expected-shred-version option 2020-01-29 20:08:32 -07:00
850d729739 Wait for supermajority by default, add --no-wait-for-supermajority flag to override 2020-01-29 20:08:32 -07:00
62f9183d17 getClusterNodes now excludes validators with a different shred version 2020-01-29 20:08:32 -07:00
cfe3481ba4 Log solana-validator args on startup to aid debugging
(cherry picked from commit effe6e3ff3)
2020-01-29 09:40:18 -07:00
788e9f321c Bump version to v0.22.6 2020-01-28 08:44:44 -07:00
265e88e734 Fix compute_shred_version() 2020-01-27 19:05:17 -07:00
e80c74d955 Drop v prefix 2020-01-27 19:05:17 -07:00
d3efe2317b Remove stray key 2020-01-26 14:36:00 -07:00
05a661dd88 Bump version to v0.22.5 2020-01-24 21:52:01 -07:00
84090df770 Bump perf libs to v0.18.0 for CUDA 10.2 support 2020-01-24 21:38:51 -07:00
3f7fe04124 Consensus fix, don't consider threshold check if
lockouts are not increased
2020-01-24 21:34:16 -07:00
ac4e3c2426 Add ability to hard fork at any slot (#7801) (#7970)
automerge
2020-01-24 18:57:08 -08:00
13af049988 Install move-loader binaries (#7768)
(cherry picked from commit 5cb23c814d)
2020-01-24 18:13:03 -07:00
bd07f9bdcb Move testnet.solana.com and TdS to their own GCP projects 2020-01-24 16:28:04 -07:00
82927fee20 Increase --wait-for-supermajority to wait for 75% online stake (#7957)
automerge
2020-01-23 23:03:13 -08:00
57d5534bab Add create-snapshot command 2020-01-23 22:21:36 -07:00
d2c15b596f Add BlockstoreProcessorResult 2020-01-23 21:03:57 -07:00
5d8dc78718 Move snapshot archive generation out of the SnapshotPackagerService 2020-01-23 15:58:59 -07:00
c945e80618 Type grooming 2020-01-23 15:58:59 -07:00
0802793d37 Unify ledger_path arg handling with validator/ 2020-01-23 15:58:59 -07:00
a5c3750a58 Pass bank_forks by reference 2020-01-23 15:58:59 -07:00
dc1c5f8b1e --halt-at-slot 1 now halts at slot 1 2020-01-23 15:58:59 -07:00
653bec01f0 Set BankRc slot correctly when restoring a bank snapshot 2020-01-23 15:58:59 -07:00
49c94fad60 add_snapshot now returns SlotSnapshotPaths 2020-01-23 15:58:59 -07:00
98fd1b3fcb Remove superfluous accounts arg 2020-01-23 15:58:59 -07:00
93301d1c81 Make run.sh not overwrite genesis if existing (#7837) (#7939)
automerge
2020-01-22 23:38:41 -08:00
5aa8ee8ede Uninteresting cleanup (#7938)
automerge
2020-01-22 21:16:25 -08:00
28f81bd0a3 Avoid unsorted recent_blockhashes for determinism (#7918) (#7936)
automerge
2020-01-22 18:52:39 -08:00
1f4ae4318b Reject CI on failed mergify.io backports (#7927)
automerge

(cherry picked from commit 9bd6be779f)
2020-01-22 16:11:07 -07:00
bec1cf3145 CLI: Cleanup authority arg usage inconsistencies (#7922) (#7924)
automerge
2020-01-22 14:09:26 -08:00
5b4b086ebf Add mechanism to load v0.22.3 snapshots on newer Solana versions 2020-01-22 13:19:07 -07:00
0ef33b6462 don't put accounts in a weird location, use the defaults (#7921)
automerge

(cherry picked from commit f9323c5273)
2020-01-22 12:58:06 -07:00
e401bc6997 CLI: Support offline authorities (#7905) (#7920)
automerge
2020-01-22 10:57:16 -08:00
8ffd2c12a3 Add and use minimumLedgerSlot RPC API in block-production command (bp #7901) (#7903)
automerge
2020-01-21 14:07:32 -08:00
ec4134f26d Revert "Generate MAX_DATA_SHREDS_PER_FEC_BLOCK coding shreds for each FEC block (#7474)" (#7898) (#7899)
automerge
2020-01-21 12:40:42 -08:00
35e7b2f975 Remove redundant threadpools in sigverify (bp #7888) (#7890)
automerge
2020-01-20 21:31:56 -08:00
3509f1158f Assume 1 or more validators 2020-01-20 19:19:29 -07:00
1ca33d1967 --limit-ledger-size now accepts an optional slot count value (#7885)
automerge
2020-01-20 14:22:37 -08:00
19474ecaae Create ledger directory if it doesn't already exist (#7878)
automerge
2020-01-20 10:41:40 -08:00
e317940ebc Try running testnet.solana.com with only two validators 2020-01-20 10:23:43 -07:00
fbbfa93524 Spy just for RPC to avoid premature supermajority (#7856) (#7875)
automerge
2020-01-19 18:51:13 -08:00
c759a04fbc If a bad RPC node is selected try another one instead of aborting (#7871)
automerge
2020-01-18 10:52:15 -08:00
d1d37db717 Abort if a snapshot download fails for any reason other than 404
(cherry picked from commit e28508ad56)
2020-01-18 09:35:43 -07:00
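
A sketch of that download policy using reqwest's blocking client (illustrative; the validator has its own HTTP plumbing): a 404 means the RPC node simply has no snapshot and boot continues without one, while any other failure aborts.

```rust
use reqwest::StatusCode;

/// Ok(Some(bytes)) on success, Ok(None) when no snapshot exists (404), and
/// Err(..) for anything else so the caller aborts rather than booting from a
/// partial or corrupt snapshot.
fn download_snapshot(url: &str) -> Result<Option<Vec<u8>>, String> {
    let response = reqwest::blocking::get(url).map_err(|err| err.to_string())?;
    match response.status() {
        StatusCode::OK => {
            let bytes = response.bytes().map_err(|err| err.to_string())?;
            Ok(Some(bytes.to_vec()))
        }
        StatusCode::NOT_FOUND => Ok(None),
        status => Err(format!("snapshot download failed: {}", status)),
    }
}
```
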
4904b6a532 CLI: Support offline and nonced stake subcommands (#7831) (#7861)
automerge
2020-01-17 13:10:38 -08:00
f80a657764 Nonce: Rename instructions with VerbNoun scheme (#7775) (#7778)
automerge
2020-01-17 10:48:33 -08:00
344c528b63 Reduce grace ticks, and ignore grace ticks for missing leaders (#7764) (#7779)
automerge
2020-01-16 19:57:41 -08:00
ee1300a671 Improve bench-tps keypair generation (#7723) (#7853)
automerge
2020-01-16 19:30:00 -08:00
6c2534a8be Add logging surrounding failure in get_slot_entries_with_shred_info() (#7846) (#7851)
automerge
2020-01-16 17:27:52 -08:00
28a979c7d3 Cargo.lock 2020-01-16 16:34:33 -07:00
d071674b03 ignore prost is part of move (#7848) (#7850)
automerge
2020-01-16 15:24:05 -08:00
8c5f676df0 Bump version to 0.22.4 2020-01-15 18:55:50 -07:00
6f098e0145 Fix Rpc inconsistencies (#7826)
* Update rpc account format: remove byte arrays

* Base58-encode pubkeys in getStoragePubkeysForSlot

* Update docs

(cherry picked from commit da165d6943)
2020-01-15 16:56:14 -07:00
f90bc20a8b CLI: Plumb stake authorities throughout (#7822) (#7830)
automerge
2020-01-15 15:29:47 -08:00
60074c9d36 Remove tuple from programNotification (#7819) (#7821)
automerge
2020-01-15 12:13:12 -08:00
5d9354fca7 Remove word pair from address generator seed string (#7802) (#7823)
* Remove word pair from address generator seed string
2020-01-15 14:48:21 -05:00
0ea09d75ed Add new genesis validators (#7814) (#7817)
automerge
2020-01-15 10:22:54 -08:00
f475a46df6 Prefer CUDA_HOME environment variable (#7813)
automerge
2020-01-15 08:51:35 -08:00
5681a24896 Remove tuples from JSON RPC responses (#7806) (#7811)
automerge
2020-01-15 00:32:03 -08:00
214aba6d2f Set bootstrap leader and net/ validator vote account commission to 100% (#7810)
automerge
2020-01-15 00:25:10 -08:00
fa551e5fc1 Fix cluster collapse due to no proper shifted read (#7797) (#7807)
automerge
2020-01-14 19:48:36 -08:00
d9a5a86d10 Add hash stats information to check hashes between validators (#7780)
automerge
2020-01-14 17:55:46 -07:00
83ad921ad6 Rename slot_hash => bank_hash in AccountsDB (#7579)
* Rename slot_hash => bank_hash in AccountsDB
2020-01-14 17:55:46 -07:00
5753c719bd Include shred version in gossip (#7800)
automerge
2020-01-14 14:30:10 -08:00
322e2e0c6a Improve KeypairFileNotFound error message (#7792) (#7794)
automerge
2020-01-14 13:05:20 -08:00
371fdc6495 Book: Drop since-fixed nonce known issue (#7789) (#7790)
automerge
2020-01-14 10:18:20 -08:00
d23f2b5754 Unignore advisories as affected ver. is corrected (#7730) (#7783)
automerge
2020-01-13 19:01:23 -08:00
a50a015542 Rename blocktree to blockstore (bp #7757) (#7771)
automerge
2020-01-13 16:15:22 -08:00
353cfb1980 Update getConfirmedBlock examples (#7772) (#7773)
automerge
2020-01-13 14:35:31 -08:00
79d737e760 Book: Update durable nonce proposal entry (#7694) (#7770)
automerge
2020-01-13 13:39:38 -08:00
8745034cec getConfirmedBlock: add encoding optional parameter (#7756) (#7767)
automerge
2020-01-12 22:27:09 -08:00
db979b30c4 Pick an RPC node at random to avoid getting stuck on a bad RPC node (#7763)
automerge
2020-01-12 20:24:03 -08:00
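
Together with "If a bad RPC node is selected try another one instead of aborting" above, node selection amounts to shuffle-then-probe. A minimal sketch with the rand crate, where `rpc_is_healthy` is a hypothetical stand-in for the real checks:

```rust
use rand::seq::SliceRandom;

/// Shuffle the candidates and take the first node that responds, rather than
/// always using the first entry and getting stuck when it is bad.
fn pick_rpc_node(nodes: &[String]) -> Option<&String> {
    let mut candidates: Vec<&String> = nodes.iter().collect();
    candidates.shuffle(&mut rand::thread_rng());
    candidates.into_iter().find(|node| rpc_is_healthy(node))
}

// Hypothetical health probe standing in for the real version/genesis checks.
fn rpc_is_healthy(_node: &str) -> bool {
    true
}
```
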
a92855c995 Manage durable nonce stored value in runtime (#7684) (#7760)
automerge
2020-01-10 17:11:47 -08:00
5b006eba57 Handle errors on replaying ledger properly (bp #7741) (#7755)
automerge
2020-01-10 15:17:54 -08:00
32a728d585 Clarify account creation error messages in CLI (bp #7719) (#7745)
automerge
2020-01-10 07:02:11 -08:00
1b3be91e3c Update http crate in bpf program to fix security vulnerability (#7735) (#7743)
automerge
2020-01-09 20:53:56 -08:00
2509002fe4 Print bank hash and hash inputs. (#7733) (#7734)
automerge
2020-01-09 17:13:31 -08:00
9c9a690d0d Correctly integrate buildkite with codecov (#7718) (#7727)
automerge
2020-01-09 13:45:27 -08:00
216cc34224 Update http crate to fix security vulnerability (bp #7725) (#7729)
automerge
2020-01-09 12:51:20 -08:00
71f1459ef9 Remove vote account from genesis validators (#7717)
automerge
2020-01-08 22:40:36 -08:00
f84bdb7d81 Fix rooted slot iterator (#7695) (#7714)
automerge
2020-01-08 13:23:55 -08:00
ed59c58a72 Account for stake held by the current node while waiting for the supermajority to join gossip (#7708)
automerge
2020-01-07 22:13:44 -08:00
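
The arithmetic behind this fix, as an illustrative sketch rather than the actual validator code: a node's own stake never appears in its own gossip table, so it must be added back before comparing against the 75% threshold raised in #7957 above.

```rust
/// Percentage of total stake currently online, counting this node's own
/// stake alongside the stake observed in gossip.
fn online_stake_percentage(my_stake: u64, gossip_stakes: &[u64], total_stake: u64) -> u64 {
    let online = my_stake + gossip_stakes.iter().sum::<u64>();
    100 * online / total_stake
}

fn main() {
    // 10 (own) + 40 + 30 (gossip) = 80 of 100 total, clearing the 75% bar.
    assert!(online_stake_percentage(10, &[40, 30], 100) >= 75);
}
```
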
de941f4074 validator: Add --wait-for-super-majority to facilitate asynchronous cluster restarts (bp #7701) (#7704)
automerge
2020-01-07 15:48:11 -08:00
b7fb050d09 Use commas to make a log message more readable (#7696)
automerge
2020-01-06 22:12:03 -08:00
9ee2e768d6 Bump version to 0.22.3 2020-01-06 08:17:56 -07:00
d6d3a3c3d8 getBlockTime: Fix RootedSlotIterator lowest root (#7681) (#7687)
automerge
2020-01-05 23:24:34 -08:00
3e229b248f Update getBlockTime rpc docs (#7688) (#7689)
automerge
2020-01-05 23:16:04 -08:00
0470072436 Cli: fund validator-info accounts with rent-exempt lamports
(cherry picked from commit 580ca36a62)
2020-01-04 23:20:38 -07:00
f74fa60c8b Revert "Add a stand-alone gossip node on the blockstreamer instance"
This reverts commit a217920561.

This commit is causing trouble when the TdS cluster is reset and
validators running an older genesis config are still present.
Occasionally an RPC URL from an older validator will be selected,
causing a new node to fail to boot.
2020-01-04 16:44:28 -07:00
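
A sketch of the guard implied by this revert: verify a candidate RPC node's genesis hash before trusting it, so a validator left over from before a cluster reset cannot be chosen as the boot-time RPC source. It assumes the `get_genesis_hash` accessor on `RpcClient` from the solana-client crate; the wiring around it is illustrative.

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::hash::Hash;

/// Reject RPC nodes whose genesis hash differs from the expected one.
fn rpc_node_is_usable(rpc_url: &str, expected_genesis_hash: &Hash) -> bool {
    let client = RpcClient::new(rpc_url.to_string());
    match client.get_genesis_hash() {
        Ok(genesis_hash) => genesis_hash == *expected_genesis_hash,
        Err(_) => false,
    }
}
```
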
c189767090 Bump version to 0.22.2 2020-01-04 14:17:42 -07:00
c82c18353d Don't panic if peer_addr() fails (#7678) (#7679)
automerge
2020-01-04 10:39:22 -08:00
da58a272dd Set default vote account commission to 100% (#7677)
automerge
2020-01-04 09:52:33 -08:00
001f5fbb6b bank: Prune older epoch stakes (bp #7668) (#7676)
automerge
2020-01-04 09:32:16 -08:00
63cd452ab5 Minor book fixes 2020-01-04 08:53:51 -07:00
6ee77e9754 Make validator timestamping more coincident, and increase timestamp sample range (#7673) (#7674)
automerge
2020-01-03 23:30:12 -08:00
cee22262fc Move nonce into system program (bp #7645) (#7671)
automerge
2020-01-03 18:33:40 -08:00
0d13352916 CLI: Fix default nonce authority resolution (#7657) (#7672)
automerge
2020-01-03 17:18:43 -08:00
78a9832f13 Measure heap usage while processing the ledger at validator startup (bp #7667) (#7670)
automerge
2020-01-03 15:43:11 -08:00
795cf14650 Publish bpf-sdk only in Linux build
(cherry picked from commit 078e7246ac)
2020-01-02 23:22:29 -07:00
8c112e8bc4 Publish bpf-sdk releases (#7655) (#7662)
automerge
2020-01-02 21:25:59 -08:00
8e6d213459 Revert "Remov dead code from TdS testnet manager config (#7414)"
This reverts commit 8920ac02f6.
2020-01-02 21:07:23 -07:00
b33df42640 net: Add a stand-alone gossip node on the blockstreamer instance (bp #7654) (#7659)
automerge
2020-01-02 17:26:40 -08:00
e0462e6933 Book - Document nonceable CLI subcommands (#7656) (#7660)
automerge
2020-01-02 17:14:08 -08:00
1f5e30a366 Add input validation for --creation-time/--lockup-date args (#7646) (#7647)
automerge
2019-12-30 22:39:51 -08:00
633eeb1586 Book: Document CLI durable nonce account management (#7595) (#7640)
automerge
2019-12-30 10:17:23 -08:00
c1148a6da3 Use lamports in genesis (#7631) (#7634)
automerge
2019-12-29 10:22:28 -08:00
713e86670d Use lamports in genesis (#7631) (#7633)
automerge
2019-12-29 10:17:16 -08:00
c004c726e7 Support nonced transactions in the CLI (#7624) (#7630)
automerge
2019-12-27 13:22:06 -08:00
5ffb8631e0 Account for rent (#7626) (#7627)
automerge
2019-12-24 18:41:22 -08:00
fd32a0280e Cargo.lock 2019-12-24 09:12:11 -07:00
e76f202eb3 Update gitbook-cage first 2019-12-23 18:17:43 -07:00
ba4558cb92 Update cargo files to 0.22.1 (#7620) 2019-12-23 19:42:33 -05:00
74e5577dd4 Move cleanup to a script so it doesn't kill itself (#7603) (#7619)
automerge
2019-12-23 15:23:47 -08:00
b878002cf5 Specify version for solana-sdk-macro to enable crate.io publishing (#7616) 2019-12-23 12:38:21 -08:00
f111250e3b Groom log messages (#7610) (#7614)
automerge
2019-12-23 10:29:15 -08:00
3d91f650db Fix key in genesis (#7585) (#7608)
automerge
2019-12-22 22:41:01 -08:00
91a88cda6a show-block-production: Rename "missed" to "skipped" as not all skipped slots are missed slots (#7599) (#7607)
(cherry picked from commit 419da18405)

Co-authored-by: Michael Vines <mvines@gmail.com>
2019-12-22 23:21:24 -07:00
2128c17ed0 Extend Stable CI job timeout to 60 minutes (#7604) (#7606)
automerge
2019-12-22 19:57:43 -08:00
7b819c9b74 MISSED -> SKIPPED 2019-12-22 10:19:12 -07:00
eec5c661af Remove stray SOLANA_CUDA=1 2019-12-22 10:09:26 -07:00
0398f6b87a ledger-tool: Add --all option to bounds, to display all non-empty slots (#7592) (#7598)
automerge
2019-12-20 21:30:47 -08:00
241 changed files with 10843 additions and 6074 deletions

Cargo.lock (generated): 604 lines changed

File diff suppressed because it is too large

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-metrics = { path = "../metrics", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-core = { path = "../core", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-metrics = { path = "../metrics", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.22.0" }
solana-ledger = { path = "../ledger", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-measure = { path = "../measure", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.10" }
solana-ledger = { path = "../ledger", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-runtime = { path = "../runtime", version = "0.22.10" }
solana-measure = { path = "../measure", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
rand = "0.6.5"
crossbeam-channel = "0.3"

View File

@@ -10,7 +10,7 @@ use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::PohRecorder;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
@@ -139,11 +139,11 @@ fn main() {
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@@ -300,5 +300,5 @@ fn main() {
sleep(Duration::from_secs(1));
debug!("waited for poh_service");
}
let _unused = Blocktree::destroy(&ledger_path);
let _unused = Blockstore::destroy(&ledger_path);
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.0" }
solana-genesis = { path = "../genesis", version = "0.22.0" }
solana-client = { path = "../client", version = "0.22.0" }
solana-faucet = { path = "../faucet", version = "0.22.0" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-metrics = { path = "../metrics", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-core = { path = "../core", version = "0.22.10" }
solana-genesis = { path = "../genesis", version = "0.22.10" }
solana-client = { path = "../client", version = "0.22.10" }
solana-faucet = { path = "../faucet", version = "0.22.10" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-metrics = { path = "../metrics", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-runtime = { path = "../runtime", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
untrusted = "0.7.0"
ws = "0.9.1"
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.10" }

View File

@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-core = { path = "../core", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.0" }
solana-genesis = { path = "../genesis", version = "0.22.0" }
solana-client = { path = "../client", version = "0.22.0" }
solana-faucet = { path = "../faucet", version = "0.22.0" }
solana-librapay = { path = "../programs/librapay", version = "0.22.0", optional = true }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-metrics = { path = "../metrics", version = "0.22.0" }
solana-measure = { path = "../measure", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-core = { path = "../core", version = "0.22.10" }
solana-genesis = { path = "../genesis", version = "0.22.10" }
solana-client = { path = "../client", version = "0.22.10" }
solana-faucet = { path = "../faucet", version = "0.22.10" }
solana-librapay = { path = "../programs/librapay", version = "0.22.10", optional = true }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-metrics = { path = "../metrics", version = "0.22.10" }
solana-measure = { path = "../measure", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-runtime = { path = "../runtime", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.10", optional = true }
[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.3.1"
solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.10" }
[features]
move = ["solana-librapay", "solana-move-loader-program"]

View File

@@ -21,8 +21,7 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{
cmp,
collections::VecDeque,
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
@@ -66,10 +65,9 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
}
pub fn do_bench_tps<T>(
clients: Vec<T>,
client: Arc<T>,
config: Config,
gen_keypairs: Vec<Keypair>,
keypair0_balance: u64,
libra_args: Option<LibraKeys>,
) -> u64
where
@@ -82,13 +80,9 @@ where
duration,
tx_count,
sustained,
num_lamports_per_account,
..
} = config;
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
assert!(gen_keypairs.len() >= 2 * tx_count);
@@ -115,20 +109,17 @@ where
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
info!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = clients
.iter()
.map(|client| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
})
.collect();
let sample_thread = {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
};
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
@@ -174,11 +165,10 @@ where
// generate and send transactions for the specified duration
let start = Instant::now();
let keypair_chunks = source_keypair_chunks.len() as u64;
let keypair_chunks = source_keypair_chunks.len();
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut chunk_index = 0;
while start.elapsed() < duration {
let chunk_index = (i % keypair_chunks) as usize;
generate_txs(
&shared_txs,
&recent_blockhash,
@@ -206,8 +196,11 @@ where
// transaction signatures even when blockhash is reused.
dest_keypair_chunks[chunk_index].rotate_left(1);
i += 1;
if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
// Move on to next chunk
chunk_index = (chunk_index + 1) % keypair_chunks;
// Switch directions after transferring for each "chunk"
if chunk_index == 0 {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
@@ -215,11 +208,9 @@ where
// Stop the sampling threads so it will collect the stats
exit_signal.store(true, Ordering::Relaxed);
info!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
info!(" join() failed with: {:?}", err);
}
info!("Waiting for sampler threads...");
if let Err(err) = sample_thread.join() {
info!(" join() failed with: {:?}", err);
}
// join the tx send threads
@@ -500,177 +491,218 @@ fn do_tx_transfers<T: Client>(
}
}
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
if client
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
return true;
match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
Ok(balance) => return balance >= amount,
Err(err) => error!("failed to get balance {:?}", err),
}
}
false
}
trait FundingTransactions<'a> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
);
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
fn sign(&mut self, blockhash: Hash);
fn send<T: Client>(&self, client: &Arc<T>);
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
}
impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
) {
self.make(to_fund);
let mut tries = 0;
while !self.is_empty() {
info!(
"{} {} each to {} accounts in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
to_lamports,
self.len() * MAX_SPENDS_PER_TX as usize,
self.len(),
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(&client);
// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));
self.verify(&client, to_lamports);
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
}
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) {
let mut make_txs = Measure::start("make_txs");
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
&k.pubkey(),
&t,
));
(*k, tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
);
self.extend(to_fund_txs);
}
fn sign(&mut self, blockhash: Hash) {
let mut sign_txs = Measure::start("sign_txs");
self.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
}
fn send<T: Client>(&self, client: &Arc<T>) {
let mut send_txs = Measure::start("send_txs");
self.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", self.len(), send_txs.as_us());
}
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
let starting_txs = self.len();
let verified_txs = Arc::new(AtomicUsize::new(0));
let too_many_failures = Arc::new(AtomicBool::new(false));
let loops = if starting_txs < 1000 { 3 } else { 1 };
// Only loop multiple times for small (quick) transaction batches
for _ in 0..loops {
let failed_verify = Arc::new(AtomicUsize::new(0));
let client = client.clone();
let verified_txs = &verified_txs;
let failed_verify = &failed_verify;
let too_many_failures = &too_many_failures;
let verified_set: HashSet<Pubkey> = self
.par_iter()
.filter_map(move |(k, tx)| {
if too_many_failures.load(Ordering::Relaxed) {
return None;
}
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
failed_verify.fetch_add(1, Ordering::Relaxed);
None
};
let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
if failed_verify > 100 && failed_verify > verified_txs {
too_many_failures.store(true, Ordering::Relaxed);
warn!(
"Too many failed transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}
if remaining_count % 100 == 0 {
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}
verified
})
.collect();
self.retain(|(k, _)| !verified_set.contains(&k.pubkey()));
if self.is_empty() {
break;
}
info!("Looping verifications");
let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
sleep(Duration::from_millis(100));
}
}
}
/// fund the dest keys by spending all of the source keys into MAX_SPENDS_PER_TX
/// new accounts on every iteration. This allows us to replay the transfers because
/// the source is either empty or full.
pub fn fund_keys<T: Client>(
client: &T,
pub fn fund_keys<T: 'static + Client + Send + Sync>(
client: Arc<T>,
source: &Keypair,
dests: &[Keypair],
total: u64,
max_fee: u64,
mut extra: u64,
lamports_per_account: u64,
) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
info!(
"funding keys {} with lamports: {:?} total: {}",
dests.len(),
client.get_balance(&source.pubkey()),
total
);
while !notfunded.is_empty() {
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
let mut to_fund = vec![];
info!("creating from... {}", funded.len());
let mut build_to_fund = Measure::start("build_to_fund");
for f in &mut funded {
let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
if max_units == 0 {
break;
}
let start = notfunded.len() - max_units as usize;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
.collect();
notfunded[start..]
.iter()
.for_each(|k| new_funded.push((k, per_unit)));
notfunded.truncate(start);
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
let mut funded: Vec<&Keypair> = vec![source];
let mut funded_funds = total;
let mut not_funded: Vec<&Keypair> = dests.iter().collect();
while !not_funded.is_empty() {
// Build to fund list and prepare funding sources for next iteration
let mut new_funded: Vec<&Keypair> = vec![];
let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![];
let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX;
for f in funded {
let start = not_funded.len() - MAX_SPENDS_PER_TX as usize;
let dests: Vec<_> = not_funded.drain(start..).collect();
let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect();
to_fund.push((f, spends));
new_funded.extend(dests.into_iter());
}
build_to_fund.stop();
debug!("build to_fund vec: {}us", build_to_fund.as_us());
// try to transfer a "few" at a time with recent blockhash
// assume 4MB network buffers, and 512 byte packets
const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
let mut tries = 0;
let mut make_txs = Measure::start("make_txs");
// this set of transactions just initializes us for bookkeeping
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let tx = Transaction::new_unsigned_instructions(
system_instruction::transfer_many(&k.pubkey(), &m),
);
(k.clone(), tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund(
&client,
chunk,
to_lamports,
);
let amount = chunk[0].1[0].1;
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.message().instructions.len());
info!(
"{} {} to {} in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
amount,
receivers,
to_fund_txs.len(),
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
// re-sign retained to_fund_txes with updated blockhash
let mut sign_txs = Measure::start("sign_txs");
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());
let mut send_txs = Measure::start("send_txs");
to_fund_txs.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());
let mut verify_txs = Measure::start("verify_txs");
let mut starting_txs = to_fund_txs.len();
let mut verified_txs = 0;
let mut failed_verify = 0;
// Only loop multiple times for small (quick) transaction batches
for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
let mut timer = Instant::now();
to_fund_txs.retain(|(_, tx)| {
if timer.elapsed() >= Duration::from_secs(5) {
if failed_verify > 0 {
debug!("total txs failed verify: {}", failed_verify);
}
info!(
"Verifying transfers... {} remaining",
starting_txs - verified_txs
);
timer = Instant::now();
}
let verified = verify_funding_transfer(client, &tx, amount);
if verified {
verified_txs += 1;
} else {
failed_verify += 1;
}
!verified
});
if to_fund_txs.is_empty() {
break;
}
debug!("Looping verifications");
info!("Verifying transfers... {} remaining", to_fund_txs.len());
sleep(Duration::from_millis(100));
}
starting_txs -= to_fund_txs.len();
verify_txs.stop();
debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
});
info!("funded: {} left: {}", new_funded.len(), notfunded.len());
info!("funded: {} left: {}", new_funded.len(), not_funded.len());
funded = new_funded;
funded_funds = to_lamports;
}
}
@@ -678,14 +710,14 @@ pub fn airdrop_lamports<T: Client>(
client: &T,
faucet_addr: &SocketAddr,
id: &Keypair,
tx_count: u64,
desired_balance: u64,
) -> Result<()> {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(starting_balance);
info!("starting balance {}", starting_balance);
if starting_balance < tx_count {
let airdrop_amount = tx_count - starting_balance;
if starting_balance < desired_balance {
let airdrop_amount = desired_balance - starting_balance;
info!(
"Airdropping {:?} lamports from {} for {}",
airdrop_amount,
@@ -810,17 +842,6 @@ fn compute_and_report_stats(
);
}
// First transfer 2/3 of the lamports to the dest accounts
// then ping-pong 1/3 of the lamports back to the other account
// this leaves 1/3 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
return false;
}
i % (keypair_chunks * num_lamports_per_account / 3) == 0
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
@@ -1004,23 +1025,25 @@ fn fund_move_keys<T: Client>(
info!("done funding keys, took {} ms", funding_time.as_ms());
}
pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
client: Arc<T>,
faucet_addr: Option<SocketAddr>,
funding_key: &Keypair,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume.
// This logic is to prevent lamport loss on repeated solana-bench-tps executions
let last_keypair_balance = client
.get_balance(&keypairs[keypair_count - 1].pubkey())
.unwrap_or(0);
// Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
let first_key = keypairs[0].pubkey();
let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);
// Sample the last keypair, to check if funding was already completed
let last_key = keypairs[keypair_count - 1].pubkey();
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
#[cfg(feature = "move")]
let mut move_keypairs_ret = None;
@@ -1028,31 +1051,38 @@ pub fn generate_and_fund_keypairs<T: Client>(
#[cfg(not(feature = "move"))]
let move_keypairs_ret = None;
if lamports_per_account > last_keypair_balance {
let (_blockhash, fee_calculator) = get_recent_blockhash(client);
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
// start another bench-tps run without re-funding all of the keypairs, check if the
// keypairs still have at least 80% of the expected funds. That should be enough to
// pay for the transaction fees in a new run.
let enough_lamports = 8 * lamports_per_account / 10;
if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
let (_blockhash, fee_calculator) = get_recent_blockhash(client.as_ref());
let max_fee = fee_calculator.max_lamports_per_signature;
let extra_fees = extra * max_fee;
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
let mut total = lamports_per_account * total_keypairs + extra_fees;
if use_move {
total *= 3;
}
info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
account_desired_balance, total
);
let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
info!(
"Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}",
funding_key_balance, max_fee, lamports_per_account, extra, total
);
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
}
#[cfg(feature = "move")]
{
if use_move {
let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client);
let libra_pay_program_id = upload_payment_script(&funding_key, client);
let libra_genesis_keypair =
create_genesis(&funding_key, client.as_ref(), 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
@@ -1060,7 +1090,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client,
client.as_ref(),
funding_key,
&move_keypairs,
total / 3,
@@ -1085,15 +1115,15 @@ pub fn generate_and_fund_keypairs<T: Client>(
funding_key,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
extra,
max_fee,
lamports_per_account,
);
}
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(keypair_count);
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
Ok((keypairs, move_keypairs_ret))
}
#[cfg(test)]
@@ -1105,30 +1135,11 @@ mod tests {
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_config::create_genesis_config;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(30, 1, 0), false);
assert_eq!(should_switch_directions(30, 1, 1), false);
assert_eq!(should_switch_directions(30, 1, 20), true);
assert_eq!(should_switch_directions(30, 1, 21), false);
assert_eq!(should_switch_directions(30, 1, 30), true);
assert_eq!(should_switch_directions(30, 1, 90), true);
assert_eq!(should_switch_directions(30, 1, 91), false);
assert_eq!(should_switch_directions(30, 2, 0), false);
assert_eq!(should_switch_directions(30, 2, 1), false);
assert_eq!(should_switch_directions(30, 2, 20), false);
assert_eq!(should_switch_directions(30, 2, 40), true);
assert_eq!(should_switch_directions(30, 2, 90), false);
assert_eq!(should_switch_directions(30, 2, 100), true);
assert_eq!(should_switch_directions(30, 2, 101), false);
}
#[test]
fn test_bench_tps_bank_client() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let clients = vec![BankClient::new(bank)];
let client = Arc::new(BankClient::new(bank));
let mut config = Config::default();
config.id = id;
@@ -1136,23 +1147,24 @@ mod tests {
config.duration = Duration::from_secs(5);
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
.unwrap();
do_bench_tps(clients, config, keypairs, 0, None);
do_bench_tps(client, config, keypairs, None);
}
#[test]
fn test_bench_tps_fund_keys() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let client = Arc::new(BankClient::new(bank));
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
for kp in &keypairs {
assert_eq!(
@@ -1170,23 +1182,16 @@ mod tests {
let fee_calculator = FeeCalculator::new(11, 0);
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let client = Arc::new(BankClient::new(bank));
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let max_fee = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
}

View File

@@ -6,7 +6,7 @@ use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit};
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
@@ -82,12 +82,12 @@ fn main() {
);
exit(1);
}
client
Arc::new(client)
} else {
get_client(&nodes)
Arc::new(get_client(&nodes))
};
let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
let path = Path::new(&client_ids_and_stake_file);
let file = File::open(path).unwrap();
@@ -117,10 +117,10 @@ fn main() {
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, None, last_balance)
(keypairs, None)
} else {
generate_and_fund_keypairs(
&client,
client.clone(),
Some(*faucet_addr),
&id,
keypair_count,
@@ -133,11 +133,5 @@ fn main() {
})
};
do_bench_tps(
vec![client],
cli_config,
keypairs,
keypair_balance,
move_keypairs,
);
do_bench_tps(client, cli_config, keypairs, move_keypairs);
}

View File

@@ -9,7 +9,7 @@ use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel;
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;
fn test_bench_tps_local_cluster(config: Config) {
@@ -36,10 +36,10 @@ fn test_bench_tps_local_cluster(config: Config) {
100_000_000,
);
let client = create_client(
let client = Arc::new(create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
VALIDATOR_PORT_RANGE,
);
));
let (addr_sender, addr_receiver) = channel();
run_local_faucet(faucet_keypair, addr_sender, None);
@@ -48,8 +48,8 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
client.clone(),
Some(faucet_addr),
&config.id,
keypair_count,
@@ -58,7 +58,7 @@ fn test_bench_tps_local_cluster(config: Config) {
)
.unwrap();
let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
let _total = do_bench_tps(client, config, keypairs, move_keypairs);
#[cfg(not(debug_assertions))]
assert!(_total > 100);

View File

@@ -18,9 +18,9 @@
| | `-------` `--------` `--+---------` | | | | |
| | ^ ^ | | | `------------` |
| | | | v | | |
| | | .--+--------. | | |
| | | | Blocktree | | | |
| | | `-----------` | | .------------. |
| | | .--+---------. | | |
| | | | Blockstore | | | |
| | | `------------` | | .------------. |
| | | ^ | | | | |
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |

View File

@@ -21,7 +21,7 @@
* [Anatomy of a Validator](validator/README.md)
* [TPU](validator/tpu.md)
* [TVU](validator/tvu/README.md)
* [Blocktree](validator/tvu/blocktree.md)
* [Blockstore](validator/tvu/blockstore.md)
* [Gossip Service](validator/gossip.md)
* [The Runtime](validator/runtime.md)
* [Anatomy of a Transaction](transaction.md)
@@ -39,6 +39,7 @@
* [Installation](paper-wallet/installation.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [Offline Signing](offline-signing/README.md)
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [API Reference](api-reference/README.md)
* [Transaction](api-reference/transaction-api.md)
* [Instruction](api-reference/instruction-api.md)
@@ -58,7 +59,7 @@
* [Bankless Leader](proposals/bankless-leader.md)
* [Slashing](proposals/slashing.md)
* [Implemented Design Proposals](implemented-proposals/README.md)
* [Blocktree](implemented-proposals/blocktree.md)
* [Blockstore](implemented-proposals/blockstore.md)
* [Cluster Software Installation and Updates](implemented-proposals/installer.md)
* [Cluster Economics](implemented-proposals/ed_overview/README.md)
* [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)

View File

@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.22.0 [channel=unknown commit=unknown]
solana-cli 0.22.10 [channel=unknown commit=unknown]
Blockchain, Rebuilt for Scale
USAGE:
@@ -201,6 +201,7 @@ OPTIONS:
SUBCOMMANDS:
address Get your public key
airdrop Request lamports
authorize-nonce-account Assign account authority to a new entity
balance Get your balance
cancel Cancel a transfer
catchup Wait for a validator to catch up to the cluster
@@ -305,6 +306,38 @@ ARGS:
<UNIT> Specify unit to use for request and balance display [possible values: SOL, lamports]
```
#### solana-authorize-nonce-account
```text
solana-authorize-nonce-account
Assign account authority to a new entity
USAGE:
solana authorize-nonce-account [FLAGS] [OPTIONS] <NONCE_ACCOUNT> <NEW_AUTHORITY_PUBKEY>
FLAGS:
-h, --help Prints help information
--skip-seed-phrase-validation Skip validation of seed phrases. Use this if your phrase does not use the BIP39
official English word list
-V, --version Prints version information
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE_ACCOUNT> Address of the nonce account
<NEW_AUTHORITY_PUBKEY> Account to be granted authority of the nonce account
```
#### solana-balance
```text
solana-balance
@@ -664,14 +697,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR or PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account to be deactivated.
@@ -694,14 +738,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR or PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account to delegate
@@ -967,13 +1022,17 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR> Specify nonce authority if different from account
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE ACCOUNT> Address of the nonce account
@@ -997,17 +1056,27 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--after <DATETIME> A timestamp after which transaction will execute
--require-timestamp-from <PUBKEY> Require timestamp from this third party
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the lamports
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--after <DATETIME> A timestamp after which transaction will execute
--require-timestamp-from <PUBKEY> Require timestamp from this third party
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the lamports
ARGS:
<TO PUBKEY> The pubkey of recipient
@ -1383,12 +1452,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR or PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account in which to set the authorized staker
@ -1411,12 +1493,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--withdraw-authority <KEYPAIR or PUBKEY> Public key of authorized withdrawer (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account in which to set the authorized withdrawer
@ -1582,13 +1677,17 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR> Specify nonce authority if different from account
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE ACCOUNT> Nonce account from which to withdraw
@ -1613,12 +1712,15 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--withdraw-authority <KEYPAIR or PUBKEY> Public key of authorized withdrawer (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account from which to withdraw


@ -40,6 +40,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
* [getVersion](jsonrpc-api.md#getversion)
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [startSubscriptionChannel](jsonrpc-api.md#startsubscriptionchannel)
@ -146,8 +147,8 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
* `lamports`, number of lamports assigned to this account, as a u64
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `owner`, base-58 encoded pubkey of the program this account has been assigned to
* `data`, base-58 encoded data associated with the account
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
#### Example:
@ -157,7 +158,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"}},"id":1}
```
### getBalance
@ -193,13 +194,13 @@ Returns commitment for particular block
#### Results:
The result field will be an array with two fields:
The result field will be a JSON object containing:
* Commitment
* `commitment` - commitment, comprising either:
* `null` - Unknown block
* `object` - BlockCommitment
* `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* 'integer' - total active stake, in lamports, of the current epoch
* `totalStake` - total active stake, in lamports, of the current epoch
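One practical reading of this structure is the total stake, in lamports, that has voted on the block at any lockout depth. A small sketch, assuming `jq` is installed (the slot parameter is illustrative):
```bash
# Sum the voted stake recorded across all lockout depths for slot 5;
# prints 0 when commitment is null (unknown block)
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[5]}' \
  http://localhost:8899 | jq '.result.commitment | if . == null then 0 else add end'
```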
#### Example:
@ -213,9 +214,17 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getBlockTime
Returns the estimated production time of a block. Validators report their UTC
time to the ledger on a regular interval. A block's time is calculated as an
offset from the median value of the most recent validator time report.
Returns the estimated production time of a block.
Each validator reports their UTC time to the ledger on a regular interval by
intermittently adding a timestamp to a Vote for a particular block. A requested
block's time is calculated from the stake-weighted mean of the Vote timestamps
in a set of recent blocks recorded on the ledger.
Nodes that are booting from snapshot or limiting ledger size (by purging old
slots) will return null timestamps for blocks below their lowest root +
`TIMESTAMP_SLOT_RANGE`. Users interested in having this historical data must
query a node that is built from genesis and retains the entire ledger.
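A request sketch for this method (the slot parameter and the returned Unix timestamp are illustrative; a node that has purged the requested slot returns `null`):
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockTime","params":[5]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1574721591,"id":1}
```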
#### Parameters:
@ -270,17 +279,18 @@ Returns identity and transaction information about a confirmed block in the ledg
#### Parameters:
* `integer` - slot, as u64 integer
* `string` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
#### Results:
The result field will be an object with the following fields:
* `blockhash` - the blockhash of this block
* `previousBlockhash` - the blockhash of this block's parent
* `blockhash` - the blockhash of this block, as base-58 encoded string
* `previousBlockhash` - the blockhash of this block's parent, as base-58 encoded string
* `parentSlot` - the slot index of this block's parent
* `transactions` - an array of tuples containing:
* [Transaction](transaction-api.md) object, in JSON format
* Transaction status object, containing:
* `transactions` - an array of JSON objects containing:
* `transaction` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
* `meta` - transaction status metadata object, containing `null` or:
* `status` - Transaction status:
* `"Ok": null` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
@ -292,10 +302,16 @@ The result field will be an object with the following fields:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null},"preBalances":[499998337500,15298080,1,1,1],"postBalances":[499998237500,15298080,1,1,1]}]]},"id":1}
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[[{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[["81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ",{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
```
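A small post-processing sketch, assuming `jq` is installed and matching the tuple layout shown in the JSON-encoded example above, that pulls each transaction's fee and status out of the response:
```bash
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899 \
  | jq '.result.transactions[] | {fee: .[1].fee, status: .[1].status}'
```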
### getConfirmedBlocks
@ -361,11 +377,11 @@ None
The result field will be an object with the following fields:
* `slots_per_epoch`, the maximum number of slots in each epoch
* `leader_schedule_slot_offset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
* `slotsPerEpoch`, the maximum number of slots in each epoch
* `leaderScheduleSlotOffset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
* `warmup`, whether epochs start short and grow
* `first_normal_epoch`, first normal-length epoch, log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
* `first_normal_slot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)
* `firstNormalEpoch`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
* `firstNormalSlot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1)
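As a sanity check on these formulas, the example response below can be reproduced with shell arithmetic, assuming `MINIMUM_SLOTS_PER_EPOCH` is 32 (an SDK constant, not part of the RPC response):
```bash
MINIMUM_SLOTS_PER_EPOCH=32
SLOTS_PER_EPOCH=8192
# integer log2 via successive right shifts
log2() { local n=$1 r=0; while (( n > 1 )); do (( n >>= 1, r += 1 )); done; echo "$r"; }
FIRST_NORMAL_EPOCH=$(( $(log2 $SLOTS_PER_EPOCH) - $(log2 $MINIMUM_SLOTS_PER_EPOCH) ))  # 13 - 5 = 8
FIRST_NORMAL_SLOT=$(( MINIMUM_SLOTS_PER_EPOCH * (2 ** FIRST_NORMAL_EPOCH - 1) ))       # 32 * 255 = 8160
echo "firstNormalEpoch=$FIRST_NORMAL_EPOCH firstNormalSlot=$FIRST_NORMAL_SLOT"
```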
#### Example:
@ -374,7 +390,7 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochSchedule"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"first_normal_epoch":8,"first_normal_slot":8160,"leader_schedule_slot_offset":8192,"slots_per_epoch":8192,"warmup":true},"id":1}
{"jsonrpc":"2.0","result":{"firstNormalEpoch":8,"firstNormalSlot":8160,"leaderScheduleSlotOffset":8192,"slotsPerEpoch":8192,"warmup":true},"id":1}
```
### getGenesisHash
@ -485,18 +501,18 @@ The result field will be an array of arrays. Each sub array will contain:
* `string` - the account Pubkey as base-58 encoded string and a JSON object, with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a u64
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `owner`, base-58 encoded pubkey of the program this account has been assigned to
* `data`, base-58 encoded data associated with the account
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":1,"data":"", ["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":10,"data":[]]]},"id":1}
```
### getRecentBlockhash
@ -509,11 +525,11 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used
#### Results:
An RpcResponse containing an array consisting of a string blockhash and FeeCalculator JSON object.
An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object.
* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to an array including:
* `string` - a Hash as base-58 encoded string
* `FeeCalculator object` - the fee schedule for this block hash
* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to a JSON object including:
* `blockhash` - a Hash as base-58 encoded string
* `feeCalculator` - FeeCalculator object, the fee schedule for this block hash
#### Example:
@ -522,7 +538,7 @@ An RpcResponse containing an array consisting of a string blockhash and FeeCalcu
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}]},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","feeCalculator":{"lamportsPerSignature": 0}}},"id":1}
```
### getSignatureStatus
@ -570,7 +586,7 @@ Returns the current slot the node is processing
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1234","id":1}
{"jsonrpc":"2.0","result":1234,"id":1}
```
### getSlotLeader
@ -613,7 +629,7 @@ Returns the current storage segment size in terms of slots
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1024","id":1}
{"jsonrpc":"2.0","result":1024,"id":1}
```
### getStorageTurn
@ -626,10 +642,10 @@ None
#### Results:
An array consisting of
A JSON object consisting of
* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `u64` - the current storage turn slot
* `blockhash` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `slot` - the current storage turn slot
#### Example:
@ -637,7 +653,7 @@ An array consisting of
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": "2048"},"id":1}
```
### getStorageTurnRate
@ -658,7 +674,7 @@ None
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1024","id":1}
{"jsonrpc":"2.0","result":1024,"id":1}
```
### getTransactionCount
@ -757,6 +773,29 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
```
### minimumLedgerSlot
Returns the lowest slot that the node has information about in its ledger. This
value may increase over time if the node is configured to purge older ledger data.
#### Parameters:
None
#### Results:
* `u64` - Minimum ledger slot
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"minimumLedgerSlot"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1234,"id":1}
```
### requestAirdrop
Requests an airdrop of lamports to a Pubkey
@ -855,7 +894,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"},"subscription":0}}
```
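A sketch of opening the subscription that produces a notification like the one above, assuming the node's PubSub endpoint is on the default port 8900 and `wscat` is installed (the pubkey is illustrative):
```bash
wscat -c ws://localhost:8900
# then send the subscription request on the open socket:
# {"jsonrpc":"2.0","id":1,"method":"accountSubscribe","params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
```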
### accountUnsubscribe
@ -913,7 +952,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{"pubkey": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","account":{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd"}},"subscription":0}}
```
### programUnsubscribe


@ -1,6 +1,6 @@
# Managing Forks
The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the validator interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blockstore_. When the validator interprets the blockstore, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
A validator selects a fork by submitting a vote to a slot leader on that fork. The vote commits the validator for a duration of time called a _lockout period_. The validator is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the validator has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs an operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a validator votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the validator will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint.
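A rough illustration of the lockout doubling described above (a sketch of the idealized schedule, not consensus code):
```bash
# After n consecutive votes on one fork, the oldest vote is locked out for 2^n slots;
# at 32 votes the lockout reaches max lockout.
for n in 1 2 4 8 16 32; do
  echo "votes=$n lockout=$(( 2 ** n )) slots"
done
```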


@ -1,16 +1,16 @@
# Blocktree
# Blockstore
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
Shreds are moved to a fork-able key space keyed by the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without choosing a priori which fork to follow, which Entries to persist or when to persist them.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
## Functionalities of Blocktree
## Functionalities of Blockstore
1. Persistence: the Blocktree lives in the front of the nodes verification
1. Persistence: the Blockstore lives in the front of the node's verification
pipeline, right behind network receive and signature verification. If the
@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out
2. Repair: repair is the same as window repair above, but able to serve any
shred that's been received. Blocktree stores shreds with signatures,
shred that's been received. Blockstore stores shreds with signatures,
preserving the chain of origination.
3. Forks: Blocktree supports random access of shreds, so can support a
3. Forks: Blockstore supports random access of shreds, so can support a
validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the Blocktree can be replayed by
4. Restart: with proper pruning/culling, the Blockstore can be replayed by
ordered enumeration of entries from slot 0. The logic of the replay stage
\(i.e. dealing with forks\) will have to be used for the most recent entries in
the Blocktree.
the Blockstore.
## Blocktree Design
## Blockstore Design
1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
* `slot_index` - The index of this slot
* `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
* `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out
is\_rooted\(0\)
is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
## Blocktree APIs
## Blockstore APIs
The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
## Interfacing with Bank
@ -80,11 +80,11 @@ The bank exposes to replay stage:
be able to be chained below this vote
Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
## Pruning Blocktree
## Pruning Blockstore
Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote for can be pruned, expunged.
Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned and expunged.
Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.


@ -26,10 +26,7 @@ account data. A transaction is now constructed in the normal way, but with the
following additional requirements:
1) The durable nonce value is used in the `recent_blockhash` field
2) A `Nonce` instruction is issued (first?)
3) The appropriate transaction flag is set, signaling that the usual
hash age check should be skipped and the previous requirements enforced. This
may be unnecessary, see [Runtime Support](#runtime-support) below
2) An `AdvanceNonceAccount` instruction is the first issued in the transaction
### Contract Mechanics
@ -66,21 +63,43 @@ WithdrawInstruction(to, lamports)
success
```
A client wishing to use this feature starts by creating a nonce account and
depositing sufficient lamports as to make it rent-exempt. The resultant account
will be in the `Uninitialized` state with no stored hash and thus unusable.
A client wishing to use this feature starts by creating a nonce account under
the system program. This account will be in the `Uninitialized` state with no
stored hash, and thus unusable.
The `Nonce` instruction is used to request that a new nonce be stored for the
calling account. The first `Nonce` instruction run on a newly created account
will drive the account's state to `Initialized`. As such, a `Nonce` instruction
MUST be issued before the account can be used.
To initialize a newly created account, an `InitializeNonceAccount` instruction must be
issued. This instruction takes one parameter, the `Pubkey` of the account's
[authority](../offline-signing/durable-nonce.md#nonce-authority). Nonce accounts
must be [rent-exempt](rent.md#two-tiered-rent-regime) to meet the data-persistence
requirements of the feature, and as such, require that sufficient lamports be
deposited before they can be initialized. Upon successful initialization, the
cluster's most recent blockhash is stored along with the specified nonce authority
`Pubkey`.
To discard a `NonceAccount`, the client should issue a `Withdraw` instruction
which withdraws all lamports, leaving a zero balance and making the account
eligible for deletion.
The `AdvanceNonceAccount` instruction is used to manage the account's stored nonce
value. It stores the cluster's most recent blockhash in the account's state data,
failing if that matches the value already stored there. This check prevents
replaying transactions within the same block.
`Nonce` and `Withdraw` instructions each will only succeed if the stored
blockhash is no longer resident in sysvar.recent_blockhashes.
Due to nonce accounts' [rent-exempt](rent.md#two-tiered-rent-regime) requirement,
a custom withdraw instruction is used to move funds out of the account.
The `WithdrawNonceAccount` instruction takes a single argument, lamports to withdraw,
and enforces rent-exemption by preventing the account's balance from falling
below the rent-exempt minimum. An exception to this check is if the final balance
would be zero lamports, which makes the account eligible for deletion. This
account closure detail has an additional requirement that the stored nonce value
must not match the cluster's most recent blockhash, as per `AdvanceNonceAccount`.
The account's [nonce authority](../offline-signing/durable-nonce.md#nonce-authority)
can be changed using the `AuthorizeNonceAccount` instruction. It takes one parameter,
the `Pubkey` of the new authority. Executing this instruction grants full
control over the account and its balance to the new authority.
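Mapped onto the CLI subcommands documented elsewhere in this book (amounts illustrative), the instruction lifecycle described above looks roughly like:
```bash
solana-keygen new -o nonce-keypair.json
solana create-nonce-account nonce-keypair.json 1 SOL                  # InitializeNonceAccount
solana new-nonce nonce-keypair.json                                   # AdvanceNonceAccount
solana authorize-nonce-account nonce-keypair.json new-authority.json  # AuthorizeNonceAccount
solana withdraw-from-nonce-account nonce-keypair.json ~/.config/solana/id.json 1 SOL  # WithdrawNonceAccount; a full withdrawal closes the account
```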
{% hint style="info" %}
`AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current
[nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the
account to sign the transaction.
{% endhint %}
### Runtime Support
@ -89,25 +108,11 @@ an extant `recent_blockhash` on the transaction and prevent fee theft via
failed transaction replay, runtime modifications are necessary.
Any transaction failing the usual `check_hash_age` validation will be tested
for a Durable Transaction Nonce. The specifics of this test are undecided, some
options:
for a Durable Transaction Nonce. This is signaled by including an `AdvanceNonceAccount`
instruction as the first instruction in the transaction.
1) Require that the `Nonce` instruction be the first in the transaction
* + No ABI changes
* + Fast and simple
* - Sets a precedent that may lead to incompatible instruction combinations
2) Blind search for a `Nonce` instruction over all instructions in the
transaction
* + No ABI changes
* - Potentially slow
3) [2], but guarded by a transaction flag
* - ABI changes
* - Wire size increase
* + We'll probably end up with some sort of flags eventually anyway
Current prototyping will use [1]. If it is determined that a Durable Transaction
Nonce is in use, the runtime will take the following actions to validate the
transaction:
If the runtime determines that a Durable Transaction Nonce is in use, it will
take the following additional actions to validate the transaction:
1) The `NonceAccount` specified in the `Nonce` instruction is loaded.
2) The `NonceState` is deserialized from the `NonceAccount`'s data field and
@ -118,6 +123,11 @@ one specified in the transaction's `recent_blockhash` field.
If all three of the above checks succeed, the transaction is allowed to continue
validation.
### Open Questions
* Should this feature be restricted in the number of uses per transaction?
Since transactions that fail with an `InstructionError` are charged a fee and
changes to their state rolled back, there is an opportunity for fee theft if an
`AdvanceNonceAccount` instruction is reverted. A malicious validator could replay the
failed transaction until the stored nonce is successfully advanced. Runtime
changes prevent this behavior. When a durable nonce transaction fails with an
`InstructionError` aside from the `AdvanceNonceAccount` instruction, the nonce account
is rolled back to its pre-execution state as usual. Then the runtime advances
its nonce value and stores the advanced nonce account as if it succeeded.


@ -8,32 +8,32 @@ The RepairService is in charge of retrieving missing shreds that failed to be de
1\) Validators can fail to receive particular shreds due to network failures
2\) Consider a scenario where blocktree contains the set of slots {1, 3, 5}. Then Blocktree receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -&gt; 7 is stored in blocktree. However, there is no way to chain these slots to any of the existing banks in Blocktree, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
2\) Consider a scenario where blockstore contains the set of slots {1, 3, 5}. Then Blockstore receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -&gt; 7 is stored in blockstore. However, there is no way to chain these slots to any of the existing banks in Blockstore, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
3\) Validators that find themselves behind the cluster by an entire epoch struggle/fail to catch up because they do not have a leader schedule for future epochs. If nodes were to blindly accept repair shreds in these future epochs, this exposes nodes to spam.
## Repair Protocols
The repair protocol makes best attempts to progress the forking structure of Blocktree.
The repair protocol makes best attempts to progress the forking structure of Blockstore.
The different protocol strategies to address the above challenges:
1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blocktree tracks the latest root slot. RepairService will then periodically iterate every fork in blocktree starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration.
1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blockstore tracks the latest root slot. RepairService will then periodically iterate every fork in blockstore starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration.
Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this protocol is to discover the chaining relationship of "orphan" slots that do not currently chain to any known fork.
* Blocktree will track the set of "orphan" slots in a separate column family.
* RepairService will periodically make `RequestOrphan` requests for each of the orphans in blocktree.
* Blockstore will track the set of "orphan" slots in a separate column family.
* RepairService will periodically make `RequestOrphan` requests for each of the orphans in blockstore.
`RequestOrphan(orphan)` request - `orphan` is the orphan slot that the requestor wants to know the parents of
`RequestOrphan(orphan)` response - The highest shreds for each of the first `N` parents of the requested `orphan`
On receiving the responses `p`, where `p` is some shred in a parent slot, validators will:
* Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
* Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't already exist.
* If `p.slot` does exist, update the parent of `p` based on `parents`
Note: that once these empty slots are added to blocktree, the `Shred Repair` protocol should attempt to fill those slots.
Note that once these empty slots are added to blockstore, the `Shred Repair` protocol should attempt to fill those slots.
Note: Validators will only accept responses containing shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
3. Repairmen \(Addresses Challenge \#3\): This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every shred that they receive against a known leader schedule.
@ -45,5 +45,5 @@ The different protocol strategies to address the above challenges:
Observers of this gossip message with higher epochs \(repairmen\) send shreds to catch the lagging node up with the rest of the cluster. The repairmen are responsible for sending the slots within the epochs that are confirmed by the advertised `root` in gossip. The repairmen divide the responsibility of sending each of the missing slots in these epochs based on a random seed \(simple shred.index iteration by N, seeded with the repairman's node\_pubkey\). Ideally, each repairman in an N node cluster \(N nodes whose epochs are higher than that of the repairee\) sends 1/N of the missing shreds. Both data and coding shreds for missing slots are sent. Repairmen do not send shreds again to the same validator until they see the message in gossip updated, at which point they perform another iteration of this protocol.
Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blocktree and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blocktree, which holds the latest root.
Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blockstore and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blockstore, which holds the latest root.


@ -52,5 +52,5 @@ Solana's trustless sense of time and ordering provided by its PoH data structure
As discussed in the [Economic Design](../implemented-proposals/ed_overview/) section, annual validator interest rates are to be specified as a function of total percentage of circulating supply that has been staked. The cluster rewards validators who are online and actively participating in the validation process throughout the entirety of their _validation period_. For validators that go offline/fail to validate transactions during this period, their annual reward is effectively reduced.
Similarly, we may consider an algorithmic reduction in a validator's active amount staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered active \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a super-majority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the active amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.
Similarly, we may consider an algorithmic reduction in a validator's active staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered active \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the active amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.


@ -2,7 +2,7 @@
This design describes Solana's _Tower BFT_ algorithm. It addresses the following problems:
* Some forks may not end up accepted by the super-majority of the cluster, and voters need to recover from voting on such forks.
* Some forks may not end up accepted by the supermajority of the cluster, and voters need to recover from voting on such forks.
* Many forks may be votable by different voters, and each voter may see a different set of votable forks. The selected forks should eventually converge for the cluster.
* Reward based votes have an associated risk. Voters should have the ability to configure how much risk they take on.
* The [cost of rollback](tower-bft.md#cost-of-rollback) needs to be computable. It is important to clients that rely on some measurable form of Consistency. The costs to break consistency need to be computable, and increase super-linearly for older votes.


@ -84,7 +84,7 @@ let timestamp_slot = floor(current_slot / timestamp_interval);
```
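A worked check of the formula above (the interval value here is illustrative, not taken from the source):
```bash
current_slot=10000
timestamp_interval=4096
# floor division: timestamp_slot = floor(10000 / 4096) = 2
echo $(( current_slot / timestamp_interval ))
```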
Then the validator needs to gather all Vote WithTimestamp transactions from the
ledger that reference that slot, using `Blocktree::get_slot_entries()`. As these
ledger that reference that slot, using `Blockstore::get_slot_entries()`. As these
transactions could have taken some time to reach and be processed by the leader,
the validator needs to scan several completed blocks after the timestamp\_slot to
get a reasonable set of Timestamps. The exact number of slots will need to be


@ -75,3 +75,11 @@ Output
```text
4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN
```
## Buying More Time to Sign
Typically a Solana transaction must be signed and accepted by the network within
a number of slots from the blockhash in its `recent_blockhash` field (~2min at
the time of this writing). If your signing procedure takes longer than this, a
[Durable Transaction Nonce](durable-nonce.md) can give you the extra time you
need.


@ -0,0 +1,263 @@
# Durable Transaction Nonces
Durable transaction nonces are a mechanism for getting around the typical
short lifetime of a transaction's [`recent_blockhash`](../transaction.md#recent-blockhash).
They are implemented as a Solana Program, the mechanics of which can be read
about in the [proposal](../implemented-proposals/durable-tx-nonces.md).
## Usage Examples
Full usage details for durable nonce CLI commands can be found in the
[CLI reference](../api-reference/cli.md).
### Nonce Authority
Authority over a nonce account can optionally be assigned to another account. In
doing so the new authority inherits full control over the nonce account from the
previous authority, including the account creator. This feature enables the
creation of more complex account ownership arrangements and derived account
addresses not associated with a keypair. The `--nonce-authority <AUTHORITY_KEYPAIR>`
argument is used to specify this account and is supported by the following
commands
* `create-nonce-account`
* `new-nonce`
* `withdraw-from-nonce-account`
* `authorize-nonce-account`
### Nonce Account Creation
The durable transaction nonce feature uses an account to store the next nonce
value. Durable nonce accounts must be [rent-exempt](../implemented-proposals/rent.md#two-tiered-rent-regime),
so need to carry the minimum balance to achieve this.
A nonce account is created by first generating a new keypair, then creating the account on chain
- Command
```bash
solana-keygen new -o nonce-keypair.json
solana create-nonce-account nonce-keypair.json 1 SOL
```
- Output
```text
2SymGjGV4ksPdpbaqWFiDoBz8okvtiik4KE9cnMQgRHrRLySSdZ6jrEcpPifW4xUpp4z66XM9d9wM48sA7peG2XL
```
{% hint style="info" %}
To keep the keypair entirely offline, use the [Paper Wallet](../paper-wallet/README.md)
keypair generation [instructions](../paper-wallet/usage.md#seed-phrase-generation.md)
instead
{% endhint %}
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-create-nonce-account)
{% endhint %}
### Querying the Stored Nonce Value
Creating a durable nonce transaction requires passing the stored nonce value as
the value to the `--blockhash` argument upon signing and submission. Obtain the
presently stored nonce value with
- Command
```bash
solana get-nonce nonce-keypair.json
```
- Output
```text
8GRipryfxcsxN8mAGjy8zbFo9ezaUsh47TsPzmZbuytU
```
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-get-nonce)
{% endhint %}
### Advancing the Stored Nonce Value
While not typically needed outside a more useful transaction, the stored nonce
value can be advanced by
- Command
```bash
solana new-nonce nonce-keypair.json
```
- Output
```text
44jYe1yPKrjuYDmoFTdgPjg8LFpYyh1PFKJqm5SC1PiSyAL8iw1bhadcAX1SL7KDmREEkmHpYvreKoNv6fZgfvUK
```
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-new-nonce)
{% endhint %}
### Display Nonce Account
Inspect a nonce account in a more human-friendly format with
- Command
```bash
solana show-nonce-account nonce-keypair.json
```
- Output
```text
balance: 0.5 SOL
minimum balance required: 0.00136416 SOL
nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS
```
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-show-nonce-account)
{% endhint %}
### Withdraw Funds from a Nonce Account
Withdraw funds from a nonce account with
- Command
```bash
solana withdraw-from-nonce-account nonce-keypair.json ~/.config/solana/id.json 0.5 SOL
```
- Output
```text
3foNy1SBqwXSsfSfTdmYKDuhnVheRnKXpoPySiUDBVeDEs6iMVokgqm7AqfTjbk7QBE8mqomvMUMNQhtdMvFLide
```
{% hint style="info" %}
Close a nonce account by withdrawing the full balance
{% endhint %}
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-withdraw-from-nonce-account)
{% endhint %}
### Assign a New Authority to a Nonce Account
Reassign the authority of a nonce account after creation with
- Command
```bash
solana authorize-nonce-account nonce-keypair.json nonce-authority.json
```
- Output
```text
3F9cg4zN9wHxLGx4c3cUKmqpej4oa67QbALmChsJbfxTgTffRiL3iUehVhR9wQmWgPua66jPuAYeL1K2pYYjbNoT
```
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-authorize-nonce-account)
{% endhint %}
## Other Commands Supporting Durable Nonces
To make use of durable nonces with other CLI subcommands, a subcommand must
support two arguments.
* `--nonce`, specifies the account storing the nonce value
* `--nonce-authority`, specifies an optional [nonce authority](#nonce-authority)
The following subcommands have received this treatment so far; an illustrative
sketch follows the list
* [`pay`](../api-reference/cli.md#solana-pay)
* [`delegate-stake`](../api-reference/cli.md#solana-delegate-stake)
* [`deactivate-stake`](../api-reference/cli.md#solana-deactivate-stake)
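For instance, delegating stake with a durable nonce might look like the sketch
below. The `stake-keypair.json` file and `<VOTE_ACCOUNT_PUBKEY>` are hypothetical
stand-ins, and `<NONCE_VALUE>` is the hash reported by `solana get-nonce`.
```bash
# Look up the value currently stored in the nonce account
solana get-nonce nonce-keypair.json
# Pass it via --blockhash, along with the nonce account and optional authority
solana delegate-stake --blockhash <NONCE_VALUE> --nonce nonce-keypair.json \
    --nonce-authority nonce-authority.json stake-keypair.json <VOTE_ACCOUNT_PUBKEY>
```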
### Example Pay Using Durable Nonce
Here we demonstrate Alice paying Bob 1 SOL using a durable nonce. The procedure
is the same for all subcommands supporting durable nonces
#### - Create accounts
First we need some accounts for Alice, Alice's nonce and Bob
```bash
$ solana-keygen new -o alice.json
$ solana-keygen new -o nonce.json
$ solana-keygen new -o bob.json
```
#### - Fund Alice's account
Alice will need some funds to create a nonce account and send to Bob. Airdrop
her some SOL
```bash
$ solana airdrop -k alice.json 10 SOL
10 SOL
```
#### - Create Alice's nonce account
Now Alice needs a nonce account. Create one
{% hint style="info" %}
Here, no separate [nonce authority](#nonce-authority) is employed, so `alice.json`
has full authority over the nonce account
{% endhint %}
```bash
$ solana create-nonce-account -k alice.json nonce.json 1 SOL
3KPZr96BTsL3hqera9up82KAU462Gz31xjqJ6eHUAjF935Yf8i1kmfEbo6SVbNaACKE5z6gySrNjVRvmS8DcPuwV
```
#### - A failed first attempt to pay Bob
Alice attempts to pay Bob, but takes too long to sign. The specified blockhash
expires and the transaction fails
```bash
$ solana pay -k alice.json --blockhash expiredDTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 bob.json 1 SOL
[2020-01-02T18:48:28.462911000Z ERROR solana_cli::cli] Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSPiGUAdtVg8zREWv4GiKjkcGNufgpcbFyRKRrA25NkgjZySEeKue5rawyeH5TzsV\" failed: None" })
Error: Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSPiGUAdtVg8zREWv4GiKjkcGNufgpcbFyRKRrA25NkgjZySEeKue5rawyeH5TzsV\" failed: None" })
```
#### - Nonce to the rescue!
Alice retries the transaction, this time specifying her nonce account and the
blockhash stored there
{% hint style="info" %}
Remember, `alice.json` is the [nonce authority](#nonce-authority) in this example
{% endhint %}
```bash
$ solana show-nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7
```
```bash
$ solana pay -k alice.json --blockhash F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 --nonce nonce.json bob.json 1 SOL
HR1368UKHVZyenmH7yVz5sBAijV6XAPeWbEiXEGVYQorRMcoijeNAbzZqEZiH8cDB8tk65ckqeegFjK8dHwNFgQ
```
#### - Success!
The transaction succeeds! Bob receives 1 SOL from Alice and Alice's stored
nonce advances to a new value
```bash
$ solana balance -k bob.json
1 SOL
```
```bash
$ solana show-nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: 6bjroqDcZgTv6Vavhqf81oBHTv3aMnX19UTB51YhAZnN
```

View File

@ -28,17 +28,17 @@ slashing proof to punish this bad behavior.
2) Otherwise, we can simply mark the slot as dead and not playable. A slashing
proof may or may not be necessary depending on feasibility.
# Blocktree receiving shreds
# Blockstore receiving shreds
When blocktree receives a new shred `s`, there are two cases:
When blockstore receives a new shred `s`, there are two cases:
1) `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred
`s'` in blocktree for that slot where `s'.index > s.index`. If so, together `s`
`s'` in blockstore for that slot where `s'.index > s.index`. If so, together `s`
and `s'` constitute a slashing proof.
2) Blocktree has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
with index `i`. If `s.index > i`, then together `s` and `s'` constitute a
slashing proof. In this case, blocktree will also not insert `s`.
slashing proof. In this case, blockstore will also not insert `s`.
3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for
the same index are a slashable condition. Details for this case are covered
@ -47,7 +47,7 @@ in the `Leader Duplicate Block Slashing` section.
# Replaying and validating ticks
1) Replay stage replays entries from blocktree, keeping track of the number of
1) Replay stage replays entries from blockstore, keeping track of the number of
ticks it has seen per slot, and verifying there are `hashes_per_tick` number of
hashes between ticks. After the tick from this last shred has been played,
replay stage then checks the total number of ticks.

View File

@ -21,7 +21,6 @@ We unwrap the many abstraction layers and build a single pipeline that can toggl
should forward transactions to the next leader.
* Hoist FetchStage and BroadcastStage out of TPU
* Blocktree renamed to Blockstore
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage

View File

@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
Try running the following command to join the gossip network and view all the other nodes in the cluster:
```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
# Press ^C to exit
```
@ -146,7 +146,7 @@ solana-archiver --entrypoint testnet.solana.com:8001 --identity-keypair archiver
From another console, confirm the IP address and **identity pubkey** of your archiver is visible in the gossip network by running:
```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
```
Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:

View File

@ -6,7 +6,7 @@ Confirm the IP address and **identity pubkey** of your validator is visible in
the gossip network by running:
```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
```
## Check Your Balance

View File

@ -89,7 +89,7 @@ To monitor your validator during its warmup period:
* View your stake account, the delegation preference and details of your stake:`solana show-stake-account ~/validator-stake-keypair.json`
* `solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs
* `solana show-validators` displays the current active stake of all validators, including yours
* `solana show-show-stake-history ` shows the history of stake warming up and cooling down over recent epochs
* `solana show-stake-history ` shows the history of stake warming up and cooling down over recent epochs
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`
* Once your stake is warmed up, you will see a stake balance listed for your validator on the [Solana Network Explorer](http://explorer.solana.com/validators)

View File

Try running the following command to join the gossip network and view all the other
nodes in the cluster:
```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
# Press ^C to exit
```
@ -42,10 +42,6 @@ solana-gossip --entrypoint testnet.solana.com:8001 spy
If your machine has a GPU with CUDA installed \(Linux-only currently\), include
the `--cuda` argument to `solana-validator`.
```bash
export SOLANA_CUDA=1
```
When your validator is started look for the following log message to indicate
that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`

View File

@ -1,16 +1,16 @@
# Blocktree
# Blockstore
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
Shreds are moved to a fork-able key space: the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
## Functionalities of Blocktree
## Functionalities of Blockstore
1. Persistence: the Blocktree lives in the front of the nodes verification
1. Persistence: the Blockstore lives in the front of the nodes verification
pipeline, right behind network receive and signature verification. If the
@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out
2. Repair: repair is the same as window repair above, but able to serve any
shred that's been received. Blocktree stores shreds with signatures,
shred that's been received. Blockstore stores shreds with signatures,
preserving the chain of origination.
3. Forks: Blocktree supports random access of shreds, so can support a
3. Forks: Blockstore supports random access of shreds, so can support a
validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the Blocktree can be replayed by
4. Restart: with proper pruning/culling, the Blockstore can be replayed by
ordered enumeration of entries from slot 0. The logic of the replay stage
\(i.e. dealing with forks\) will have to be used for the most recent entries in
the Blocktree.
the Blockstore.
## Blocktree Design
## Blockstore Design
1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
* `slot_index` - The index of this slot
* `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
* `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out
is\_rooted\(0\), and is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
## Blocktree APIs
## Blockstore APIs
The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
The Blockstore offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
## Interfacing with Bank
@ -80,11 +80,11 @@ The bank exposes to replay stage:
be able to be chained below this vote
Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
## Pruning Blocktree
## Pruning Blockstore
Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.
Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.
Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.22.0"
version = "0.22.10"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -20,7 +20,7 @@ steps:
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 40
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"

View File

@ -72,10 +72,14 @@ ARGS+=(
--env CI_JOB_ID
--env CI_PULL_REQUEST
--env CI_REPO_SLUG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
)
# Also propagate environment variables needed for codecov
# https://docs.codecov.io/docs/testing-with-docker#section-codecov-inside-docker
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CODECOV_ENVS=$(CI=true bash <(curl -s https://codecov.io/env))
if $INTERACTIVE; then
if [[ -n $1 ]]; then
echo
@ -83,8 +87,10 @@ if $INTERACTIVE; then
echo
fi
set -x
exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
# shellcheck disable=SC2086
exec docker run --interactive --tty "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" bash
fi
set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
# shellcheck disable=SC2086
exec docker run "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" "$@"

View File

@ -5,7 +5,28 @@ cd "$(dirname "$0")/.."
me=$(basename "$0")
BOOK="book"
echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then
(
set -x
(
. ci/rust-version.sh stable
ci/docker-run.sh "$rust_stable_docker_image" make -Cbook -B svg
)
# make a local commit for the svgs
git add -A -f book/src/.gitbook/assets/.
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$me"
git commit -m "gitbook-cage update $(date -Is)"
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
# pop off the local commit
git reset --hard HEAD~
fi
)
fi
source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"
@ -31,6 +52,7 @@ EOF
exit 0
fi
repo=git@github.com:solana-labs/book.git
BOOK="book"
else
# book-edge and book-beta are published automatically on the tip of the branch
case $CHANNEL in
@ -73,27 +95,4 @@ echo "--- publish $BOOK"
fi
)
echo --- update gitbook-cage
(
if [[ -z $CI_BRANCH ]]; then
exit 0
fi
set -x
(
. ci/rust-version.sh
ci/docker-run.sh $rust_stable_docker_image make -Cbook -B svg
)
# make a local commit for the svgs
git add -A -f book/src/.gitbook/assets/.
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$me"
git commit -m "gitbook-cage update $(date -Is)"
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
# pop off the local commit
git reset --hard HEAD~
fi
)
exit 0

View File

@ -4,7 +4,12 @@ set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"
echo --- Creating tarball
if [[ -n "$CI_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
(
set -x
sdk/bpf/scripts/package.sh
@ -12,7 +17,7 @@ echo --- Creating tarball
)
echo --- AWS S3 Store
if [[ -z $CHANNEL ]]; then
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Skipped
else
(
@ -24,7 +29,7 @@ else
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/bpf-sdk.tar.bz2 \
s3://solana-sdk/"$CHANNEL"/bpf-sdk.tar.bz2
s3://solana-sdk/"$CHANNEL_OR_TAG"/bpf-sdk.tar.bz2
)
fi

View File

@ -53,7 +53,7 @@ windows)
;;
esac
echo --- Creating tarball
echo --- Creating release tarball
(
set -x
rm -rf solana-release/
@ -89,15 +89,21 @@ echo --- Creating tarball
)
# Metrics tarball is platform agnostic, only publish it from Linux
MAYBE_METRICS_TARBALL=
MAYBE_TARBALLS=
if [[ "$CI_OS_NAME" = linux ]]; then
metrics/create-metrics-tarball.sh
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2"
fi
source ci/upload-ci-artifact.sh
for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then

View File

@ -10,6 +10,9 @@ source ci/rust-version.sh nightly
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
# Look for failed mergify.io backports
_ git show HEAD --check --oneline
_ cargo +"$rust_stable" fmt --all -- --check
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
@ -19,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031 --ignore RUSTSEC-2020-0002
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ book/build.sh

View File

@ -41,7 +41,8 @@ if [[ -z "$CODECOV_TOKEN" ]]; then
echo "^^^ +++"
echo CODECOV_TOKEN undefined, codecov.io upload skipped
else
bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CI=true bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
annotate --style success --context codecov.io \
"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"

View File

@ -111,7 +111,7 @@ test-move)
;;
test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
*)

View File

@ -142,6 +142,7 @@ testnet-beta|testnet-beta-perf)
testnet)
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$STABLE_CHANNEL
export CLOUDSDK_CORE_PROJECT=testnet-solana-com
;;
testnet-perf)
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
@ -157,6 +158,7 @@ tds)
: "${TDS_CHANNEL_OR_TAG:=edge}"
CHANNEL_OR_TAG="$TDS_CHANNEL_OR_TAG"
CHANNEL_BRANCH="$CI_BRANCH"
export CLOUDSDK_CORE_PROJECT=tour-de-sol
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
@ -375,7 +377,7 @@ deploy() {
(
set -x
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-t "$CHANNEL_OR_TAG" -n 1 -c 0 -u -P \
-a testnet-solana-com --letsencrypt testnet.solana.com \
--limit-ledger-size \
${skipCreate:+-e} \
@ -460,6 +462,14 @@ deploy() {
maybeGpu=(-G "${ENABLE_GPU}")
fi
if [[ -z $HASHES_PER_TICK ]]; then
maybeHashesPerTick="--hashes-per-tick auto"
elif [[ $HASHES_PER_TICK == skip ]]; then
maybeHashesPerTick=""
else
maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
fi
if [[ -z $DISABLE_AIRDROPS ]]; then
DISABLE_AIRDROPS="true"
fi
@ -470,6 +480,22 @@ deploy() {
maybeDisableAirdrops=""
fi
if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000" # 1 SOL
elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
maybeInternalNodesStakeLamports=""
else
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports ${INTERNAL_NODES_STAKE_LAMPORTS}"
fi
if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
maybeInternalNodesLamports="--internal-nodes-lamports 500000000000" # 500 SOL
elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
maybeInternalNodesLamports=""
else
maybeInternalNodesLamports="--internal-nodes-lamports ${INTERNAL_NODES_LAMPORTS}"
fi
EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/validators/all.yml
@ -503,11 +529,14 @@ deploy() {
--idle-clients \
-P -u \
-a tds-solana-com --letsencrypt tds.solana.com \
${maybeHashesPerTick} \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
${maybeDelete:+-D} \
${maybeDisableAirdrops} \
${maybeInternalNodesStakeLamports} \
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
--target-lamports-per-signature 0 \
--slots-per-epoch 4096 \

View File

@ -60,7 +60,7 @@ trap shutdown EXIT INT
set -x
for zone in "$@"; do
echo "--- $cloudProvider config [$zone]"
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -p "$netName" -z "$zone"
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -n 1 -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo "+++ $cloudProvider.sh info"
net/"$cloudProvider".sh info

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "0.22.0"
version = "0.22.10"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ edition = "2018"
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
tiny-bip39 = "0.6.2"
url = "2.1.0"
chrono = "0.4"

View File

@ -33,7 +33,10 @@ where
}
}
pub fn unix_timestamp_of(matches: &ArgMatches<'_>, name: &str) -> Option<UnixTimestamp> {
pub fn unix_timestamp_from_rfc3339_datetime(
matches: &ArgMatches<'_>,
name: &str,
) -> Option<UnixTimestamp> {
matches.value_of(name).and_then(|value| {
DateTime::parse_from_rfc3339(value)
.ok()

View File

@ -1,4 +1,5 @@
use crate::keypair::ASK_KEYWORD;
use chrono::DateTime;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair_file, Signature};
@ -42,6 +43,11 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}
// Return an error if string cannot be parsed as pubkey or keypair file or keypair ask keyword
pub fn is_pubkey_or_keypair_or_ask_keyword(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair_or_ask_keyword(string))
}
// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
let mut signer = string.split('=');
@ -129,3 +135,9 @@ pub fn is_amount(amount: String) -> Result<(), String> {
))
}
}
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
DateTime::parse_from_rfc3339(&value)
.map(|_| ())
.map_err(|e| format!("{:?}", e))
}

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.0"
version = "0.22.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -28,24 +28,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-client = { path = "../client", version = "0.22.0" }
solana-config-program = { path = "../programs/config", version = "0.22.0" }
solana-faucet = { path = "../faucet", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-stake-program = { path = "../programs/stake", version = "0.22.0" }
solana-storage-program = { path = "../programs/storage", version = "0.22.0" }
solana-vote-program = { path = "../programs/vote", version = "0.22.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.0" }
solana-budget-program = { path = "../programs/budget", version = "0.22.10" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-client = { path = "../client", version = "0.22.10" }
solana-config-program = { path = "../programs/config", version = "0.22.10" }
solana-faucet = { path = "../faucet", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-runtime = { path = "../runtime", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
solana-stake-program = { path = "../programs/stake", version = "0.22.10" }
solana-storage-program = { path = "../programs/storage", version = "0.22.10" }
solana-vote-program = { path = "../programs/vote", version = "0.22.10" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.10" }
url = "2.1.0"
[dev-dependencies]
solana-core = { path = "../core", version = "0.22.0" }
solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.10" }
solana-budget-program = { path = "../programs/budget", version = "0.22.10" }
tempfile = "3.1.0"
[[bin]]

File diff suppressed because it is too large

View File

@ -9,7 +9,7 @@ use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
use solana_client::{rpc_client::RpcClient, rpc_response::RpcVoteAccountInfo};
use solana_sdk::{
clock::{self, Slot},
commitment_config::CommitmentConfig,
@ -54,6 +54,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.about("Get the version of the cluster entrypoint"),
)
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
.subcommand(SubCommand::with_name("get-block-time")
.about("Get estimated production time of a block")
.arg(
@ -364,6 +365,41 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
))
}
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
if leader_schedule.is_none() {
return Err(format!(
"Unable to fetch leader schedule for slot {}",
first_slot_in_epoch
)
.into());
}
let leader_schedule = leader_schedule.unwrap();
let mut leader_per_slot_index = Vec::new();
for (pubkey, leader_slots) in leader_schedule.iter() {
for slot_index in leader_slots.iter() {
if *slot_index >= leader_per_slot_index.len() {
leader_per_slot_index.resize(*slot_index + 1, "?");
}
leader_per_slot_index[*slot_index] = pubkey;
}
}
for (slot_index, leader) in leader_per_slot_index.iter().enumerate() {
println!(
" {:<15} {:<44}",
first_slot_in_epoch + slot_index as u64,
leader
);
}
Ok("".to_string())
}
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
let timestamp = rpc_client.get_block_time(slot)?;
Ok(timestamp.to_string())
@ -434,19 +470,39 @@ pub fn process_show_block_production(
return Err(format!("Epoch {} is in the future", epoch).into());
}
let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?;
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
let end_slot = std::cmp::min(
epoch_info.absolute_slot,
epoch_schedule.get_last_slot_in_epoch(epoch),
);
let start_slot = if let Some(slot_limit) = slot_limit {
let mut start_slot = if let Some(slot_limit) = slot_limit {
std::cmp::max(end_slot.saturating_sub(slot_limit), first_slot_in_epoch)
} else {
first_slot_in_epoch
};
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - start_slot) as usize;
if minimum_ledger_slot > end_slot {
return Err(format!(
"Ledger data not available for slots {} to {} (minimum ledger slot is {})",
start_slot, end_slot, minimum_ledger_slot
)
.into());
}
if minimum_ledger_slot > start_slot {
println!(
"\n{}",
style(format!(
"Note: Requested start slot was {} but minimum ledger slot is {}",
start_slot, minimum_ledger_slot
))
.italic(),
);
start_slot = minimum_ledger_slot;
}
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(&format!(
@ -455,12 +511,14 @@ pub fn process_show_block_production(
));
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
let total_slots = end_slot_index - start_slot_index + 1;
let total_blocks = confirmed_blocks.len();
assert!(total_blocks <= total_slots);
let total_slots_missed = total_slots - total_blocks;
let total_slots_skipped = total_slots - total_blocks;
let mut leader_slot_count = HashMap::new();
let mut leader_missed_slots = HashMap::new();
let mut leader_skipped_slots = HashMap::new();
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
let leader_schedule = rpc_client
@ -482,7 +540,7 @@ pub fn process_show_block_production(
progress_bar.set_message(&format!(
"Processing {} slots containing {} blocks and {} empty slots...",
total_slots, total_blocks, total_slots_missed
total_slots, total_blocks, total_slots_skipped
));
let mut confirmed_blocks_index = 0;
@ -491,7 +549,7 @@ pub fn process_show_block_production(
let slot = start_slot + slot_index as u64;
let slot_count = leader_slot_count.entry(leader).or_insert(0);
*slot_count += 1;
let missed_slots = leader_missed_slots.entry(leader).or_insert(0);
let skipped_slots = leader_skipped_slots.entry(leader).or_insert(0);
loop {
if !confirmed_blocks.is_empty() {
@ -506,9 +564,9 @@ pub fn process_show_block_production(
break;
}
}
*missed_slots += 1;
*skipped_slots += 1;
individual_slot_status.push(
style(format!(" {:<15} {:<44} MISSED", slot, leader))
style(format!(" {:<15} {:<44} SKIPPED", slot, leader))
.red()
.to_string(),
);
@ -524,23 +582,23 @@ pub fn process_show_block_production(
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Missed Slots",
"Missed Block Percentage",
"Skipped Slots",
"Skipped Slot Percentage",
))
.bold()
);
let mut table = vec![];
for (leader, leader_slots) in leader_slot_count.iter() {
let missed_slots = leader_missed_slots.get(leader).unwrap();
let blocks_produced = leader_slots - missed_slots;
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
let blocks_produced = leader_slots - skipped_slots;
table.push(format!(
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader,
leader_slots,
blocks_produced,
missed_slots,
*missed_slots as f64 / *leader_slots as f64 * 100.
skipped_slots,
*skipped_slots as f64 / *leader_slots as f64 * 100.
));
}
table.sort();
@ -551,8 +609,8 @@ pub fn process_show_block_production(
format!("Epoch {} total:", epoch),
total_slots,
total_blocks,
total_slots_missed,
total_slots_missed as f64 / total_slots as f64 * 100.
total_slots_skipped,
total_slots_skipped as f64 / total_slots as f64 * 100.
);
println!(
" (using data from {} slots: {} to {})",

View File

@ -110,9 +110,10 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
} else {
let default_keypair_path = CliConfig::default_keypair_path();
if !std::path::Path::new(&default_keypair_path).exists() {
return Err(CliError::KeypairFileNotFound(
"Generate a new keypair with `solana-keygen new`".to_string(),
)
return Err(CliError::KeypairFileNotFound(format!(
"Generate a new keypair at {} with `solana-keygen new`",
default_keypair_path
))
.into());
}
default_keypair_path

View File

@ -1,34 +1,71 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult,
CliError, ProcessResult, SigningAuthority,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
account::Account,
account_utils::State,
hash::Hash,
nonce_instruction::{authorize, create_nonce_account, nonce, withdraw, NonceError},
nonce_program,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_instruction::SystemError,
system_instruction::{
create_nonce_account, nonce_advance, nonce_authorize, nonce_withdraw, NonceError,
SystemError,
},
system_program,
transaction::Transaction,
};
#[derive(Debug, Clone, PartialEq)]
pub enum CliNonceError {
InvalidAccountOwner,
InvalidAccountData,
InvalidHash,
InvalidAuthority,
InvalidState,
}
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
name: "nonce",
long: "nonce",
help: "Provide the nonce account to use when creating a nonced \n\
transaction. Nonced transactions are useful when a transaction \n\
requires a lengthy signing process. Learn more about nonced \n\
transactions at https://docs.solana.com/offline-signing/durable-nonce",
};
pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
name: "nonce_authority",
long: "nonce-authority",
help: "Provide the nonce authority keypair to use when signing a nonced transaction",
};
pub trait NonceSubCommands {
fn nonce_subcommands(self) -> Self;
}
fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("nonce_authority")
.long("nonce-authority")
pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_keypair_or_ask_keyword)
.help("Specify nonce authority if different from account")
.value_name("PUBKEY")
.requires("blockhash")
.validator(is_pubkey)
.help(NONCE_ARG.help)
}
pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR or PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help)
}
impl NonceSubCommands for App<'_, '_> {
@ -86,8 +123,8 @@ impl NonceSubCommands for App<'_, '_> {
.help("Specify unit to use for request"),
)
.arg(
Arg::with_name("nonce_authority")
.long("nonce-authority")
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY")
.validator(is_pubkey_or_keypair)
@ -183,20 +220,23 @@ impl NonceSubCommands for App<'_, '_> {
}
}
fn resolve_nonce_authority(matches: &ArgMatches<'_>) -> Keypair {
keypair_of(matches, "nonce_authority")
.unwrap_or_else(|| keypair_of(matches, "nonce_account_keypair").unwrap())
}
pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_authority = resolve_nonce_authority(matches);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account,
nonce_authority: nonce_authority.into(),
nonce_authority,
new_authority,
},
require_keypair: true,
@ -206,8 +246,7 @@ pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliComm
pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority =
pubkey_of(matches, "nonce_authority").unwrap_or_else(|| nonce_account.pubkey());
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
Ok(CliCommandInfo {
command: CliCommand::CreateNonceAccount {
@ -230,12 +269,20 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_authority = resolve_nonce_authority(matches);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account,
nonce_authority: nonce_authority.into(),
nonce_authority,
},
require_keypair: true,
})
@ -260,12 +307,20 @@ pub fn parse_withdraw_from_nonce_account(
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = resolve_nonce_authority(matches);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account,
nonce_authority: nonce_authority.into(),
nonce_authority,
destination_account_pubkey,
lamports,
},
@ -273,16 +328,47 @@ pub fn parse_withdraw_from_nonce_account(
})
}
/// Check if a nonce account is initialized with the given authority and hash
pub fn check_nonce_account(
nonce_account: &Account,
nonce_authority: &Pubkey,
nonce_hash: &Hash,
) -> Result<(), Box<CliError>> {
if nonce_account.owner != system_program::ID {
return Err(CliError::InvalidNonce(CliNonceError::InvalidAccountOwner).into());
}
let nonce_state: NonceState = nonce_account
.state()
.map_err(|_| Box::new(CliError::InvalidNonce(CliNonceError::InvalidAccountData)))?;
match nonce_state {
NonceState::Initialized(meta, hash) => {
if &hash != nonce_hash {
Err(CliError::InvalidNonce(CliNonceError::InvalidHash).into())
} else if nonce_authority != &meta.nonce_authority {
Err(CliError::InvalidNonce(CliNonceError::InvalidAuthority).into())
} else {
Ok(())
}
}
NonceState::Uninitialized => {
Err(CliError::InvalidNonce(CliNonceError::InvalidState).into())
}
}
}
pub fn process_authorize_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: &Keypair,
nonce_authority: Option<&SigningAuthority>,
new_authority: &Pubkey,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ix = authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
let mut tx = Transaction::new_signed_with_payer(
vec![ix],
Some(&config.keypair.pubkey()),
@ -304,7 +390,7 @@ pub fn process_create_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Keypair,
nonce_authority: &Pubkey,
nonce_authority: Option<Pubkey>,
lamports: u64,
) -> ProcessResult {
let nonce_account_pubkey = nonce_account.pubkey();
@ -313,12 +399,18 @@ pub fn process_create_nonce_account(
(&nonce_account_pubkey, "nonce_account_pubkey".to_string()),
)?;
if rpc_client.get_account(&nonce_account_pubkey).is_ok() {
return Err(CliError::BadParameter(format!(
"Unable to create nonce account. Nonce account already exists: {}",
nonce_account_pubkey,
))
.into());
if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_pubkey) {
let err_msg = if nonce_account.owner == system_program::id()
&& State::<NonceState>::state(&nonce_account).is_ok()
{
format!("Nonce account {} already exists", nonce_account_pubkey)
} else {
format!(
"Account {} already exists and is not a nonce account",
nonce_account_pubkey
)
};
return Err(CliError::BadParameter(err_msg).into());
}
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(NonceState::size())?;
@ -330,10 +422,11 @@ pub fn process_create_nonce_account(
.into());
}
let nonce_authority = nonce_authority.unwrap_or_else(|| config.keypair.pubkey());
let ixs = create_nonce_account(
&config.keypair.pubkey(),
&nonce_account_pubkey,
nonce_authority,
&nonce_authority,
lamports,
);
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
@ -356,7 +449,7 @@ pub fn process_create_nonce_account(
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
if nonce_account.owner != nonce_program::id() {
if nonce_account.owner != system_program::id() {
return Err(CliError::RpcRequestError(format!(
"{:?} is not a nonce account",
nonce_account_pubkey
@ -378,7 +471,7 @@ pub fn process_new_nonce(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: &Keypair,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@ -392,7 +485,10 @@ pub fn process_new_nonce(
.into());
}
let ix = nonce(&nonce_account, &nonce_authority.pubkey());
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_advance(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_signed_with_payer(
vec![ix],
@ -417,7 +513,7 @@ pub fn process_show_nonce_account(
use_lamports_unit: bool,
) -> ProcessResult {
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
if nonce_account.owner != nonce_program::id() {
if nonce_account.owner != system_program::id() {
return Err(CliError::RpcRequestError(format!(
"{:?} is not a nonce account",
nonce_account_pubkey
@ -458,13 +554,16 @@ pub fn process_withdraw_from_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: &Keypair,
nonce_authority: Option<&SigningAuthority>,
destination_account_pubkey: &Pubkey,
lamports: u64,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ix = withdraw(
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_withdraw(
nonce_account,
&nonce_authority.pubkey(),
destination_account_pubkey,
@ -491,7 +590,13 @@ pub fn process_withdraw_from_nonce_account(
mod tests {
use super::*;
use crate::cli::{app, parse_command};
use solana_sdk::signature::{read_keypair_file, write_keypair};
use solana_sdk::{
account::Account,
hash::hash,
nonce_state::{Meta as NonceMeta, NonceState},
signature::{read_keypair_file, write_keypair},
system_program,
};
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
@ -524,7 +629,7 @@ mod tests {
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: nonce_account_pubkey,
nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
nonce_authority: None,
new_authority: Pubkey::default(),
},
require_keypair: true,
@ -545,7 +650,9 @@ mod tests {
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().into()
),
new_authority: Pubkey::default(),
},
require_keypair: true,
@ -565,7 +672,7 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
nonce_authority: nonce_account_pubkey,
nonce_authority: None,
lamports: 50,
},
require_keypair: true
@ -587,7 +694,9 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().pubkey(),
nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().pubkey()
),
lamports: 50,
},
require_keypair: true
@ -619,7 +728,7 @@ mod tests {
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
nonce_authority: nonce_account.into(),
nonce_authority: None,
},
require_keypair: true
}
@ -639,7 +748,9 @@ mod tests {
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().into()
),
},
require_keypair: true
}
@ -676,7 +787,7 @@ mod tests {
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
nonce_authority: None,
destination_account_pubkey: nonce_account_pubkey,
lamports: 42
},
@ -697,7 +808,7 @@ mod tests {
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
nonce_authority: None,
destination_account_pubkey: nonce_account_pubkey,
lamports: 42000000000
},
@ -721,7 +832,9 @@ mod tests {
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().into()
),
destination_account_pubkey: nonce_account_pubkey,
lamports: 42
},
@ -729,4 +842,66 @@ mod tests {
}
);
}
#[test]
fn test_check_nonce_account() {
let blockhash = Hash::default();
let nonce_pubkey = Pubkey::new_rand();
let valid = Account::new_data(
1,
&NonceState::Initialized(NonceMeta::new(&nonce_pubkey), blockhash),
&system_program::ID,
);
assert!(check_nonce_account(&valid.unwrap(), &nonce_pubkey, &blockhash).is_ok());
let invalid_owner = Account::new_data(
1,
&NonceState::Initialized(NonceMeta::new(&nonce_pubkey), blockhash),
&Pubkey::new(&[1u8; 32]),
);
assert_eq!(
check_nonce_account(&invalid_owner.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAccountOwner
))),
);
let invalid_data = Account::new_data(1, &"invalid", &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_data.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAccountData
))),
);
let invalid_hash = Account::new_data(
1,
&NonceState::Initialized(NonceMeta::new(&nonce_pubkey), hash(b"invalid")),
&system_program::ID,
);
assert_eq!(
check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(CliNonceError::InvalidHash))),
);
let invalid_authority = Account::new_data(
1,
&NonceState::Initialized(NonceMeta::new(&Pubkey::new_rand()), blockhash),
&system_program::ID,
);
assert_eq!(
check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAuthority
))),
);
let invalid_state = Account::new_data(1, &NonceState::Uninitialized, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidState
))),
);
}
}

File diff suppressed because it is too large

View File

@ -163,6 +163,19 @@ pub fn process_create_storage_account(
"storage_account_pubkey".to_string(),
),
)?;
if let Ok(storage_account) = rpc_client.get_account(&storage_account_pubkey) {
let err_msg = if storage_account.owner == solana_storage_program::id() {
format!("Storage account {} already exists", storage_account_pubkey)
} else {
format!(
"Account {} already exists and is not a storage account",
storage_account_pubkey
)
};
return Err(CliError::BadParameter(err_msg).into());
}
use solana_storage_program::storage_contract::STORAGE_ACCOUNT_SPACE;
let required_balance = rpc_client
.get_minimum_balance_for_rent_exemption(STORAGE_ACCOUNT_SPACE as usize)?

View File

@ -312,10 +312,12 @@ pub fn process_set_validator_info(
"Publishing info for Validator {:?}",
config.keypair.pubkey()
);
let lamports = rpc_client
.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let mut instructions = config_instruction::create_account::<ValidatorInfo>(
&config.keypair.pubkey(),
&info_keypair.pubkey(),
1,
lamports,
keys.clone(),
);
instructions.extend_from_slice(&[config_instruction::store(

View File

@ -9,10 +9,9 @@ use crate::{
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::Keypair;
use solana_sdk::{
account::Account, pubkey::Pubkey, signature::KeypairUtil, system_instruction::SystemError,
transaction::Transaction,
account::Account, pubkey::Pubkey, signature::Keypair, signature::KeypairUtil,
system_instruction::SystemError, transaction::Transaction,
};
use solana_vote_program::{
vote_instruction::{self, VoteError},
@ -51,7 +50,8 @@ impl VoteSubCommands for App<'_, '_> {
.long("commission")
.value_name("NUM")
.takes_value(true)
.help("The commission taken on reward redemption (0-100), default: 0"),
.default_value("100")
.help("The commission taken on reward redemption (0-100)"),
)
.arg(
Arg::with_name("authorized_voter")
@ -195,7 +195,7 @@ impl VoteSubCommands for App<'_, '_> {
pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let vote_account = keypair_of(matches, "vote_account").unwrap();
let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
let commission = value_of(&matches, "commission").unwrap_or(0);
let commission = value_t_or_exit!(matches, "commission", u8);
let authorized_voter = pubkey_of(matches, "authorized_voter");
let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");
@ -294,6 +294,18 @@ pub fn process_create_vote_account(
(&vote_account_pubkey, "vote_account_pubkey".to_string()),
)?;
if let Ok(vote_account) = rpc_client.get_account(&vote_account_pubkey) {
let err_msg = if vote_account.owner == solana_vote_program::id() {
format!("Vote account {} already exists", vote_account_pubkey)
} else {
format!(
"Account {} already exists and is not a vote account",
vote_account_pubkey
)
};
return Err(CliError::BadParameter(err_msg).into());
}
let required_balance = rpc_client
.get_minimum_balance_for_rent_exemption(VoteState::size_of())?
.max(1);
@ -609,7 +621,7 @@ mod tests {
node_pubkey,
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
commission: 100,
},
require_keypair: true
}
@ -637,7 +649,7 @@ mod tests {
node_pubkey,
authorized_voter: Some(authed),
authorized_withdrawer: None,
commission: 0
commission: 100
},
require_keypair: true
}
@ -663,7 +675,7 @@ mod tests {
node_pubkey,
authorized_voter: None,
authorized_withdrawer: Some(authed),
commission: 0
commission: 100
},
require_keypair: true
}

View File

@ -1,4 +1,6 @@
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_cli::cli::{
process_command, request_and_confirm_airdrop, CliCommand, CliConfig, SigningAuthority,
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
@ -59,7 +61,7 @@ fn test_nonce() {
&mut config_payer,
&mut config_nonce,
&keypair_file,
&keypair_file,
None,
);
server.close().unwrap();
@ -95,20 +97,24 @@ fn test_nonce_with_authority() {
&mut config_payer,
&mut config_nonce,
&nonce_keypair_file,
&authority_keypair_file,
Some(&authority_keypair_file),
);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
fn read_keypair_from_option(keypair_file: &Option<&str>) -> Option<SigningAuthority> {
keypair_file.map(|akf| read_keypair_file(&akf).unwrap().into())
}
fn full_battery_tests(
rpc_client: &RpcClient,
faucet_addr: &std::net::SocketAddr,
config_payer: &mut CliConfig,
config_nonce: &mut CliConfig,
nonce_keypair_file: &str,
authority_keypair_file: &str,
authority_keypair_file: Option<&str>,
) {
request_and_confirm_airdrop(
&rpc_client,
@ -122,7 +128,8 @@ fn full_battery_tests(
// Create nonce account
config_payer.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_from_option(&authority_keypair_file)
.map(|na: SigningAuthority| na.pubkey()),
lamports: 1000,
};
process_command(&config_payer).unwrap();
@ -144,7 +151,7 @@ fn full_battery_tests(
// New nonce
config_payer.command = CliCommand::NewNonce {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: read_keypair_from_option(&authority_keypair_file),
};
process_command(&config_payer).unwrap();
@ -159,7 +166,7 @@ fn full_battery_tests(
let payee_pubkey = Pubkey::new_rand();
config_payer.command = CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: read_keypair_from_option(&authority_keypair_file),
destination_account_pubkey: payee_pubkey,
lamports: 100,
};
@ -181,7 +188,7 @@ fn full_battery_tests(
write_keypair(&new_authority, tmp_file.as_file_mut()).unwrap();
config_payer.command = CliCommand::AuthorizeNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: read_keypair_from_option(&authority_keypair_file),
new_authority: read_keypair_file(&new_authority_keypair_file)
.unwrap()
.pubkey(),
@ -191,25 +198,29 @@ fn full_battery_tests(
// Old authority fails now
config_payer.command = CliCommand::NewNonce {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
nonce_authority: read_keypair_from_option(&authority_keypair_file),
};
process_command(&config_payer).unwrap_err();
// New authority can advance nonce
config_payer.command = CliCommand::NewNonce {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&new_authority_keypair_file)
.unwrap()
.into(),
nonce_authority: Some(
read_keypair_file(&new_authority_keypair_file)
.unwrap()
.into(),
),
};
process_command(&config_payer).unwrap();
// New authority can withdraw from nonce account
config_payer.command = CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
nonce_authority: read_keypair_file(&new_authority_keypair_file)
.unwrap()
.into(),
nonce_authority: Some(
read_keypair_file(&new_authority_keypair_file)
.unwrap()
.into(),
),
destination_account_pubkey: payee_pubkey,
lamports: 100,
};

View File

@ -1,9 +1,17 @@
use chrono::prelude::*;
use serde_json::Value;
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_cli::cli::{
process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand,
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::KeypairUtil, signature::Signature};
use solana_sdk::{
account_utils::State,
hash::Hash,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;
@ -12,6 +20,12 @@ use std::sync::mpsc::channel;
use solana_core::validator::new_validator_for_tests;
use std::thread::sleep;
use std::time::Duration;
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
@ -69,17 +83,13 @@ fn test_cli_timestamp_tx() {
// Make transaction (from config_payer to bob_pubkey) requiring timestamp from config_witness
let date_string = "\"2018-09-19T17:30:59Z\"";
let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
config_payer.command = CliCommand::Pay {
config_payer.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
timestamp: Some(dt),
timestamp_pubkey: Some(config_witness.keypair.pubkey()),
witnesses: None,
cancelable: false,
sign_only: false,
signers: None,
blockhash: None,
};
..PayCommand::default()
});
let sig_response = process_command(&config_payer);
let object: Value = serde_json::from_str(&sig_response.unwrap()).unwrap();
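The `Pay` variant now wraps a `PayCommand` struct so tests can use struct-update syntax. A hypothetical reconstruction of its shape, inferred only from the fields these tests set (the real definition lives in `solana_cli::cli`), shows why `..PayCommand::default()` works:

```rust
use chrono::{DateTime, Utc};
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature};

// Stand-in for solana_cli::cli::SigningAuthority so the sketch is
// self-contained.
struct SigningAuthority;

#[derive(Default)]
struct PayCommand {
    lamports: u64,
    to: Pubkey,
    timestamp: Option<DateTime<Utc>>,
    timestamp_pubkey: Option<Pubkey>,
    witnesses: Option<Vec<Pubkey>>,
    cancelable: bool,
    sign_only: bool,
    signers: Option<Vec<(Pubkey, Signature)>>,
    blockhash: Option<Hash>,
    nonce_account: Option<Pubkey>,
    nonce_authority: Option<SigningAuthority>,
}

fn main() {
    // Only the interesting fields are named; everything else is None/false/0.
    let pay = PayCommand {
        lamports: 10,
        to: Pubkey::new_rand(),
        cancelable: true,
        ..PayCommand::default()
    };
    assert!(!pay.sign_only && pay.blockhash.is_none());
}
```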
@ -144,17 +154,12 @@ fn test_cli_witness_tx() {
.unwrap();
// Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness
config_payer.command = CliCommand::Pay {
config_payer.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
timestamp: None,
timestamp_pubkey: None,
witnesses: Some(vec![config_witness.keypair.pubkey()]),
cancelable: false,
sign_only: false,
signers: None,
blockhash: None,
};
..PayCommand::default()
});
let sig_response = process_command(&config_payer);
let object: Value = serde_json::from_str(&sig_response.unwrap()).unwrap();
@ -212,17 +217,13 @@ fn test_cli_cancel_tx() {
.unwrap();
// Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness
config_payer.command = CliCommand::Pay {
config_payer.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
timestamp: None,
timestamp_pubkey: None,
witnesses: Some(vec![config_witness.keypair.pubkey()]),
cancelable: true,
sign_only: false,
signers: None,
blockhash: None,
};
..PayCommand::default()
});
let sig_response = process_command(&config_payer).unwrap();
let object: Value = serde_json::from_str(&sig_response).unwrap();
@ -288,17 +289,12 @@ fn test_offline_pay_tx() {
check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
config_offline.command = CliCommand::Pay {
config_offline.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
timestamp: None,
timestamp_pubkey: None,
witnesses: None,
cancelable: false,
sign_only: true,
signers: None,
blockhash: None,
};
..PayCommand::default()
});
let sig_response = process_command(&config_offline).unwrap();
check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
@ -318,17 +314,13 @@ fn test_offline_pay_tx() {
})
.collect();
config_online.command = CliCommand::Pay {
config_online.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
timestamp: None,
timestamp_pubkey: None,
witnesses: None,
cancelable: false,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
};
..PayCommand::default()
});
process_command(&config_online).unwrap();
check_balance(40, &rpc_client, &config_offline.keypair.pubkey());
@ -338,3 +330,81 @@ fn test_offline_pay_tx() {
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_nonced_pay_tx() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&config.keypair.pubkey(),
50 + minimum_nonce_balance,
)
.unwrap();
check_balance(
50 + minimum_nonce_balance,
&rpc_client,
&config.keypair.pubkey(),
);
// Create nonce account
let nonce_account = Keypair::new();
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: Some(config.keypair.pubkey()),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
check_balance(50, &rpc_client, &config.keypair.pubkey());
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
let bob_pubkey = Pubkey::new_rand();
config.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash: Some(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
..PayCommand::default()
});
process_command(&config).expect("failed to process pay command");
check_balance(40, &rpc_client, &config.keypair.pubkey());
check_balance(10, &rpc_client, &bob_pubkey);
// Verify that nonce has been used
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
match nonce_state {
NonceState::Initialized(_meta, hash) => assert_ne!(hash, nonce_hash),
_ => panic!("Nonce is not initialized"),
}
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
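The fetch-nonce-hash sequence above reappears in several tests below; as a sketch (not part of the diff), the repeated pattern factored into a helper:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{account_utils::State, hash::Hash, nonce_state::NonceState, pubkey::Pubkey};

// Read a nonce account and extract its stored hash, panicking if the
// account is not an initialized nonce.
fn fetch_nonce_hash(rpc_client: &RpcClient, nonce_pubkey: &Pubkey) -> Hash {
    let account = rpc_client.get_account(nonce_pubkey).unwrap();
    let nonce_state: NonceState = account.state().unwrap();
    match nonce_state {
        NonceState::Initialized(_meta, hash) => hash,
        _ => panic!("Nonce is not initialized"),
    }
}
```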

View File

@ -3,11 +3,13 @@ use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand,
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::State,
hash::Hash,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, KeypairUtil, Signature},
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
};
use solana_stake_program::stake_state::Lockup;
use solana_stake_program::stake_state::{Lockup, StakeAuthorize, StakeState};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;
@ -37,6 +39,23 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
});
}
fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}
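For reference, a sketch of the reply shape this helper consumes, using all-default (zeroed) values whose base58 encodings are runs of '1's:

```rust
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature};

fn main() {
    let reply = format!(
        r#"{{"blockhash":"{}","signers":["{}={}"]}}"#,
        Hash::default(),
        Pubkey::default(),
        Signature::default()
    );
    // Assuming the helper above is in scope:
    let (blockhash, signers) = parse_sign_only_reply_string(&reply);
    assert_eq!(blockhash, Hash::default());
    assert_eq!(signers[0], (Pubkey::default(), Signature::default()));
}
```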
#[test]
fn test_stake_delegation_and_deactivation() {
solana_logger::setup();
@ -97,19 +116,25 @@ fn test_stake_delegation_and_deactivation() {
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config_validator).unwrap();
// Deactivate stake
config_validator.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config_validator).unwrap();
@ -181,66 +206,351 @@ fn test_stake_delegation_and_deactivation_offline() {
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: true,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
let sig_response = process_command(&config_validator).unwrap();
let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Delegate stake online
config_payer.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
process_command(&config_payer).unwrap();
// Deactivate stake offline
config_validator.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
sign_only: true,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
let sig_response = process_command(&config_validator).unwrap();
let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Deactivate stake online
config_payer.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
process_command(&config_payer).unwrap();
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_nonced_stake_delegation_and_deactivation() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
// Create vote account
let vote_keypair = Keypair::new();
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
node_pubkey: config.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config).unwrap();
// Create stake account
let stake_keypair = Keypair::new();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
staker: None,
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
};
process_command(&config).unwrap();
// Create nonce account
let nonce_account = Keypair::new();
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: Some(config.keypair.pubkey()),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Delegate stake
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: false,
signers: None,
blockhash: Some(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
};
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Deactivate stake
let config_keypair = Keypair::from_bytes(&config.keypair.to_bytes()).unwrap();
config.command = CliCommand::DeactivateStake {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: None,
sign_only: false,
signers: None,
blockhash: Some(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: Some(config_keypair.into()),
};
process_command(&config).unwrap();
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_stake_authorize() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
// Create stake account, identity is authority
let stake_keypair = Keypair::new();
let stake_account_pubkey = stake_keypair.pubkey();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
staker: None,
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
};
process_command(&config).unwrap();
// Assign new online stake authority
let online_authority = Keypair::new();
let online_authority_pubkey = online_authority.pubkey();
let (online_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: None,
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, online_authority_pubkey);
// Assign new offline stake authority
let offline_authority = Keypair::new();
let offline_authority_pubkey = offline_authority.pubkey();
let (offline_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&offline_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&online_authority_file).unwrap().into()),
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, offline_authority_pubkey);
// Offline assignment of new nonced stake authority
let nonced_authority = Keypair::new();
let nonced_authority_pubkey = nonced_authority.pubkey();
let (nonced_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonced_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&offline_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
let sign_reply = process_command(&config).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(offline_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, nonced_authority_pubkey);
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = Keypair::new();
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: Some(config.keypair.pubkey()),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Nonced assignment of new online stake authority
let online_authority = Keypair::new();
let online_authority_pubkey = online_authority.pubkey();
let (_online_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&nonced_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: Some(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
};
let sign_reply = process_command(&config).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
assert_eq!(blockhash, nonce_hash);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(nonced_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, online_authority_pubkey);
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let new_nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
assert_ne!(nonce_hash, new_nonce_hash);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.22.0"
version = "0.22.10"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,11 +19,11 @@ reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tl
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.5"
solana-logger = { path = "../logger", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.10" }

View File

@ -8,4 +8,5 @@ pub mod perf_utils;
pub mod rpc_client;
pub mod rpc_client_request;
pub mod rpc_request;
pub mod rpc_response;
pub mod thin_client;

View File

@ -1,7 +1,8 @@
use crate::rpc_request::{Response, RpcResponseContext};
use crate::{
client_error::ClientError, generic_rpc_client_request::GenericRpcClientRequest,
client_error::ClientError,
generic_rpc_client_request::GenericRpcClientRequest,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use serde_json::{Number, Value};
use solana_sdk::{
@ -9,18 +10,28 @@ use solana_sdk::{
instruction::InstructionError,
transaction::{self, TransactionError},
};
use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
pub const SIGNATURE: &str =
"43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";
pub type Mocks = HashMap<RpcRequest, Value>;
pub struct MockRpcClientRequest {
mocks: RwLock<Mocks>,
url: String,
}
impl MockRpcClientRequest {
pub fn new(url: String) -> Self {
Self { url }
Self::new_with_mocks(url, Mocks::default())
}
pub fn new_with_mocks(url: String, mocks: Mocks) -> Self {
Self {
url,
mocks: RwLock::new(mocks),
}
}
}
@ -31,6 +42,9 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
params: serde_json::Value,
_retries: usize,
) -> Result<serde_json::Value, ClientError> {
if let Some(value) = self.mocks.write().unwrap().remove(request) {
return Ok(value);
}
if self.url == "fails" {
return Ok(Value::Null);
}
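With the mock map in place, a test can prime canned responses instead of keying all behavior off the URL. A sketch, assuming `Mocks` is reachable from the crate root; the `getBalance` payload is illustrative:

```rust
use serde_json::json;
use solana_client::{
    mock_rpc_client_request::Mocks, rpc_client::RpcClient, rpc_request::RpcRequest,
};

fn main() {
    let mut mocks = Mocks::default();
    // Each primed response is consumed on first use (`remove()` above);
    // unprimed requests fall through to the default mock behavior.
    mocks.insert(
        RpcRequest::GetBalance,
        json!({ "context": { "slot": 1 }, "value": 50 }),
    );
    let _rpc_client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
}
```

The `Mocks` map is keyed by `RpcRequest`, which is why the enum gains `Eq` and `Hash` derives later in this diff.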

View File

@ -1,12 +1,13 @@
use crate::rpc_request::{Response, RpcResponse};
use crate::{
client_error::ClientError,
generic_rpc_client_request::GenericRpcClientRequest,
mock_rpc_client_request::MockRpcClientRequest,
mock_rpc_client_request::{MockRpcClientRequest, Mocks},
rpc_client_request::RpcClientRequest,
rpc_request::{
RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule, RpcRequest,
RpcVersionInfo, RpcVoteAccountStatus,
rpc_request::RpcRequest,
rpc_response::{
Response, RpcAccount, RpcBlockhashFeeCalculator, RpcConfirmedBlock, RpcContactInfo,
RpcEpochInfo, RpcKeyedAccount, RpcLeaderSchedule, RpcResponse, RpcVersionInfo,
RpcVoteAccountStatus,
},
};
use bincode::serialize;
@ -48,6 +49,12 @@ impl RpcClient {
}
}
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
Self {
client: Box::new(MockRpcClientRequest::new_with_mocks(url, mocks)),
}
}
pub fn new_socket(addr: SocketAddr) -> Self {
Self::new(get_rpc_request_str(addr, false))
}
@ -379,6 +386,25 @@ impl RpcClient {
})
}
pub fn minimum_ledger_slot(&self) -> io::Result<Slot> {
let response = self
.client
.send(&RpcRequest::MinimumLedgerSlot, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("MinimumLedgerSlot request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("MinimumLedgerSlot parse failure: {}", err),
)
})
}
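A hypothetical call site for the new wrapper (node URL assumed):

```rust
use solana_client::rpc_client::RpcClient;

fn main() {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    match rpc_client.minimum_ledger_slot() {
        Ok(slot) => println!("ledger holds data back to slot {}", slot),
        Err(err) => eprintln!("minimumLedgerSlot failed: {}", err),
    }
}
```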
pub fn send_and_confirm_transaction<T: KeypairUtil>(
&self,
transaction: &mut Transaction,
@ -572,9 +598,16 @@ impl RpcClient {
format!("AccountNotFound: pubkey={}", pubkey),
));
}
let result = serde_json::from_value::<Response<Option<Account>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, result);
Ok(result)
let Response {
context,
value: rpc_account,
} = serde_json::from_value::<Response<Option<RpcAccount>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let account = rpc_account.and_then(|rpc_account| rpc_account.decode().ok());
Ok(Response {
context,
value: account,
})
})
.map_err(|err| {
io::Error::new(
@ -669,8 +702,8 @@ impl RpcClient {
)
})?;
let accounts: Vec<(String, Account)> =
serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
let accounts: Vec<RpcKeyedAccount> =
serde_json::from_value::<Vec<RpcKeyedAccount>>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
@ -678,14 +711,14 @@ impl RpcClient {
})?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for (string, account) in accounts.into_iter() {
let pubkey = string.parse().map_err(|err| {
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
let pubkey = pubkey.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
pubkey_accounts.push((pubkey, account));
pubkey_accounts.push((pubkey, account.decode().unwrap()));
}
Ok(pubkey_accounts)
}
@ -747,8 +780,12 @@ impl RpcClient {
let Response {
context,
value: (blockhash_str, fee_calculator),
} = serde_json::from_value::<Response<(String, FeeCalculator)>>(response).map_err(
value:
RpcBlockhashFeeCalculator {
blockhash,
fee_calculator,
},
} = serde_json::from_value::<Response<RpcBlockhashFeeCalculator>>(response).map_err(
|err| {
io::Error::new(
io::ErrorKind::Other,
@ -756,7 +793,7 @@ impl RpcClient {
)
},
)?;
let blockhash = blockhash_str.parse().map_err(|err| {
let blockhash = blockhash.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetRecentBlockhash hash parse failure: {:?}", err),

View File

@ -1,119 +1,7 @@
use jsonrpc_core::Result as JsonResult;
use serde_json::{json, Value};
use solana_sdk::{
clock::{Epoch, Slot},
hash::Hash,
transaction::{Result, Transaction},
};
use std::{collections::HashMap, error, fmt, io, net::SocketAddr};
use std::{error, fmt};
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponseContext {
pub slot: u64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Response<T> {
pub context: RpcResponseContext,
pub value: T,
}
#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlock {
pub previous_blockhash: Hash,
pub blockhash: Hash,
pub parent_slot: Slot,
pub transactions: Vec<(Transaction, Option<RpcTransactionStatus>)>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionStatus {
pub status: Result<()>,
pub fee: u64,
pub pre_balances: Vec<u64>,
pub post_balances: Vec<u64>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
/// Pubkey of the node as a base-58 string
pub pubkey: String,
/// Gossip port
pub gossip: Option<SocketAddr>,
/// Tpu port
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
/// The current epoch
pub epoch: Epoch,
/// The current slot, relative to the start of the current epoch
pub slot_index: u64,
/// The number of slots in this epoch
pub slots_in_epoch: u64,
/// The absolute current slot
pub absolute_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct RpcVersionInfo {
/// The current version of solana-core
pub solana_core: String,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountStatus {
pub current: Vec<RpcVoteAccountInfo>,
pub delinquent: Vec<RpcVoteAccountInfo>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountInfo {
/// Vote account pubkey as base-58 encoded string
pub vote_pubkey: String,
/// The pubkey of the node that votes using this account
pub node_pubkey: String,
/// The current stake, in lamports, delegated to this vote account
pub activated_stake: u64,
/// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
pub commission: u8,
/// Whether this account is staked for the current epoch
pub epoch_vote_account: bool,
/// History of how many credits were earned by the end of each epoch;
/// each tuple is (Epoch, credits, prev_credits)
pub epoch_credits: Vec<(Epoch, u64, u64)>,
/// Most recent slot voted on by this vote account (0 if no votes exist)
pub last_vote: u64,
/// Current root slot for this vote account (0 if no root slot exists)
pub root_slot: Slot,
}
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum RpcRequest {
ConfirmTransaction,
DeregisterNode,
@ -147,6 +35,7 @@ pub enum RpcRequest {
SendTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
}
impl RpcRequest {
@ -187,6 +76,7 @@ impl RpcRequest {
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
json!({
"jsonrpc": jsonrpc,

client/src/rpc_response.rs Normal file
View File

@ -0,0 +1,277 @@
use crate::rpc_request::RpcError;
use bincode::serialize;
use jsonrpc_core::Result as JsonResult;
use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
fee_calculator::FeeCalculator,
message::MessageHeader,
pubkey::Pubkey,
transaction::{Result, Transaction},
};
use std::{collections::HashMap, io, net::SocketAddr, str::FromStr};
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponseContext {
pub slot: u64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Response<T> {
pub context: RpcResponseContext,
pub value: T,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockCommitment<T> {
pub commitment: Option<T>,
pub total_stake: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlock {
pub previous_blockhash: String,
pub blockhash: String,
pub parent_slot: Slot,
pub transactions: Vec<RpcTransactionWithStatusMeta>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionWithStatusMeta {
pub transaction: RpcEncodedTransaction,
pub meta: Option<RpcTransactionStatus>,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum RpcTransactionEncoding {
Binary,
Json,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum RpcEncodedTransaction {
Binary(String),
Json(RpcTransaction),
}
impl RpcEncodedTransaction {
pub fn encode(transaction: Transaction, encoding: RpcTransactionEncoding) -> Self {
if encoding == RpcTransactionEncoding::Json {
RpcEncodedTransaction::Json(RpcTransaction {
signatures: transaction
.signatures
.iter()
.map(|sig| sig.to_string())
.collect(),
message: RpcMessage {
header: transaction.message.header,
account_keys: transaction
.message
.account_keys
.iter()
.map(|pubkey| pubkey.to_string())
.collect(),
recent_blockhash: transaction.message.recent_blockhash.to_string(),
instructions: transaction
.message
.instructions
.iter()
.map(|instruction| RpcCompiledInstruction {
program_id_index: instruction.program_id_index,
accounts: instruction.accounts.clone(),
data: bs58::encode(instruction.data.clone()).into_string(),
})
.collect(),
},
})
} else {
RpcEncodedTransaction::Binary(
bs58::encode(serialize(&transaction).unwrap()).into_string(),
)
}
}
}
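A quick illustration of the two encodings on a throwaway transaction:

```rust
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionEncoding};
use solana_sdk::transaction::Transaction;

fn main() {
    let tx = Transaction::default();
    // Binary: the whole transaction, bincode-serialized then base58-encoded.
    if let RpcEncodedTransaction::Binary(b58) =
        RpcEncodedTransaction::encode(tx.clone(), RpcTransactionEncoding::Binary)
    {
        println!("binary: {}", b58);
    }
    // Json: a field-by-field mirror with pubkeys, hashes, and instruction
    // data rendered as strings.
    if let RpcEncodedTransaction::Json(json_tx) =
        RpcEncodedTransaction::encode(tx, RpcTransactionEncoding::Json)
    {
        println!("blockhash: {}", json_tx.message.recent_blockhash);
    }
}
```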
/// A duplicate representation of a Transaction for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransaction {
pub signatures: Vec<String>,
pub message: RpcMessage,
}
/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcMessage {
pub header: MessageHeader,
pub account_keys: Vec<String>,
pub recent_blockhash: String,
pub instructions: Vec<RpcCompiledInstruction>,
}
/// A duplicate representation of a CompiledInstruction for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcCompiledInstruction {
pub program_id_index: u8,
pub accounts: Vec<u8>,
pub data: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionStatus {
pub status: Result<()>,
pub fee: u64,
pub pre_balances: Vec<u64>,
pub post_balances: Vec<u64>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockhashFeeCalculator {
pub blockhash: String,
pub fee_calculator: FeeCalculator,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcKeyedAccount {
pub pubkey: String,
pub account: RpcAccount,
}
/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccount {
pub lamports: u64,
pub data: String,
pub owner: String,
pub executable: bool,
pub rent_epoch: Epoch,
}
impl RpcAccount {
pub fn encode(account: Account) -> Self {
RpcAccount {
lamports: account.lamports,
data: bs58::encode(account.data.clone()).into_string(),
owner: account.owner.to_string(),
executable: account.executable,
rent_epoch: account.rent_epoch,
}
}
pub fn decode(&self) -> std::result::Result<Account, RpcError> {
Ok(Account {
lamports: self.lamports,
data: bs58::decode(self.data.clone()).into_vec().map_err(|_| {
RpcError::RpcRequestError("Could not parse encoded account data".to_string())
})?,
owner: Pubkey::from_str(&self.owner).map_err(|_| {
RpcError::RpcRequestError("Could not parse encoded account owner".to_string())
})?,
executable: self.executable,
rent_epoch: self.rent_epoch,
..Account::default()
})
}
}
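A round-trip sketch for the new base58 account representation (`Account::new(lamports, space, owner)` per solana-sdk at this version):

```rust
use solana_client::rpc_response::RpcAccount;
use solana_sdk::{account::Account, pubkey::Pubkey};

fn main() {
    let account = Account::new(42, 3, &Pubkey::new_rand());
    // Encode to the string-based wire form, then decode back.
    let encoded = RpcAccount::encode(account.clone());
    let decoded = encoded.decode().unwrap();
    assert_eq!(account, decoded);
}
```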
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
/// Pubkey of the node as a base-58 string
pub pubkey: String,
/// Gossip port
pub gossip: Option<SocketAddr>,
/// Tpu port
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
/// The current epoch
pub epoch: Epoch,
/// The current slot, relative to the start of the current epoch
pub slot_index: u64,
/// The number of slots in this epoch
pub slots_in_epoch: u64,
/// The absolute current slot
pub absolute_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct RpcVersionInfo {
/// The current version of solana-core
pub solana_core: String,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountStatus {
pub current: Vec<RpcVoteAccountInfo>,
pub delinquent: Vec<RpcVoteAccountInfo>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountInfo {
/// Vote account pubkey as base-58 encoded string
pub vote_pubkey: String,
/// The pubkey of the node that votes using this account
pub node_pubkey: String,
/// The current stake, in lamports, delegated to this vote account
pub activated_stake: u64,
/// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
pub commission: u8,
/// Whether this account is staked for the current epoch
pub epoch_vote_account: bool,
/// History of how many credits were earned by the end of each epoch;
/// each tuple is (Epoch, credits, prev_credits)
pub epoch_credits: Vec<(Epoch, u64, u64)>,
/// Most recent slot voted on by this vote account (0 if no votes exist)
pub last_vote: u64,
/// Current root slot for this vote account (0 if no root slot exists)
pub root_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureConfirmation {
pub confirmations: usize,
pub status: Result<()>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcStorageTurn {
pub blockhash: String,
pub slot: Slot,
}
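A sketch of deserializing one of the new response types (payload values are illustrative; the archiver change later in this diff consumes `RpcStorageTurn` the same way):

```rust
use solana_client::rpc_response::RpcStorageTurn;

fn main() {
    let turn: RpcStorageTurn = serde_json::from_str(
        r#"{"blockhash":"11111111111111111111111111111111","slot":42}"#,
    )
    .unwrap();
    assert_eq!(turn.slot, 42);
}
```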

View File

@ -3,8 +3,7 @@
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
use crate::rpc_client::RpcClient;
use crate::rpc_request::Response;
use crate::{rpc_client::RpcClient, rpc_response::Response};
use bincode::{serialize_into, serialized_size};
use log::*;
use solana_sdk::{

View File

@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.0"
version = "0.22.10"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -41,26 +41,26 @@ rayon = "1.2.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.22.0" }
solana-client = { path = "../client", version = "0.22.0" }
solana-faucet = { path = "../faucet", version = "0.22.0" }
solana-budget-program = { path = "../programs/budget", version = "0.22.10" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.22.10" }
solana-client = { path = "../client", version = "0.22.10" }
solana-faucet = { path = "../faucet", version = "0.22.10" }
ed25519-dalek = "=1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.0" }
solana-metrics = { path = "../metrics", version = "0.22.0" }
solana-measure = { path = "../measure", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-perf = { path = "../perf", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-stake-program = { path = "../programs/stake", version = "0.22.0" }
solana-storage-program = { path = "../programs/storage", version = "0.22.0" }
solana-vote-program = { path = "../programs/vote", version = "0.22.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.22.0" }
solana-ledger = { path = "../ledger", version = "0.22.10" }
solana-logger = { path = "../logger", version = "0.22.10" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.10" }
solana-metrics = { path = "../metrics", version = "0.22.10" }
solana-measure = { path = "../measure", version = "0.22.10" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" }
solana-perf = { path = "../perf", version = "0.22.10" }
solana-runtime = { path = "../runtime", version = "0.22.10" }
solana-sdk = { path = "../sdk", version = "0.22.10" }
solana-stake-program = { path = "../programs/stake", version = "0.22.10" }
solana-storage-program = { path = "../programs/storage", version = "0.22.10" }
solana-vote-program = { path = "../programs/vote", version = "0.22.10" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.10" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.22.10" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"
@ -69,13 +69,9 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.10" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"
jemalloc-ctl = "0.3.2"
[dev-dependencies]
hex-literal = "0.2.1"
matches = "0.1.6"
@ -88,7 +84,7 @@ systemstat = "0.1.5"
name = "banking_stage"
[[bench]]
name = "blocktree"
name = "blockstore"
[[bench]]
name = "gen_keys"

View File

@ -12,9 +12,9 @@ use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::blocktree_processor::process_entries;
use solana_ledger::blockstore_processor::process_entries;
use solana_ledger::entry::{next_hash, Entry};
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::genesis_config::GenesisConfig;
@ -57,11 +57,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let my_pubkey = Pubkey::new_rand();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _signal_receiver) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let tx = test_tx();
let len = 4096;
@ -87,7 +87,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
let _unused = Blocktree::destroy(&ledger_path);
let _unused = Blockstore::destroy(&ledger_path);
}
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
@ -184,11 +184,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(
@ -244,7 +244,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
let _unused = Blocktree::destroy(&ledger_path);
let _unused = Blockstore::destroy(&ledger_path);
}
#[bench]

View File

@ -6,7 +6,7 @@ extern crate test;
use rand::Rng;
use solana_ledger::{
blocktree::{entries_to_test_shreds, Blocktree},
blockstore::{entries_to_test_shreds, Blockstore},
entry::{create_ticks, Entry},
get_tmp_ledger_path,
};
@ -16,19 +16,19 @@ use test::Bencher;
// Given some shreds and a ledger at ledger_path, benchmark writing the shreds to the ledger
fn bench_write_shreds(bench: &mut Bencher, entries: Vec<Entry>, ledger_path: &Path) {
let blocktree =
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
let blockstore =
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
bench.iter(move || {
let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
});
Blocktree::destroy(ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(ledger_path).expect("Expected successful database destruction");
}
// Insert some shreds into the ledger in preparation for read benchmarks
fn setup_read_bench(
blocktree: &mut Blocktree,
blockstore: &mut Blockstore,
num_small_shreds: u64,
num_large_shreds: u64,
slot: Slot,
@ -42,7 +42,7 @@ fn setup_read_bench(
// Convert the entries to shreds, write the shreds to the ledger
let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0);
blocktree
blockstore
.insert_shreds(shreds, None, false)
.expect("Expectd successful insertion of shreds into ledger");
}
@ -71,15 +71,15 @@ fn bench_write_big(bench: &mut Bencher) {
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let mut blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let mut blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small shreds into the ledger
let num_small_shreds = 32 * 1024;
let num_large_shreds = 32 * 1024;
let total_shreds = num_small_shreds + num_large_shreds;
let slot = 0;
setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
let num_reads = total_shreds / 15;
let mut rng = rand::thread_rng();
@ -87,26 +87,26 @@ fn bench_read_sequential(bench: &mut Bencher) {
// Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially
let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds);
for i in start_index..start_index + num_reads {
let _ = blocktree.get_data_shred(slot, i as u64 % total_shreds);
let _ = blockstore.get_data_shred(slot, i as u64 % total_shreds);
}
});
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let mut blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let mut blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small shreds into the ledger
let num_small_shreds = 32 * 1024;
let num_large_shreds = 32 * 1024;
let total_shreds = num_small_shreds + num_large_shreds;
let slot = 0;
setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
let num_reads = total_shreds / 15;
@ -118,39 +118,39 @@ fn bench_read_random(bench: &mut Bencher) {
.collect();
bench.iter(move || {
for i in indexes.iter() {
let _ = blocktree.get_data_shred(slot, *i as u64);
let _ = blockstore.get_data_shred(slot, *i as u64);
}
});
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_shred_small(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = create_ticks(num_entries, 0, Hash::default());
bench.iter(move || {
let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
});
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_shred_big(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = create_ticks(num_entries, 0, Hash::default());
bench.iter(move || {
let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
});
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}

View File

@ -17,9 +17,12 @@ use crossbeam_channel::unbounded;
use ed25519_dalek;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient};
use solana_client::{
rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
thin_client::ThinClient,
};
use solana_ledger::{
blocktree::Blocktree, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
};
use solana_net_utils::bind_in_range;
use solana_perf::packet::Packets;
@ -222,13 +225,13 @@ impl Archiver {
// Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append newly received
// entries after being passed to window_service
let blocktree = Arc::new(
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
);
let gossip_service = GossipService::new(
&cluster_info,
Some(blocktree.clone()),
Some(blockstore.clone()),
None,
node.sockets.gossip,
&exit,
@ -294,7 +297,7 @@ impl Archiver {
let window_service = match Self::setup(
&mut meta,
cluster_info.clone(),
&blocktree,
&blockstore,
&exit,
&node_info,
&storage_keypair,
@ -320,7 +323,7 @@ impl Archiver {
// run archiver
Self::run(
&mut meta,
&blocktree,
&blockstore,
cluster_info,
&keypair,
&storage_keypair,
@ -344,14 +347,14 @@ impl Archiver {
fn run(
meta: &mut ArchiverMeta,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
) {
// encrypt segment
Self::encrypt_ledger(meta, blocktree).expect("ledger encrypt not successful");
Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
let enc_file_path = meta.ledger_data_file_encrypted.clone();
// do replicate
loop {
@ -443,7 +446,7 @@ impl Archiver {
fn setup(
meta: &mut ArchiverMeta,
cluster_info: Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
storage_keypair: &Arc<Keypair>,
@ -498,7 +501,7 @@ impl Archiver {
);
let window_service = WindowService::new(
blocktree.clone(),
blockstore.clone(),
cluster_info.clone(),
verified_receiver,
retransmit_sender,
@ -512,7 +515,7 @@ impl Archiver {
Self::wait_for_segment_download(
slot,
slots_per_segment,
&blocktree,
&blockstore,
&exit,
&node_info,
cluster_info,
@ -523,7 +526,7 @@ impl Archiver {
fn wait_for_segment_download(
start_slot: Slot,
slots_per_segment: u64,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -534,7 +537,7 @@ impl Archiver {
);
let mut current_slot = start_slot;
'outer: loop {
while blocktree.is_full(current_slot) {
while blockstore.is_full(current_slot) {
current_slot += 1;
info!("current slot: {}", current_slot);
if current_slot >= start_slot + slots_per_segment {
@ -553,13 +556,15 @@ impl Archiver {
let mut contact_info = node_info.clone();
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
{
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.insert_self(contact_info);
}
}
fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc<Blocktree>) -> Result<()> {
fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);
{
@ -567,7 +572,7 @@ impl Archiver {
ivec.copy_from_slice(&meta.signature.as_ref());
let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
blocktree,
blockstore,
meta.slot,
meta.slots_per_segment,
&meta.ledger_data_file_encrypted,
@ -737,7 +742,7 @@ impl Archiver {
) -> result::Result<u64, Error> {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
@ -793,7 +798,7 @@ impl Archiver {
loop {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
@ -811,13 +816,15 @@ impl Archiver {
warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
})?;
let (storage_blockhash, turn_slot) =
serde_json::from_value::<(String, u64)>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("Couldn't parse response: {:?}", err),
)
})?;
let RpcStorageTurn {
blockhash: storage_blockhash,
slot: turn_slot,
} = serde_json::from_value::<RpcStorageTurn>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("Couldn't parse response: {:?}", err),
)
})?;
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -844,15 +851,15 @@ impl Archiver {
}
}
/// Ask an archiver to populate a given blocktree with its segment.
/// Ask an archiver to populate a given blockstore with its segment.
/// Return the slot at the start of the archiver's segment
///
/// It is recommended to use a temporary blocktree for this since the download will not verify
/// It is recommended to use a temporary blockstore for this since the download will not verify
/// shreds received and might impact the chaining of shreds across slots
pub fn download_from_archiver(
cluster_info: &Arc<RwLock<ClusterInfo>>,
archiver_info: &ContactInfo,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
slots_per_segment: u64,
) -> Result<u64> {
// Create a client which downloads from the archiver and see that it
@ -884,7 +891,7 @@ impl Archiver {
for _ in 0..120 {
// Strategy used by archivers
let repairs = RepairService::generate_repairs_in_range(
blocktree,
blockstore,
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
@ -930,10 +937,10 @@ impl Archiver {
.into_iter()
.filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
.collect();
blocktree.insert_shreds(shreds, None, false)?;
blockstore.insert_shreds(shreds, None, false)?;
}
// check if all the slots in the segment are complete
if Self::segment_complete(start_slot, slots_per_segment, blocktree) {
if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
break;
}
sleep(Duration::from_millis(500));
@ -942,7 +949,7 @@ impl Archiver {
t_receiver.join().unwrap();
// check if all the slots in the segment are complete
if !Self::segment_complete(start_slot, slots_per_segment, blocktree) {
if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
return Err(
io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(),
);
@ -953,10 +960,10 @@ impl Archiver {
fn segment_complete(
start_slot: Slot,
slots_per_segment: u64,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> bool {
for slot in start_slot..(start_slot + slots_per_segment) {
if !blocktree.is_full(slot) {
if !blockstore.is_full(slot) {
return false;
}
}
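
The RpcStorageTurn change above replaces positional tuple deserialization of the getStorageTurn response with a named struct, so a field reorder on the wire can no longer silently swap values. A self-contained sketch of the same serde pattern; the derive list on the real solana_client type is assumed, and only the field names come from the hunk:

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct RpcStorageTurn {
    blockhash: String,
    slot: u64,
}

fn main() -> Result<(), serde_json::Error> {
    let response = json!({ "blockhash": "4uQeVjT6KYvN", "slot": 42 });
    // Named-field destructuring replaces the old (String, u64) tuple parse.
    let RpcStorageTurn { blockhash, slot } = serde_json::from_value(response)?;
    assert_eq!(slot, 42);
    println!("storage turn at slot {} with blockhash {}", slot, blockhash);
    Ok(())
}
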

View File

@ -7,17 +7,16 @@ use crate::{
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_service::PohService,
result::{Error, Result},
thread_mem_usage,
};
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
use solana_ledger::{
blocktree::Blocktree,
blocktree_processor::{send_transaction_status_batch, TransactionStatusSender},
blockstore::Blockstore,
blockstore_processor::{send_transaction_status_batch, TransactionStatusSender},
entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_measure::measure::Measure;
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
use solana_perf::{cuda_runtime::PinnedVec, perf_libs};
use solana_runtime::{
@ -410,7 +409,7 @@ impl BankingStage {
if unprocessed_packets.is_empty() {
continue;
}
let num = unprocessed_packets
let num: usize = unprocessed_packets
.iter()
.map(|(_, unprocessed)| unprocessed.len())
.sum();
@ -988,7 +987,7 @@ impl BankingStage {
pub fn create_test_recorder(
bank: &Arc<Bank>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
poh_config: Option<PohConfig>,
) -> (
Arc<AtomicBool>,
@ -1005,7 +1004,7 @@ pub fn create_test_recorder(
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
blocktree,
blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config,
);
@ -1029,8 +1028,9 @@ mod tests {
};
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
blocktree::entries_to_test_shreds,
blockstore::entries_to_test_shreds,
entry::{next_entry, Entry, EntrySlice},
get_tmp_ledger_path,
};
@ -1051,11 +1051,12 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1071,7 +1072,7 @@ mod tests {
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1088,13 +1089,14 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1122,7 +1124,7 @@ mod tests {
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
@ -1149,14 +1151,15 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1242,7 +1245,7 @@ mod tests {
drop(entry_receiver);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1288,15 +1291,15 @@ mod tests {
let entry_receiver = {
// start a banking_stage to consume the verified receiver
let bank = Arc::new(Bank::new(&genesis_config));
let blocktree = Arc::new(
Blocktree::open(&ledger_path)
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
@ -1339,7 +1342,7 @@ mod tests {
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 2);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1357,8 +1360,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1366,7 +1369,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1446,7 +1449,7 @@ mod tests {
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1696,8 +1699,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1705,7 +1708,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1762,7 +1765,7 @@ mod tests {
assert_eq!(bank.get_balance(&pubkey), 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1789,8 +1792,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1798,7 +1801,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1817,7 +1820,7 @@ mod tests {
assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1877,8 +1880,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1886,7 +1889,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::new_rand(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1905,7 +1908,7 @@ mod tests {
assert_eq!(retryable_txs, expected);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1944,9 +1947,9 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1954,7 +1957,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&blocktree,
&blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1963,13 +1966,13 @@ mod tests {
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[bank.slot()]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[bank.slot()]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blocktree.clone(),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
@ -1983,25 +1986,29 @@ mod tests {
transaction_status_service.join().unwrap();
let confirmed_block = blocktree.get_confirmed_block(bank.slot()).unwrap();
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() {
if transaction.signatures[0] == success_signature {
assert_eq!(result.unwrap().status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature {
assert_eq!(
result.unwrap().status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
} else {
assert_eq!(result, None);
for RpcTransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == success_signature.to_string() {
assert_eq!(meta.unwrap().status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature.to_string() {
assert_eq!(
meta.unwrap().status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
} else {
assert_eq!(meta, None);
}
}
}
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}
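
The reworked assertion loop above reflects the new confirmed-block shape: get_confirmed_block now yields RpcTransactionWithStatusMeta values whose transaction is encoded (here as JSON) and whose signatures travel as strings, hence the to_string() comparisons. A reduced sketch with stand-in types; the real definitions live in solana_client::rpc_response:

enum RpcEncodedTransaction {
    Json(UiTransaction),
}

struct UiTransaction {
    // After encoding, signatures are base58 strings, not Signature values.
    signatures: Vec<String>,
}

struct RpcTransactionWithStatusMeta {
    transaction: RpcEncodedTransaction,
    meta: Option<Result<(), String>>, // stand-in for the real status meta
}

fn main() {
    let item = RpcTransactionWithStatusMeta {
        transaction: RpcEncodedTransaction::Json(UiTransaction {
            signatures: vec!["sig1".to_string()],
        }),
        meta: Some(Ok(())),
    };
    // The same two-level destructuring used by the test above.
    let RpcTransactionWithStatusMeta { transaction, meta } = item;
    let RpcEncodedTransaction::Json(ui) = transaction;
    assert_eq!(ui.signatures[0], "sig1");
    assert_eq!(meta, Some(Ok(())));
}
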

View File

@ -8,7 +8,7 @@ use crate::blockstream::MockBlockstream as Blockstream;
#[cfg(not(test))]
use crate::blockstream::SocketBlockstream as Blockstream;
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::pubkey::Pubkey;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
@ -25,7 +25,7 @@ impl BlockstreamService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
slot_full_receiver: Receiver<(u64, Pubkey)>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
unix_socket: &Path,
exit: &Arc<AtomicBool>,
) -> Self {
@ -38,7 +38,7 @@ impl BlockstreamService {
break;
}
if let Err(e) =
Self::process_entries(&slot_full_receiver, &blocktree, &mut blockstream)
Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
{
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@ -52,18 +52,18 @@ impl BlockstreamService {
}
fn process_entries(
slot_full_receiver: &Receiver<(u64, Pubkey)>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
blockstream: &mut Blockstream,
) -> Result<()> {
let timeout = Duration::new(1, 0);
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
let entries = blocktree.get_slot_entries(slot, 0, None).unwrap();
let blocktree_meta = blocktree.meta(slot).unwrap().unwrap();
let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
let blockstore_meta = blockstore.meta(slot).unwrap().unwrap();
let _parent_slot = if slot == 0 {
None
} else {
Some(blocktree_meta.parent_slot)
Some(blockstore_meta.parent_slot)
};
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot;
@ -113,14 +113,14 @@ mod test {
let ticks_per_slot = 5;
let leader_pubkey = Pubkey::new_rand();
// Set up genesis config and blocktree
// Set up genesis config and blockstore
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(1000);
genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
// Set up blockstream
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
@ -143,7 +143,7 @@ mod test {
let expected_entries = entries.clone();
let expected_tick_heights = [6, 7, 8, 9, 9, 10];
blocktree
blockstore
.write_entries(
1,
0,
@ -160,7 +160,7 @@ mod test {
slot_full_sender.send((1, leader_pubkey)).unwrap();
BlockstreamService::process_entries(
&slot_full_receiver,
&Arc::new(blocktree),
&Arc::new(blockstore),
&mut blockstream,
)
.unwrap();

View File

@ -5,7 +5,7 @@ use self::standard_broadcast_run::StandardBroadcastRun;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::poh_recorder::WorkingBankEntry;
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::shred::Shred;
use solana_ledger::staking_utils;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
@ -44,7 +44,7 @@ impl BroadcastStageType {
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
shred_version: u16,
) -> BroadcastStage {
let keypair = cluster_info.read().unwrap().keypair.clone();
@ -54,7 +54,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
StandardBroadcastRun::new(keypair, shred_version),
),
@ -63,7 +63,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
FailEntryVerificationBroadcastRun::new(keypair, shred_version),
),
@ -72,7 +72,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
),
}
@ -83,10 +83,10 @@ type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
trait BroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()>;
fn transmit(
&self,
@ -97,7 +97,7 @@ trait BroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()>;
}
@ -126,14 +126,15 @@ pub struct BroadcastStage {
impl BroadcastStage {
#[allow(clippy::too_many_arguments)]
fn run(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
mut broadcast_stage_run: impl BroadcastRun,
) -> BroadcastStageReturnType {
loop {
let res = broadcast_stage_run.run(blocktree, receiver, socket_sender, blocktree_sender);
let res =
broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
@ -180,19 +181,25 @@ impl BroadcastStage {
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
) -> Self {
let btree = blocktree.clone();
let btree = blockstore.clone();
let exit = exit_sender.clone();
let (socket_sender, socket_receiver) = channel();
let (blocktree_sender, blocktree_receiver) = channel();
let (blockstore_sender, blockstore_receiver) = channel();
let bs_run = broadcast_stage_run.clone();
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
let _finalizer = Finalizer::new(exit);
Self::run(&btree, &receiver, &socket_sender, &blocktree_sender, bs_run)
Self::run(
&btree,
&receiver,
&socket_sender,
&blockstore_sender,
bs_run,
)
})
.unwrap();
let mut thread_hdls = vec![thread_hdl];
@ -213,15 +220,15 @@ impl BroadcastStage {
.unwrap();
thread_hdls.push(t);
}
let blocktree_receiver = Arc::new(Mutex::new(blocktree_receiver));
let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
for _ in 0..NUM_INSERT_THREADS {
let blocktree_receiver = blocktree_receiver.clone();
let blockstore_receiver = blockstore_receiver.clone();
let bs_record = broadcast_stage_run.clone();
let btree = blocktree.clone();
let btree = blockstore.clone();
let t = Builder::new()
.name("solana-broadcaster-record".to_string())
.spawn(move || loop {
let res = bs_record.record(&blocktree_receiver, &btree);
let res = bs_record.record(&blockstore_receiver, &btree);
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
@ -248,7 +255,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::entry::create_ticks;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
@ -261,7 +268,7 @@ mod test {
use std::time::Duration;
struct MockBroadcastStage {
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
broadcast_service: BroadcastStage,
bank: Arc<Bank>,
}
@ -272,7 +279,7 @@ mod test {
entry_receiver: Receiver<WorkingBankEntry>,
) -> MockBroadcastStage {
// Make the database ledger
let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
// Make the leader node and scheduler
let leader_info = Node::new_localhost_with_pubkey(leader_pubkey);
@ -298,12 +305,12 @@ mod test {
cluster_info,
entry_receiver,
&exit_sender,
&blocktree,
&blockstore,
StandardBroadcastRun::new(leader_keypair, 0),
);
MockBroadcastStage {
blocktree,
blockstore,
broadcast_service,
bank,
}
@ -350,8 +357,8 @@ mod test {
ticks_per_slot,
);
let blocktree = broadcast_service.blocktree;
let (entries, _, _) = blocktree
let blockstore = broadcast_service.blockstore;
let (entries, _, _) = blockstore
.get_slot_entries_with_shred_info(slot, 0)
.expect("Expect entries to be present");
assert_eq!(entries.len(), max_tick_height as usize);
@ -363,6 +370,6 @@ mod test {
.expect("Expect successful join of broadcast service");
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
}
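
BroadcastStage::new, shown above, wires one generator thread into two channels: socket_sender feeds the transmit threads while blockstore_sender feeds NUM_INSERT_THREADS record threads that share a Mutex-wrapped receiver. A minimal standalone sketch of that fan-out/fan-in shape (plain Vec<u8> stands in for Arc<Vec<Shred>>):

use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let (socket_sender, socket_receiver) = channel::<Vec<u8>>();
    let (blockstore_sender, blockstore_receiver) = channel::<Vec<u8>>();
    // Record workers compete for batches through a shared, locked receiver.
    let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));

    let generator = thread::spawn(move || {
        for shred in [vec![1u8], vec![2u8], vec![3u8]] {
            socket_sender.send(shred.clone()).unwrap(); // transmit path
            blockstore_sender.send(shred).unwrap(); // record path
        }
        // Dropping the senders here shuts both paths down cleanly.
    });

    let workers: Vec<_> = (0..2)
        .map(|_| {
            let receiver = blockstore_receiver.clone();
            thread::spawn(move || {
                // Mirrors the solana-broadcaster-record loop: lock, recv, insert.
                while let Ok(_shreds) = receiver.lock().unwrap().recv() {
                    // A real worker would call blockstore.insert_shreds here.
                }
            })
        })
        .collect();

    generator.join().unwrap();
    for worker in workers {
        worker.join().unwrap();
    }
    drop(socket_receiver); // transmit side elided in this sketch
}
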

View File

@ -26,17 +26,17 @@ impl BroadcastFakeShredsRun {
impl BroadcastRun for BroadcastFakeShredsRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;
let next_shred_index = blocktree
let next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
}
let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
// Some(_) indicates fake shreds
@ -121,10 +121,10 @@ impl BroadcastRun for BroadcastFakeShredsRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
for data_shreds in receiver.lock().unwrap().iter() {
blocktree.insert_shreds(data_shreds.to_vec(), None, true)?;
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
}
Ok(())
}

View File

@ -21,10 +21,10 @@ impl FailEntryVerificationBroadcastRun {
impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@ -38,7 +38,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
last_entry.hash = Hash::default();
}
let next_shred_index = blocktree
let next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
@ -61,7 +61,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
);
let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
@ -90,12 +90,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let all_shreds = receiver.lock().unwrap().recv()?;
blocktree
blockstore
.insert_shreds(all_shreds.to_vec(), None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
Ok(())
}
}

View File

@ -83,13 +83,13 @@ impl StandardBroadcastRun {
last_unfinished_slot_shred
}
fn init_shredder(&self, blocktree: &Blocktree, reference_tick: u8) -> (Shredder, u32) {
fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) {
let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let next_shred_index = self
.unfinished_slot
.map(|s| s.next_shred_index)
.unwrap_or_else(|| {
blocktree
blockstore
.meta(slot)
.expect("Database error")
.map(|meta| meta.consumed)
@ -132,27 +132,27 @@ impl StandardBroadcastRun {
&mut self,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receive_results: ReceiveResults,
) -> Result<()> {
let (bsend, brecv) = channel();
let (ssend, srecv) = channel();
self.process_receive_results(&blocktree, &ssend, &bsend, receive_results)?;
self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
let srecv = Arc::new(Mutex::new(srecv));
let brecv = Arc::new(Mutex::new(brecv));
//data
let _ = self.transmit(&srecv, cluster_info, sock);
//coding
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blocktree);
let _ = self.record(&brecv, blockstore);
Ok(())
}
fn process_receive_results(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
receive_results: ReceiveResults,
) -> Result<()> {
let mut receive_elapsed = receive_results.time_elapsed;
@ -181,7 +181,7 @@ impl StandardBroadcastRun {
// 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder(
blocktree,
blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let mut data_shreds = self.entries_to_data_shreds(
@ -190,13 +190,13 @@ impl StandardBroadcastRun {
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
);
//Insert the first shred so blocktree stores that the leader started this block
//Insert the first shred so blockstore stores that the leader started this block
//This must be done before the blocks are sent out over the wire.
if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
let first = vec![data_shreds[0].clone()];
blocktree
blockstore
.insert_shreds(first, None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
}
let last_data_shred = data_shreds.len();
if let Some(last_shred) = last_unfinished_slot_shred {
@ -209,7 +209,7 @@ impl StandardBroadcastRun {
let stakes = stakes.map(Arc::new);
let data_shreds = Arc::new(data_shreds);
socket_sender.send((stakes.clone(), data_shreds.clone()))?;
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send((stakes, coding_shreds))?;
@ -227,8 +227,8 @@ impl StandardBroadcastRun {
Ok(())
}
fn insert(&self, blocktree: &Arc<Blocktree>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blocktree
fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blockstore
let insert_shreds_start = Instant::now();
//The first shred is inserted synchronously
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
@ -236,9 +236,9 @@ impl StandardBroadcastRun {
} else {
shreds.to_vec()
};
blocktree
blockstore
.insert_shreds(data_shreds, None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
let insert_shreds_elapsed = insert_shreds_start.elapsed();
self.update_broadcast_stats(BroadcastStats {
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
@ -317,13 +317,18 @@ impl StandardBroadcastRun {
impl BroadcastRun for StandardBroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
self.process_receive_results(blocktree, socket_sender, blocktree_sender, receive_results)
self.process_receive_results(
blockstore,
socket_sender,
blockstore_sender,
receive_results,
)
}
fn transmit(
&self,
@ -337,10 +342,10 @@ impl BroadcastRun for StandardBroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let shreds = receiver.lock().unwrap().recv()?;
self.insert(blocktree, shreds)
self.insert(blockstore, shreds)
}
}
@ -350,7 +355,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::create_genesis_config;
use solana_ledger::{
blocktree::Blocktree, entry::create_ticks, get_tmp_ledger_path,
blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
shred::max_ticks_per_n_shreds,
};
use solana_runtime::bank::Bank;
@ -365,7 +370,7 @@ mod test {
fn setup(
num_shreds_per_slot: Slot,
) -> (
Arc<Blocktree>,
Arc<Blockstore>,
GenesisConfig,
Arc<RwLock<ClusterInfo>>,
Arc<Bank>,
@ -374,8 +379,8 @@ mod test {
) {
// Setup
let ledger_path = get_tmp_ledger_path!();
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
@ -388,7 +393,7 @@ mod test {
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blocktree,
blockstore,
genesis_config,
cluster_info,
bank0,
@ -433,7 +438,7 @@ mod test {
fn test_slot_interrupt() {
// Setup
let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot);
// Insert 1 less than the number of ticks needed to finish the slot
@ -448,14 +453,14 @@ mod test {
// Step 1: Make an incomplete transmission for slot 0
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0);
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot);
assert_eq!(unfinished_slot.slot, 0);
assert_eq!(unfinished_slot.parent, 0);
// Make sure the slot is not complete
assert!(!blocktree.is_full(0));
assert!(!blockstore.is_full(0));
// Modify the stats, should reset later
standard_broadcast_run
.stats
@ -463,10 +468,10 @@ mod test {
.unwrap()
.receive_elapsed = 10;
// Try to fetch ticks from blocktree, nothing should break
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
// Try to fetch ticks from blockstore, nothing should break
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blocktree
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
@ -487,7 +492,7 @@ mod test {
last_tick_height: (ticks1.len() - 1) as u64,
};
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
@ -503,10 +508,10 @@ mod test {
0
);
// Try to fetch the incomplete ticks from blocktree, should succeed
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
// Try to fetch the incomplete ticks from blockstore, should succeed
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blocktree
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
@ -517,7 +522,7 @@ mod test {
fn test_slot_finish() {
// Setup
let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot);
// Insert complete slot of ticks needed to finish the slot
@ -531,7 +536,7 @@ mod test {
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
assert!(standard_broadcast_run.unfinished_slot.is_none())
}

View File

@ -1,4 +1,4 @@
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;
use std::fs::File;
use std::io;
@ -12,7 +12,7 @@ pub const CHACHA_BLOCK_SIZE: usize = 64;
pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
start_slot: Slot,
slots_per_segment: u64,
out_path: &Path,
@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut current_slot = start_slot;
let mut start_index = 0;
loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha: encrypting slice: {} num_shreds: {} data_len: {}",
@ -75,7 +75,7 @@ pub fn chacha_cbc_encrypt_ledger(
mod tests {
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::gen_keys::GenKeys;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry;
use solana_ledger::get_tmp_ledger_path;
use solana_sdk::hash::{hash, Hash, Hasher};
@ -131,7 +131,7 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16;
let slots_per_segment = 32;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let out_path = tmp_file_path("test_encrypt_ledger");
let seed = [2u8; 32];
@ -139,7 +139,7 @@ mod tests {
let keypair = rnd.gen_keypair();
let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blocktree
blockstore
.write_entries(
0,
0,
@ -157,8 +157,14 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, &out_path, &mut key)
.unwrap();
chacha_cbc_encrypt_ledger(
&blockstore,
0,
slots_per_segment as u64,
&out_path,
&mut key,
)
.unwrap();
let mut out_file = File::open(&out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();

View File

@ -1,7 +1,7 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
@ -13,7 +13,7 @@ use std::sync::Arc;
// Then sample each block at the offsets provided by the samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
segment: u64,
slots_per_segment: u64,
ivecs: &mut [u8],
@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
(api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
@ -134,9 +134,9 @@ mod tests {
let entries = create_ticks(slots_per_segment, 0, Hash::default());
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blocktree
blockstore
.write_entries(
0,
0,
@ -160,7 +160,7 @@ mod tests {
let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger(
&blocktree,
&blockstore,
0,
slots_per_segment as u64,
out_path,
@ -171,7 +171,7 @@ mod tests {
let ref_hash = sample_file(&out_path, &samples).unwrap();
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
slots_per_segment as u64,
&mut ivecs,
@ -196,8 +196,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 90;
let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blockstore
.write_entries(
0,
0,
@ -224,7 +224,7 @@ mod tests {
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger(
&blocktree.clone(),
&blockstore.clone(),
0,
DEFAULT_SLOTS_PER_SEGMENT,
out_path,
@ -242,7 +242,7 @@ mod tests {
}
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut ivecs,
@ -267,9 +267,9 @@ mod tests {
let mut keys = hex!("abc123");
let ledger_path = get_tmp_ledger_path!();
let samples = [0];
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut keys,

View File

@ -24,14 +24,14 @@ use crate::{
repair_service::RepairType,
result::{Error, Result},
sendmmsg::{multicast, send_mmsg},
thread_mem_usage,
weighted_shuffle::{weighted_best, weighted_shuffle},
};
use bincode::{serialize, serialized_size};
use core::cmp;
use itertools::Itertools;
use rand::{thread_rng, Rng};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
@ -67,11 +67,11 @@ pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
/// The maximum size of a bloom filter
pub const MAX_BLOOM_SIZE: usize = 1030;
pub const MAX_BLOOM_SIZE: usize = 1028;
/// The maximum size of a protocol payload
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
/// The largest protocol header size
const MAX_PROTOCOL_HEADER_SIZE: u64 = 202;
const MAX_PROTOCOL_HEADER_SIZE: u64 = 204;
#[derive(Debug, PartialEq, Eq)]
pub enum ClusterInfoError {
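
The two constant bumps above are linked: carrying the u16 shred_version in gossip grows the worst-case protocol header by two bytes, and because the payload is defined as PACKET_DATA_SIZE minus the header, the maximum bloom filter shrinks by the same two bytes. A sketch of that arithmetic, assuming the era's PACKET_DATA_SIZE of 1232 bytes from solana_sdk:

const PACKET_DATA_SIZE: u64 = 1232; // assumed solana_sdk::packet value
const MAX_PROTOCOL_HEADER_SIZE: u64 = 204; // was 202; +2 bytes for the u16 shred_version
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE - MAX_PROTOCOL_HEADER_SIZE;
const MAX_BLOOM_SIZE: usize = 1028; // was 1030

fn main() {
    // The bloom filter must fit in whatever the header leaves of the packet.
    assert_eq!(MAX_PROTOCOL_PAYLOAD_SIZE as usize, MAX_BLOOM_SIZE);
}
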
@ -272,7 +272,7 @@ impl ClusterInfo {
let ip_addr = node.gossip.ip();
format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}\n",
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@ -290,15 +290,16 @@ impl ClusterInfo {
addr_to_string(&ip_addr, &node.storage_addr),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
)
})
.collect();
format!(
"IP Address |Age(ms)| Node identifier \
|Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub\n\
|Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+\
------+------+-------+------+-------+------+-------+------+------\n\
------+------+-------+------+-------+------+-------+------+------+--------\n\
{}\
Nodes: {}{}{}",
nodes.join(""),
@ -404,14 +405,15 @@ impl ClusterInfo {
.map(|x| x.value.contact_info().unwrap())
}
pub fn rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| ContactInfo::is_valid_address(&x.rpc))
.cloned()
.collect()
@ -439,14 +441,16 @@ impl ClusterInfo {
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
/* shred_version is not considered for gossip peers (i.e., spy nodes do not set
shred_version) */
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.cloned()
.collect()
}
/// all validators that have a valid tvu port.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
/// all validators that have a valid tvu port regardless of `shred_version`.
pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
@ -454,34 +458,66 @@ impl ClusterInfo {
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| !ClusterInfo::is_archiver(x))
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid storage addr
pub fn storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
/// all validators that have a valid tvu port and are on the same `shred_version`.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| !ClusterInfo::is_archiver(x))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
/// all peers that have a valid storage addr regardless of `shred_version`.
pub fn all_storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid storage addr and are on the same `shred_version`.
pub fn storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
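
The accessors above now come in pairs: an all_* flavor that ignores shred_version (used, for example, by the archiver's all_rpc_peers calls earlier in this diff) and a default flavor that keeps only peers on the caller's shred_version. A reduced sketch of the split; ContactInfo is trimmed to the two fields the filters need:

#[derive(Clone)]
struct ContactInfo {
    id: u64, // stand-in for the Pubkey
    shred_version: u16,
}

fn all_peers(table: &[ContactInfo], me: &ContactInfo) -> Vec<ContactInfo> {
    table.iter().filter(|x| x.id != me.id).cloned().collect()
}

fn peers(table: &[ContactInfo], me: &ContactInfo) -> Vec<ContactInfo> {
    table
        .iter()
        .filter(|x| x.id != me.id)
        // The only added filter: drop peers running a different shred_version.
        .filter(|x| x.shred_version == me.shred_version)
        .cloned()
        .collect()
}

fn main() {
    let me = ContactInfo { id: 0, shred_version: 1 };
    let table = vec![
        ContactInfo { id: 1, shred_version: 1 },
        ContactInfo { id: 2, shred_version: 2 },
    ];
    assert_eq!(all_peers(&table, &me).len(), 2);
    assert_eq!(peers(&table, &me).len(), 1);
}
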
/// all peers that have a valid tvu
pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards))
.cloned()
@ -490,10 +526,11 @@ impl ClusterInfo {
/// all tvu peers with valid gossip addrs that likely have the slot being requested
fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
ClusterInfo::tvu_peers(self)
.into_iter()
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.filter(|x| {
self.get_epoch_state_for_node(&x.id, None)
@ -1056,6 +1093,7 @@ impl ClusterInfo {
.spawn(move || {
let mut last_push = timestamp();
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.read().unwrap().my_data().shred_version == 0;
let recycler = PacketsRecycler::default();
loop {
let start = timestamp();
@ -1093,9 +1131,32 @@ impl ClusterInfo {
let table_size = obj.read().unwrap().gossip.crds.table.len();
datapoint_debug!(
"cluster_info-purge",
("tabel_size", table_size as i64, i64),
("table_size", table_size as i64, i64),
("purge_stake_timeout", timeout as i64, i64)
);
// Adopt the entrypoint's `shred_version` if ours is unset
if adopt_shred_version {
// If gossip was given an entrypoint, lookup its id
let entrypoint_id = obj.read().unwrap().entrypoint.as_ref().map(|e| e.id);
if let Some(entrypoint_id) = entrypoint_id {
// If a pull from the entrypoint was successful, it should exist in the crds table
let entrypoint = obj.read().unwrap().lookup(&entrypoint_id).cloned();
if let Some(entrypoint) = entrypoint {
let mut self_info = obj.read().unwrap().my_data();
if entrypoint.shred_version == 0 {
info!("Unable to adopt entrypoint's shred version");
} else {
info!(
"Setting shred version to {:?} from entrypoint {:?}",
entrypoint.shred_version, entrypoint.id
);
self_info.shred_version = entrypoint.shred_version;
obj.write().unwrap().insert_self(self_info);
adopt_shred_version = false;
}
}
}
}
//TODO: possibly tune this parameter
//we saw a deadlock passing an obj.read().unwrap().timeout into sleep
if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 {
@ -1113,12 +1174,12 @@ impl ClusterInfo {
}
fn get_data_shred_as_packet(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blocktree.get_data_shred(slot, shred_index)?;
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
@ -1132,14 +1193,14 @@ impl ClusterInfo {
recycler: &PacketsRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
me: &ContactInfo,
slot: Slot,
shred_index: u64,
) -> Option<Packets> {
if let Some(blocktree) = blocktree {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blocktree, slot, shred_index, from_addr);
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
if let Ok(Some(packet)) = packet {
inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
@ -1166,17 +1227,17 @@ impl ClusterInfo {
fn run_highest_window_request(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
) -> Option<Packets> {
let blocktree = blocktree?;
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blocktree.meta(slot).ok()??;
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr)
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
return Some(Packets::new_with_recycler_data(
recycler,
@ -1190,19 +1251,19 @@ impl ClusterInfo {
fn run_orphan(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blocktree) = blocktree {
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blocktree.meta(slot) {
while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr);
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
res.packets.push(packet);
}
@ -1219,17 +1280,20 @@ impl ClusterInfo {
Some(res)
}
#[allow(clippy::cognitive_complexity)]
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
stakes: &HashMap<Pubkey, u64>,
packets: Packets,
response_sender: &PacketSender,
epoch_ms: u64,
) {
// iter over the packets, collect pulls separately and process everything else
let allocated = thread_mem_usage::Allocatedp::default();
let mut gossip_pull_data: Vec<PullData> = vec![];
let timeouts = me.read().unwrap().gossip.make_timeouts(&stakes, epoch_ms);
packets.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
@ -1271,7 +1335,7 @@ impl ClusterInfo {
}
ret
});
Self::handle_pull_response(me, &from, data);
Self::handle_pull_response(me, &from, data, &timeouts);
datapoint_debug!(
"solana-gossip-listen-memory",
("pull_response", (allocated.get() - start) as i64, i64),
@ -1330,7 +1394,8 @@ impl ClusterInfo {
);
}
_ => {
let rsp = Self::handle_repair(me, recycler, &from_addr, blocktree, request);
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
@ -1389,7 +1454,12 @@ impl ClusterInfo {
Some(packets)
}
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
fn handle_pull_response(
me: &Arc<RwLock<Self>>,
from: &Pubkey,
data: Vec<CrdsValue>,
timeouts: &HashMap<Pubkey, u64>,
) {
let len = data.len();
let now = Instant::now();
let self_id = me.read().unwrap().gossip.id;
@ -1397,7 +1467,7 @@ impl ClusterInfo {
me.write()
.unwrap()
.gossip
.process_pull_response(from, data, timestamp());
.process_pull_response(from, timeouts, data, timestamp());
inc_new_counter_debug!("cluster_info-pull_request_response", 1);
inc_new_counter_debug!("cluster_info-pull_request_response-size", len);
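
handle_pull_response now threads the per-origin timeout map (built once per packet batch from stakes and epoch length in run_listen) into process_pull_response; this is the mechanism behind the "Filter old CrdsValues received via Pull Responses in Gossip" commit in this range. A simplified sketch of the filtering idea, with u64 keys standing in for pubkeys and a hypothetical 15-second default timeout:

use std::collections::HashMap;

struct CrdsValue {
    origin: u64,    // originator of the value
    wallclock: u64, // when the originator produced it, in ms
}

fn filter_stale(values: Vec<CrdsValue>, timeouts: &HashMap<u64, u64>, now: u64) -> Vec<CrdsValue> {
    values
        .into_iter()
        .filter(|v| {
            // Staked origins get the epoch-length timeout; unknown ones a default.
            let timeout = timeouts.get(&v.origin).copied().unwrap_or(15_000);
            now.saturating_sub(v.wallclock) <= timeout
        })
        .collect()
}

fn main() {
    let mut timeouts = HashMap::new();
    timeouts.insert(7, 60_000); // a staked node tolerates older values
    let values = vec![
        CrdsValue { origin: 7, wallclock: 50_000 },
        CrdsValue { origin: 9, wallclock: 50_000 },
    ];
    // At now = 100_000 ms, the unstaked origin's value is 50s old and dropped.
    assert_eq!(filter_stale(values, &timeouts, 100_000).len(), 1);
}
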
@ -1475,7 +1545,7 @@ impl ClusterInfo {
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
request: Protocol,
) -> Option<Packets> {
let now = Instant::now();
@ -1511,7 +1581,7 @@ impl ClusterInfo {
recycler,
from,
&from_addr,
blocktree,
blockstore,
&my_info,
*slot,
*shred_index,
@ -1526,7 +1596,7 @@ impl ClusterInfo {
Self::run_highest_window_request(
recycler,
&from_addr,
blocktree,
blockstore,
*slot,
*highest_index,
),
@ -1539,7 +1609,7 @@ impl ClusterInfo {
Self::run_orphan(
recycler,
&from_addr,
blocktree,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
),
@ -1559,7 +1629,7 @@ impl ClusterInfo {
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
@ -1567,19 +1637,36 @@ impl ClusterInfo {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let epoch_ms;
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
let bank = bank_forks.read().unwrap().working_bank();
let epoch = bank.epoch();
let epoch_schedule = bank.epoch_schedule();
epoch_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
staking_utils::staked_nodes(&bank)
}
None => {
inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);
epoch_ms = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
HashMap::new()
}
None => HashMap::new(),
};
Self::handle_packets(obj, &recycler, blocktree, &stakes, reqs, response_sender);
Self::handle_packets(
obj,
&recycler,
blockstore,
&stakes,
reqs,
response_sender,
epoch_ms,
);
Ok(())
}
pub fn listen(
me: Arc<RwLock<Self>>,
blocktree: Option<Arc<Blocktree>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
@ -1593,7 +1680,7 @@ impl ClusterInfo {
let e = Self::run_listen(
&me,
&recycler,
blocktree.as_ref(),
blockstore.as_ref(),
bank_forks.as_ref(),
&requests_receiver,
&response_sender,
@ -1614,6 +1701,30 @@ impl ClusterInfo {
.unwrap()
}
fn gossip_contact_info(id: &Pubkey, gossip_addr: SocketAddr) -> ContactInfo {
let dummy_addr = socketaddr_any!();
ContactInfo::new(
id,
gossip_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
timestamp(),
)
}
pub fn spy_contact_info(id: &Pubkey) -> ContactInfo {
let dummy_addr = socketaddr_any!();
Self::gossip_contact_info(id, dummy_addr)
}
/// An alternative to Spy Node that has a valid gossip address and fully participates in Gossip.
pub fn gossip_node(
id: &Pubkey,
@ -1621,43 +1732,17 @@ impl ClusterInfo {
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let (port, (gossip_socket, ip_echo)) =
Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE);
let daddr = socketaddr_any!();
let contact_info = Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port));
let node = ContactInfo::new(
id,
SocketAddr::new(gossip_addr.ip(), port),
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
timestamp(),
);
(node, gossip_socket, Some(ip_echo))
(contact_info, gossip_socket, Some(ip_echo))
}
/// A Node with invalid ports to spy on gossip via pull requests
/// A Node with dummy ports to spy on gossip via pull requests
pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let (_, gossip_socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap();
let daddr = socketaddr_any!();
let contact_info = Self::spy_contact_info(id);
let node = ContactInfo::new(
id,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
timestamp(),
);
(node, gossip_socket, None)
(contact_info, gossip_socket, None)
}
}
@ -1916,9 +2001,9 @@ mod tests {
use crate::repair_service::RepairType;
use crate::result::Error;
use rayon::prelude::*;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blocktree_processor::fill_blocktree_slot_with_ticks;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
@ -2062,7 +2147,7 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let me = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"),
@ -2080,7 +2165,7 @@ mod tests {
&recycler,
&me,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
&me,
0,
0,
@ -2097,7 +2182,7 @@ mod tests {
CodingShredHeader::default(),
);
blocktree
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
@ -2105,7 +2190,7 @@ mod tests {
&recycler,
&me,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
&me,
2,
1,
@ -2121,7 +2206,7 @@ mod tests {
assert_eq!(rv[0].slot(), 2);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test that run_window_request responds with the right shred, and does not overrun
@ -2131,18 +2216,18 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
0,
0,
);
assert!(rv.is_none());
let _ = fill_blocktree_slot_with_ticks(
&blocktree,
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
@ -2152,7 +2237,7 @@ mod tests {
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
2,
1,
);
@ -2163,21 +2248,21 @@ mod tests {
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert!(!rv.is_empty());
let index = blocktree.meta(2).unwrap().unwrap().received - 1;
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
2,
index + 1,
);
assert!(rv.is_none());
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -2186,25 +2271,27 @@ mod tests {
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 2, 0);
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
blocktree
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 4, 5);
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 3, 5)
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
@ -2213,9 +2300,9 @@ mod tests {
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| {
let index = blocktree.meta(slot).unwrap().unwrap().received - 1;
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ClusterInfo::get_data_shred_as_packet(
&blocktree,
&blockstore,
slot,
index,
&socketaddr_any!(),
@ -2227,7 +2314,7 @@ mod tests {
assert_eq!(rv, expected)
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
fn assert_in_range(x: u16, range: (u16, u16)) {
@ -2535,10 +2622,12 @@ mod tests {
let entrypoint_crdsvalue =
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
let cluster_info = Arc::new(RwLock::new(cluster_info));
let timeouts = cluster_info.read().unwrap().gossip.make_timeouts_test();
ClusterInfo::handle_pull_response(
&cluster_info,
&entrypoint_pubkey,
vec![entrypoint_crdsvalue],
&timeouts,
);
let pulls = cluster_info
.write()
@ -2572,7 +2661,7 @@ mod tests {
#[test]
fn test_split_messages_packet_size() {
// Test that if a value is smaller than payload size but too large to be wrappe in a vec
// Test that if a value is smaller than payload size but too large to be wrapped in a vec
// that it is still dropped
let payload: Vec<CrdsValue> = vec![];
let vec_size = serialized_size(&payload).unwrap();
@ -2586,7 +2675,7 @@ mod tests {
}));
let mut i = 0;
while value.size() < desired_size {
while value.size() <= desired_size {
let slots = (0..i).collect::<BTreeSet<_>>();
if slots.len() > 200 {
panic!(
@ -2661,6 +2750,14 @@ mod tests {
cluster_info.insert_info(contact_info);
stakes.insert(id3, 10);
// normal but with different shred version
let id4 = Pubkey::new(&[4u8; 32]);
let mut contact_info = ContactInfo::new_localhost(&id4, timestamp());
contact_info.shred_version = 1;
assert_ne!(contact_info.shred_version, d.shred_version);
cluster_info.insert_info(contact_info.clone());
stakes.insert(id4, 10);
let stakes = Arc::new(stakes);
let (peers, peers_and_stakes) = cluster_info.sorted_tvu_peers_and_stakes(Some(stakes));
assert_eq!(peers.len(), 2);
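The run_listen change above sizes the gossip purge timeout to the length of the current epoch when a working bank is available, falling back to the static CRDS pull timeout otherwise. A minimal sketch of that derivation, with assumed stand-in types (the real EpochSchedule lives in solana-sdk; DEFAULT_MS_PER_SLOT is the 400ms slot time used elsewhere in the codebase):

// Sketch only: this `EpochSchedule` is a hypothetical stand-in for the
// solana-sdk type used in the diff above.
const DEFAULT_MS_PER_SLOT: u64 = 400;
const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15_000;

struct EpochSchedule {
    slots_per_epoch: u64,
}

impl EpochSchedule {
    fn get_slots_in_epoch(&self, _epoch: u64) -> u64 {
        self.slots_per_epoch
    }
}

fn epoch_timeout_ms(schedule: Option<&EpochSchedule>, epoch: u64) -> u64 {
    match schedule {
        // With a working bank: one full epoch's worth of slots, in ms.
        Some(s) => s.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT,
        // No bank yet: fall back to the fixed CRDS pull timeout.
        None => CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
    }
}

With an 8192-slot epoch, for example, this yields 8192 * 400 = 3,276,800 ms, roughly 55 minutes, versus the 15-second static default.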

View File

@ -5,7 +5,7 @@ use byteorder::{ByteOrder, LittleEndian};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@ -89,13 +89,13 @@ pub struct ClusterInfoRepairListener {
impl ClusterInfoRepairListener {
pub fn new(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
epoch_schedule: EpochSchedule,
) -> Self {
let exit = exit.clone();
let blocktree = blocktree.clone();
let blockstore = blockstore.clone();
let thread = Builder::new()
.name("solana-cluster_info_repair_listener".to_string())
.spawn(move || {
@ -105,7 +105,7 @@ impl ClusterInfoRepairListener {
// 2) The latest root the peer gossiped
let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new();
let _ = Self::recv_loop(
&blocktree,
&blockstore,
&mut peer_infos,
&exit,
&cluster_info,
@ -119,7 +119,7 @@ impl ClusterInfoRepairListener {
}
fn recv_loop(
blocktree: &Blocktree,
blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -134,8 +134,8 @@ impl ClusterInfoRepairListener {
return Ok(());
}
let lowest_slot = blocktree.lowest_slot();
let peers = cluster_info.read().unwrap().gossip_peers();
let lowest_slot = blockstore.lowest_slot();
let peers = cluster_info.read().unwrap().tvu_peers();
let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new();
// Iterate through all the known nodes in the network, looking for ones that
@ -156,7 +156,7 @@ impl ClusterInfoRepairListener {
// After updating all the peers, send out repairs to those that need it
let _ = Self::serve_repairs(
&my_pubkey,
blocktree,
blockstore,
peer_infos,
&peers_needing_repairs,
&socket,
@ -219,7 +219,7 @@ impl ClusterInfoRepairListener {
fn serve_repairs(
my_pubkey: &Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
repairees: &HashMap<Pubkey, EpochSlots>,
socket: &UdpSocket,
@ -258,7 +258,7 @@ impl ClusterInfoRepairListener {
my_pubkey,
repairee_pubkey,
my_root,
blocktree,
blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
socket,
@ -286,7 +286,7 @@ impl ClusterInfoRepairListener {
my_pubkey: &Pubkey,
repairee_pubkey: &Pubkey,
my_root: Slot,
blocktree: &Blocktree,
blockstore: &Blockstore,
repairee_epoch_slots: &EpochSlots,
eligible_repairmen: &[&Pubkey],
socket: &UdpSocket,
@ -295,7 +295,7 @@ impl ClusterInfoRepairListener {
epoch_schedule: &EpochSchedule,
last_repaired_slot_and_ts: (u64, u64),
) -> Result<Option<Slot>> {
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree);
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blockstore);
if slot_iter.is_err() {
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
@ -325,9 +325,14 @@ impl ClusterInfoRepairListener {
if slot > my_root
|| num_slots_repaired >= num_slots_to_repair
|| slot > max_confirmed_repairee_slot
// Don't repair if the next rooted slot jumps, because that means
// we started from a snapshot and don't have the immediate next
// slot that the repairee needs
|| slot_meta.is_none()
{
break;
}
let slot_meta = slot_meta.unwrap();
if !repairee_epoch_slots.slots.contains(&slot) {
// Calculate the shred indexes this node is responsible for repairing. Note that
// because we are only repairing slots that are before our root, the slot.received
@ -338,7 +343,7 @@ impl ClusterInfoRepairListener {
// the cluster
let num_shreds_in_slot = slot_meta.received as usize;
// Check if I'm responsible for repairing this slots
// Check if I'm responsible for repairing this slot
if let Some(my_repair_indexes) = Self::calculate_my_repairman_index_for_slot(
my_pubkey,
&eligible_repairmen,
@ -361,17 +366,17 @@ impl ClusterInfoRepairListener {
// a database iterator over the slots because by the time this node is
// sending the shreds in this slot for repair, we expect these slots
// to be full.
if let Some(shred_data) = blocktree
if let Some(shred_data) = blockstore
.get_data_shred(slot, shred_index as u64)
.expect("Failed to read data shred from blocktree")
.expect("Failed to read data shred from blockstore")
{
socket.send_to(&shred_data[..], repairee_addr)?;
total_data_shreds_sent += 1;
}
if let Some(coding_bytes) = blocktree
if let Some(coding_bytes) = blockstore
.get_coding_shred(slot, shred_index as u64)
.expect("Failed to read coding shred from blocktree")
.expect("Failed to read coding shred from blockstore")
{
socket.send_to(&coding_bytes[..], repairee_addr)?;
total_coding_shreds_sent += 1;
@ -545,7 +550,7 @@ mod tests {
use crate::packet::Packets;
use crate::streamer;
use crate::streamer::PacketReceiver;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use solana_perf::recycler::Recycler;
use std::collections::BTreeSet;
@ -694,16 +699,16 @@ mod tests {
#[test]
fn test_serve_same_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 2;
let (shreds, _) = make_many_slot_entries(0, num_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let last_root = num_slots - 1;
let roots: Vec<_> = (0..=last_root).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -724,7 +729,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
@ -744,7 +749,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
@ -760,20 +765,20 @@ mod tests {
#[test]
fn test_serve_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let entries_per_slot = 5;
let num_slots = 10;
assert_eq!(num_slots % 2, 0);
let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
let num_shreds_per_slot = shreds.len() as u64 / num_slots;
// Write slots in the range [0, num_slots] to blocktree
blocktree.insert_shreds(shreds, None, false).unwrap();
// Write slots in the range [0, num_slots] to blockstore
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=num_slots - 1).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -804,7 +809,7 @@ mod tests {
&repairman_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen_refs,
&my_socket,
@ -843,26 +848,26 @@ mod tests {
// Shutdown
mock_repairee.close().unwrap();
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_no_repair_past_confirmed_epoch() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let stakers_slot_offset = 16;
let slots_per_epoch = stakers_slot_offset * 2;
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false);
// Create shreds for first two epochs and write them to blocktree
// Create shreds for first two epochs and write them to blockstore
let total_slots = slots_per_epoch * 2;
let (shreds, _) = make_many_slot_entries(0, total_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -891,7 +896,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&vec![&my_pubkey],
&my_socket,
@ -914,7 +919,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&vec![&my_pubkey],
&my_socket,
@ -931,8 +936,8 @@ mod tests {
// Shutdown
mock_repairee.close().unwrap();
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]

View File

@ -321,12 +321,21 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake {} {} {} {}",
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.lockouts.votes {
if old_vote.slot == vote.slot
&& old_vote.confirmation_count == vote.confirmation_count
{
return true;
}
}
}
lockout > self.threshold_size
} else {
false
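The hunk above makes the threshold check pass early when the new vote did not actually increase any existing lockout: if simulating the vote leaves some lockout with the same slot and the same confirmation count as before, the vote deepens nothing at threshold depth, so there is no new commitment to gate on stake. A condensed sketch of just that predicate, with an assumed simplified Lockout type:

// Assumed stand-in for vote_state::Lockout.
struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

// True when the simulated vote left some existing lockout unchanged,
// i.e. the new vote does not extend any commitment at threshold depth.
fn lockout_unchanged(old_votes: &[Lockout], simulated: &Lockout, threshold_depth: usize) -> bool {
    simulated.confirmation_count as usize > threshold_depth
        && old_votes.iter().any(|old| {
            old.slot == simulated.slot
                && old.confirmation_count == simulated.confirmation_count
        })
}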
@ -432,7 +441,7 @@ impl Tower {
fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
if self.last_timestamp.slot == 0
|| self.last_timestamp.slot + TIMESTAMP_SLOT_INTERVAL <= current_slot
|| self.last_timestamp.slot < (current_slot - (current_slot % TIMESTAMP_SLOT_INTERVAL))
{
let timestamp = Utc::now().timestamp();
self.last_timestamp = BlockTimestamp {
@ -542,6 +551,24 @@ mod test {
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(4, 0.67);
let mut stakes = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
stakes.insert(
i,
StakeLockout {
stake: 1,
lockout: 8,
},
);
tower.record_vote(i, Hash::default());
}
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let tower = Tower::new_for_tests(1, 0.67);
@ -742,6 +769,34 @@ mod test {
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_lockouts_not_updated() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![
(
0,
StakeLockout {
stake: 1,
lockout: 8,
},
),
(
1,
StakeLockout {
stake: 2,
lockout: 8,
},
),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}
#[test]
fn test_lockout_is_updated_for_entire_branch() {
let mut stake_lockouts = HashMap::new();

View File

@ -31,6 +31,8 @@ pub struct ContactInfo {
pub rpc_pubsub: SocketAddr,
/// latest wallclock picked
pub wallclock: u64,
/// node shred version
pub shred_version: u16,
}
impl Ord for ContactInfo {
@ -84,6 +86,7 @@ impl Default for ContactInfo {
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
wallclock: 0,
shred_version: 0,
}
}
}
@ -115,6 +118,7 @@ impl ContactInfo {
rpc,
rpc_pubsub,
wallclock: now,
shred_version: 0,
}
}
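The new shred_version field is what the shred-version filtering elsewhere in this compare (getClusterNodes, the Crds accessors, sorted_tvu_peers_and_stakes) keys off. A minimal sketch of that gating, written as a hypothetical free function rather than the actual ClusterInfo methods:

// Keep only peers that agree with our shred version; nodes on an
// incompatible ledger (different shred version) are ignored.
fn compatible_peers(peers: &[ContactInfo], my_shred_version: u16) -> Vec<&ContactInfo> {
    peers
        .iter()
        .filter(|p| p.shred_version == my_shred_version)
        .collect()
}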

View File

@ -156,11 +156,12 @@ impl CrdsGossip {
pub fn process_pull_response(
&mut self,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
self.pull
.process_pull_response(&mut self.crds, from, response, now)
.process_pull_response(&mut self.crds, from, timeouts, response, now)
}
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {

View File

@ -25,6 +25,8 @@ use std::collections::HashMap;
use std::collections::VecDeque;
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
pub const FALSE_RATE: f64 = 0.1f64;
pub const KEYS: f64 = 8f64;
@ -117,6 +119,7 @@ pub struct CrdsGossipPull {
/// hash and insert time
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
}
impl Default for CrdsGossipPull {
@ -125,6 +128,7 @@ impl Default for CrdsGossipPull {
purged_values: VecDeque::new(),
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
}
}
}
@ -210,12 +214,56 @@ impl CrdsGossipPull {
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
> r.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
|| now + self.msg_timeout < r.wallclock()
{
match &r.label() {
CrdsValueLabel::ContactInfo(_) => {
// Check if this ContactInfo is actually too old; it's possible that it has
// stake and so might have a longer effective timeout
let timeout = *timeouts
.get(&owner)
.unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
}
}
_ => {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
old.ok().map(|opt| {
@ -322,8 +370,9 @@ impl CrdsGossipPull {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
@ -534,8 +583,13 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed =
node.process_pull_response(&mut node_crds, &node_pubkey, rsp.pop().unwrap(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
assert_eq!(failed, 0);
assert_eq!(
node_crds
@ -675,4 +729,87 @@ mod test {
.collect();
assert_eq!(masks.len(), 2u64.pow(mask_bits) as usize)
}
#[test]
fn test_process_pull_response() {
let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), node.crds_timeout);
timeouts.insert(peer_pubkey, node.msg_timeout + 1);
// inserting a fresh value should be fine.
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
1,
),
0
);
let mut node_crds = Crds::default();
let unstaked_peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
// check that old contact infos fail if they are too old, regardless of "timeouts"
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
2
);
let mut node_crds = Crds::default();
// check that old contact infos can still land as long as they have a "timeouts" entry
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
node.msg_timeout + 1,
),
0
);
// construct something that's not a contact info
let peer_vote =
CrdsValue::new_unsigned(CrdsData::Vote(0, Vote::new(&peer_pubkey, test_tx(), 0)));
// check that older CrdsValues (non-ContactInfos) pass even if they are too old,
// as long as a recent contact info (inserted above) exists
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
0
);
let mut node_crds = Crds::default();
// without a contact info, inserting an old value should fail
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
1
);
}
}
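Taken together, the process_pull_response changes above implement a three-way admission rule for pulled values. A compact sketch of that rule, under assumed simplified inputs (age and timeouts precomputed here; the real code works from CrdsValue wallclocks):

// Outcomes of admitting one pulled CrdsValue (sketch).
enum Verdict {
    Insert,         // fresh enough: insert and bump timestamps as usual
    InsertSilently, // old, but owner's ContactInfo is known: insert without bumping timestamps
    Discard,        // too old and owner unknown, or a ContactInfo past its timeout
}

fn admit(
    is_contact_info: bool,
    owner_known: bool,
    age_ms: u64,
    msg_timeout: u64,   // CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS
    owner_timeout: u64, // per-owner timeout; longer for staked nodes
) -> Verdict {
    if age_ms <= msg_timeout {
        return Verdict::Insert;
    }
    if is_contact_info {
        // Staked nodes get a longer effective timeout for their ContactInfo.
        if age_ms <= owner_timeout {
            Verdict::Insert
        } else {
            Verdict::Discard
        }
    } else if owner_known {
        Verdict::InsertSilently
    } else {
        Verdict::Discard
    }
}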

View File

@ -30,7 +30,10 @@ use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
// With a fanout of 6, a 1000-node cluster should only take ~4 hops to converge.
// However, since pushes are stake-weighted, some trailing nodes
// might need more time to receive values. 30 seconds should be plenty.
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
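A quick sanity check of the convergence estimate in the new comment: each push hop multiplies coverage by roughly the fanout, so a cluster of n nodes needs about log_f(n) hops.

// ln(1000) / ln(6) ≈ 3.86, so ~4 hops for a 1000-node cluster,
// comfortably inside the 30-second timeout above.
fn hops_to_converge(n: f64, fanout: f64) -> f64 {
    n.ln() / fanout.ln()
}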
@ -135,7 +138,12 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
if now > value.wallclock() + self.msg_timeout {
if now
> value
.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
{
return Err(CrdsGossipError::PushMessageTimeout);
}
if now + self.msg_timeout < value.wallclock() {

View File

@ -5,7 +5,7 @@ use crate::packet::PacketsRecycler;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use crate::streamer::{self, PacketReceiver, PacketSender};
use crate::thread_mem_usage;
use solana_measure::thread_mem_usage;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::recycler::Recycler;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;

View File

@ -6,7 +6,7 @@ use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -24,7 +24,7 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: Option<Arc<Blocktree>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>,
@ -47,7 +47,7 @@ impl GossipService {
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_listen = ClusterInfo::listen(
cluster_info.clone(),
blocktree,
blockstore,
bank_forks.clone(),
request_receiver,
response_sender.clone(),
@ -197,11 +197,10 @@ fn spy(
tvu_peers = spy_ref
.read()
.unwrap()
.tvu_peers()
.all_tvu_peers()
.into_iter()
.filter(|node| !ClusterInfo::is_archiver(&node))
.collect::<Vec<_>>();
archivers = spy_ref.read().unwrap().storage_peers();
archivers = spy_ref.read().unwrap().all_storage_peers();
if let Some(num) = num_nodes {
if tvu_peers.len() + archivers.len() >= num {
if let Some(gossip_addr) = find_node_by_gossip_addr {

View File

@ -1,7 +1,7 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@ -17,7 +17,7 @@ use std::time::Duration;
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const MAX_LEDGER_SLOTS: u64 = 6400;
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 6400;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
@ -28,7 +28,7 @@ pub struct LedgerCleanupService {
impl LedgerCleanupService {
pub fn new(
new_root_receiver: Receiver<Slot>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
max_ledger_slots: u64,
exit: &Arc<AtomicBool>,
) -> Self {
@ -46,7 +46,7 @@ impl LedgerCleanupService {
}
if let Err(e) = Self::cleanup_ledger(
&new_root_receiver,
&blocktree,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
) {
@ -63,20 +63,20 @@ impl LedgerCleanupService {
fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
max_ledger_slots: u64,
next_purge_batch: &mut u64,
) -> Result<()> {
let disk_utilization_pre = blocktree.storage_size();
let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
if root > *next_purge_batch {
//cleanup
blocktree.purge_slots(0, Some(root - max_ledger_slots));
blockstore.purge_slots(0, Some(root - max_ledger_slots));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
}
let disk_utilization_post = blocktree.storage_size();
let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post)
@ -103,39 +103,39 @@ impl LedgerCleanupService {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
#[test]
fn test_cleanup() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blocktree.insert_shreds(shreds, None, false).unwrap();
let blocktree = Arc::new(blocktree);
blockstore.insert_shreds(shreds, None, false).unwrap();
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill slots 0-40
let mut next_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10, &mut next_purge_slot)
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
.unwrap();
//check that 0-40 don't exist
blocktree
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > 40));
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_compaction() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let n = 10_000;
let batch_size = 100;
@ -144,10 +144,10 @@ mod tests {
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
}
let u1 = blocktree.storage_size().unwrap() as f64;
let u1 = blockstore.storage_size().unwrap() as f64;
// send signal to cleanup slots
let (sender, receiver) = channel();
@ -155,7 +155,7 @@ mod tests {
let mut next_purge_batch = 0;
LedgerCleanupService::cleanup_ledger(
&receiver,
&blocktree,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
)
@ -163,18 +163,18 @@ mod tests {
thread::sleep(Duration::from_secs(2));
let u2 = blocktree.storage_size().unwrap() as f64;
let u2 = blockstore.storage_size().unwrap() as f64;
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist
let max_slot = n - max_ledger_slots;
blocktree
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > max_slot));
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}
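The cleanup above purges in coarse batches: once a new root crosses the next batch boundary, every slot more than max_ledger_slots behind it is dropped and the boundary advances by a fixed step. A sketch of that trigger, with an assumed simplified return type:

const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;

// Returns the slot range to purge, if this root triggers a purge.
fn purge_range(root: u64, max_ledger_slots: u64, next_purge_batch: &mut u64) -> Option<(u64, u64)> {
    if root > *next_purge_batch {
        *next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
        Some((0, root.saturating_sub(max_ledger_slots)))
    } else {
        None
    }
}

Batching the deletes amortizes the RocksDB work instead of issuing one purge per rooted slot.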

View File

@ -12,7 +12,6 @@ pub mod chacha_cuda;
pub mod cluster_info_vote_listener;
pub mod commitment;
pub mod shred_fetch_stage;
pub mod thread_mem_usage;
#[macro_use]
pub mod contact_info;
pub mod archiver;
@ -84,10 +83,3 @@ extern crate solana_metrics;
#[cfg(test)]
#[macro_use]
extern crate matches;
#[cfg(unix)]
extern crate jemallocator;
#[cfg(unix)]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

View File

@ -12,7 +12,7 @@ use solana_metrics::inc_new_counter_debug;
pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};
use std::{net::UdpSocket, time::Instant};
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: usize) -> Result<usize> {
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
@ -23,9 +23,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
socket.set_nonblocking(false)?;
trace!("receiving on {}", socket.local_addr().unwrap());
let start = Instant::now();
let mut total_size = 0;
loop {
obj.packets.resize(i + NUM_RCVMMSGS, Packet::default());
obj.packets.resize(
std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH),
Packet::default(),
);
match recv_mmsg(socket, &mut obj.packets[i..]) {
Err(_) if i > 0 => {
if start.elapsed().as_millis() > 1 {
@ -36,16 +38,15 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((size, npkts)) => {
Ok((_, npkts)) => {
if i == 0 {
socket.set_nonblocking(true)?;
}
trace!("got {} packets", npkts);
i += npkts;
total_size += size;
// Try to batch into big enough buffers
// will cause less re-shuffling later on.
if start.elapsed().as_millis() > 1 || total_size >= PACKETS_BATCH_SIZE {
if start.elapsed().as_millis() > max_wait_ms as u128 || i >= PACKETS_PER_BATCH {
break;
}
}
@ -98,7 +99,7 @@ mod tests {
}
send_to(&p, &send_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
assert_eq!(recvd, p.packets.len());
@ -130,4 +131,32 @@ mod tests {
p2.data[0] = 4;
assert!(p1 != p2);
}
#[test]
fn test_packet_resize() {
solana_logger::setup();
let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = recv_socket.local_addr().unwrap();
let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let mut p = Packets::default();
p.packets.resize(PACKETS_PER_BATCH, Packet::default());
// Should only get PACKETS_PER_BATCH packets per iteration even
// if a lot more were sent, and regardless of packet size
for _ in 0..2 * PACKETS_PER_BATCH {
let mut p = Packets::default();
p.packets.resize(1, Packet::default());
for m in p.packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 1;
}
send_to(&p, &send_socket).unwrap();
}
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();
// Check we only got PACKETS_PER_BATCH packets
assert_eq!(recvd, PACKETS_PER_BATCH);
assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH);
}
}
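The recv_from rewrite above fixes the over-allocation by growing the packet buffer one recv_mmsg chunk at a time, never past a fixed batch cap, and by bounding how long the loop coalesces before returning. A sketch of the capacity rule, with assumed constant values:

const NUM_RCVMMSGS: usize = 128;       // packets requested per recv_mmsg call (assumed value)
const PACKETS_PER_BATCH: usize = 1024; // hard cap on one batch (assumed value)

// Grow toward the next recv_mmsg chunk, but never past the batch cap,
// so a single batch allocates at most PACKETS_PER_BATCH packets.
fn next_capacity(filled: usize) -> usize {
    std::cmp::min(filled + NUM_RCVMMSGS, PACKETS_PER_BATCH)
}

The loop then exits once the batch is full or max_wait_ms has elapsed, which is exactly what test_packet_resize above asserts.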

View File

@ -11,7 +11,7 @@
//! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
//!
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::poh::Poh;
@ -29,7 +29,7 @@ use std::sync::{Arc, Mutex};
use std::time::Instant;
const GRACE_TICKS_FACTOR: u64 = 2;
const MAX_GRACE_SLOTS: u64 = 3;
const MAX_GRACE_SLOTS: u64 = 2;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PohRecorderError {
@ -60,7 +60,7 @@ pub struct PohRecorder {
leader_last_tick_height: u64, // zero if none
grace_ticks: u64,
id: Pubkey,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
poh_config: Arc<PohConfig>,
ticks_per_slot: u64,
@ -74,7 +74,7 @@ impl PohRecorder {
&self.id,
bank.slot(),
&bank,
Some(&self.blocktree),
Some(&self.blockstore),
);
assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =
@ -126,6 +126,33 @@ impl PohRecorder {
self.ticks_per_slot
}
fn received_any_previous_leader_data(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|i| {
// Check if we have received any data in previous leader's slots
if let Ok(slot_meta) = self.blockstore.meta(i as Slot) {
if let Some(slot_meta) = slot_meta {
slot_meta.received > 0
} else {
false
}
} else {
false
}
})
}
fn reached_leader_tick(&self, leader_first_tick_height: u64) -> bool {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
let current_slot = self.tick_height / self.ticks_per_slot;
// we've approached target_tick_height OR poh was reset to run immediately
// Or, previous leader didn't transmit in any of its leader slots, so ignore grace ticks
self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
|| (self.tick_height >= ideal_target_tick_height
&& !self.received_any_previous_leader_data(current_slot))
}
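// Worked example of the grace-tick arithmetic above, assuming the default
// 64 ticks per slot and MAX_GRACE_SLOTS = 2 (so grace_ticks = 128): for a
// leader whose first tick height is 640, target_tick_height = 639 and
// ideal_target_tick_height = 511. The leader may start at tick 511 only if
// no shreds arrived in the previous leader's slots; otherwise it waits out
// the grace ticks until tick 639.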
/// returns if leader slot has been reached, how many grace ticks were afforded,
/// imputed leader_slot and self.start_slot
/// reached_leader_slot() == true means "ready for a bank"
@ -143,10 +170,7 @@ impl PohRecorder {
let next_leader_slot = (next_tick_height - 1) / self.ticks_per_slot;
if let Some(leader_first_tick_height) = self.leader_first_tick_height {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
// we've approached target_tick_height OR poh was reset to run immediately
if self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
{
if self.reached_leader_tick(leader_first_tick_height) {
assert!(next_tick_height >= self.start_tick_height);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
@ -401,7 +425,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64,
id: &Pubkey,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
clear_bank_signal: Option<SyncSender<bool>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
@ -427,7 +451,7 @@ impl PohRecorder {
leader_last_tick_height,
grace_ticks,
id: *id,
blocktree: blocktree.clone(),
blockstore: blockstore.clone(),
leader_schedule_cache: leader_schedule_cache.clone(),
ticks_per_slot,
poh_config: poh_config.clone(),
@ -446,7 +470,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64,
id: &Pubkey,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
) -> (Self, Receiver<WorkingBankEntry>) {
@ -457,7 +481,7 @@ impl PohRecorder {
next_leader_slot,
ticks_per_slot,
id,
blocktree,
blockstore,
None,
leader_schedule_cache,
poh_config,
@ -469,7 +493,8 @@ impl PohRecorder {
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use bincode::serialize;
use solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::hash;
@ -480,8 +505,8 @@ mod tests {
let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
@ -490,7 +515,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -499,7 +524,7 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[0].1, 1);
assert_eq!(poh_recorder.tick_height, 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -507,8 +532,8 @@ mod tests {
let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
@ -517,7 +542,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -527,15 +552,15 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[1].1, 2);
assert_eq!(poh_recorder.tick_height, 2);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_reset_clears_cache() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -543,7 +568,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -552,15 +577,15 @@ mod tests {
poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_clear() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -571,7 +596,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -586,15 +611,15 @@ mod tests {
poh_recorder.clear_bank();
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_tick_sent_after_min() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -605,7 +630,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -635,15 +660,15 @@ mod tests {
assert_eq!(num_entries, 3);
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_tick_sent_upto_and_including_max() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -654,7 +679,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -682,15 +707,15 @@ mod tests {
}
assert_eq!(num_entries, 3);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_to_early() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -701,7 +726,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -720,15 +745,15 @@ mod tests {
.is_err());
assert!(entry_receiver.try_recv().is_err());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_bad_slot() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -739,7 +764,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -760,15 +785,15 @@ mod tests {
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_at_min_passes() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -779,7 +804,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -806,15 +831,15 @@ mod tests {
let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2");
assert!(!e.is_tick());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_at_max_fails() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -825,7 +850,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -850,15 +875,15 @@ mod tests {
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert!(entry.is_tick());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_cache_on_disconnect() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -869,7 +894,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -888,15 +913,15 @@ mod tests {
assert!(poh_recorder.working_bank.is_none());
assert_eq!(poh_recorder.tick_cache.len(), 3);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_current() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -904,7 +929,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -915,15 +940,15 @@ mod tests {
poh_recorder.reset(hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_with_cached() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -931,7 +956,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -941,7 +966,7 @@ mod tests {
poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -950,8 +975,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -959,7 +984,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -974,15 +999,15 @@ mod tests {
poh_recorder.tick();
assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_clear_bank() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
@ -992,7 +1017,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1005,15 +1030,15 @@ mod tests {
poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
pub fn test_clear_signal() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (sender, receiver) = sync_channel(1);
@ -1024,7 +1049,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
Some(sender),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
@ -1033,7 +1058,7 @@ mod tests {
poh_recorder.clear_bank();
assert!(receiver.try_recv().is_ok());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1041,8 +1066,8 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let ticks_per_slot = 5;
let GenesisConfigInfo {
mut genesis_config, ..
@ -1058,7 +1083,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1085,17 +1110,17 @@ mod tests {
// Make sure the starting slot is updated
assert_eq!(poh_recorder.start_slot, end_slot);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reached_leader_slot() {
fn test_reached_leader_tick() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -1106,7 +1131,61 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
assert_eq!(poh_recorder.reached_leader_tick(0), true);
let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS;
let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot();
for _ in 0..new_tick_height {
poh_recorder.tick();
}
poh_recorder.grace_ticks = grace_ticks;
// True, as previous leader did not transmit in its slots
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
true
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// False, as previous leader transmitted in one of its recent slots
// and grace ticks have not expired
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
false
);
}
}
#[test]
fn test_reached_leader_slot() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1134,6 +1213,13 @@ mod tests {
init_ticks + bank.ticks_per_slot()
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// Test that we don't reach the leader slot because of grace ticks
assert_eq!(poh_recorder.reached_leader_slot().0, false);
@ -1207,15 +1293,15 @@ mod tests {
assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot());
assert_eq!(leader_slot, 9);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_would_be_leader_soon() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -1226,7 +1312,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1281,8 +1367,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
// test that virtual ticks are flushed into a newly set bank asap
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let genesis_hash = bank.last_blockhash();
@ -1294,7 +1380,7 @@ mod tests {
Some((2, 2)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);


@ -124,7 +124,7 @@ mod tests {
use crate::poh_recorder::WorkingBank;
use crate::result::Result;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::hash::hash;
@ -138,8 +138,8 @@ mod tests {
let prev_hash = bank.last_blockhash();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let poh_config = Arc::new(PohConfig {
hashes_per_tick: Some(2),
target_tick_duration: Duration::from_millis(42),
@ -152,7 +152,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config,
);
@ -231,6 +231,6 @@ mod tests {
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}
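Distilled from the hunks above, the one scaffold every converted test now repeats; a sketch using only the calls that appear in this diff:
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
fn blockstore_test_scaffold() {
    let ledger_path = get_tmp_ledger_path!();
    {
        // Open the renamed Blockstore in a temporary ledger directory.
        let blockstore = Blockstore::open(&ledger_path)
            .expect("Expected to be able to open database ledger");
        // ... exercise `blockstore` here ...
    }
    // The handle is dropped at end of scope before the directory is destroyed.
    Blockstore::destroy(&ledger_path).unwrap();
}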


@ -6,7 +6,7 @@ use crate::{
};
use solana_ledger::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta},
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@ -71,7 +71,7 @@ pub struct RepairService {
impl RepairService {
pub fn new(
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -81,7 +81,7 @@ impl RepairService {
RepairStrategy::RepairAll {
ref epoch_schedule, ..
} => Some(ClusterInfoRepairListener::new(
&blocktree,
&blockstore,
&exit,
cluster_info.clone(),
*epoch_schedule,
@ -94,7 +94,7 @@ impl RepairService {
.name("solana-repair-service".to_string())
.spawn(move || {
Self::run(
&blocktree,
&blockstore,
&exit,
&repair_socket,
&cluster_info,
@ -110,7 +110,7 @@ impl RepairService {
}
fn run(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
repair_socket: &Arc<UdpSocket>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -123,10 +123,10 @@ impl RepairService {
ref epoch_schedule, ..
} = repair_strategy
{
current_root = blocktree.last_root();
current_root = blockstore.last_root();
Self::initialize_epoch_slots(
id,
blocktree,
blockstore,
&mut epoch_slots,
current_root,
epoch_schedule,
@ -143,7 +143,7 @@ impl RepairService {
RepairStrategy::RepairRange(ref repair_slot_range) => {
// Strategy used by archivers
Self::generate_repairs_in_range(
blocktree,
blockstore,
MAX_REPAIR_LENGTH,
repair_slot_range,
)
@ -153,8 +153,8 @@ impl RepairService {
ref completed_slots_receiver,
..
} => {
let new_root = blocktree.last_root();
let lowest_slot = blocktree.lowest_slot();
let new_root = blockstore.last_root();
let lowest_slot = blockstore.lowest_slot();
Self::update_epoch_slots(
id,
new_root,
@ -164,7 +164,7 @@ impl RepairService {
&cluster_info,
completed_slots_receiver,
);
Self::generate_repairs(blocktree, new_root, MAX_REPAIR_LENGTH)
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
};
@ -195,7 +195,7 @@ impl RepairService {
// Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end
pub fn generate_repairs_in_range(
blocktree: &Blocktree,
blockstore: &Blockstore,
max_repairs: usize,
repair_range: &RepairSlotRange,
) -> Result<Vec<RepairType>> {
@ -206,7 +206,7 @@ impl RepairService {
break;
}
let meta = blocktree
let meta = blockstore
.meta(slot)
.expect("Unable to lookup slot meta")
.unwrap_or(SlotMeta {
@ -215,7 +215,7 @@ impl RepairService {
});
let new_repairs = Self::generate_repairs_for_slot(
blocktree,
blockstore,
slot,
&meta,
max_repairs - repairs.len(),
@ -227,18 +227,18 @@ impl RepairService {
}
fn generate_repairs(
blocktree: &Blocktree,
blockstore: &Blockstore,
root: Slot,
max_repairs: usize,
) -> Result<Vec<RepairType>> {
// Slot height and shred indexes for shreds we want to repair
let mut repairs: Vec<RepairType> = vec![];
Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root);
Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root);
// TODO: Incorporate gossip to determine priorities for repair?
// Try to resolve orphans in blocktree
let mut orphans = blocktree.get_orphans(Some(MAX_ORPHANS));
// Try to resolve orphans in blockstore
let mut orphans = blockstore.get_orphans(Some(MAX_ORPHANS));
orphans.retain(|x| *x > root);
Self::generate_repairs_for_orphans(&orphans[..], &mut repairs);
@ -246,7 +246,7 @@ impl RepairService {
}
fn generate_repairs_for_slot(
blocktree: &Blocktree,
blockstore: &Blockstore,
slot: Slot,
slot_meta: &SlotMeta,
max_repairs: usize,
@ -256,7 +256,7 @@ impl RepairService {
} else if slot_meta.consumed == slot_meta.received {
vec![RepairType::HighestShred(slot, slot_meta.received)]
} else {
let reqs = blocktree.find_missing_data_indexes(
let reqs = blockstore.find_missing_data_indexes(
slot,
slot_meta.first_shred_timestamp,
slot_meta.consumed,
@ -275,7 +275,7 @@ impl RepairService {
/// Repairs any fork starting at the input slot
fn generate_repairs_for_fork(
blocktree: &Blocktree,
blockstore: &Blockstore,
repairs: &mut Vec<RepairType>,
max_repairs: usize,
slot: Slot,
@ -283,9 +283,9 @@ impl RepairService {
let mut pending_slots = vec![slot];
while repairs.len() < max_repairs && !pending_slots.is_empty() {
let slot = pending_slots.pop().unwrap();
if let Some(slot_meta) = blocktree.meta(slot).unwrap() {
if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
let new_repairs = Self::generate_repairs_for_slot(
blocktree,
blockstore,
slot,
&slot_meta,
max_repairs - repairs.len(),
@ -300,7 +300,7 @@ impl RepairService {
}
fn get_completed_slots_past_root(
blocktree: &Blocktree,
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
@ -308,7 +308,7 @@ impl RepairService {
let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root);
let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch);
let meta_iter = blocktree
let meta_iter = blockstore
.slot_meta_iterator(root + 1)
.expect("Couldn't get db iterator");
@ -324,22 +324,22 @@ impl RepairService {
fn initialize_epoch_slots(
id: Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
cluster_info: &RwLock<ClusterInfo>,
) {
Self::get_completed_slots_past_root(blocktree, slots_in_gossip, root, epoch_schedule);
Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule);
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blocktree_processor) and thus
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info.write().unwrap().push_epoch_slots(
id,
root,
blocktree.lowest_slot(),
blockstore.lowest_slot(),
slots_in_gossip.clone(),
);
}
@ -409,60 +409,60 @@ mod test {
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana_ledger::blocktree::{
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::sync::mpsc::channel;
use std::thread::Builder;
#[test]
pub fn test_repair_orphan() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create some orphan slots
let (mut shreds, _) = make_slot_entries(1, 0, 1);
let (shreds2, _) = make_slot_entries(5, 2, 1);
shreds.extend(shreds2);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_empty_slot() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(2, 0, 1);
// Write this shred to slot 2, should chain to slot 0, which we haven't received
// any shreds for
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Check that repair tries to patch the empty slot
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0)]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
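The decision these two tests pin down, modeled standalone; the enum below is a simplified stand-in for RepairType, and the real code finds holes with find_missing_data_indexes rather than assuming every index in the gap is missing:
#[derive(Debug, PartialEq)]
enum Repair {
    Shred(u64, u64),        // (slot, missing shred index)
    HighestShred(u64, u64), // (slot, first index past what we hold)
}
fn repairs_for_slot(slot: u64, consumed: u64, received: u64, is_full: bool) -> Vec<Repair> {
    if is_full {
        vec![] // A complete slot needs nothing.
    } else if consumed == received {
        // No holes yet: ask peers for whatever follows `received`.
        vec![Repair::HighestShred(slot, received)]
    } else {
        // Holes between consumed and received: request each one (simplified).
        (consumed..received).map(|i| Repair::Shred(slot, i)).collect()
    }
}
fn main() {
    assert_eq!(repairs_for_slot(0, 0, 0, false), vec![Repair::HighestShred(0, 0)]);
    assert_eq!(repairs_for_slot(3, 1, 3, false), vec![Repair::Shred(3, 1), Repair::Shred(3, 2)]);
    assert!(repairs_for_slot(5, 8, 8, true).is_empty());
}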
#[test]
pub fn test_generate_repairs() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let nth = 3;
let num_slots = 2;
@ -483,7 +483,7 @@ mod test {
missing_indexes_per_slot.insert(0, index);
}
}
blocktree
blockstore
.insert_shreds(shreds_to_write, None, false)
.unwrap();
// sleep so that the holes are ready for repair
@ -497,23 +497,23 @@ mod test {
.collect();
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, expected.len() - 2).unwrap()[..],
RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_generate_highest_repair() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 100;
@ -524,25 +524,25 @@ mod test {
// Remove last shred (which is also last in slot) so that slot is not complete
shreds.pop();
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// We didn't get the last shred for this slot, so ask for the highest shred for that slot
let expected: Vec<RepairType> =
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_range() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
@ -550,7 +550,7 @@ mod test {
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds, None, false).unwrap();
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
}
// sleep to make slot eligible for repair
sleep(Duration::from_secs(1));
@ -574,7 +574,7 @@ mod test {
assert_eq!(
RepairService::generate_repairs_in_range(
&blocktree,
&blockstore,
std::usize::MAX,
&repair_slot_range
)
@ -584,14 +584,14 @@ mod test {
}
}
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_range_highest() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10;
@ -603,7 +603,7 @@ mod test {
let parent = if i > 0 { i - 1 } else { 0 };
let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
}
let end = 4;
@ -619,7 +619,7 @@ mod test {
assert_eq!(
RepairService::generate_repairs_in_range(
&blocktree,
&blockstore,
std::usize::MAX,
&repair_slot_range
)
@ -627,14 +627,14 @@ mod test {
expected
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_completed_slots_past_root() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10;
let root = 10;
@ -656,8 +656,8 @@ mod test {
.collect();
let mut full_slots = BTreeSet::new();
blocktree.insert_shreds(fork1_shreds, None, false).unwrap();
blocktree
blockstore.insert_shreds(fork1_shreds, None, false).unwrap();
blockstore
.insert_shreds(fork2_incomplete_shreds, None, false)
.unwrap();
@ -665,7 +665,7 @@ mod test {
let epoch_schedule = EpochSchedule::custom(32, 32, false);
RepairService::get_completed_slots_past_root(
&blocktree,
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
@ -682,9 +682,9 @@ mod test {
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
blocktree.insert_shreds(fork3_shreds, None, false).unwrap();
blockstore.insert_shreds(fork3_shreds, None, false).unwrap();
RepairService::get_completed_slots_past_root(
&blocktree,
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
@ -692,25 +692,25 @@ mod test {
expected.insert(last_slot);
assert_eq!(full_slots, expected);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_update_epoch_slots() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
// Create blocktree
let (blocktree, _, completed_slots_receiver) =
Blocktree::open_with_signal(&blocktree_path).unwrap();
// Create blockstore
let (blockstore, _, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path).unwrap();
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let mut root = 0;
let num_slots = 100;
let entries_per_slot = 5;
let blocktree_ = blocktree.clone();
let blockstore_ = blockstore.clone();
// Spin up thread to write to blocktree
// Spin up thread to write to blockstore
let writer = Builder::new()
.name("writer".to_string())
.spawn(move || {
@ -729,7 +729,7 @@ mod test {
let step = rng.gen_range(1, max_step + 1) as usize;
let step = std::cmp::min(step, num_shreds - i);
let shreds_to_insert = shreds.drain(..step).collect_vec();
blocktree_
blockstore_
.insert_shreds(shreds_to_insert, None, false)
.unwrap();
sleep(Duration::from_millis(repair_interval_ms));
@ -748,7 +748,7 @@ mod test {
RepairService::update_epoch_slots(
Pubkey::default(),
root,
blocktree.lowest_slot(),
blockstore.lowest_slot(),
&mut root.clone(),
&mut completed_slots,
&cluster_info,
@ -762,7 +762,7 @@ mod test {
// Update with new root, should filter out the slots <= root
root = num_slots / 2;
let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
RepairService::update_epoch_slots(
Pubkey::default(),
root,
@ -777,7 +777,7 @@ mod test {
assert_eq!(completed_slots, expected);
writer.join().unwrap();
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]


@ -7,18 +7,17 @@ use crate::{
poh_recorder::PohRecorder,
result::{Error, Result},
rpc_subscriptions::RpcSubscriptions,
thread_mem_usage,
};
use solana_ledger::{
bank_forks::BankForks,
block_error::BlockError,
blocktree::{Blocktree, BlocktreeError},
blocktree_processor::{self, TransactionStatusSender},
blockstore::{Blockstore, BlockstoreError},
blockstore_processor::{self, TransactionStatusSender},
entry::{Entry, EntrySlice, VerifyRecyclers},
leader_schedule_cache::LeaderScheduleCache,
snapshot_package::SnapshotPackageSender,
};
use solana_measure::measure::Measure;
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::{
@ -180,7 +179,7 @@ impl ReplayStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(
config: ReplayStageConfig,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<RwLock<ClusterInfo>>,
ledger_signal_receiver: Receiver<bool>,
@ -209,6 +208,7 @@ impl ReplayStage {
let (lockouts_sender, commitment_service) =
AggregateCommitmentService::new(&exit, block_commitment_cache);
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
.name("solana-replay-stage".to_string())
.spawn(move || {
@ -237,7 +237,7 @@ impl ReplayStage {
let start = allocated.get();
Self::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -255,7 +255,7 @@ impl ReplayStage {
let start = allocated.get();
let did_complete_bank = Self::replay_active_banks(
&blocktree,
&blockstore,
&bank_forks,
&my_pubkey,
&mut progress,
@ -311,7 +311,7 @@ impl ReplayStage {
&vote_account,
&voting_keypair,
&cluster_info,
&blocktree,
&blockstore,
&leader_schedule_cache,
&root_bank_sender,
stats.total_staked,
@ -328,7 +328,7 @@ impl ReplayStage {
if last_reset != bank.last_blockhash() {
Self::reset_poh_recorder(
&my_pubkey,
&blocktree,
&blockstore,
&bank,
&poh_recorder,
&leader_schedule_cache,
@ -409,7 +409,7 @@ impl ReplayStage {
match result {
Err(RecvTimeoutError::Timeout) => continue,
Err(_) => break,
Ok(_) => trace!("blocktree signal"),
Ok(_) => trace!("blockstore signal"),
};
}
Ok(())
@ -535,15 +535,15 @@ impl ReplayStage {
!Bank::can_commit(&tx_error)
}
Err(Error::BlockError(_)) => true,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) => true,
_ => false,
}
}
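The fatality rule above, restated as a self-contained model; the types are simplified stand-ins, and the real transaction arm defers to Bank::can_commit:
#[derive(Debug)]
enum ReplayError {
    InvalidShredData, // corrupt shred data in the blockstore
    BlockError,       // e.g. an invalid tick count
    Transient,        // anything replay may retry
}
fn marks_slot_dead(result: &Result<(), ReplayError>) -> bool {
    matches!(
        result,
        Err(ReplayError::InvalidShredData) | Err(ReplayError::BlockError)
    )
}
fn main() {
    assert!(marks_slot_dead(&Err(ReplayError::InvalidShredData)));
    assert!(!marks_slot_dead(&Err(ReplayError::Transient)));
    assert!(!marks_slot_dead(&Ok(())));
}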
// Returns the replay result and the number of replayed transactions
fn replay_blocktree_into_bank(
fn replay_blockstore_into_bank(
bank: &Arc<Bank>,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
@ -551,7 +551,7 @@ impl ReplayStage {
let mut tx_count = 0;
let now = Instant::now();
let load_result =
Self::load_blocktree_entries_with_shred_info(bank, blocktree, bank_progress);
Self::load_blockstore_entries_with_shred_info(bank, blockstore, bank_progress);
let fetch_entries_elapsed = now.elapsed().as_micros();
if load_result.is_err() {
bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64;
@ -590,17 +590,17 @@ impl ReplayStage {
("error", format!("error: {:?}", replay_result), String),
("slot", bank.slot(), i64)
);
Self::mark_dead_slot(bank.slot(), blocktree, bank_progress);
Self::mark_dead_slot(bank.slot(), blockstore, bank_progress);
}
(replay_result, tx_count)
}
fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) {
fn mark_dead_slot(slot: Slot, blockstore: &Blockstore, bank_progress: &mut ForkProgress) {
bank_progress.is_dead = true;
blocktree
blockstore
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blocktree");
.expect("Failed to mark slot as dead in blockstore");
}
#[allow(clippy::too_many_arguments)]
@ -612,7 +612,7 @@ impl ReplayStage {
vote_account: &Pubkey,
voting_keypair: &Option<Arc<Keypair>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
total_staked: u64,
@ -636,12 +636,12 @@ impl ReplayStage {
let mut rooted_banks = root_bank.parents();
rooted_banks.push(root_bank);
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
// Call leader schedule_cache.set_root() before blocktree.set_root() because
// Call leader schedule_cache.set_root() before blockstore.set_roots() because
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
// get shreds for repair on gossip before we update leader schedule, otherwise they may
// get dropped.
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
blocktree
blockstore
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
bank_forks
@ -698,13 +698,17 @@ impl ReplayStage {
fn reset_poh_recorder(
my_pubkey: &Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank: &Arc<Bank>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
) {
let next_leader_slot =
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree));
let next_leader_slot = leader_schedule_cache.next_leader_slot(
&my_pubkey,
bank.slot(),
&bank,
Some(blockstore),
);
poh_recorder
.lock()
.unwrap()
@ -726,7 +730,7 @@ impl ReplayStage {
}
fn replay_active_banks(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
bank_forks: &Arc<RwLock<BankForks>>,
my_pubkey: &Pubkey,
progress: &mut HashMap<u64, ForkProgress>,
@ -755,9 +759,9 @@ impl ReplayStage {
.entry(bank.slot())
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
if bank.collector_id() != my_pubkey {
let (replay_result, replay_tx_count) = Self::replay_blocktree_into_bank(
let (replay_result, replay_tx_count) = Self::replay_blockstore_into_bank(
&bank,
&blocktree,
&blockstore,
bank_progress,
transaction_status_sender.clone(),
verify_recyclers,
@ -958,12 +962,12 @@ impl ReplayStage {
}
}
fn load_blocktree_entries_with_shred_info(
fn load_blockstore_entries_with_shred_info(
bank: &Bank,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
) -> Result<(Vec<Entry>, usize, bool)> {
blocktree
blockstore
.get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64)
.map_err(|err| err.into())
}
@ -1073,7 +1077,7 @@ impl ReplayStage {
let mut replay_elapsed = Measure::start("replay_elapsed");
let res =
blocktree_processor::process_entries(bank, entries, true, transaction_status_sender);
blockstore_processor::process_entries(bank, entries, true, transaction_status_sender);
replay_elapsed.stop();
bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
@ -1103,7 +1107,6 @@ impl ReplayStage {
slot_full_senders: &[Sender<(u64, Pubkey)>],
) {
bank.freeze();
info!("bank frozen {}", bank.slot());
slot_full_senders.iter().for_each(|sender| {
if let Err(e) = sender.send((bank.slot(), *bank.collector_id())) {
trace!("{} slot_full alert failed: {:?}", my_pubkey, e);
@ -1112,7 +1115,7 @@ impl ReplayStage {
}
fn generate_new_bank_forks(
blocktree: &Blocktree,
blockstore: &Blockstore,
forks_lock: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
@ -1121,7 +1124,7 @@ impl ReplayStage {
let forks = forks_lock.read().unwrap();
let frozen_banks = forks.frozen_banks();
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
let next_slots = blocktree
let next_slots = blockstore
.get_slots_since(&frozen_bank_slots)
.expect("Db error");
// Filter out what we've already seen
@ -1182,9 +1185,10 @@ pub(crate) mod tests {
transaction_status_service::TransactionStatusService,
};
use crossbeam_channel::unbounded;
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
blocktree::make_slot_entries,
blocktree::{entries_to_test_shreds, BlocktreeError},
blockstore::make_slot_entries,
blockstore::{entries_to_test_shreds, BlockstoreError},
create_new_tmp_ledger,
entry::{self, next_entry},
get_tmp_ledger_path,
@ -1194,22 +1198,21 @@ pub(crate) mod tests {
},
};
use solana_runtime::genesis_utils::GenesisConfigInfo;
use solana_sdk::account::Account;
use solana_sdk::rent::Rent;
use solana_sdk::{
account::Account,
hash::{hash, Hash},
instruction::InstructionError,
packet::PACKET_DATA_SIZE,
rent::Rent,
signature::{Keypair, KeypairUtil, Signature},
system_transaction,
transaction::TransactionError,
};
use solana_stake_program::stake_state;
use solana_vote_program::vote_state;
use solana_vote_program::vote_state::{Vote, VoteState};
use std::iter;
use solana_vote_program::vote_state::{self, Vote, VoteState};
use std::{
fs::remove_dir_all,
iter,
sync::{Arc, RwLock},
};
@ -1494,8 +1497,9 @@ pub(crate) mod tests {
fn test_child_slots_of_same_parent() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let genesis_config = create_genesis_config(10_000).genesis_config;
@ -1507,11 +1511,11 @@ pub(crate) mod tests {
// Insert shred for slot 1, generate new forks, check result
let (shreds, _) = make_slot_entries(1, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.get(1).is_none());
let bank_forks = RwLock::new(bank_forks);
ReplayStage::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -1520,10 +1524,10 @@ pub(crate) mod tests {
// Insert shred for slot 2, generate new forks, check result
let (shreds, _) = make_slot_entries(2, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.read().unwrap().get(2).is_none());
ReplayStage::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -1745,7 +1749,7 @@ pub(crate) mod tests {
assert_matches!(
res,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_)))
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_)))
);
}
@ -1757,8 +1761,9 @@ pub(crate) mod tests {
{
let ledger_path = get_tmp_ledger_path!();
let res = {
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let GenesisConfigInfo {
mut genesis_config,
@ -1773,10 +1778,10 @@ pub(crate) mod tests {
.entry(bank0.slot())
.or_insert_with(|| ForkProgress::new(0, last_blockhash));
let shreds = shred_to_insert(&mint_keypair, bank0.clone());
blocktree.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) = ReplayStage::replay_blocktree_into_bank(
blockstore.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) = ReplayStage::replay_blockstore_into_bank(
&bank0,
&blocktree,
&blockstore,
&mut bank0_progress,
None,
&VerifyRecyclers::default(),
@ -1788,8 +1793,8 @@ pub(crate) mod tests {
.map(|b| b.is_dead)
.unwrap_or(false));
// Check that the erroring bank was marked as dead in blocktree
assert!(blocktree.is_dead(bank0.slot()));
// Check that the erroring bank was marked as dead in blockstore
assert!(blockstore.is_dead(bank0.slot()));
res
};
let _ignored = remove_dir_all(&ledger_path);
@ -1897,11 +1902,11 @@ pub(crate) mod tests {
);
}
pub fn create_test_transactions_and_populate_blocktree(
pub fn create_test_transactions_and_populate_blockstore(
keypairs: Vec<&Keypair>,
previous_slot: Slot,
bank: Arc<Bank>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
) -> Vec<Signature> {
let mint_keypair = keypairs[0];
let keypair1 = keypairs[1];
@ -1928,19 +1933,19 @@ pub(crate) mod tests {
let entries = vec![entry_1, entry_2, entry_3];
let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[slot]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[slot]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blocktree.clone(),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
// Check that process_entries successfully writes can_commit transactions statuses, and
// that they are matched properly by get_confirmed_block
let _result = blocktree_processor::process_entries(
let _result = blockstore_processor::process_entries(
&bank,
&entries,
true,
@ -1961,9 +1966,9 @@ pub(crate) mod tests {
} = create_genesis_config(1000);
let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
{
let blocktree = Blocktree::open(&ledger_path)
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to successfully open database ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
@ -1977,32 +1982,36 @@ pub(crate) mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot = bank1.slot();
let signatures = create_test_transactions_and_populate_blocktree(
let signatures = create_test_transactions_and_populate_blockstore(
vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
bank0.slot(),
bank1,
blocktree.clone(),
blockstore.clone(),
);
let confirmed_block = blocktree.get_confirmed_block(slot).unwrap();
let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() {
if transaction.signatures[0] == signatures[0] {
assert_eq!(result.unwrap().status, Ok(()));
} else if transaction.signatures[0] == signatures[1] {
assert_eq!(
result.unwrap().status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
} else {
assert_eq!(result, None);
for RpcTransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == signatures[0].to_string() {
assert_eq!(meta.unwrap().status, Ok(()));
} else if transaction.signatures[0] == signatures[1].to_string() {
assert_eq!(
meta.unwrap().status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
} else {
assert_eq!(meta, None);
}
}
}
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}


@ -3,7 +3,7 @@
use crate::cluster_info;
use crate::poh_recorder;
use solana_ledger::block_error;
use solana_ledger::blocktree;
use solana_ledger::blockstore;
use solana_ledger::snapshot_utils;
use solana_sdk::transaction;
use std::any::Any;
@ -27,7 +27,7 @@ pub enum Error {
SendError,
PohRecorderError(poh_recorder::PohRecorderError),
BlockError(block_error::BlockError),
BlocktreeError(blocktree::BlocktreeError),
BlockstoreError(blockstore::BlockstoreError),
FsExtra(fs_extra::error::Error),
SnapshotError(snapshot_utils::SnapshotError),
}
@ -127,9 +127,9 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e)
}
}
impl std::convert::From<blocktree::BlocktreeError> for Error {
fn from(e: blocktree::BlocktreeError) -> Error {
Error::BlocktreeError(e)
impl std::convert::From<blockstore::BlockstoreError> for Error {
fn from(e: blockstore::BlockstoreError) -> Error {
Error::BlockstoreError(e)
}
}
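Why these From impls matter, shown in a self-contained miniature: `?` converts the source error into the crate-level Error automatically, so blockstore calls propagate without explicit map_err:
#[derive(Debug)]
struct BlockstoreError;
#[derive(Debug)]
enum Error {
    BlockstoreError(BlockstoreError),
}
impl From<BlockstoreError> for Error {
    fn from(e: BlockstoreError) -> Error {
        Error::BlockstoreError(e)
    }
}
fn read_meta() -> Result<(), BlockstoreError> {
    Err(BlockstoreError) // pretend the column lookup failed
}
fn run() -> Result<(), Error> {
    read_meta()?; // BlockstoreError -> Error::BlockstoreError via From
    Ok(())
}
fn main() {
    assert!(matches!(run(), Err(Error::BlockstoreError(_))));
}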
impl std::convert::From<snapshot_utils::SnapshotError> for Error {


@ -12,7 +12,7 @@ use crate::{
use crossbeam_channel::Receiver as CrossbeamReceiver;
use solana_ledger::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver},
blockstore::{Blockstore, CompletedSlotsReceiver},
leader_schedule_cache::LeaderScheduleCache,
staking_utils,
};
@ -205,7 +205,7 @@ impl RetransmitStage {
pub fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
@ -234,7 +234,7 @@ impl RetransmitStage {
};
let leader_schedule_cache = leader_schedule_cache.clone();
let window_service = WindowService::new(
blocktree,
blockstore,
cluster_info.clone(),
verified_receiver,
retransmit_sender,
@ -281,7 +281,7 @@ mod tests {
use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::packet::{self, Meta, Packet, Packets};
use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions};
use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
use solana_ledger::create_new_tmp_ledger;
use solana_net_utils::find_available_port_in_range;
use solana_sdk::pubkey::Pubkey;
@ -290,13 +290,13 @@ mod tests {
fn test_skip_repair() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions {
full_leader_cache: true,
..ProcessOptions::default()
};
let (bank_forks, _, cached_leader_schedule) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));
@ -332,7 +332,7 @@ mod tests {
// it should send this over the sockets.
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
@ -348,7 +348,7 @@ mod tests {
let packets = Packets::new(vec![repair, Packet::default()]);
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
}


@ -11,21 +11,21 @@ use crate::{
use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_request::{
Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule,
RpcResponseContext, RpcVersionInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
use solana_client::rpc_response::{
Response, RpcAccount, RpcBlockCommitment, RpcBlockhashFeeCalculator, RpcConfirmedBlock,
RpcContactInfo, RpcEpochInfo, RpcKeyedAccount, RpcLeaderSchedule, RpcResponseContext,
RpcSignatureConfirmation, RpcStorageTurn, RpcTransactionEncoding, RpcVersionInfo,
RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{
bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator,
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account,
clock::{Slot, UnixTimestamp},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_schedule::EpochSchedule,
fee_calculator::FeeCalculator,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
@ -68,7 +68,7 @@ impl Default for JsonRpcConfig {
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
config: JsonRpcConfig,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
@ -93,7 +93,7 @@ impl JsonRpcRequestProcessor {
config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
) -> Self {
@ -101,7 +101,7 @@ impl JsonRpcRequestProcessor {
config,
bank_forks,
block_commitment_cache,
blocktree,
blockstore,
storage_state,
validator_exit,
}
@ -111,10 +111,10 @@ impl JsonRpcRequestProcessor {
&self,
pubkey: Result<Pubkey>,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<Option<Account>> {
) -> RpcResponse<Option<RpcAccount>> {
let bank = &*self.bank(commitment);
match pubkey {
Ok(key) => new_response(bank, bank.get_account(&key)),
Ok(key) => new_response(bank, bank.get_account(&key).map(RpcAccount::encode)),
Err(e) => Err(e),
}
}
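A sketch of the wire shape RpcAccount::encode appears to produce, inferred from the updated test expectations later in this diff (base58 owner, string-encoded data, camelCase rentEpoch) rather than copied from the crate definition:
use serde::Deserialize;
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct EncodedAccount {
    owner: String, // base58 pubkey instead of a 32-byte array
    lamports: u64,
    data: String, // encoded account data instead of raw bytes
    executable: bool,
    rent_epoch: u64,
}
fn main() {
    let json = r#"{"owner":"11111111111111111111111111111111","lamports":20,"data":"","executable":false,"rentEpoch":0}"#;
    let account: EncodedAccount = serde_json::from_str(json).unwrap();
    assert_eq!(account.lamports, 20);
}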
@ -133,12 +133,15 @@ impl JsonRpcRequestProcessor {
&self,
program_id: &Pubkey,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<(String, Account)>> {
) -> Result<Vec<RpcKeyedAccount>> {
Ok(self
.bank(commitment)
.get_program_accounts(&program_id)
.into_iter()
.map(|(pubkey, account)| (pubkey.to_string(), account))
.map(|(pubkey, account)| RpcKeyedAccount {
pubkey: pubkey.to_string(),
account: RpcAccount::encode(account),
})
.collect())
}
@ -167,10 +170,16 @@ impl JsonRpcRequestProcessor {
fn get_recent_blockhash(
&self,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<(String, FeeCalculator)> {
) -> RpcResponse<RpcBlockhashFeeCalculator> {
let bank = &*self.bank(commitment);
let (blockhash, fee_calculator) = bank.confirmed_last_blockhash();
new_response(bank, (blockhash.to_string(), fee_calculator))
new_response(
bank,
RpcBlockhashFeeCalculator {
blockhash: blockhash.to_string(),
fee_calculator,
},
)
}
pub fn confirm_transaction(
@ -191,21 +200,25 @@ impl JsonRpcRequestProcessor {
}
}
fn get_block_commitment(&self, block: Slot) -> (Option<BlockCommitment>, u64) {
fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitment> {
let r_block_commitment = self.block_commitment_cache.read().unwrap();
(
r_block_commitment.get_block_commitment(block).cloned(),
r_block_commitment.total_stake(),
)
RpcBlockCommitment {
commitment: r_block_commitment.get_block_commitment(block).cloned(),
total_stake: r_block_commitment.total_stake(),
}
}
pub fn get_signature_confirmation_status(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
) -> Option<(usize, transaction::Result<()>)> {
) -> Option<RpcSignatureConfirmation> {
self.bank(commitment)
.get_signature_confirmation_status(&signature)
.map(|(confirmations, status)| RpcSignatureConfirmation {
confirmations,
status,
})
}
fn get_slot(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
@ -216,6 +229,19 @@ impl JsonRpcRequestProcessor {
Ok(self.bank(commitment).collector_id().to_string())
}
fn minimum_ledger_slot(&self) -> Result<Slot> {
match self.blockstore.slot_meta_iterator(0) {
Ok(mut metas) => match metas.next() {
Some((slot, _meta)) => Ok(slot),
None => Err(Error::invalid_request()),
},
Err(err) => {
warn!("slot_meta_iterator failed: {:?}", err);
Err(Error::invalid_request())
}
}
}
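Wire-level usage of the new method, mirroring the test added later in this diff:
fn main() {
    // Per the test below, a node that has not purged any ledger reports slot 0:
    //   {"jsonrpc":"2.0","result":0,"id":1}
    let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#;
    println!("{}", req);
}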
fn get_transaction_count(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
Ok(self.bank(commitment).transaction_count() as u64)
}
@ -282,21 +308,25 @@ impl JsonRpcRequestProcessor {
Ok(self.storage_state.get_storage_turn_rate())
}
fn get_storage_turn(&self) -> Result<(String, u64)> {
Ok((
self.storage_state.get_storage_blockhash().to_string(),
self.storage_state.get_slot(),
))
fn get_storage_turn(&self) -> Result<RpcStorageTurn> {
Ok(RpcStorageTurn {
blockhash: self.storage_state.get_storage_blockhash().to_string(),
slot: self.storage_state.get_slot(),
})
}
fn get_slots_per_segment(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
Ok(self.bank(commitment).slots_per_segment())
}
fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result<Vec<Pubkey>> {
Ok(self
fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result<Vec<String>> {
let pubkeys: Vec<String> = self
.storage_state
.get_pubkeys_for_slot(slot, &self.bank_forks))
.get_pubkeys_for_slot(slot, &self.bank_forks)
.iter()
.map(|pubkey| pubkey.to_string())
.collect();
Ok(pubkeys)
}
pub fn validator_exit(&self) -> Result<bool> {
@ -312,8 +342,12 @@ impl JsonRpcRequestProcessor {
}
}
pub fn get_confirmed_block(&self, slot: Slot) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blocktree.get_confirmed_block(slot).ok())
pub fn get_confirmed_block(
&self,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
}
pub fn get_confirmed_blocks(
@ -326,9 +360,9 @@ impl JsonRpcRequestProcessor {
return Ok(vec![]);
}
let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot));
let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree)
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
.unwrap()
.map(|(slot, _)| slot)
.collect();
@ -344,14 +378,14 @@ impl JsonRpcRequestProcessor {
// genesis (i.e. that this bank's slots_per_year will be applicable to any rooted slot being
// queried). If these values will be variable in the future, those timing parameters will
// need to be stored persistently, and the slot_duration calculation will likely need to be
// moved upstream into blocktree. Also, an explicit commitment level will need to be set.
// moved upstream into blockstore. Also, an explicit commitment level will need to be set.
let bank = self.bank(None);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(slot);
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self.blocktree.get_block_time(slot, slot_duration, stakes))
Ok(self.blockstore.get_block_time(slot, slot_duration, stakes))
}
}
@ -394,7 +428,7 @@ pub trait RpcSol {
meta: Self::Metadata,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<Option<Account>>;
) -> RpcResponse<Option<RpcAccount>>;
#[rpc(meta, name = "getProgramAccounts")]
fn get_program_accounts(
@ -402,7 +436,7 @@ pub trait RpcSol {
meta: Self::Metadata,
program_id_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<(String, Account)>>;
) -> Result<Vec<RpcKeyedAccount>>;
#[rpc(meta, name = "getMinimumBalanceForRentExemption")]
fn get_minimum_balance_for_rent_exemption(
@ -445,7 +479,7 @@ pub trait RpcSol {
&self,
meta: Self::Metadata,
block: Slot,
) -> Result<(Option<BlockCommitment>, u64)>;
) -> Result<RpcBlockCommitment<BlockCommitment>>;
#[rpc(meta, name = "getGenesisHash")]
fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>;
@ -463,7 +497,7 @@ pub trait RpcSol {
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<(String, FeeCalculator)>;
) -> RpcResponse<RpcBlockhashFeeCalculator>;
#[rpc(meta, name = "getSignatureStatus")]
fn get_signature_status(
@ -509,6 +543,9 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> Result<String>;
#[rpc(meta, name = "minimumLedgerSlot")]
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>;
#[rpc(meta, name = "getVoteAccounts")]
fn get_vote_accounts(
&self,
@ -520,7 +557,7 @@ pub trait RpcSol {
fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result<u64>;
#[rpc(meta, name = "getStorageTurn")]
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<(String, u64)>;
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn>;
#[rpc(meta, name = "getSlotsPerSegment")]
fn get_slots_per_segment(
@ -530,7 +567,7 @@ pub trait RpcSol {
) -> Result<u64>;
#[rpc(meta, name = "getStoragePubkeysForSlot")]
fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<Pubkey>>;
fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<String>>;
#[rpc(meta, name = "validatorExit")]
fn validator_exit(&self, meta: Self::Metadata) -> Result<bool>;
@ -549,7 +586,7 @@ pub trait RpcSol {
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<(usize, transaction::Result<()>)>>;
) -> Result<Option<RpcSignatureConfirmation>>;
#[rpc(meta, name = "getVersion")]
fn get_version(&self, meta: Self::Metadata) -> Result<RpcVersionInfo>;
@ -562,6 +599,7 @@ pub trait RpcSol {
&self,
meta: Self::Metadata,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>>;
#[rpc(meta, name = "getBlockTime")]
@ -599,7 +637,7 @@ impl RpcSol for RpcSolImpl {
meta: Self::Metadata,
pubkey_str: String,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<Option<Account>> {
) -> RpcResponse<Option<RpcAccount>> {
debug!("get_account_info rpc request received: {:?}", pubkey_str);
let pubkey = verify_pubkey(pubkey_str);
meta.request_processor
@ -629,7 +667,7 @@ impl RpcSol for RpcSolImpl {
meta: Self::Metadata,
program_id_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<(String, Account)>> {
) -> Result<Vec<RpcKeyedAccount>> {
debug!(
"get_program_accounts rpc request received: {:?}",
program_id_str
@ -688,11 +726,14 @@ impl RpcSol for RpcSolImpl {
None
}
}
let shred_version = cluster_info.my_data().shred_version;
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if ContactInfo::is_valid_address(&contact_info.gossip) {
if shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
{
Some(RpcContactInfo {
pubkey: contact_info.id.to_string(),
gossip: Some(contact_info.gossip),
@ -728,7 +769,7 @@ impl RpcSol for RpcSolImpl {
&self,
meta: Self::Metadata,
block: Slot,
) -> Result<(Option<BlockCommitment>, u64)> {
) -> Result<RpcBlockCommitment<BlockCommitment>> {
Ok(meta
.request_processor
.read()
@ -772,7 +813,7 @@ impl RpcSol for RpcSolImpl {
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<(String, FeeCalculator)> {
) -> RpcResponse<RpcBlockhashFeeCalculator> {
debug!("get_recent_blockhash rpc request received");
meta.request_processor
.read()
@ -787,7 +828,7 @@ impl RpcSol for RpcSolImpl {
commitment: Option<CommitmentConfig>,
) -> Result<Option<transaction::Result<()>>> {
self.get_signature_confirmation(meta, signature_str, commitment)
.map(|res| res.map(|x| x.1))
.map(|res| res.map(|x| x.status))
}
fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
@ -801,7 +842,7 @@ impl RpcSol for RpcSolImpl {
commitment: Option<CommitmentConfig>,
) -> Result<Option<usize>> {
self.get_signature_confirmation(meta, signature_str, commitment)
.map(|res| res.map(|x| x.0))
.map(|res| res.map(|x| x.confirmations))
}
fn get_signature_confirmation(
@ -809,7 +850,7 @@ impl RpcSol for RpcSolImpl {
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<(usize, transaction::Result<()>)>> {
) -> Result<Option<RpcSignatureConfirmation>> {
debug!(
"get_signature_confirmation rpc request received: {:?}",
signature_str
@ -909,7 +950,7 @@ impl RpcSol for RpcSolImpl {
.read()
.unwrap()
.get_signature_confirmation_status(signature, commitment.clone())
.map(|x| x.1);
.map(|x| x.status);
if signature_status == Some(Ok(())) {
info!("airdrop signature ok");
@ -968,6 +1009,10 @@ impl RpcSol for RpcSolImpl {
.get_slot_leader(commitment)
}
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> {
meta.request_processor.read().unwrap().minimum_ledger_slot()
}
fn get_vote_accounts(
&self,
meta: Self::Metadata,
@ -986,7 +1031,7 @@ impl RpcSol for RpcSolImpl {
.get_storage_turn_rate()
}
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<(String, u64)> {
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn> {
meta.request_processor.read().unwrap().get_storage_turn()
}
@ -1005,7 +1050,7 @@ impl RpcSol for RpcSolImpl {
&self,
meta: Self::Metadata,
slot: Slot,
) -> Result<Vec<Pubkey>> {
) -> Result<Vec<String>> {
meta.request_processor
.read()
.unwrap()
@ -1031,11 +1076,12 @@ impl RpcSol for RpcSolImpl {
&self,
meta: Self::Metadata,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
meta.request_processor
.read()
.unwrap()
.get_confirmed_block(slot)
.get_confirmed_block(slot, encoding)
}
fn get_confirmed_blocks(
@ -1061,11 +1107,13 @@ pub mod tests {
use crate::{
contact_info::ContactInfo,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
replay_stage::tests::create_test_transactions_and_populate_blocktree,
replay_stage::tests::create_test_transactions_and_populate_blockstore,
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks,
blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
};
use solana_sdk::{
@ -1103,12 +1151,12 @@ pub mod tests {
}
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
start_rpc_handler_with_tx_and_blocktree(pubkey, vec![], 0)
start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0)
}
fn start_rpc_handler_with_tx_and_blocktree(
fn start_rpc_handler_with_tx_and_blockstore(
pubkey: &Pubkey,
blocktree_roots: Vec<Slot>,
blockstore_roots: Vec<Slot>,
default_timestamp: i64,
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
@ -1126,21 +1174,21 @@ pub mod tests {
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blocktree = Arc::new(blocktree);
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
bank.transfer(4, &alice, &keypair2.pubkey()).unwrap();
let confirmed_block_signatures = create_test_transactions_and_populate_blocktree(
let confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank.clone(),
blocktree.clone(),
blockstore.clone(),
);
// Add timestamp vote to blocktree
// Add timestamp vote to blockstore
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
@ -1163,10 +1211,10 @@ pub mod tests {
true,
0,
);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[1]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[1]).unwrap();
let mut roots = blocktree_roots.clone();
let mut roots = blockstore_roots.clone();
if !roots.is_empty() {
roots.retain(|&x| x > 1);
let mut parent_bank = bank;
@ -1177,9 +1225,9 @@ pub mod tests {
parent_bank.squash();
bank_forks.write().unwrap().set_root(*root, &None);
let parent = if i > 0 { roots[i - 1] } else { 1 };
fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default());
fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default());
}
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
let new_bank = Bank::new_from_parent(
&parent_bank,
parent_bank.collector_id(),
@ -1205,7 +1253,7 @@ pub mod tests {
JsonRpcConfig::default(),
bank_forks.clone(),
block_commitment_cache.clone(),
blocktree,
blockstore,
StorageState::default(),
validator_exit,
)));
@ -1252,12 +1300,12 @@ pub mod tests {
let bank = bank_forks.read().unwrap().working_bank();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
bank_forks,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -1354,6 +1402,21 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_minimum_ledger_slot() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#.to_string();
let res = io.handle_request_sync(&req, meta);
let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#;
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_total_supply() {
let bob_pubkey = Pubkey::new_rand();
@ -1525,11 +1588,11 @@ pub mod tests {
"result": {
"context":{"slot":0},
"value":{
"owner": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"owner": "11111111111111111111111111111111",
"lamports": 20,
"data": [],
"data": "",
"executable": false,
"rent_epoch": 0
"rentEpoch": 0
},
},
"id": 1,
@ -1563,17 +1626,22 @@ pub mod tests {
let expected = format!(
r#"{{
"jsonrpc":"2.0",
"result":[["{}", {{
"owner": {:?},
"lamports": 20,
"data": [],
"executable": false,
"rent_epoch": 0
}}]],
"result":[
{{
"pubkey": "{}",
"account": {{
"owner": "{}",
"lamports": 20,
"data": "",
"executable": false,
"rentEpoch": 0
}}
}}
],
"id":1}}
"#,
bob.pubkey(),
new_program_id.as_ref()
new_program_id
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
@ -1700,14 +1768,17 @@ pub mod tests {
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":[ blockhash.to_string(), {
"burnPercent": DEFAULT_BURN_PERCENT,
"lamportsPerSignature": 0,
"maxLamportsPerSignature": 0,
"minLamportsPerSignature": 0,
"targetLamportsPerSignature": 0,
"targetSignaturesPerSlot": 0
}]},
"value":{
"blockhash": blockhash.to_string(),
"feeCalculator": {
"burnPercent": DEFAULT_BURN_PERCENT,
"lamportsPerSignature": 0,
"maxLamportsPerSignature": 0,
"minLamportsPerSignature": 0,
"targetLamportsPerSignature": 0,
"targetSignaturesPerSlot": 0
}
}},
"id": 1
});
let expected: Response =
@@ -1743,7 +1814,7 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
-let blocktree = Blocktree::open(&ledger_path).unwrap();
+let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut io = MetaIoHandler::default();
let rpc = RpcSolImpl;
@@ -1754,7 +1825,7 @@ pub mod tests {
JsonRpcConfig::default(),
new_bank_forks().0,
block_commitment_cache,
-Arc::new(blocktree),
+Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@@ -1847,12 +1918,12 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
-let blocktree = Blocktree::open(&ledger_path).unwrap();
+let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
new_bank_forks().0,
block_commitment_cache,
-Arc::new(blocktree),
+Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@@ -1866,14 +1937,14 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
-let blocktree = Blocktree::open(&ledger_path).unwrap();
+let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new(
config,
new_bank_forks().0,
block_commitment_cache,
-Arc::new(blocktree),
+Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@@ -1918,7 +1989,7 @@ pub mod tests {
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
-let blocktree = Blocktree::open(&ledger_path).unwrap();
+let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true;
@@ -1926,19 +1997,31 @@ pub mod tests {
config,
new_bank_forks().0,
block_commitment_cache,
-Arc::new(blocktree),
+Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
assert_eq!(
request_processor.get_block_commitment(0),
-(Some(commitment_slot0), 42)
+RpcBlockCommitment {
+commitment: Some(commitment_slot0),
+total_stake: 42,
+}
);
assert_eq!(
request_processor.get_block_commitment(1),
-(Some(commitment_slot1), 42)
+RpcBlockCommitment {
+commitment: Some(commitment_slot1),
+total_stake: 42,
+}
);
-assert_eq!(request_processor.get_block_commitment(2), (None, 42));
+assert_eq!(
+request_processor.get_block_commitment(2),
+RpcBlockCommitment {
+commitment: None,
+total_stake: 42,
+}
+);
}
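The tuple (Option&lt;BlockCommitment&gt;, u64) is replaced by a named struct throughout these assertions. Inferred from the field names above, RpcBlockCommitment presumably looks roughly like the following sketch; the camelCase rename is an assumption so the wire key would read "totalStake":

    use serde::{Deserialize, Serialize};

    // Sketch only: field names are taken from the test assertions above.
    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    #[serde(rename_all = "camelCase")]
    pub struct RpcBlockCommitment<T> {
        pub commitment: Option<T>,
        pub total_stake: u64,
    }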
#[test]
@@ -1956,16 +2039,18 @@ pub mod tests {
let res = io.handle_request_sync(&req, meta.clone());
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
-let (commitment, total_staked): (Option<BlockCommitment>, u64) =
-if let Response::Single(res) = result {
-if let Output::Success(res) = res {
-serde_json::from_value(res.result).unwrap()
-} else {
-panic!("Expected success");
-}
-} else {
-panic!("Expected single response");
-};
+let RpcBlockCommitment {
+commitment,
+total_stake,
+} = if let Response::Single(res) = result {
+if let Output::Success(res) = res {
+serde_json::from_value(res.result).unwrap()
+} else {
+panic!("Expected success");
+}
+} else {
+panic!("Expected single response");
+};
assert_eq!(
commitment,
block_commitment_cache
@@ -1974,14 +2059,14 @@ pub mod tests {
.get_block_commitment(0)
.cloned()
);
-assert_eq!(total_staked, 42);
+assert_eq!(total_stake, 42);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}}"#);
let res = io.handle_request_sync(&req, meta);
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
-let (commitment, total_staked): (Option<BlockCommitment>, u64) =
+let commitment_response: RpcBlockCommitment<BlockCommitment> =
if let Response::Single(res) = result {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
@@ -1991,8 +2076,8 @@ pub mod tests {
} else {
panic!("Expected single response");
};
-assert_eq!(commitment, None);
-assert_eq!(total_staked, 42);
+assert_eq!(commitment_response.commitment, None);
+assert_eq!(commitment_response.total_stake, 42);
}
#[test]
@@ -2008,6 +2093,38 @@ pub mod tests {
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0]}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_block: Option<RpcConfirmedBlock> =
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for RpcTransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
assert_eq!(transaction.message.recent_blockhash, blockhash.to_string());
assert_eq!(meta.unwrap().status, Ok(()));
} else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() {
assert_eq!(
meta.unwrap().status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
} else {
assert_eq!(meta, None);
}
}
}
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0, "binary"]}}"#
);
let res = io.handle_request_sync(&req, meta);
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
@@ -2016,20 +2133,26 @@ pub mod tests {
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
-for (transaction, result) in confirmed_block.transactions.into_iter() {
-if transaction.signatures[0] == confirmed_block_signatures[0] {
-assert_eq!(transaction.message.recent_blockhash, blockhash);
-assert_eq!(result.unwrap().status, Ok(()));
-} else if transaction.signatures[0] == confirmed_block_signatures[1] {
-assert_eq!(
-result.unwrap().status,
-Err(TransactionError::InstructionError(
-0,
-InstructionError::CustomError(1)
-))
-);
-} else {
-assert_eq!(result, None);
+for RpcTransactionWithStatusMeta { transaction, meta } in
+confirmed_block.transactions.into_iter()
+{
+if let RpcEncodedTransaction::Binary(transaction) = transaction {
+let decoded_transaction: Transaction =
+deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap();
+if decoded_transaction.signatures[0] == confirmed_block_signatures[0] {
+assert_eq!(decoded_transaction.message.recent_blockhash, blockhash);
+assert_eq!(meta.unwrap().status, Ok(()));
+} else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] {
+assert_eq!(
+meta.unwrap().status,
+Err(TransactionError::InstructionError(
+0,
+InstructionError::CustomError(1)
+))
+);
+} else {
+assert_eq!(meta, None);
+}
+}
}
}
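The "binary" case above decodes a base-58 string and then bincode-deserializes it into a Transaction. A minimal standalone sketch of that round-trip, assuming the bs58 and bincode crates plus the solana-sdk types used elsewhere in these tests:

    use solana_sdk::{
        hash::Hash,
        pubkey::Pubkey,
        signature::{Keypair, KeypairUtil},
        system_transaction,
        transaction::Transaction,
    };

    fn main() {
        let from = Keypair::new();
        let to = Pubkey::new_rand();
        let tx = system_transaction::transfer(&from, &to, 42, Hash::default());
        // Encode the way the RPC presumably does: bincode, then base-58.
        let encoded = bs58::encode(bincode::serialize(&tx).unwrap()).into_string();
        // Decode the way the test does: base-58, then bincode.
        let decoded: Transaction =
            bincode::deserialize(&bs58::decode(&encoded).into_vec().unwrap()).unwrap();
        assert_eq!(decoded.signatures[0], tx.signatures[0]);
    }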
@@ -2039,7 +2162,7 @@ pub mod tests {
let bob_pubkey = Pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8];
let RpcHandler { io, meta, .. } =
-start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone(), 0);
+start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@@ -2086,7 +2209,7 @@ pub mod tests {
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
let base_timestamp = 1576183541;
-let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree(
+let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore(
&bob_pubkey,
vec![1, 2, 3, 4, 5, 6, 7],
base_timestamp,


@@ -3,12 +3,9 @@
use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
-use jsonrpc_pubsub::typed::Subscriber;
-use jsonrpc_pubsub::{Session, SubscriptionId};
-use solana_sdk::account::Account;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::Signature;
-use solana_sdk::transaction;
+use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
+use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
+use solana_sdk::{pubkey::Pubkey, signature::Signature, transaction};
use std::sync::{atomic, Arc};
// Suppress needless_return due to
@@ -28,10 +25,10 @@ pub trait RpcSolPubSub {
)]
fn account_subscribe(
&self,
-_: Self::Metadata,
-_: Subscriber<Account>,
-_: String,
-_: Option<Confirmations>,
+meta: Self::Metadata,
+subscriber: Subscriber<RpcAccount>,
+pubkey_str: String,
+confirmations: Option<Confirmations>,
);
// Unsubscribe from account notification subscription.
@@ -40,7 +37,8 @@ pub trait RpcSolPubSub {
unsubscribe,
name = "accountUnsubscribe"
)]
-fn account_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
+fn account_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
+-> Result<bool>;
// Get notification every time account data owned by a particular program is changed
// Accepts pubkey parameter as base-58 encoded string
@@ -51,10 +49,10 @@ pub trait RpcSolPubSub {
)]
fn program_subscribe(
&self,
-_: Self::Metadata,
-_: Subscriber<(String, Account)>,
-_: String,
-_: Option<Confirmations>,
+meta: Self::Metadata,
+subscriber: Subscriber<RpcKeyedAccount>,
+pubkey_str: String,
+confirmations: Option<Confirmations>,
);
// Unsubscribe from account notification subscription.
@@ -63,7 +61,8 @@ pub trait RpcSolPubSub {
unsubscribe,
name = "programUnsubscribe"
)]
-fn program_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
+fn program_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
+-> Result<bool>;
// Get notification when signature is verified
// Accepts signature parameter as base-58 encoded string
@@ -74,10 +73,10 @@ pub trait RpcSolPubSub {
)]
fn signature_subscribe(
&self,
-_: Self::Metadata,
-_: Subscriber<transaction::Result<()>>,
-_: String,
-_: Option<Confirmations>,
+meta: Self::Metadata,
+subscriber: Subscriber<transaction::Result<()>>,
+signature_str: String,
+confirmations: Option<Confirmations>,
);
// Unsubscribe from signature notification subscription.
@@ -86,11 +85,15 @@ pub trait RpcSolPubSub {
unsubscribe,
name = "signatureUnsubscribe"
)]
-fn signature_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
+fn signature_unsubscribe(
+&self,
+meta: Option<Self::Metadata>,
+id: SubscriptionId,
+) -> Result<bool>;
// Get notification when slot is encountered
#[pubsub(subscription = "slotNotification", subscribe, name = "slotSubscribe")]
-fn slot_subscribe(&self, _: Self::Metadata, _: Subscriber<SlotInfo>);
+fn slot_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<SlotInfo>);
// Unsubscribe from slot notification subscription.
#[pubsub(
@@ -98,7 +101,7 @@ pub trait RpcSolPubSub {
unsubscribe,
name = "slotUnsubscribe"
)]
-fn slot_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
+fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
}
#[derive(Default)]
@@ -130,7 +133,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn account_subscribe(
&self,
_meta: Self::Metadata,
-subscriber: Subscriber<Account>,
+subscriber: Subscriber<RpcAccount>,
pubkey_str: String,
confirmations: Option<Confirmations>,
) {
@@ -168,7 +171,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn program_subscribe(
&self,
_meta: Self::Metadata,
-subscriber: Subscriber<(String, Account)>,
+subscriber: Subscriber<RpcKeyedAccount>,
pubkey_str: String,
confirmations: Option<Confirmations>,
) {
@@ -277,21 +280,18 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
-use jsonrpc_core::futures::sync::mpsc;
-use jsonrpc_core::Response;
+use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
-use solana_budget_program;
-use solana_budget_program::budget_instruction;
+use solana_budget_program::{self, budget_instruction};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::{Keypair, KeypairUtil};
-use solana_sdk::system_program;
-use solana_sdk::system_transaction;
-use solana_sdk::transaction::{self, Transaction};
-use std::sync::RwLock;
-use std::thread::sleep;
-use std::time::Duration;
+use solana_sdk::{
+pubkey::Pubkey,
+signature::{Keypair, KeypairUtil},
+system_program, system_transaction,
+transaction::{self, Transaction},
+};
+use std::{sync::RwLock, thread::sleep, time::Duration};
use tokio::prelude::{Async, Stream};
fn process_transaction_and_notify(
@@ -467,11 +467,11 @@ mod tests {
"method": "accountNotification",
"params": {
"result": {
"owner": budget_program_id,
"owner": budget_program_id.to_string(),
"lamports": 51,
"data": expected_data,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rent_epoch": 1,
"rentEpoch": 1,
},
"subscription": 0,
}
@@ -614,11 +614,11 @@ mod tests {
"method": "accountNotification",
"params": {
"result": {
"owner": system_program::id(),
"owner": system_program::id().to_string(),
"lamports": 100,
"data": [],
"data": "",
"executable": false,
"rent_epoch": 1,
"rentEpoch": 1,
},
"subscription": 0,
}


@@ -9,7 +9,7 @@ use jsonrpc_http_server::{
hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware,
RequestMiddlewareAction, ServerBuilder,
};
-use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
+use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::hash::Hash;
use std::{
net::SocketAddr,
@@ -91,7 +91,7 @@ impl JsonRpcService {
config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
-blocktree: Arc<Blocktree>,
+blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
genesis_hash: Hash,
ledger_path: &Path,
@@ -104,7 +104,7 @@ impl JsonRpcService {
config,
bank_forks,
block_commitment_cache,
-blocktree,
+blockstore,
storage_state,
validator_exit.clone(),
)));
@@ -204,13 +204,13 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
-let blocktree = Blocktree::open(&ledger_path).unwrap();
+let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut rpc_service = JsonRpcService::new(
rpc_addr,
JsonRpcConfig::default(),
bank_forks,
block_commitment_cache,
-Arc::new(blocktree),
+Arc::new(blockstore),
cluster_info,
Hash::default(),
&PathBuf::from("farf"),


@@ -4,14 +4,17 @@ use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{typed::Sink, SubscriptionId};
use serde::Serialize;
use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
-use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::{
+collections::HashMap,
+sync::{Arc, RwLock},
+};
pub type Confirmations = usize;
@@ -23,9 +26,9 @@ pub struct SlotInfo {
}
type RpcAccountSubscriptions =
-RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<Account>, Confirmations)>>>;
+RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcAccount>, Confirmations)>>>;
type RpcProgramSubscriptions =
-RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<(String, Account)>, Confirmations)>>>;
+RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcKeyedAccount>, Confirmations)>>>;
type RpcSignatureSubscriptions = RwLock<
HashMap<Signature, HashMap<SubscriptionId, (Sink<transaction::Result<()>>, Confirmations)>>,
>;
@@ -127,13 +130,10 @@ fn check_confirmations_and_notify<K, S, F, N, X>(
}
}
-fn notify_account<S>(result: Option<(S, Slot)>, sink: &Sink<S>, root: Slot)
-where
-S: Clone + Serialize,
-{
+fn notify_account(result: Option<(Account, Slot)>, sink: &Sink<RpcAccount>, root: Slot) {
if let Some((account, fork)) = result {
if fork >= root {
-sink.notify(Ok(account)).wait().unwrap();
+sink.notify(Ok(RpcAccount::encode(account))).wait().unwrap();
}
}
}
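RpcAccount::encode turns an SDK Account into the JSON-friendly shape the tests expect: base-58 strings for owner and data, camelCase keys. A sketch of what that type and conversion plausibly look like, inferred from the notification JSON in this diff rather than quoted from the source:

    use serde::{Deserialize, Serialize};
    use solana_sdk::account::Account;

    #[derive(Serialize, Deserialize, Clone, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct RpcAccount {
        pub lamports: u64,
        pub data: String,    // base-58 of the raw account data
        pub owner: String,   // base-58 owner pubkey
        pub executable: bool,
        pub rent_epoch: u64, // appears as "rentEpoch" on the wire
    }

    impl RpcAccount {
        pub fn encode(account: Account) -> Self {
            RpcAccount {
                lamports: account.lamports,
                data: bs58::encode(account.data).into_string(),
                owner: account.owner.to_string(),
                executable: account.executable,
                rent_epoch: account.rent_epoch,
            }
        }
    }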
@@ -147,11 +147,14 @@ where
}
}
-fn notify_program(accounts: Vec<(Pubkey, Account)>, sink: &Sink<(String, Account)>, _root: Slot) {
+fn notify_program(accounts: Vec<(Pubkey, Account)>, sink: &Sink<RpcKeyedAccount>, _root: Slot) {
for (pubkey, account) in accounts.iter() {
-sink.notify(Ok((pubkey.to_string(), account.clone())))
-.wait()
-.unwrap();
+sink.notify(Ok(RpcKeyedAccount {
+pubkey: pubkey.to_string(),
+account: RpcAccount::encode(account.clone()),
+}))
+.wait()
+.unwrap();
}
}
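Its keyed companion, used by program notifications in place of the old (String, Account) tuple, would then be roughly the following sketch (same serde derives assumed as in the RpcAccount sketch above):

    #[derive(Serialize, Deserialize, Clone, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct RpcKeyedAccount {
        pub pubkey: String,
        pub account: RpcAccount,
    }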
@@ -231,7 +234,7 @@ impl RpcSubscriptions {
pubkey: &Pubkey,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
-sink: &Sink<Account>,
+sink: &Sink<RpcAccount>,
) {
let mut subscriptions = self.account_subscriptions.write().unwrap();
add_subscription(&mut subscriptions, pubkey, confirmations, sub_id, sink);
@@ -247,7 +250,7 @@ impl RpcSubscriptions {
program_id: &Pubkey,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
-sink: &Sink<(String, Account)>,
+sink: &Sink<RpcKeyedAccount>,
) {
let mut subscriptions = self.program_subscriptions.write().unwrap();
add_subscription(&mut subscriptions, program_id, confirmations, sub_id, sink);
@@ -328,8 +331,10 @@ mod tests {
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use jsonrpc_pubsub::typed::Subscriber;
use solana_budget_program;
-use solana_sdk::signature::{Keypair, KeypairUtil};
-use solana_sdk::system_transaction;
+use solana_sdk::{
+signature::{Keypair, KeypairUtil},
+system_transaction,
+};
use tokio::prelude::{Async, Stream};
#[test]
@@ -376,7 +381,7 @@ mod tests {
let string = transport_receiver.poll();
if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0],"rent_epoch":1}},"subscription":0}}}}"#
r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"subscription":0}}}}"#
);
assert_eq!(expected, response);
}
@@ -433,7 +438,7 @@ mod tests {
let string = transport_receiver.poll();
if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0],"rent_epoch":1}}],"subscription":0}}}}"#,
r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{{"account":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"pubkey":"{:?}"}},"subscription":0}}}}"#,
alice.pubkey()
);
assert_eq!(expected, response);


@@ -1,21 +1,15 @@
-use crate::result::{Error, Result};
-use bincode::serialize_into;
-use solana_ledger::snapshot_package::{SnapshotPackage, SnapshotPackageReceiver};
-use solana_ledger::snapshot_utils::{self, TAR_ACCOUNTS_DIR, TAR_SNAPSHOTS_DIR};
-use solana_measure::measure::Measure;
-use solana_metrics::datapoint_info;
-use solana_runtime::status_cache::SlotDelta;
-use solana_sdk::transaction::Result as TransactionResult;
-use std::fs;
-use std::fs::File;
-use std::io::{BufWriter, Error as IOError, ErrorKind};
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::mpsc::RecvTimeoutError;
-use std::sync::Arc;
-use std::thread::{self, Builder, JoinHandle};
-use std::time::Duration;
-use symlink;
-use tempfile::TempDir;
+use solana_ledger::{
+snapshot_package::SnapshotPackageReceiver, snapshot_utils::archive_snapshot_package,
+};
+use std::{
+sync::{
+atomic::{AtomicBool, Ordering},
+mpsc::RecvTimeoutError,
+Arc,
+},
+thread::{self, Builder, JoinHandle},
+time::Duration,
+};
pub struct SnapshotPackagerService {
t_snapshot_packager: JoinHandle<()>,
@@ -30,12 +24,19 @@ impl SnapshotPackagerService {
if exit.load(Ordering::Relaxed) {
break;
}
-if let Err(e) = Self::run(&snapshot_package_receiver) {
-match e {
-Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
-Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-_ => info!("Error from package_snapshots: {:?}", e),
+match snapshot_package_receiver.recv_timeout(Duration::from_secs(1)) {
+Ok(mut snapshot_package) => {
+// Only package the latest
+while let Ok(new_snapshot_package) = snapshot_package_receiver.try_recv() {
+snapshot_package = new_snapshot_package;
+}
+if let Err(err) = archive_snapshot_package(&snapshot_package) {
+warn!("Failed to create snapshot archive: {}", err);
+}
+}
+Err(RecvTimeoutError::Disconnected) => break,
+Err(RecvTimeoutError::Timeout) => (),
}
})
.unwrap();
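The new loop blocks briefly for the next package and then drains the channel so only the most recent snapshot is archived. The drain-to-latest pattern in isolation, as a runnable sketch with a plain mpsc channel (names here are illustrative, not from the source):

    use std::sync::mpsc::{channel, RecvTimeoutError};
    use std::time::Duration;

    fn main() {
        let (sender, receiver) = channel::<u64>();
        for slot in 0..5 {
            sender.send(slot).unwrap();
        }
        // Wait up to a second for the next item, then drain any backlog so
        // only the newest value is processed.
        match receiver.recv_timeout(Duration::from_secs(1)) {
            Ok(mut latest) => {
                while let Ok(newer) = receiver.try_recv() {
                    latest = newer;
                }
                println!("archiving snapshot for slot {}", latest);
            }
            Err(RecvTimeoutError::Disconnected) => (),
            Err(RecvTimeoutError::Timeout) => (),
        }
    }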
@@ -44,144 +45,6 @@ impl SnapshotPackagerService {
}
}
pub fn package_snapshots(snapshot_package: &SnapshotPackage) -> Result<()> {
info!(
"Generating snapshot tarball for root {}",
snapshot_package.root
);
Self::serialize_status_cache(
&snapshot_package.slot_deltas,
&snapshot_package.snapshot_links,
)?;
let mut timer = Measure::start("snapshot_package-package_snapshots");
let tar_dir = snapshot_package
.tar_output_file
.parent()
.expect("Tar output path is invalid");
fs::create_dir_all(tar_dir)?;
// Create the staging directories
let staging_dir = TempDir::new()?;
let staging_accounts_dir = staging_dir.path().join(TAR_ACCOUNTS_DIR);
let staging_snapshots_dir = staging_dir.path().join(TAR_SNAPSHOTS_DIR);
fs::create_dir_all(&staging_accounts_dir)?;
// Add the snapshots to the staging directory
symlink::symlink_dir(
snapshot_package.snapshot_links.path(),
&staging_snapshots_dir,
)?;
// Add the AppendVecs into the compressible list
for storage in &snapshot_package.storage_entries {
storage.flush()?;
let storage_path = storage.get_path();
let output_path = staging_accounts_dir.join(
storage_path
.file_name()
.expect("Invalid AppendVec file path"),
);
// `storage_path` - The file path where the AppendVec itself is located
// `output_path` - The directory where the AppendVec will be placed in the staging directory.
let storage_path =
fs::canonicalize(storage_path).expect("Could not get absolute path for accounts");
symlink::symlink_dir(storage_path, &output_path)?;
if !output_path.is_file() {
return Err(Self::get_io_error(
"Error trying to generate snapshot archive: storage path symlink is invalid",
));
}
}
// Tar the staging directory into the archive at `archive_path`
let archive_path = tar_dir.join("new_state.tar.bz2");
let args = vec![
"jcfhS",
archive_path.to_str().unwrap(),
"-C",
staging_dir.path().to_str().unwrap(),
TAR_ACCOUNTS_DIR,
TAR_SNAPSHOTS_DIR,
];
let output = std::process::Command::new("tar").args(&args).output()?;
if !output.status.success() {
warn!("tar command failed with exit code: {}", output.status);
use std::str::from_utf8;
info!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
info!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));
return Err(Self::get_io_error(&format!(
"Error trying to generate snapshot archive: {}",
output.status
)));
}
// Once everything is successful, overwrite the previous tarball so that other validators
// can fetch this newly packaged snapshot
let metadata = fs::metadata(&archive_path)?;
fs::rename(&archive_path, &snapshot_package.tar_output_file)?;
timer.stop();
info!(
"Successfully created tarball. slot: {}, elapsed ms: {}, size={}",
snapshot_package.root,
timer.as_ms(),
metadata.len()
);
datapoint_info!(
"snapshot-package",
("slot", snapshot_package.root, i64),
("duration_ms", timer.as_ms(), i64),
("size", metadata.len(), i64)
);
Ok(())
}
fn run(snapshot_receiver: &SnapshotPackageReceiver) -> Result<()> {
let mut snapshot_package = snapshot_receiver.recv_timeout(Duration::from_secs(1))?;
// Only package the latest
while let Ok(new_snapshot_package) = snapshot_receiver.try_recv() {
snapshot_package = new_snapshot_package;
}
Self::package_snapshots(&snapshot_package)?;
Ok(())
}
fn get_io_error(error: &str) -> Error {
warn!("Snapshot Packaging Error: {:?}", error);
Error::IO(IOError::new(ErrorKind::Other, error))
}
fn serialize_status_cache(
slot_deltas: &[SlotDelta<TransactionResult<()>>],
snapshot_links: &TempDir,
) -> Result<()> {
// the status cache is stored as snapshot_path/status_cache
let snapshot_status_cache_file_path = snapshot_links
.path()
.join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILE_NAME);
let status_cache = File::create(&snapshot_status_cache_file_path)?;
// status cache writer
let mut status_cache_stream = BufWriter::new(status_cache);
let mut status_cache_serialize = Measure::start("status_cache_serialize-ms");
// write the status cache
serialize_into(&mut status_cache_stream, slot_deltas)
.map_err(|_| Self::get_io_error("serialize status cache error"))?;
status_cache_serialize.stop();
inc_new_counter_info!(
"serialize-status-cache-ms",
status_cache_serialize.as_ms() as usize
);
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.t_snapshot_packager.join()
}
@@ -190,11 +53,13 @@ impl SnapshotPackagerService {
#[cfg(test)]
mod tests {
use super::*;
-use solana_ledger::snapshot_utils;
-use solana_runtime::accounts_db::AccountStorageEntry;
+use bincode::serialize_into;
+use solana_ledger::{snapshot_package::SnapshotPackage, snapshot_utils};
+use solana_runtime::{accounts_db::AccountStorageEntry, status_cache::SlotDelta};
+use solana_sdk::transaction;
use std::{
-fs::{remove_dir_all, OpenOptions},
-io::Write,
+fs::{self, remove_dir_all, File, OpenOptions},
+io::{BufWriter, Write},
path::{Path, PathBuf},
};
use tempfile::TempDir;
@@ -262,7 +127,8 @@ mod tests {
}
// Create a packageable snapshot
-let output_tar_path = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
+let output_tar_path =
+snapshot_utils::get_snapshot_archive_path(&snapshot_package_output_path);
let snapshot_package = SnapshotPackage::new(
5,
vec![],
@@ -272,18 +138,18 @@ mod tests {
);
// Make tarball from packageable snapshot
-SnapshotPackagerService::package_snapshots(&snapshot_package).unwrap();
+snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
// before we compare, stick an empty status_cache in this dir so that the package comparison works
// This is needed since the status_cache is added by the packager and is not collected from
// the source dir for snapshots
-let slot_deltas: Vec<SlotDelta<TransactionResult<()>>> = vec![];
+let slot_deltas: Vec<SlotDelta<transaction::Result<()>>> = vec![];
let dummy_status_cache = File::create(snapshots_dir.join("status_cache")).unwrap();
let mut status_cache_stream = BufWriter::new(dummy_status_cache);
serialize_into(&mut status_cache_stream, &slot_deltas).unwrap();
status_cache_stream.flush().unwrap();
-// Check tarball is correct
-snapshot_utils::verify_snapshot_tar(output_tar_path, snapshots_dir, accounts_dir);
+// Check archive is correct
+snapshot_utils::verify_snapshot_archive(output_tar_path, snapshots_dir, accounts_dir);
}
}


@@ -10,7 +10,7 @@ use crate::{
};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
-use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
+use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{
account::Account,
@@ -177,7 +177,7 @@ impl StorageStage {
pub fn new(
storage_state: &StorageState,
bank_receiver: Receiver<Vec<Arc<Bank>>>,
-blocktree: Option<Arc<Blocktree>>,
+blockstore: Option<Arc<Blockstore>>,
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
@@ -197,12 +197,12 @@ impl StorageStage {
let mut current_key = 0;
let mut storage_slots = StorageSlots::default();
loop {
-if let Some(ref some_blocktree) = blocktree {
+if let Some(ref some_blockstore) = blockstore {
if let Err(e) = Self::process_entries(
&storage_keypair,
&storage_state_inner,
&bank_receiver,
-&some_blocktree,
+&some_blockstore,
&mut storage_slots,
&mut current_key,
slots_per_turn,
@@ -368,7 +368,7 @@ impl StorageStage {
fn process_turn(
storage_keypair: &Arc<Keypair>,
state: &Arc<RwLock<StorageStateInner>>,
-blocktree: &Arc<Blocktree>,
+blockstore: &Arc<Blockstore>,
blockhash: Hash,
slot: Slot,
slots_per_segment: u64,
@@ -431,7 +431,7 @@ impl StorageStage {
let mut statew = state.write().unwrap();
match chacha_cbc_encrypt_file_many_keys(
-blocktree,
+blockstore,
segment as u64,
statew.slots_per_segment,
&mut statew.storage_keys,
@@ -502,7 +502,7 @@ impl StorageStage {
storage_keypair: &Arc<Keypair>,
storage_state: &Arc<RwLock<StorageStateInner>>,
bank_receiver: &Receiver<Vec<Arc<Bank>>>,
-blocktree: &Arc<Blocktree>,
+blockstore: &Arc<Blockstore>,
storage_slots: &mut StorageSlots,
current_key_idx: &mut usize,
slots_per_turn: u64,
@@ -541,7 +541,7 @@ impl StorageStage {
let _ignored = Self::process_turn(
&storage_keypair,
&storage_state,
-&blocktree,
+&blockstore,
bank.last_blockhash(),
bank.slot(),
bank.slots_per_segment(),


@@ -4,7 +4,7 @@
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use crate::result::{Error, Result};
-use crate::thread_mem_usage;
+use solana_measure::thread_mem_usage;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -35,7 +35,7 @@ fn recv_loop(
if exit.load(Ordering::Relaxed) {
return Ok(());
}
-if let Ok(len) = packet::recv_from(&mut msgs, sock) {
+if let Ok(len) = packet::recv_from(&mut msgs, sock, 1) {
if len == NUM_RCVMMSGS {
num_max_received += 1;
}

Some files were not shown because too many files have changed in this diff.