Compare commits

132 Commits

16e9b4207e
b1b217f12c
575f30c54f
73d2c636f4
4d4c4b9904
7adaf329cf
ed83367141
b5ca6e8e5a
16f50729e9
c119fdf711
b2a467fa7d
cb6f14004d
952e28343e
cdfeee1854
5eb5589bb3
c1d78ddbb5
cf7c5cdb03
8c23508cd5
3ca91c316a
be3a025491
94c757013d
d6372a930c
4746902074
c6b95a8f65
fdfdf2eb39
3448842c0c
f5f1efe94a
50e0c806da
e613a0aa7e
2c54cdd07e
cbb0ed7c56
50aa099400
53c901471c
4badf63cfd
d04fd3c3ab
a8a9907ace
a7644630cc
58666543e2
a8a5d16278
01ebbe367a
2cada71151
d6ce97bcbd
3ddc92ab86
5a99e86a60
d9e1a8b492
4244a0f716
5f1d86c040
f9d9c1fcbf
7c59c105cf
a8ea9f2738
651f87a937
88f8e2f332
a2cb289503
89bd9d5b72
7edaaeb2a1
1c3ade80c2
3606d51507
281fd88ea7
ee6b625c13
4cc1b85376
f8312ce125
6a4cd02f64
50f238d900
23e3f4e8a2
27f70dfa49
72d366a84e
2da9de8861
f4288961d5
143ad436cf
0a9fbc3e4c
7aa091bf8c
91d8bfa828
c501c19750
acd55660da
855bd7d3b8
a2e9d8e0bf
81dbe3c49b
086e20f6c7
d08a810c08
400610bf6a
f759ac3a8d
558411364e
d0b5be3051
dc6da6fcca
8ae11a74fa
11f0333728
aac74d2357
508abcf4ed
6dbb6c7fe2
2f58658f61
0ec7ff5e2f
4d49820188
6e51babff9
872cf100d7
78cc4e644c
81c0152187
4779625f23
3c0b03ba84
c53f163ef5
ca35854417
ab1fda2a54
a6ec77c230
1d7894f1be
4866a1fc39
60c5e59a5e
fd93bdadf6
6089db2a07
462d0cfc6c
e6d6fc4391
092556ae5e
ad9fa54a47
2d68170747
20f3d18458
be79efe9b7
5db377f743
9c2f45a1e0
8646918d00
7c44fc3561
686403eb1d
6cf9b60a9c
aca142df16
b2582196db
85a77bec5f
e781cbf4ba
59956e4543
303417f981
fea03fdf33
e8160efc46
e0ba0d581c
36eda29fc9
2ec73db6bd
ef6ce2765e
@@ -34,7 +34,7 @@ deploy:
  - provider: GitHub
    auth_token:
      secure: JdggY+mrznklWDcV0yvetHhD9eRcNdc627q6NcZdZAJsDidYcGgZ/tgYJiXb9D1A
      secure: vQ3jMl5LQrit6+TQONA3ZgQjZ/Ej62BN2ReVb2NSOwjITHMu1131hjc3dOrMEZL6
    draft: false
    prerelease: false
    on:
@@ -33,10 +3,3 @@ source ci/env.sh
    kill -9 "$victim" || true
  done
)

# HACK: These are in our docker images, need to be removed from CARGO_HOME
# because we try to cache downloads across builds with CARGO_HOME
# cargo lacks a facility for "system" tooling, always tries CARGO_HOME first
cargo uninstall cargo-audit || true
cargo uninstall svgbob_cli || true
cargo uninstall mdbook || true
45 .mergify.yml

@@ -1,45 +0,0 @@
# Validate your changes with:
#
#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
#
# https://doc.mergify.io/
pull_request_rules:
  - name: remove outdated reviews
    conditions:
      - base=master
    actions:
      dismiss_reviews:
        changes_requested: true
  - name: set automerge label on mergify backport PRs
    conditions:
      - author=mergify[bot]
      - head~=^mergify/bp/
      - "#status-failure=0"
    actions:
      label:
        add:
          - automerge
  - name: v0.16 backport
    conditions:
      - base=master
      - label=v0.16
    actions:
      backport:
        branches:
          - v0.16
  - name: v0.17 backport
    conditions:
      - base=master
      - label=v0.17
    actions:
      backport:
        branches:
          - v0.17
  - name: v0.18 backport
    conditions:
      - base=master
      - label=v0.18
    actions:
      backport:
        branches:
          - v0.18
@@ -4,7 +4,7 @@ os:
language: rust
cache: cargo
rust:
  - 1.36.0
  - 1.35.0

install:
  - source ci/rust-version.sh
3621 Cargo.lock (generated)

File diff suppressed because it is too large
11 Cargo.toml

@@ -3,7 +3,6 @@ members = [
    "bench-exchange",
    "bench-streamer",
    "bench-tps",
    "sdk-c",
    "chacha-sys",
    "client",
    "core",
@@ -17,7 +16,6 @@ members = [
    "ledger-tool",
    "logger",
    "merkle-tree",
    "measure",
    "metrics",
    "netutil",
    "programs/bpf",
@@ -30,13 +28,9 @@ members = [
    "programs/exchange_api",
    "programs/exchange_program",
    "programs/failure_program",
    "programs/move_loader_api",
    "programs/move_loader_program",
    "programs/librapay_api",
    "programs/noop_program",
    "programs/stake_api",
    "programs/stake_program",
    "programs/stake_tests",
    "programs/storage_api",
    "programs/storage_program",
    "programs/token_api",
@@ -51,7 +45,4 @@ members = [
    "vote-signer",
    "wallet",
]

exclude = [
    "programs/bpf/rust/noop",
]
exclude = ["programs/bpf/rust/noop"]
1 bench-exchange/.gitignore (vendored)

@@ -1,4 +1,3 @@
/target/
/config/
/config-local/
/farf/
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.17.0"
version = "0.16.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,29 +12,29 @@ publish = false
bincode = "1.1.4"
bs58 = "0.2.0"
clap = "2.32.0"
env_logger = "0.6.2"
env_logger = "0.6.0"
itertools = "0.8.0"
log = "0.4.7"
log = "0.4.6"
num-derive = "0.2"
num-traits = "0.2"
rand = "0.6.5"
rayon = "1.1.0"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde_yaml = "0.8.9"
# solana-runtime = { path = "../solana/runtime"}
solana = { path = "../core", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.17.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
untrusted = "0.7.0"
solana = { path = "../core", version = "0.16.7" }
solana-client = { path = "../client", version = "0.16.7" }
solana-drone = { path = "../drone", version = "0.16.7" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.7" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.7" }
solana-logger = { path = "../logger", version = "0.16.7" }
solana-metrics = { path = "../metrics", version = "0.16.7" }
solana-netutil = { path = "../netutil", version = "0.16.7" }
solana-runtime = { path = "../runtime", version = "0.16.7" }
solana-sdk = { path = "../sdk", version = "0.16.7" }
untrusted = "0.6.2"
ws = "0.8.1"

[features]
@@ -6,10 +6,10 @@ learn how to start and interact with the exchange.

### Table of Contents
[Overview](#Overview)<br>
[Premise](#Premise)<br>
[Premiss](#Premiss)<br>
[Exchange startup](#Exchange-startup)<br>
[Order Requests](#Trade-requests)<br>
[Order Cancellations](#Trade-cancellations)<br>
[Trade requests](#Trade-requests)<br>
[Trade cancellations](#Trade-cancellations)<br>
[Trade swap](#Trade-swap)<br>
[Exchange program operations](#Exchange-program-operations)<br>
[Quotes and OHLCV](#Quotes-and-OHLCV)<br>
@@ -22,9 +22,9 @@ An exchange is a marketplace where one asset can be traded for another. This
demo demonstrates one way to host an exchange on the Solana blockchain by
emulating a currency exchange.

The assets are virtual tokens held by investors who may post order requests to
The assets are virtual tokens held by investors who may post trade requests to
the exchange. A Swapper monitors the exchange and posts swap requests for
matching orders. All the transactions can execute concurrently.
matching trade orders. All the transactions can execute concurrently.

## Premise

@@ -59,43 +59,43 @@ matching orders. All the transactions can execute concurrently.
  ratios are represented as fixed point numbers. The fixed point scaler is
  defined in
  [exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
- Order request
- Trade request
  - A Solana transaction executed by the exchange requesting the trade of one
    type of token for another. order requests are made up of the token pair,
    type of token for another. Trade requests are made up of the token pair,
    the direction of the trade, quantity of the primary token, the price ratio,
    and the two token accounts to be credited/deducted. An example trade
    request looks like "T AB 5 2" which reads "Exchange 5 A tokens to B tokens
    at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
    deducted and 10 B tokens credited to the trade initiator's token accounts.
    Successful order requests result in an order.
    Successful trade requests result in a trade order.
- Order
- Trade order
  - The result of a successful order request. orders are stored in
  - The result of a successful trade request. Trade orders are stored in
    accounts owned by the submitter of the order request. They can only be
    accounts owned by the submitter of the trade request. They can only be
    canceled by their owner but can be used by anyone in a trade swap. They
    contain the same information as the order request.
    contain the same information as the trade request.
- Price spread
  - The difference between the two matching orders. The spread is the
  - The difference between the two matching trade orders. The spread is the
    profit of the Swapper initiating the swap request.
- Swap requirements
  - Policies that result in a successful trade swap.
- Swap request
  - A request to exchange tokens between to orders
  - A request to exchange tokens between to trade orders
- Trade swap
  - A successful trade. A swap consists of two matching orders that meet
  - A successful trade. A swap consists of two matching trade orders that meet
    swap requirements. A trade swap may not wholly satisfy one or both of the
    orders in which case the orders are adjusted appropriately. As
    trade orders in which case the trade orders are adjusted appropriately. As
    long as the swap requirements are met there will be an exchange of tokens
    between accounts. Any price spread is deposited into the Swapper's profit
    account. All trade swaps are recorded in a new account for posterity.
- Investor
  - Individual investors who hold a number of tokens and wish to trade them on
    the exchange. Investors operate as Solana thin clients who own a set of
    accounts containing tokens and/or order requests. Investors post
    accounts containing tokens and/or trade requests. Investors post
    transactions to the exchange in order to request tokens and post or cancel
    order requests.
    trade requests.
- Swapper
  - An agent who facilitates trading between investors. Swappers operate as
    Solana thin clients who monitor all the orders looking for a trade
    Solana thin clients who monitor all the trade orders looking for a trade
    match. Once found, the Swapper issues a swap request to the exchange.
    Swappers are the engine of the exchange and are rewarded for their efforts by
    accumulating the price spreads of the swaps they initiate. Swappers also
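The "T AB 5 2" example above is plain fixed-point arithmetic. Below is a minimal sketch of the fulfillment math; the scaler value and function name are assumptions chosen for illustration, not the definitions in exchange_state.rs:

```rust
// Illustrative only: SCALER is an assumed fixed-point scale, not the value
// defined in exchange_state.rs.
const SCALER: u64 = 1_000;

/// Secondary tokens credited when a trade of `tokens` primary tokens at the
/// fixed-point `price` ratio is fulfilled; fractions round down, per the text.
fn tokens_credited(tokens: u64, price: u64) -> u64 {
    tokens * price / SCALER
}

fn main() {
    // "T AB 5 2": exchange 5 A tokens to B tokens at a price ratio of 1:2,
    // so 5 A tokens are deducted and 10 B tokens are credited.
    assert_eq!(tokens_credited(5, 2 * SCALER), 10);
}
```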
@@ -123,7 +123,7 @@ the investors that trades submitted after that point will be analyzed. <!--This
is not ideal, and instead investors should be able to submit trades at any time,
and the Swapper could come and go without missing a trade. One way to achieve
this is for the Swapper to read the current state of all accounts looking for all
open orders.-->
open trade orders.-->

Investors will initially query the exchange to discover their current balance
for each type of token. If the investor does not already have an account for
@@ -181,19 +181,19 @@ pub enum ExchangeInstruction {
}
```

## Order Requests
## Trade requests

When an investor decides to exchange a token of one type for another, they
submit a transaction to the Solana Blockchain containing an order request, which,
if successful, is turned into an order. orders do not expire but are
cancellable. <!-- orders should have a timestamp to enable trade
expiration --> When an order is created, tokens are deducted from a token
account and the order acts as an escrow. The tokens are held until the
order is fulfilled or canceled. If the direction is `To`, then the number
submit a transaction to the Solana Blockchain containing a trade request, which,
if successful, is turned into a trade order. Trade orders do not expire but are
cancellable. <!-- Trade orders should have a timestamp to enable trade
expiration --> When a trade order is created, tokens are deducted from a token
account and the trade order acts as an escrow. The tokens are held until the
trade order is fulfilled or canceled. If the direction is `To`, then the number
of `tokens` are deducted from the primary account, if `From` then `tokens`
multiplied by `price` are deducted from the secondary account. orders are
multiplied by `price` are deducted from the secondary account. Trade orders are
no longer valid when the number of `tokens` goes to zero, at which point they
can no longer be used. <!-- Could support refilling orders, so order
can no longer be used. <!-- Could support refilling trade orders, so trade order
accounts are refilled rather than accumulating -->

```rust
@@ -205,7 +205,7 @@ pub enum Direction {
    From,
}

pub struct OrderRequestInfo {
pub struct TradeRequestInfo {
    /// Direction of trade
    pub direction: Direction,

@@ -224,7 +224,7 @@ pub struct OrderRequestInfo {
}

pub enum ExchangeInstruction {
    /// order request
    /// Trade request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - Token account associated with this trade
@@ -233,7 +233,7 @@ pub enum ExchangeInstruction {

/// Trade accounts are populated with this structure
pub struct TradeOrderInfo {
    /// Owner of the order
    /// Owner of the trade order
    pub owner: Pubkey,
    /// Direction of the exchange
    pub direction: Direction,
@@ -252,7 +252,7 @@ pub struct TradeOrderInfo {
}
```

## Order cancellations
## Trade cancellations

An investor may cancel a trade at anytime, but only trades they own. If the
cancellation is successful, any tokens held in escrow are returned to the
@@ -260,9 +260,9 @@ account from which they came.

```rust
pub enum ExchangeInstruction {
    /// order cancellation
    /// Trade cancellation
    /// key 0 - Signer
    /// key 1 -order to cancel
    /// key 1 -Trade order to cancel
    TradeCancellation,
}
```
@@ -270,14 +270,14 @@ pub enum ExchangeInstruction {
## Trade swaps

The Swapper is monitoring the accounts assigned to the exchange program and
building a trade-order table. The order table is used to identify
matching orders which could be fulfilled. When a match is found the
building a trade-order table. The trade order table is used to identify
matching trade orders which could be fulfilled. When a match is found the
Swapper should issue a swap request. Swap requests may not satisfy the entirety
of either order, but the exchange will greedily fulfill it. Any leftover tokens
in either account will keep the order valid for further swap requests in
in either account will keep the trade order valid for further swap requests in
the future.

Matching orders are defined by the following swap requirements:
Matching trade orders are defined by the following swap requirements:

- Opposite polarity (one `To` and one `From`)
- Operate on the same token pair
@@ -379,8 +379,8 @@ pub enum ExchangeInstruction {
    /// Trade swap request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - 'To' order
    /// key 3 - `From` order
    /// key 2 - 'To' trade order
    /// key 3 - `From` trade order
    /// key 4 - Token account associated with the To Trade
    /// key 5 - Token account associated with From trade
    /// key 6 - Token account in which to deposit the Swappers profit from the swap.
@@ -391,9 +391,9 @@ pub enum ExchangeInstruction {
pub struct TradeSwapInfo {
    /// Pair swapped
    pub pair: TokenPair,
    /// `To` order
    /// `To` trade order
    pub to_trade_order: Pubkey,
    /// `From` order
    /// `From` trade order
    pub from_trade_order: Pubkey,
    /// Number of primary tokens exchanged
    pub primary_tokens: u64,
@@ -424,22 +424,22 @@ pub enum ExchangeInstruction {
    /// the exchange has a limitless number of tokens it can transfer.
    TransferRequest(Token, u64),

    /// order request
    /// Trade request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - Token account associated with this trade
    TradeRequest(TradeRequestInfo),

    /// order cancellation
    /// Trade cancellation
    /// key 0 - Signer
    /// key 1 -order to cancel
    /// key 1 -Trade order to cancel
    TradeCancellation,

    /// Trade swap request
    /// key 0 - Signer
    /// key 1 - Account in which to record the swap
    /// key 2 - 'To' order
    /// key 3 - `From` order
    /// key 2 - 'To' trade order
    /// key 3 - `From` trade order
    /// key 4 - Token account associated with the To Trade
    /// key 5 - Token account associated with From trade
    /// key 6 - Token account in which to deposit the Swappers profit from the swap.
@@ -478,3 +478,6 @@ To also see the cluster messages:

```bash
$ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
```
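The two swap requirements captured above (opposite polarity, same token pair) reduce to a simple predicate. A hedged sketch with stand-in types; the real exchange_api types and the remaining price-overlap requirements are not reproduced here:

```rust
// Assumed stand-ins for the exchange_api types; illustrative only.
#[derive(PartialEq, Clone, Copy)]
enum Direction { To, From }

#[derive(PartialEq, Clone, Copy)]
enum TokenPair { AB }

struct Order {
    direction: Direction,
    pair: TokenPair,
}

/// The first two swap requirements from the README: opposite polarity and
/// the same token pair. (The price requirements are omitted.)
fn may_match(a: &Order, b: &Order) -> bool {
    a.direction != b.direction && a.pair == b.pair
}

fn main() {
    let to = Order { direction: Direction::To, pair: TokenPair::AB };
    let from = Order { direction: Direction::From, pair: TokenPair::AB };
    assert!(may_match(&to, &from));
    assert!(!may_match(&to, &to)); // same polarity never matches
}
```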
@@ -332,7 +332,7 @@ fn do_tx_transfers<T>(

struct TradeInfo {
    trade_account: Pubkey,
    order_info: OrderInfo,
    order_info: TradeOrderInfo,
}
#[allow(clippy::too_many_arguments)]
fn swapper<T>(
@@ -509,7 +509,7 @@ fn trader<T>(
    T: Client,
{
    // TODO Hard coded for now
    let pair = AssetPair::default();
    let pair = TokenPair::AB;
    let tokens = 1;
    let price = 1000;
    let mut account_group: usize = 0;
@@ -538,7 +538,7 @@ fn trader<T>(
    } else {
        Direction::To
    };
    let order_info = OrderInfo {
    let order_info = TradeOrderInfo {
        /// Owner of the trade order
        owner: Pubkey::default(), // don't care
        direction,
@@ -646,20 +646,6 @@ where
    false
}

fn verify_funding_transfer<T: SyncClient + ?Sized>(
    client: &T,
    tx: &Transaction,
    amount: u64,
) -> bool {
    for a in &tx.message().account_keys[1..] {
        if client.get_balance(a).unwrap_or(0) >= amount {
            return true;
        }
    }

    false
}

pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
    let total = lamports * (dests.len() as u64 + 1);
    let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
@@ -717,7 +703,6 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
        .collect();

    let mut retries = 0;
    let amount = chunk[0].1[0].1;
    while !to_fund_txs.is_empty() {
        let receivers = to_fund_txs
            .iter()
@@ -746,7 +731,7 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
        let mut waits = 0;
        loop {
            sleep(Duration::from_millis(200));
            to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
            to_fund_txs.retain(|(_, tx)| !verify_transfer(client, &tx));
            if to_fund_txs.is_empty() {
                break;
            }
@@ -10,7 +10,7 @@ use std::{error, fmt};
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {
    pub pubkey: Pubkey,
    pub info: OrderInfo,
    pub info: TradeOrderInfo,
}

impl Ord for ToOrder {
@@ -26,7 +26,7 @@ impl PartialOrd for ToOrder {
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FromOrder {
    pub pubkey: Pubkey,
    pub info: OrderInfo,
    pub info: TradeOrderInfo,
}

impl Ord for FromOrder {
@@ -95,7 +95,11 @@ impl OrderBook {
    // pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
    //     Ok(())
    // }
    pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
    pub fn push(
        &mut self,
        pubkey: Pubkey,
        info: TradeOrderInfo,
    ) -> Result<(), Box<dyn error::Error>> {
        check_trade(info.direction, info.tokens, info.price)?;
        match info.direction {
            Direction::To => {
1 bench-streamer/.gitignore (vendored)

@@ -1,2 +1 @@
/target/
/farf/
@@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.17.0"
version = "0.16.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana = { path = "../core", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana = { path = "../core", version = "0.16.7" }
solana-logger = { path = "../logger", version = "0.16.7" }
solana-netutil = { path = "../netutil", version = "0.16.7" }

[features]
cuda = ["solana/cuda"]
@@ -1,5 +1,4 @@
use clap::{crate_description, crate_name, crate_version, App, Arg};
use solana::packet::PacketsRecycler;
use solana::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
@@ -17,7 +16,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut msgs = Packets::default();
    msgs.packets.resize(10, Packet::default());
    for w in msgs.packets.iter_mut() {
    for w in &mut msgs.packets {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(&addr);
    }
@@ -75,7 +74,6 @@ fn main() -> Result<()> {

    let mut read_channels = Vec::new();
    let mut read_threads = Vec::new();
    let recycler = PacketsRecycler::default();
    for _ in 0..num_sockets {
        let read = solana_netutil::bind_to(port, false).unwrap();
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -85,13 +83,7 @@ fn main() -> Result<()> {

        let (s_reader, r_reader) = channel();
        read_channels.push(r_reader);
        read_threads.push(receiver(
            Arc::new(read),
            &exit,
            s_reader,
            recycler.clone(),
            "bench-streamer-test",
        ));
        read_threads.push(receiver(Arc::new(read), &exit, s_reader));
    }

    let t_producer1 = producer(&addr, exit.clone());
1 bench-tps/.gitignore (vendored)

@@ -1,4 +1,3 @@
/target/
/config/
/config-local/
/farf/
@@ -2,30 +2,27 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.17.0"
version = "0.16.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
bincode = "1.1.4"
clap = "2.33.0"
log = "0.4.7"
log = "0.4.6"
rayon = "1.1.0"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-measure = { path = "../measure", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
solana = { path = "../core", version = "0.16.7" }
solana-client = { path = "../client", version = "0.16.7" }
solana-drone = { path = "../drone", version = "0.16.7" }
solana-logger = { path = "../logger", version = "0.16.7" }
solana-metrics = { path = "../metrics", version = "0.16.7" }
solana-netutil = { path = "../netutil", version = "0.16.7" }
solana-runtime = { path = "../runtime", version = "0.16.7" }
solana-sdk = { path = "../sdk", version = "0.16.7" }

[features]
cuda = ["solana/cuda"]
@@ -1,16 +1,13 @@
use solana_metrics;

use bincode;
use log::*;
use rayon::prelude::*;
use solana::gen_keys::GenKeys;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_drone::drone::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_sdk::client::Client;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
@@ -27,8 +24,6 @@ use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;

use solana_librapay_api::librapay_transaction;

pub const MAX_SPENDS_PER_TX: u64 = 4;
pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;

@@ -48,7 +43,6 @@ pub struct Config {
    pub duration: Duration,
    pub tx_count: usize,
    pub sustained: bool,
    pub use_move: bool,
}

impl Default for Config {
@@ -60,7 +54,6 @@ impl Default for Config {
            duration: Duration::new(std::u64::MAX, 0),
            tx_count: 500_000,
            sustained: false,
            use_move: false,
        }
    }
}
@@ -70,8 +63,6 @@ pub fn do_bench_tps<T>(
    config: Config,
    gen_keypairs: Vec<Keypair>,
    keypair0_balance: u64,
    program_id: &Pubkey,
    libra_mint_id: &Pubkey,
) -> u64
where
    T: 'static + Client + Send + Sync,
@@ -82,7 +73,6 @@ where
        thread_batch_sleep_ms,
        duration,
        tx_count,
        use_move,
        sustained,
    } = config;

@@ -175,9 +165,6 @@ where
                &keypairs[len..],
                threads,
                reclaim_lamports_back_to_source_account,
                use_move,
                &program_id,
                &libra_mint_id,
            );
            // In sustained mode overlap the transfers with generation
            // this has higher average performance but lower peak performance
@@ -241,9 +228,6 @@ fn generate_txs(
    dest: &[Keypair],
    threads: usize,
    reclaim: bool,
    use_move: bool,
    libra_pay_program_id: &Pubkey,
    libra_mint_id: &Pubkey,
) {
    let tx_count = source.len();
    println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@@ -257,25 +241,10 @@ fn generate_txs(
    let transactions: Vec<_> = pairs
        .par_iter()
        .map(|(id, keypair)| {
            if use_move {
                (
                    librapay_transaction::transfer(
                        libra_pay_program_id,
                        libra_mint_id,
                        &id,
                        &id,
                        &keypair.pubkey(),
                        1,
                        *blockhash,
                    ),
                    timestamp(),
                )
            } else {
                (
                    system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
                    timestamp(),
                )
            }
            (
                system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
                timestamp(),
            )
        })
        .collect();

@@ -423,10 +392,13 @@ pub fn fund_keys<T: Client>(
    let mut to_fund_txs: Vec<_> = chunk
        .par_iter()
        .map(|(k, m)| {
            let tx = Transaction::new_unsigned_instructions(
                system_instruction::transfer_many(&k.pubkey(), &m),
            );
            (k.clone(), tx)
            (
                k.clone(),
                Transaction::new_unsigned_instructions(system_instruction::transfer_many(
                    &k.pubkey(),
                    &m,
                )),
            )
        })
        .collect();
@@ -614,11 +586,7 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
    i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}

pub fn generate_keypairs(
    seed_keypair: &Keypair,
    count: u64,
    use_move: bool,
) -> (Vec<Keypair>, u64) {
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
    let mut rnd = GenKeys::new(seed);
@@ -631,149 +599,18 @@
        delta *= MAX_SPENDS_PER_TX;
        total_keys += delta;
    }
    if use_move {
        // Move funding is a naive loop that doesn't
        // need aligned number of keys.
        (rnd.gen_n_keypairs(count), extra)
    } else {
        (rnd.gen_n_keypairs(total_keys), extra)
    }
}

fn fund_move_keys<T: Client>(
    client: &T,
    funding_key: &Keypair,
    keypairs: &[Keypair],
    total: u64,
    libra_pay_program_id: &Pubkey,
    libra_mint_program_id: &Pubkey,
    libra_mint_key: &Keypair,
) {
    let (mut blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();

    info!("creating the libra funding account..");
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(
        funding_key,
        &libra_funding_key.pubkey(),
        1,
        blockhash,
    );
    let sig = client
        .async_send_transaction(tx)
        .expect("create_account in generate_and_fund_keypairs");
    client.poll_for_signature(&sig).unwrap();

    info!("minting to funding keypair");
    let tx = librapay_transaction::mint_tokens(
        &libra_mint_program_id,
        funding_key,
        libra_mint_key,
        &libra_funding_key.pubkey(),
        total,
        blockhash,
    );
    let sig = client
        .async_send_transaction(tx)
        .expect("create_account in generate_and_fund_keypairs");
    client.poll_for_signature(&sig).unwrap();

    info!("creating move accounts.. {}", keypairs.len());
    let create_len = 8;
    let mut funding_time = Measure::start("funding_time");
    for (i, keys) in keypairs.chunks(create_len).enumerate() {
        let mut tx_send = Measure::start("poll");
        let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
        let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
        let ser_size = bincode::serialized_size(&tx).unwrap();
        let sig = client
            .async_send_transaction(tx)
            .expect("create_account in generate_and_fund_keypairs");
        tx_send.stop();
        let mut poll = Measure::start("poll");
        client.poll_for_signature(&sig).unwrap();
        poll.stop();
        if i % 10 == 0 {
            blockhash = client.get_recent_blockhash().unwrap().0;
            info!(
                "size: {} created {} accounts of {} sig: {}us send: {}us",
                ser_size,
                i,
                (keypairs.len() / create_len),
                poll.as_us(),
                tx_send.as_us()
            );
        }
    }
    funding_time.stop();
    info!("funding accounts {}ms", funding_time.as_ms());
    let mut sigs = vec![];
    let tx_count = keypairs.len();
    let amount = total / (tx_count as u64);
    for (i, key) in keypairs[..tx_count].iter().enumerate() {
        let tx = librapay_transaction::transfer(
            libra_pay_program_id,
            &libra_mint_key.pubkey(),
            funding_key,
            &libra_funding_key,
            &key.pubkey(),
            amount,
            blockhash,
        );

        let sig = client
            .async_send_transaction(tx.clone())
            .expect("create_account in generate_and_fund_keypairs");
        sigs.push((sig, key));

        if i % 50 == 0 {
            blockhash = client.get_recent_blockhash().unwrap().0;
        }
    }

    for (i, (sig, key)) in sigs.iter().enumerate() {
        let mut times = 0;
        loop {
            match client.poll_for_signature(&sig) {
                Ok(_) => {
                    break;
                }
                Err(e) => {
                    info!("e :{:?} waiting times: {} sig: {}", e, times, sig);
                    times += 1;
                    sleep(Duration::from_secs(1));
                }
            }
        }
        times = 0;
        loop {
            let balance = librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
            if amount != balance {
                info!("i: {} balance: {} times: {}", i, balance, times);
                times += 1;
                sleep(Duration::from_secs(1));
            } else {
                break;
            }
        }
        if i % 10 == 0 {
            info!("funding {} of {}", i, tx_count);
        }
    }
    info!("done..");
    (rnd.gen_n_keypairs(total_keys), extra)
}

pub fn generate_and_fund_keypairs<T: Client>(
    client: &T,
    drone_addr: Option<SocketAddr>,
    funding_key: &Keypair,
    funding_pubkey: &Keypair,
    tx_count: usize,
    lamports_per_account: u64,
    libra_keys: Option<(&Pubkey, &Pubkey, &Arc<Keypair>)>,
) -> Result<(Vec<Keypair>, u64)> {
    info!("Creating {} keypairs...", tx_count * 2);
    let (mut keypairs, extra) =
        generate_keypairs(funding_key, tx_count as u64 * 2, libra_keys.is_some());
    let (mut keypairs, extra) = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
    info!("Get lamports...");

    // Sample the first keypair, see if it has lamports, if so then resume.
@@ -783,34 +620,23 @@ pub fn generate_and_fund_keypairs<T: Client>(
        .unwrap_or(0);

    if lamports_per_account > last_keypair_balance {
        let (_blockhash, fee_calculator) = client.get_recent_blockhash().unwrap();
        let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
        let account_desired_balance =
            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
        let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
        if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
            airdrop_lamports(client, &drone_addr.unwrap(), funding_key, total)?;
        }
        if let Some((libra_pay_program_id, libra_mint_program_id, libra_mint_key)) = libra_keys {
            fund_move_keys(
                client,
                funding_key,
                &keypairs,
                total,
                libra_pay_program_id,
                libra_mint_program_id,
                libra_mint_key,
            );
        } else {
            fund_keys(
                client,
                funding_key,
                &keypairs,
                total,
                fee_calculator.max_lamports_per_signature,
                extra,
            );
        if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
        }
        info!("adding more lamports {}", account_desired_balance);
        fund_keys(
            client,
            funding_pubkey,
            &keypairs,
            total,
            fee_calculator.max_lamports_per_signature,
            extra,
        );
    }

    // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
@@ -827,7 +653,6 @@ mod tests {
    use solana::validator::ValidatorConfig;
    use solana_client::thin_client::create_client;
    use solana_drone::drone::run_local_drone;
    use solana_librapay_api::{upload_mint_program, upload_payment_program};
    use solana_runtime::bank::Bank;
    use solana_runtime::bank_client::BankClient;
    use solana_sdk::client::SyncClient;
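The funding total computed in generate_and_fund_keypairs above is easy to check by hand. A standalone restatement with made-up example values; the fee and the key counts here are assumptions for illustration, not values taken from the code:

```rust
// Worked restatement of the funding arithmetic above, with example numbers.
fn main() {
    let lamports_per_account: u64 = 100;
    let last_keypair_balance: u64 = 0;
    let max_lamports_per_signature: u64 = 10; // assumed fee
    let keypairs: u64 = 200; // tx_count * 2
    let extra: u64 = 56; // assumed size-alignment surplus from generate_keypairs

    let account_desired_balance =
        lamports_per_account - last_keypair_balance + max_lamports_per_signature;
    let extra_fees = extra * max_lamports_per_signature;
    let total = account_desired_balance * (1 + keypairs) + extra_fees;

    assert_eq!(total, 110 * 201 + 560); // 22_670 lamports to airdrop
}
```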
@@ -850,93 +675,47 @@
        assert_eq!(should_switch_directions(20, 101), false);
    }

    fn test_bench_tps_local_cluster(config: Config) {
    #[test]
    fn test_bench_tps_local_cluster() {
        solana_logger::setup();
        const NUM_NODES: usize = 1;
        let cluster = LocalCluster::new(&ClusterConfig {
            node_stakes: vec![999_990; NUM_NODES],
            cluster_lamports: 200_000_000,
            cluster_lamports: 2_000_000,
            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
            ..ClusterConfig::default()
        });

        let drone_keypair = Keypair::new();
        cluster.transfer(
            &cluster.funding_keypair,
            &drone_keypair.pubkey(),
            100_000_000,
        );
        cluster.transfer(&cluster.funding_keypair, &drone_keypair.pubkey(), 1_000_000);

        let (addr_sender, addr_receiver) = channel();
        run_local_drone(drone_keypair, addr_sender, None);
        let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

        let mut config = Config::default();
        config.tx_count = 100;
        config.duration = Duration::from_secs(5);

        let client = create_client(
            (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
            FULLNODE_PORT_RANGE,
        );

        let (libra_mint_id, libra_pay_program_id) = if config.use_move {
            let libra_mint_id = upload_mint_program(&drone_keypair, &client);
            let libra_pay_program_id = upload_payment_program(&drone_keypair, &client);
            (libra_mint_id, libra_pay_program_id)
        } else {
            (Pubkey::default(), Pubkey::default())
        };

        let (addr_sender, addr_receiver) = channel();
        run_local_drone(drone_keypair, addr_sender, None);
        let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

        let lamports_per_account = 100;

        let libra_keys = if config.use_move {
            Some((
                &libra_pay_program_id,
                &libra_mint_id,
                &cluster.libra_mint_keypair,
            ))
        } else {
            None
        };

        let (keypairs, _keypair_balance) = generate_and_fund_keypairs(
            &client,
            Some(drone_addr),
            &config.id,
            config.tx_count,
            lamports_per_account,
            libra_keys,
        )
        .unwrap();

        let total = do_bench_tps(
            vec![client],
            config,
            keypairs,
            0,
            &libra_pay_program_id,
            &cluster.libra_mint_keypair.pubkey(),
        );
        let total = do_bench_tps(vec![client], config, keypairs, 0);
        assert!(total > 100);
    }

    #[test]
    fn test_bench_tps_local_cluster_solana() {
        let mut config = Config::default();
        config.tx_count = 100;
        config.duration = Duration::from_secs(10);

        test_bench_tps_local_cluster(config);
    }

    #[test]
    #[ignore]
    fn test_bench_tps_local_cluster_move() {
        let mut config = Config::default();
        config.tx_count = 100;
        config.duration = Duration::from_secs(10);
        config.use_move = true;

        test_bench_tps_local_cluster(config);
    }

    #[test]
    fn test_bench_tps_bank_client() {
        let (genesis_block, id) = create_genesis_block(10_000);
@@ -949,17 +728,9 @@ mod tests {
        config.duration = Duration::from_secs(5);

        let (keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, None)
                .unwrap();
            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20).unwrap();

        do_bench_tps(
            clients,
            config,
            keypairs,
            0,
            &Pubkey::default(),
            &Pubkey::default(),
        );
        do_bench_tps(clients, config, keypairs, 0);
    }

    #[test]
@@ -971,7 +742,7 @@ mod tests {
        let lamports = 20;

        let (keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, None).unwrap();
            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();

        for kp in &keypairs {
            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
@@ -989,7 +760,7 @@ mod tests {
        let lamports = 20;

        let (keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, None).unwrap();
            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();

        let max_fee = client
            .get_recent_blockhash()
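The direction-switch predicate from earlier in this file is small enough to verify by hand. Restated standalone, with the test case shown above and two additional spot checks:

```rust
// Restated verbatim from the diff above for a worked check.
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
    i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}

fn main() {
    // With 20 lamports per account the quarter-step is 5, and switching
    // begins once i reaches 15 (three quarters of 20).
    assert!(should_switch_directions(20, 15));
    assert!(should_switch_directions(20, 100));
    assert!(!should_switch_directions(20, 101)); // matches the test assert above
}
```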
@@ -22,7 +22,6 @@ pub struct Config {
    pub write_to_client_file: bool,
    pub read_from_client_file: bool,
    pub target_lamports_per_signature: u64,
    pub use_move: bool,
}

impl Default for Config {
@@ -41,7 +40,6 @@ impl Default for Config {
            write_to_client_file: false,
            read_from_client_file: false,
            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
            use_move: false,
        }
    }
}
@@ -102,11 +100,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("use-move")
                .long("use-move")
                .help("Use Move language transactions to perform transfers."),
        )
        .arg(
            Arg::with_name("tx_count")
                .long("tx_count")
@@ -218,7 +211,5 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
        args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
    }

    args.use_move = matches.is_present("use-move");

    args
}
@@ -6,7 +6,6 @@ use crate::bench::{
};
use solana::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashMap;
use std::fs::File;
@@ -38,11 +37,10 @@ fn main() {
        write_to_client_file,
        read_from_client_file,
        target_lamports_per_signature,
        use_move,
    } = cli_config;

    if write_to_client_file {
        let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2, use_move);
        let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
        let num_accounts = keypairs.len() as u64;
        let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
@@ -105,7 +103,6 @@ fn main() {
        &id,
        tx_count,
        NUM_LAMPORTS_PER_ACCOUNT,
        None,
    )
    .unwrap_or_else(|e| {
        eprintln!("Error could not fund keys: {:?}", e);
@@ -120,15 +117,7 @@ fn main() {
        duration,
        tx_count,
        sustained,
        use_move,
    };

    do_bench_tps(
        vec![client],
        config,
        keypairs,
        keypair_balance,
        &Pubkey::new_rand(),
        &Pubkey::new_rand(),
    );
    do_bench_tps(vec![client], config, keypairs, keypair_balance);
}
@@ -1,18 +0,0 @@
        +------------+
        | Bank-Merkle|
        +------------+
           ^    ^
          /      \
+-----------------+ +-------------+
| Bank-Diff-Merkle| | Block-Merkle|
+-----------------+ +-------------+
       ^    ^
      /      \
+------+ +--------------------------+
| Hash | | Previous Bank-Diff-Merkle|
+------+ +--------------------------+
    ^         ^
   /           \
+---------------+ +---------------+
| Hash(Account1)| | Hash(Account2)|
+---------------+ +---------------+
@@ -1,19 +0,0 @@
                +---------------+
                | Block-Merkle  |
                +---------------+
                     ^     ^
                    /       \
          +-------------+ +-------------+
          | Entry-Merkle| | Entry-Merkle|
          +-------------+ +-------------+
               ^     ^
              /       \
         +-------+ +-------+
         | Hash  | | Hash  |
         +-------+ +-------+
          ^   ^     ^   ^
         /    |     |    \
+-----------------+ +-----------------+ +-----------------+ +---+
| Hash(T1, status)| | Hash(T2, status)| | Hash(T3, status)| | 0 |
+-----------------+ +-----------------+ +-----------------+ +---+
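The deleted diagrams above draw Merkle trees whose leaves are hashes of accounts or of (transaction, status) pairs, padded with a zero leaf when a level has an odd count. A toy sketch of that pairwise reduction, using a placeholder hasher rather than the ledger's actual hash function:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy hash helper; the real ledger uses a cryptographic hash, not this.
fn h(data: &[u64]) -> u64 {
    let mut s = DefaultHasher::new();
    data.hash(&mut s);
    s.finish()
}

// Pairwise Merkle reduction as drawn above: odd levels are padded with a
// zero leaf (the `| 0 |` box in the Block-Merkle diagram).
fn merkle_root(mut leaves: Vec<u64>) -> u64 {
    while leaves.len() > 1 {
        if leaves.len() % 2 == 1 {
            leaves.push(0);
        }
        leaves = leaves.chunks(2).map(h).collect();
    }
    leaves[0]
}

fn main() {
    // Leaves are Hash(transaction, status) pairs, per the figure.
    let txs = [(1u64, 0u64), (2, 0), (3, 0)];
    let leaves: Vec<u64> = txs.iter().map(|&(t, s)| h(&[t, s])).collect();
    println!("entry root: {:x}", merkle_root(leaves));
}
```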
@@ -3,4 +3,4 @@ set -e

cd "$(dirname "$0")"

make -j"$(nproc)" test
make -j"$(nproc)"
@@ -4,14 +4,11 @@ MD_SRCS=$(wildcard src/*.md)

SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg) $(MSC_SRCS:art/%.msc=src/img/%.svg)

TARGET=html/index.html
TEST_STAMP=src/tests.ok
all: html/index.html

all: $(TARGET)
test: src/tests.ok

test: $(TEST_STAMP)

open: $(TEST_STAMP)
open: all
	mdbook build --open

watch: $(SVG_IMGS)
@@ -29,11 +26,11 @@ src/%.md: %.md
	@mkdir -p $(@D)
	@cp $< $@

$(TEST_STAMP): $(TARGET)
src/tests.ok: $(SVG_IMGS) $(MD_SRCS)
	mdbook test
	touch $@

$(TARGET): $(SVG_IMGS) $(MD_SRCS)
html/index.html: src/tests.ok
	mdbook build

clean:
@@ -30,12 +30,8 @@
- [Blocktree](blocktree.md)
- [Gossip Service](gossip.md)
- [The Runtime](runtime.md)

- [Anatomy of a Transaction](transaction.md)

- [API Reference](api-reference.md)
- [Transaction](transaction-api.md)
- [Instruction](instruction-api.md)
- [Blockstreamer](blockstreamer.md)
- [JSON RPC API](jsonrpc-api.md)
- [JavaScript API](javascript-api.md)
@@ -59,21 +55,18 @@
- [Economic Design MVP](ed_mvp.md)
- [References](ed_references.md)
- [Cluster Test Framework](cluster-test-framework.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Validator](validator-proposal.md)
- [Simple Payment and State Verification](simple-payment-and-state-verification.md)
- [Cross-Program Invocation](cross-program-invocation.md)

- [Implemented Design Proposals](implemented-proposals.md)
- [Blocktree](blocktree.md)
- [Cluster Software Installation and Updates](installer.md)
- [Deterministic Transaction Fees](transaction-fees.md)
- [Tower BFT](tower-bft.md)
- [Fork Selection](fork-selection.md)
- [Leader-to-Leader Transition](leader-leader-transition.md)
- [Leader-to-Validator Transition](leader-validator-transition.md)
- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
- [Persistent Account Storage](persistent-account-storage.md)
- [Reliable Vote Transmission](reliable-vote-transmission.md)
- [Repair Service](repair-service.md)
- [Testing Programs](testing-programs.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Embedding the Move Langauge](embedding-move.md)
- [Testing Programs](testing-programs.md)
@@ -4,7 +4,7 @@ A validator votes on a PoH hash for two purposes. First, the vote indicates it
believes the ledger is valid up until that point in time. Second, since many
valid forks may exist at a given height, the vote also indicates exclusive
support for the fork. This document describes only the former. The latter is
described in [Tower BFT](tower-bft.md).
described in [fork selection](fork-selection.md).

## Current Design

@@ -50,11 +50,12 @@ log the time since the NewBlock transaction was submitted.

### Finality and Payouts

[Tower BFT](tower-bft.md) is the proposed fork selection algorithm. It proposes
that payment to miners be postponed until the *stack* of validator votes reaches
a certain depth, at which point rollback is not economically feasible. The vote
program may therefore implement Tower BFT. Vote instructions would need to
reference a global Tower account so that it can track cross-block state.
Locktower is the proposed [fork selection](fork-selection.md) algorithm. It
proposes that payment to miners be postponed until the *stack* of validator
votes reaches a certain depth, at which point rollback is not economically
feasible. The vote program may therefore implement locktower. Vote instructions
would need to reference a global locktower account so that it can track
cross-block state.

## Challenges
@@ -1,111 +0,0 @@
|
||||
# Cross-Program Invocation
|
||||
|
||||
## Problem
|
||||
|
||||
In today's implementation a client can create a transaction that modifies two
|
||||
accounts, each owned by a separate on-chain program:
|
||||
|
||||
```rust,ignore
|
||||
let message = Message::new(vec![
|
||||
token_instruction::pay(&alice_pubkey),
|
||||
acme_instruction::launch_missiles(&bob_pubkey),
|
||||
]);
|
||||
client.send_message(&[&alice_keypair, &bob_keypair], &message);
|
||||
```
|
||||
|
||||
The current implementation does not, however, allow the `acme` program to
|
||||
conveniently invoke `token` instructions on the client's behalf:
|
||||
|
||||
```rust,ignore
|
||||
let message = Message::new(vec![
|
||||
acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
|
||||
]);
|
||||
client.send_message(&[&alice_keypair, &bob_keypair], &message);
|
||||
```
|
||||
|
||||
Currently, there is no way to create instruction `pay_and_launch_missiles` that executes
|
||||
`token_instruction::pay` from the `acme` program. The workaround is to extend the
|
||||
`acme` program with the implementation of the `token` program, and create `token`
|
||||
accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify.
|
||||
With that workaround, `acme` can modify token-like accounts created by the `acme`
|
||||
program, but not token accounts created by the `token` program.
|
||||
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
The goal of this design is to modify Solana's runtime such that an on-chain
|
||||
program can invoke an instruction from another program.
|
||||
|
||||
Given two on-chain programs `token` and `acme`, each implementing instructions
|
||||
`pay()` and `launch_missiles()` respectively, we would ideally like to implement
|
||||
the `acme` module with a call to a function defined in the `token` module:
|
||||
|
||||
```rust,ignore
|
||||
use token;
|
||||
|
||||
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||
...
|
||||
}
|
||||
|
||||
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||
token::pay(&keyed_accounts[1..])?;
|
||||
|
||||
launch_missiles(keyed_accounts)?;
|
||||
}
|
||||
```
|
||||
|
||||
The above code would require that the `token` crate be dynamically linked,
|
||||
so that a custom linker could intercept calls and validate accesses to
|
||||
`keyed_accounts`. That is, even though the client intends to modify both
|
||||
`token` and `acme` accounts, only `token` program is permitted to modify
|
||||
the `token` account, and only the `acme` program is permitted to modify
|
||||
the `acme` account.
|
||||
|
||||
Backing off from that ideal cross-program call, a slightly more
|
||||
verbose solution is to expose token's existing `process_instruction()`
|
||||
entrypoint to the acme program:
|
||||
|
||||
```rust,ignore
|
||||
use token_instruction;
|
||||
|
||||
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||
...
|
||||
}
|
||||
|
||||
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||
let alice_pubkey = keyed_accounts[1].key;
|
||||
let instruction = token_instruction::pay(&alice_pubkey);
|
||||
process_instruction(&instruction)?;
|
||||
|
||||
launch_missiles(keyed_accounts)?;
|
||||
}
|
||||
```
|
||||
|
||||
where `process_instruction()` is built into Solana's runtime and responsible
|
||||
for routing the given instruction to the `token` program via the instruction's
|
||||
`program_id` field. Before invoking `pay()`, the runtime must also ensure that
|
||||
`acme` didn't modify any accounts owned by `token`. It does this by calling
|
||||
`runtime::verify_instruction()` and then afterward updating all the `pre_*`
|
||||
variables to tentatively commit `acme`'s account modifications. After `pay()`
|
||||
completes, the runtime must again ensure that `token` didn't modify any
|
||||
accounts owned by `acme`. It should call `verify_instruction()` again, but this
|
||||
time with the `token` program ID. Lastly, after `pay_and_launch_missiles()`
|
||||
completes, the runtime must call `verify_instruction()` one more time, where it
|
||||
normally would, but using all updated `pre_*` variables. If executing
|
||||
`pay_and_launch_missiles()` up to `pay()` made no invalid account changes,
|
||||
`pay()` made no invalid changes, and executing from `pay()` until
|
||||
`pay_and_launch_missiles()` returns made no invalid changes, then the runtime
|
||||
can transitively assume `pay_and_launch_missiles()` as whole made no invalid
|
||||
account changes, and therefore commit all account modifications.
|
||||
|
||||
### Setting `KeyedAccount.is_signer`
|
||||
|
||||
When `process_instruction()` is invoked, the runtime must create a new
|
||||
`KeyedAccounts` parameter using the signatures from the *original* transaction
|
||||
data. Since the `token` program is immutable and existed on-chain prior to the
|
||||
`acme` program, the runtime can safely treat the transaction signature as a
|
||||
signature of a transaction with a `token` instruction. When the runtime sees
|
||||
the given instruction references `alice_pubkey`, it looks up the key in the
|
||||
transaction to see if that key corresponds to a transaction signature. In this
|
||||
case it does and so sets `KeyedAccount.is_signer`, thereby authorizing the
|
||||
`token` program to modify Alice's account.
|
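As a sketch, the lookup reduces to checking whether the referenced pubkey sits among the transaction's signing keys (field names follow the transaction format described later in this document):

```rust,ignore
// Sketch: a pubkey is a signer iff it is one of the first
// `num_required_signatures` keys in the transaction's `account_keys`,
// since the signature at index `i` corresponds to `account_keys[i]`.
fn is_signer(tx: &Transaction, pubkey: &Pubkey) -> bool {
    tx.message
        .account_keys
        .iter()
        .take(tx.signatures.len())
        .any(|key| key == pubkey)
}
```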
@@ -1,66 +0,0 @@
|
||||
# Embedding the Move Language
|
||||
|
||||
## Problem
|
||||
|
||||
Solana enables developers to write on-chain programs in general purpose
|
||||
programming languages such as C or Rust, but those programs contain
|
||||
Solana-specific mechanisms. For example, there isn't another chain that asks
|
||||
developers to create a Rust module with a `process_instruction(KeyedAccounts)`
|
||||
function. Whenever practical, Solana should offer dApp developers more portable
|
||||
options.
|
||||
|
||||
Until just recently, no popular blockchain offered a language that could expose
|
||||
the value of Solana's massively parallel [runtime](runtime.md). Solidity
|
||||
contracts, for example, do not separate references to shared data from contract
|
||||
code, and therefore need to be executed serially to ensure deterministic
|
||||
behavior. In practice we see that the most aggressively optimized EVM-based
|
||||
blockchains all seem to peak out around 1,200 TPS - a small fraction of what
|
||||
Solana can do. The Libra project, on the other hand, designed an on-chain
|
||||
programming language called Move that is more suitable for parallel execution.
|
||||
Like Solana's runtime, Move programs depend on accounts for all shared state.
|
||||
|
||||
The biggest design difference between Solana's runtime and Libra's Move VM is
|
||||
how they manage safe invocations between modules. Solana took an operating
|
||||
systems approach and Libra took the domain-specific language approach. In the
|
||||
runtime, a module must trap back into the runtime to ensure the caller's module
|
||||
did not write to data owned by the callee. Likewise, when the callee completes,
|
||||
it must again trap back to the runtime to ensure the callee did not write to
|
||||
data owned by the caller. Move, on the other hand, includes an advanced type
|
||||
system that allows these checks to be run by its bytecode verifier. Because
|
||||
Move bytecode can be verified, the cost of verification is paid just once, at
|
||||
the time the module is loaded on-chain. In the runtime, the cost is paid each
|
||||
time a transaction crosses between modules. The difference is similar in spirit
|
||||
to the difference between a dynamically-typed language like Python versus a
|
||||
statically-typed language like Java. Solana's runtime allows dApps to be
|
||||
written in general purpose programming languages, but that comes with the cost
|
||||
of runtime checks when jumping between programs.
|
||||
|
||||
This proposal attempts to define a way to embed the Move VM such that:
|
||||
|
||||
* cross-module invocations within Move do not require the runtime's
|
||||
cross-program runtime checks
|
||||
* Move programs can leverage functionality in other Solana programs and vice
|
||||
versa
|
||||
* Solana's runtime parallelism is exposed to batches of Move and non-Move
|
||||
transactions
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
### Move VM as a Solana loader
|
||||
|
||||
The Move VM shall be embedded as a Solana loader under the identifier
|
||||
`MOVE_PROGRAM_ID`, so that Move modules can be marked as `executable` with the
|
||||
VM as its `owner`. This will allow modules to load module dependencies, as well
|
||||
as allow for parallel execution of Move scripts.
|
||||
|
||||
All data accounts owned by Move modules must set their owners to the loader,
|
||||
`MOVE_PROGRAM_ID`. Since Move modules encapsulate their account data in the
|
||||
same way Solana programs encapsulate theirs, the Move module owner should be
|
||||
embedded in the account data. The runtime will grant write access to the Move
|
||||
VM, and Move grants access to the module accounts.
|
||||
|
||||
### Interacting with Solana programs
|
||||
|
||||
To invoke instructions in non-Move programs, Solana would need to extend the
|
||||
Move VM with a `process_instruction()` system call. It would work the same as
|
||||
`process_instruction()` in Rust BPF programs.
|
@@ -55,7 +55,7 @@ Validators can ignore forks at other points (e.g. from the wrong leader), or
|
||||
slash the leader responsible for the fork.
|
||||
|
||||
Validators vote based on a greedy choice to maximize their reward described in
|
||||
[Tower BFT](tower-bft.md).
|
||||
[forks selection](fork-selection.md).
|
||||
|
||||
### Validator's View
|
||||
|
||||
|
@@ -1,7 +1,7 @@
|
||||
# Tower BFT
|
||||
# Fork Selection
|
||||
|
||||
This design describes Solana's *Tower BFT* algorithm. It addresses the
|
||||
following problems:
|
||||
This design describes a *Fork Selection* algorithm. It addresses the following
|
||||
problems:
|
||||
|
||||
* Some forks may not end up accepted by the super-majority of the cluster, and
|
||||
voters need to recover from voting on such forks.
|
@@ -34,8 +34,8 @@ Nodes send push messages to `PUSH_FANOUT` push peers.
|
||||
|
||||
Upon receiving a push message, a node examines the message for:
|
||||
|
||||
1. Duplication: if the message has been seen before, the node drops the message
|
||||
and may respond with `PushMessagePrune` if forwarded from a low staked node
|
||||
1. Duplication: if the message has been seen before, the node responds with
|
||||
`PushMessagePrune` and drops the message
|
||||
|
||||
2. New data: if the message is new to the node
|
||||
* Stores the new information with an updated version in its cluster info and
|
||||
@@ -51,7 +51,7 @@ Upon receiving a push message, a node examines the message for:
|
||||
A node selects its push peers at random from the active set of known peers.
|
||||
The node keeps this selection for a relatively long time. When a prune message
|
||||
is received, the node drops the push peer that sent the prune. Prune is an
|
||||
indication that there is another, higher stake weighted path to that node than direct push.
|
||||
indication that there is another, faster path to that node than direct push.
|
||||
|
||||
The set of push peers is kept fresh by rotating a new node into the set every
|
||||
`PUSH_MSG_TIMEOUT/2` milliseconds.
|
||||
|
@@ -1,25 +0,0 @@
|
||||
# Instructions
|
||||
|
||||
For the purposes of building a [Transaction](transaction.md), a more
|
||||
verbose instruction format is used:
|
||||
|
||||
* **Instruction:**
|
||||
* **program_id:** The pubkey of the on-chain program that executes the
|
||||
instruction
|
||||
* **accounts:** An ordered list of accounts that should be passed to
|
||||
the program processing the instruction, including metadata detailing
|
||||
if an account is a signer of the transaction and if it is a credit
|
||||
only account.
|
||||
* **data:** A byte array that is passed to the program executing the
|
||||
instruction
|
||||
|
||||
A more compact form is actually included in a `Transaction`:
|
||||
|
||||
* **CompiledInstruction:**
|
||||
* **program_id_index:** The index of the `program_id` in the
|
||||
`account_keys` list
|
||||
* **accounts:** An ordered list of indices into `account_keys`
|
||||
specifying the accounts that should be passed to the program
|
||||
processing the instruction.
|
||||
* **data:** A byte array that is passed to the program executing the
|
||||
instruction
|
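The two formats can be pictured as the following Rust shapes (a sketch; the actual SDK definitions may differ in field types and attributes):

```rust,ignore
// Illustrative shapes only.
struct Instruction {
    program_id: Pubkey,          // the on-chain program that executes this instruction
    accounts: Vec<AccountMeta>,  // ordered accounts, with signer/credit-only metadata
    data: Vec<u8>,               // opaque bytes interpreted by the program
}

struct CompiledInstruction {
    program_id_index: u8,  // index of the program_id in `account_keys`
    accounts: Vec<u8>,     // ordered indices into `account_keys`
    data: Vec<u8>,         // opaque bytes interpreted by the program
}
```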
@@ -1,13 +1,13 @@
|
||||
# What is Solana?
|
||||
|
||||
Solana is an open source project implementing a new,
|
||||
Solana is the name of an open source project that is implementing a new,
|
||||
high-performance, permissionless blockchain. Solana is also the name of a
|
||||
company headquartered in San Francisco that maintains the open source project.
|
||||
|
||||
# About this Book
|
||||
|
||||
This book describes the Solana open source project, a blockchain built from the
|
||||
ground up for scale. The book covers why Solana is useful, how to use it, how it
|
||||
ground up for scale. The book covers why it's useful, how to use it, how it
|
||||
works, and why it will continue to work long after the company Solana closes
|
||||
its doors. The goal of the Solana architecture is to demonstrate there exists a
|
||||
set of software algorithms that when used in combination to implement a
|
||||
|
@@ -31,9 +31,6 @@ Methods
|
||||
* [getRecentBlockhash](#getrecentblockhash)
|
||||
* [getSignatureStatus](#getsignaturestatus)
|
||||
* [getSlotLeader](#getslotleader)
|
||||
* [getSlotsPerSegment](#getslotspersegment)
|
||||
* [getStorageTurn](#getstorageturn)
|
||||
* [getStorageTurnRate](#getstorageturnrate)
|
||||
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
|
||||
* [getTransactionCount](#gettransactioncount)
|
||||
* [getTotalSupply](#gettotalsupply)
|
||||
@@ -311,67 +308,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
|
||||
```
|
||||
|
||||
----
|
||||
|
||||
### getSlotsPerSegment
|
||||
Returns the current storage segment size in terms of slots
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
* `u64` - Number of slots in a storage segment
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"1024","id":1}
|
||||
```
|
||||
|
||||
----
|
||||
|
||||
### getStorageTurn
|
||||
Returns the current storage turn's blockhash and slot
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
An array consisting of
|
||||
* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
|
||||
* `u64` - the current storage turn slot
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
|
||||
```
|
||||
|
||||
----
|
||||
|
||||
### getStorageTurnRate
|
||||
Returns the current storage turn rate in terms of slots per turn
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
* `u64` - Number of slots in storage turn
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"1024","id":1}
|
||||
|
||||
```
|
||||
|
||||
----
|
||||
-----
|
||||
|
||||
### getNumBlocksSinceSignatureConfirmation
|
||||
Returns the current number of blocks since signature has been confirmed.
|
||||
|
@@ -96,7 +96,7 @@ ends up scheduled for the first two epochs because the leader schedule is also
|
||||
generated at slot 0 for the next epoch. The length of the first two epochs can
|
||||
be specified in the genesis block as well. The minimum length of the first
|
||||
epochs must be greater than or equal to the maximum rollback depth as defined in
|
||||
[Tower BFT](tower-bft.md).
|
||||
[fork selection](fork-selection.md).
|
||||
|
||||
## Leader Schedule Generation Algorithm
|
||||
|
||||
|
@@ -74,7 +74,7 @@ The program should have a list of slots which are valid storage mining slots.
|
||||
This list should be maintained by tracking rooted slots on which a significant
|
||||
portion of the network has voted with a high lockout value, perhaps 32 votes old. Every SLOTS\_PER\_SEGMENT
|
||||
number of slots would be added to this set. The program should check that the slot is in this set. The set can
|
||||
be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state.
|
||||
be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/locktower state.
|
||||
|
||||
The program should do a signature verify check on the signature, public key from the transaction submitter and the message of
|
||||
the previous storage epoch PoH value.
|
||||
|
@@ -60,7 +60,7 @@ The read is satisfied by pointing to a memory-mapped location in the
|
||||
|
||||
## Root Forks
|
||||
|
||||
[Tower BFT](tower-bft.md) eventually selects a fork as a
|
||||
The [fork selection algorithm](fork-selection.md) eventually selects a fork as a
|
||||
root fork and the fork is squashed. A squashed/root fork cannot be rolled back.
|
||||
|
||||
When a fork is squashed, all accounts in its parents not already present in the
|
||||
|
@@ -1,172 +0,0 @@
|
||||
# Simple Payment and State Verification
|
||||
|
||||
It is often useful to allow low resourced clients to participate in a Solana
|
||||
cluster. Be this participation economic or contract execution, verification
|
||||
that a client's activity has been accepted by the network is typically
|
||||
expensive. This proposal lays out a mechanism for such clients to confirm that
|
||||
their actions have been committed to the ledger state with minimal resource
|
||||
expenditure and third-party trust.
|
||||
|
||||
## A Naive Approach
|
||||
|
||||
Validators store the signatures of recently confirmed transactions for a short
|
||||
period of time to ensure that they are not processed more than once. Validators
|
||||
provide a JSON RPC endpoint, which clients can use to query the cluster if a
|
||||
transaction has been recently processed. Validators also provide a PubSub
|
||||
notification, whereby a client registers to be notified when a given signature
|
||||
is observed by the validator. While these two mechanisms allow a client to
|
||||
verify a payment, they are not a proof and rely on completely trusting a
|
||||
fullnode.
|
||||
|
||||
We will describe a way to minimize this trust using Merkle Proofs to anchor the
|
||||
fullnode's response in the ledger, allowing the client to confirm on their own
|
||||
that a sufficient number of their preferred validators have confirmed a
|
||||
transaction. Requiring multiple validator attestations further reduces trust in
|
||||
the fullnode, as it increases both the technical and economic difficulty of
|
||||
compromising several other network participants.
|
||||
|
||||
## Light Clients
|
||||
|
||||
A 'light client' is a cluster participant that does not itself run a fullnode.
|
||||
This light client would provide a level of security greater than trusting a
|
||||
remote fullnode, without requiring the light client to spend a lot of resources
|
||||
verifying the ledger.
|
||||
|
||||
Rather than providing transaction signatures directly to a light client, the
|
||||
fullnode instead generates a Merkle Proof from the transaction of interest to
|
||||
the root of a Merkle Tree of all transactions in the block that includes it. This Merkle
|
||||
Root is stored in a ledger entry which is voted on by validators, providing it
|
||||
consensus legitimacy. The additional level of security for a light client depends
|
||||
on an initial canonical set of validators the light client considers to be the
|
||||
stakeholders of the cluster. As that set is changed, the client can update its
|
||||
internal set of known validators with [receipts](#receipts). This may become
|
||||
challenging with a large number of delegated stakes.
|
||||
|
||||
Fullnodes themselves may want to use light client APIs for performance reasons.
|
||||
For example, during the initial launch of a fullnode, the fullnode may use a
|
||||
cluster provided checkpoint of the state and verify it with a receipt.
|
||||
|
||||
## Receipts
|
||||
|
||||
A receipt is a minimal proof that: a transaction has been included in a block,
|
||||
that the block has been voted on by the client's preferred set of validators and
|
||||
that the votes have reached the desired confirmation depth.
|
||||
|
||||
The receipts for both state and payments start with a Merkle Path from the
|
||||
value into a Bank-Merkle that has been voted on and included in the ledger. A
|
||||
chain of PoH Entries containing subsequent validator votes, deriving from the
|
||||
Bank-Merkle, is the confirmation proof.
|
||||
|
||||
Clients can examine this ledger data and compute the finality using Solana's fork
|
||||
selection rules.
|
||||
|
||||
### Payment Merkle Path
|
||||
|
||||
A payment receipt is a data structure that contains a Merkle Path from a
|
||||
transaction to the required set of validator votes.
|
||||
|
||||
An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted
|
||||
by signature.
|
||||
|
||||
<img alt="Block Merkle Diagram" src="img/spv-block-merkle.svg" class="center"/>
|
||||
|
||||
A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block.
|
||||
Transaction status is necessary for the receipt because the state receipt is
|
||||
constructed for the block. Two transactions over the same state can appear in
|
||||
the block, and therefore, there is no way to infer from just the state whether a
|
||||
transaction that is committed to the ledger has succeeded or failed in modifying
|
||||
the intended state. It may not be necessary to encode the full status code;
|
||||
a single status bit indicating the transaction's success may suffice.
|
||||
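As a sketch, the Entry-Merkle can be built by hashing the sorted transaction signatures pairwise up to a single root (using each transaction's signature as its leaf, and the leaf framing and odd-node handling, are illustrative assumptions):

```rust,ignore
use solana_sdk::hash::{hash, hashv, Hash};
use solana_sdk::signature::Signature;

// Sketch: Merkle root over an entry's transactions, sorted by signature.
fn entry_merkle(mut sigs: Vec<Signature>) -> Hash {
    sigs.sort();
    let mut level: Vec<Hash> = sigs.iter().map(|s| hash(s.as_ref())).collect();
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| match pair {
                [l, r] => hashv(&[l.as_ref(), r.as_ref()]),
                [l] => *l, // odd node promoted unchanged (an assumption)
                _ => unreachable!(),
            })
            .collect();
    }
    level.pop().unwrap_or_default()
}
```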
|
||||
### State Merkle Path
|
||||
|
||||
A state receipt provides a confirmation that a specific state is committed at the
|
||||
end of the block. Inter-block state transitions do not generate a receipt.
|
||||
|
||||
For example:
|
||||
|
||||
* A sends 5 Lamports to B
|
||||
* B spends 5 Lamports
|
||||
* C sends 5 Lamports to A
|
||||
|
||||
At the end of the block, A and B are in the exact same starting state, and any
|
||||
state receipt would point to the same value for A or B.
|
||||
|
||||
The Bank-Merkle is computed from the Merkle Tree of the new state changes, along
|
||||
with the Previous Bank-Merkle, and the Block-Merkle.
|
||||
|
||||
<img alt="Bank Merkle Diagram" src="img/spv-bank-merkle.svg" class="center"/>
|
||||
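A sketch of that combination as a single hash over the three roots (the ordering and hash framing here are assumptions):

```rust,ignore
use solana_sdk::hash::{hashv, Hash};

// Sketch: fold the state changes' Merkle root, the previous Bank-Merkle,
// and the Block-Merkle into the new Bank-Merkle.
fn bank_merkle(prev_bank_merkle: &Hash, block_merkle: &Hash, state_merkle: &Hash) -> Hash {
    hashv(&[
        prev_bank_merkle.as_ref(),
        block_merkle.as_ref(),
        state_merkle.as_ref(),
    ])
}
```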
|
||||
A state receipt contains only the state changes occurring in the block. A direct
|
||||
Merkle Path to the current Bank-Merkle guarantees the state value at that bank
|
||||
hash, but it cannot be used to generate a “current” receipt to the latest state
|
||||
if the state modification occurred in some previous block. There is no guarantee
|
||||
that the path provided by the validator is the latest one available out of all
|
||||
the previous Bank-Merkles.
|
||||
|
||||
Clients that want to query the chain for a receipt of the "latest" state would
|
||||
need to create a transaction that would update the Merkle Path for that account,
|
||||
such as a credit of 0 Lamports.
|
||||
|
||||
### Validator Votes
|
||||
|
||||
Leaders should coalesce the validator votes by stake weight into a single entry.
|
||||
This will reduce the number of entries necessary to create a receipt.
|
||||
|
||||
### Chain of Entries
|
||||
|
||||
A receipt has a PoH link from the payment or state Merkle Path root to a list of
|
||||
consecutive validation votes.
|
||||
|
||||
It contains the following:
|
||||
* State -> Bank-Merkle
|
||||
or
|
||||
* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Merkle
|
||||
|
||||
And a vector of PoH entries:
|
||||
|
||||
* Validator vote entries
|
||||
* Ticks
|
||||
* Light entries
|
||||
|
||||
|
||||
```rust,ignore
|
||||
/// This Entry definition skips over the transactions and only contains the
|
||||
/// hash of the transactions used to modify PoH.
|
||||
LightEntry {
|
||||
/// The number of hashes since the previous Entry ID.
|
||||
pub num_hashes: u64,
|
||||
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
|
||||
hash: Hash,
|
||||
/// The Merkle Root of the transactions encoded into the Entry.
|
||||
entry_hash: Hash,
|
||||
}
|
||||
```
|
||||
|
||||
The light entries are reconstructed from Entries and simply show the entry Merkle
|
||||
Root that was mixed into the PoH hash, instead of the full transaction set.
|
||||
|
||||
Clients do not need the starting vote state. The [fork selection](fork-selection.md) algorithm is
|
||||
defined such that only votes that appear after the transaction provide finality
|
||||
for the transaction, and finality is independent of the starting state.
|
||||
|
||||
### Verification
|
||||
|
||||
A light client that is aware of the supermajority set of validators can verify a
|
||||
receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the
|
||||
Merkle Root and will appear in votes included in an Entry. The light client can
|
||||
simulate [fork selection](fork-selection.md) for the consecutive votes
|
||||
and verify that the receipt is confirmed at the desired lockout threshold.
|
||||
|
||||
### Synthetic State
|
||||
|
||||
Synthetic state should be computed into the Bank-Merkle along with the bank
|
||||
generated state.
|
||||
|
||||
For example:
|
||||
|
||||
* Epoch validator accounts and their stakes and weights.
|
||||
* Computed fee rates
|
||||
|
||||
These values should have an entry in the Bank-Merkle. They should live under
|
||||
known accounts, and therefore have an exact address in the Merkle Path.
|
@@ -20,7 +20,7 @@ stakes delegated to it and has no staking weight.
|
||||
|
||||
A separate Stake account (created by a staker) names a Vote account to which the
|
||||
stake is delegated. Rewards generated are proportional to the amount of
|
||||
lamports staked. The Stake account is owned by the staker only. Some portion of the lamports
|
||||
lamports staked. The Stake account is owned by the staker only. Lamports
|
||||
stored in this account are the stake.
|
||||
|
||||
## Passive Delegation
|
||||
@@ -31,7 +31,7 @@ the Vote account or submitting votes to the account.
|
||||
|
||||
The total stake allocated to a Vote account can be calculated by the sum of
|
||||
all the Stake accounts that have the Vote account pubkey as the
|
||||
`StakeState::Stake::voter_pubkey`.
|
||||
`StakeState::Delegate::voter_pubkey`.
|
||||
|
||||
## Vote and Stake accounts
|
||||
|
||||
@@ -46,15 +46,15 @@ program that its delegate has participated in validating the ledger.
|
||||
VoteState is the current state of all the votes the validator has submitted to
|
||||
the network. VoteState contains the following state information:
|
||||
|
||||
* `votes` - The submitted votes data structure.
|
||||
* votes - The submitted votes data structure.
|
||||
|
||||
* `credits` - The total number of rewards this vote program has generated over its
|
||||
* credits - The total number of rewards this vote program has generated over its
|
||||
lifetime.
|
||||
|
||||
* `root_slot` - The last slot to reach the full lockout commitment necessary for
|
||||
* root\_slot - The last slot to reach the full lockout commitment necessary for
|
||||
rewards.
|
||||
|
||||
* `commission` - The commission taken by this VoteState for any rewards claimed by
|
||||
* commission - The commission taken by this VoteState for any rewards claimed by
|
||||
staker's Stake accounts. This is the percentage ceiling of the reward.
|
||||
|
||||
* Account::lamports - The accumulated lamports from the commission. These do not
|
||||
@@ -71,17 +71,13 @@ count as stakes.
|
||||
### VoteInstruction::AuthorizeVoteSigner(Pubkey)
|
||||
|
||||
* `account[0]` - RW - The VoteState
|
||||
`VoteState::authorized_vote_signer` is set to `Pubkey`; the transaction must be
|
||||
signed by the Vote account's current `authorized_vote_signer`. <br>
|
||||
`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
|
||||
for its votes. That service is responsible for ensuring the vote won't cause
|
||||
the staker to be slashed.
|
||||
|
||||
`VoteState::authorized_vote_signer` is set to `Pubkey`; the instruction must be
|
||||
signed by `Pubkey`.
|
||||
|
||||
### VoteInstruction::Vote(Vec<Vote>)
|
||||
|
||||
* `account[0]` - RW - The VoteState
|
||||
`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules; see [Tower BFT](tower-bft.md)
|
||||
`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules; see [Fork Selection](fork-selection.md)
|
||||
|
||||
|
||||
* `account[1]` - RO - A list of some N most recent slots and their hashes for the vote to be verified against.
|
||||
@@ -89,16 +85,14 @@ the staker to be slashed.
|
||||
|
||||
### StakeState
|
||||
|
||||
A StakeState takes one of three forms, StakeState::Uninitialized, StakeState::Stake and StakeState::RewardsPool.
|
||||
A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.
|
||||
|
||||
### StakeState::Stake
|
||||
### StakeState::Delegate
|
||||
|
||||
StakeState::Stake is the current delegation preference of the **staker** and
|
||||
StakeState is the current delegation preference of the **staker**. StakeState
|
||||
contains the following state information:
|
||||
|
||||
* Account::lamports - The lamports available for staking.
|
||||
|
||||
* `stake` - the staked amount (subject to warm up and cool down) for generating rewards, always less than or equal to Account::lamports
|
||||
* Account::lamports - The staked lamports.
|
||||
|
||||
* `voter_pubkey` - The pubkey of the VoteState instance the lamports are
|
||||
delegated to.
|
||||
@@ -106,53 +100,62 @@ delegated to.
|
||||
* `credits_observed` - The total credits claimed over the lifetime of the
|
||||
program.
|
||||
|
||||
* `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up.
|
||||
### StakeState::MiningPool
|
||||
|
||||
* `deactivated` - the epoch at which this stake will be completely de-activated, which is `cool down` epochs after StakeInstruction::Deactivate is issued.
|
||||
There are two approaches to the mining pool. The bank could allow the
|
||||
StakeState program to bypass the token balance check, or a program representing
|
||||
the mining pool could run on the network. To avoid a single network wide lock,
|
||||
the pool can be split into several mining pools. This design focuses on using
|
||||
StakeState::MiningPool instances as the cluster wide mining pools.
|
||||
|
||||
### StakeState::RewardsPool
|
||||
* 256 StakeState::MiningPool are initialized, each holding 1/256 of the mining pool
|
||||
tokens stored as `Account::lamports`.
|
||||
|
||||
To avoid a single network wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value.
|
||||
|
||||
The Stakes and the RewardsPool are accounts that are owned by the same `Stake` program.
|
||||
|
||||
### StakeInstruction::DelegateStake(u64)
|
||||
The stakes and the MiningPool are accounts that are owned by the same `Stake`
|
||||
program.
|
||||
|
||||
### StakeInstruction::Initialize
|
||||
The Stake account is moved from Uninitialized to StakeState::Stake form. This is
|
||||
how stakers choose their initial delegate validator node and activate their
|
||||
stake account lamports.
|
||||
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance. <br>
|
||||
`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`,<br>
|
||||
`StakeState::Stake::voter_pubkey` is initialized to `account[1]`,<br>
|
||||
`StakeState::Stake::stake` is initialized to the u64 passed as an argument above,<br>
|
||||
`StakeState::Stake::activated` is initialized to current Bank epoch, and<br>
|
||||
`StakeState::Stake::deactivated` is initialized to std::u64::MAX
|
||||
* `account[0]` - RW - The StakeState::Delegate instance.
|
||||
`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
|
||||
`StakeState::Delegate::voter_pubkey` is initialized to `account[1]`
|
||||
|
||||
* `account[1]` - R - The VoteState instance.
|
||||
|
||||
* `account[2]` - R - syscall::current account, carries information about current Bank epoch
|
||||
|
||||
### StakeInstruction::RedeemVoteCredits
|
||||
|
||||
The staker or the owner of the Stake account sends a transaction with this
|
||||
The Staker or the owner of the Stake account sends a transaction with this
|
||||
instruction to claim rewards.
|
||||
|
||||
The Vote account and the Stake account pair maintain a lifetime counter of total
|
||||
rewards generated and claimed. Rewards are paid according to a point value
|
||||
supplied by the Bank from inflation. A `point` is one credit * one staked
|
||||
lamport; rewards paid are proportional to the number of lamports staked.
|
||||
The Vote account and the Stake account pair maintain a lifetime counter
|
||||
of total rewards generated and claimed. When claiming rewards, the total lamports
|
||||
deposited into the Stake account and as validator commission is proportional to
|
||||
`VoteState::credits - StakeState::credits_observed`.
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
|
||||
* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
|
||||
* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request (picked at random).
|
||||
* `account[3]` - R - syscall::rewards account from the Bank that carries point value.
|
||||
|
||||
* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
|
||||
reward.
|
||||
* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
|
||||
credits.
|
||||
* `account[2]` - R - The VoteState instance, must be the same as
|
||||
`StakeState::voter_pubkey`
|
||||
|
||||
Reward is paid out for the difference between `VoteState::credits` to
|
||||
`StakeState::Stake::credits_observed`, multiplied by `syscall::rewards::Rewards::validator_point_value`.
|
||||
`StakeState::Stake::credits_observed` is updated to `VoteState::credits`. The commission is deposited into the Vote account token
|
||||
`StakeState::Delegate::credits_observed`, and `credits_observed` is updated to
|
||||
`VoteState::credits`. The commission is deposited into the Vote account token
|
||||
balance, and the reward is deposited to the Stake account token balance.
|
||||
|
||||
The total lamports paid is a percentage-rate of the lamports staked multiplied by
|
||||
the ratio of rewards being redeemed to rewards that could have been generated
|
||||
during the rate period.
|
||||
|
||||
Any random MiningPool can be used to redeem the credits.
|
||||
|
||||
```rust,ignore
|
||||
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
|
||||
@@ -160,26 +163,24 @@ stake_state.credits_observed = vote_state.credits;
|
||||
```
|
||||
|
||||
`credits_to_claim` is used to compute the reward and commission, and
|
||||
`StakeState::Stake::credits_observed` is updated to the latest
|
||||
`StakeState::Delegate::credits_observed` is updated to the latest
|
||||
`VoteState::credits` value.
|
||||
|
||||
### StakeInstruction::Deactivate
|
||||
A staker may wish to withdraw from the network. To do so, the staker must first deactivate the stake and wait for cool down.
|
||||
## Collecting network fees into the MiningPool
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance that is deactivating; the transaction must be signed by this key.
|
||||
* `account[1]` - R - syscall::current account from the Bank that carries current epoch
|
||||
At the end of the block, before the bank is frozen, but after it processed all
|
||||
the transactions for the block, a virtual instruction is executed to collect
|
||||
the transaction fees.
|
||||
|
||||
StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by
|
||||
that epoch, and Account::lamports will be available for withdrawal.
|
||||
* A portion of the fees are deposited into the leader's account.
|
||||
* A portion of the fees are deposited into the smallest StakeState::MiningPool
|
||||
account.
|
||||
|
||||
## Authorizing a Vote Signer
|
||||
|
||||
### StakeInstruction::Withdraw(u64)
|
||||
Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn.
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake from which to withdraw; the transaction must be signed by this key.
|
||||
* `account[1]` - RW - Account that should be credited with the withdrawn lamports.
|
||||
* `account[2]` - R - syscall::current account from the Bank that carries current epoch, to calculate stake.
|
||||
|
||||
`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
|
||||
for its votes. That service is responsible for ensuring the vote won't cause
|
||||
the staker to be slashed.
|
||||
|
||||
## Benefits of the design
|
||||
|
||||
@@ -192,6 +193,9 @@ Lamports build up over time in a Stake account and any excess over activated sta
|
||||
* Commission for the work is deposited when a reward is claimed by the delegated
|
||||
stake.
|
||||
|
||||
This proposal would benefit from the `read-only` accounts proposal to allow for
|
||||
many rewards to be claimed concurrently.
|
||||
|
||||
## Example Callflow
|
||||
|
||||
<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>
|
||||
|
@@ -74,7 +74,7 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
|
||||
software on Linux x86_64 and mac OS systems.
|
||||
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.6/install/solana-install-init.sh | sh -s
|
||||
```
|
||||
|
||||
Alternatively build the `solana-install` program from source and run the
|
||||
@@ -275,7 +275,7 @@ Keybase:
|
||||
|
||||
1. Join https://keybase.io/ and complete the profile for your validator
|
||||
2. Add your validator **identity pubkey** to Keybase:
|
||||
* Create an empty file on your local computer called `validator-<PUBKEY>`
|
||||
* Create an empty file on your local computer called `solana_pubkey_<PUBKEY>`
|
||||
* In Keybase, navigate to the Files section, and upload your pubkey file to
|
||||
a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
|
||||
* To check your pubkey, ensure you can successfully browse to
|
||||
|
@@ -53,7 +53,8 @@ software.
|
||||
|
||||
##### Linux and mac OS
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
|
||||
$ export SOLANA_RELEASE=v0.16.6 # skip this line to install the latest release
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.6/install/solana-install-init.sh | sh -s
|
||||
```
|
||||
|
||||
Alternatively build the `solana-install` program from source and run the
|
||||
|
@@ -1,48 +0,0 @@
|
||||
# The Transaction
|
||||
|
||||
### Components of a `Transaction`
|
||||
|
||||
* **Transaction:**
|
||||
* **message:** Defines the transaction
|
||||
* **header:** Details the account types of and signatures required by
|
||||
the transaction
|
||||
* **num_required_signatures:** The total number of signatures
|
||||
required to make the transaction valid.
|
||||
* **num_credit_only_signed_accounts:** The last
|
||||
`num_credit_only_signed_accounts` signatures refer to signing
|
||||
credit only accounts. Credit only accounts can be used concurrently
|
||||
by multiple parallel transactions, but their balance may only be
|
||||
increased, and their account data is read-only.
|
||||
* **num_credit_only_unsigned_accounts:** The last
|
||||
`num_credit_only_unsigned_accounts` pubkeys in `account_keys` refer
|
||||
to non-signing credit only accounts
|
||||
* **account_keys:** List of pubkeys used by the transaction, including
|
||||
by the instructions and for signatures. The first
|
||||
`num_required_signatures` pubkeys must sign the transaction.
|
||||
* **recent_blockhash:** The ID of a recent ledger entry. Validators will
|
||||
reject transactions with a `recent_blockhash` that is too old.
|
||||
* **instructions:** A list of [instructions](instruction.md) that are
|
||||
run sequentially and committed in one atomic transaction if all
|
||||
succeed.
|
||||
* **signatures:** A list of signatures applied to the transaction. The
|
||||
list is always of length `num_required_signatures`, and the signature
|
||||
at index `i` corresponds to the pubkey at index `i` in `account_keys`.
|
||||
The list is initialized with empty signatures (i.e. zeros), and
|
||||
populated as signatures are added.
|
||||
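Those components can be pictured as the following Rust shapes (a sketch; the actual SDK definitions may differ in field types and attributes):

```rust,ignore
// Illustrative shapes only.
struct Transaction {
    message: Message,           // what the signatures attest to
    signatures: Vec<Signature>, // length == num_required_signatures
}

struct Message {
    header: MessageHeader,
    account_keys: Vec<Pubkey>,
    recent_blockhash: Hash,
    instructions: Vec<CompiledInstruction>,
}

struct MessageHeader {
    num_required_signatures: u8,
    num_credit_only_signed_accounts: u8,
    num_credit_only_unsigned_accounts: u8,
}
```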
|
||||
### Transaction Signing
|
||||
|
||||
A `Transaction` is signed by using an ed25519 keypair to sign the
|
||||
serialization of the `message`. The resulting signature is placed at the
|
||||
index of `signatures` matching the index of the keypair's pubkey in
|
||||
`account_keys`.
|
||||
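A sketch of the signing step (method names here are illustrative, not the SDK's exact API):

```rust,ignore
// Sketch: sign the serialized message and place the signature at the
// index of the keypair's pubkey in `account_keys`.
fn sign(tx: &mut Transaction, keypair: &Keypair) {
    let message_bytes = tx.message.serialize(); // bincode-serialized message
    let index = tx
        .message
        .account_keys
        .iter()
        .position(|key| *key == keypair.pubkey())
        .expect("signing key not in account_keys");
    tx.signatures[index] = keypair.sign_message(&message_bytes);
}
```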
|
||||
### Transaction Serialization
|
||||
|
||||
`Transaction`s (and their `message`s) are serialized and deserialized
|
||||
using the [bincode](https://crates.io/crates/bincode) crate with a
|
||||
non-standard vector serialization that uses only one byte for the length
|
||||
if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3
|
||||
bytes if it requires 15 or 16 bits. The vector serialization is defined
|
||||
by Solana's
|
||||
[short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).
|
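A sketch of that length encoding, seven bits per byte with the high bit marking a continuation:

```rust,ignore
// Sketch: encode a vector length in 1-3 bytes. Lengths up to 0x7f take one
// byte, up to 0x3fff two bytes, and anything larger (15-16 bits) three.
fn encode_len(mut len: u16, out: &mut Vec<u8>) {
    loop {
        let byte = (len & 0x7f) as u8;
        len >>= 7;
        if len == 0 {
            out.push(byte);
            break;
        }
        out.push(byte | 0x80); // high bit set: another length byte follows
    }
}
```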
@@ -1,43 +0,0 @@
|
||||
# Anatomy of a Transaction
|
||||
|
||||
Transactions encode lists of instructions that are executed
|
||||
sequentially, and only committed if all the instructions complete
|
||||
successfully. All account states are reverted upon the failure of a
|
||||
transaction. Each Transaction details the accounts used, including which
|
||||
must sign and which are credit only, a recent blockhash, the
|
||||
instructions, and any signatures.
|
||||
|
||||
## Accounts and Signatures
|
||||
|
||||
Each transaction explicitly lists all accounts that it needs access to.
|
||||
This includes accounts that are transferring tokens, accounts whose user
|
||||
data is being modified, and the program accounts that are being called
|
||||
by the instructions. Each account that is not an executable program can
|
||||
be marked as requiring a signature and/or as credit only. All accounts
|
||||
marked as signers must have a valid signature in the transaction's list
|
||||
of signatures before the transaction is considered valid. Any accounts
|
||||
marked as credit only may only have their token value increased, and
|
||||
their user data is read only. Accounts are locked by the runtime,
|
||||
ensuring that they are not modified by a concurrent program while the
|
||||
transaction is running. Credit only accounts can safely be shared, so
|
||||
the runtime will allow multiple concurrent credit only locks on an
|
||||
account.
|
||||
|
||||
## Recent Blockhash
|
||||
|
||||
A Transaction includes a recent blockhash to prevent duplication and to
|
||||
give transactions lifetimes. Any transaction that is completely
|
||||
identical to a previous one is rejected, so adding a newer blockhash
|
||||
allows multiple transactions to repeat the exact same action.
|
||||
Transactions also have lifetimes that are defined by the blockhash, as
|
||||
any transaction whose blockhash is too old will be rejected.
|
||||
|
||||
## Instructions
|
||||
|
||||
Each instruction specifies a single program account (which must be
|
||||
marked executable), a subset of the transaction's accounts that should
|
||||
be passed to the program, and a data byte array instruction that is
|
||||
passed to the program. The program interprets the data array and
|
||||
operates on the accounts specified by the instructions. The program can
|
||||
return successfully, or with an error code. An error return causes the
|
||||
entire transaction to fail immediately.
|
1
chacha-sys/.gitignore
vendored
1
chacha-sys/.gitignore
vendored
@@ -1,2 +1 @@
|
||||
/target/
|
||||
/farf/
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-sys"
|
||||
version = "0.17.0"
|
||||
version = "0.16.7"
|
||||
description = "Solana chacha-sys"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -9,4 +9,4 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[build-dependencies]
|
||||
cc = "1.0.38"
|
||||
cc = "1.0.37"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
steps:
|
||||
- command: "sdk/docker-solana/build.sh"
|
||||
timeout_in_minutes: 40
|
||||
timeout_in_minutes: 20
|
||||
name: "publish docker"
|
||||
- command: "ci/publish-crate.sh"
|
||||
timeout_in_minutes: 90
|
||||
@@ -10,7 +10,7 @@ steps:
|
||||
timeout_in_minutes: 5
|
||||
name: "publish bpf sdk"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
timeout_in_minutes: 45
|
||||
timeout_in_minutes: 25
|
||||
name: "publish tarball"
|
||||
- command: "ci/publish-book.sh"
|
||||
timeout_in_minutes: 15
|
||||
|
@@ -4,7 +4,7 @@ steps:
|
||||
timeout_in_minutes: 5
|
||||
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
|
||||
name: "checks"
|
||||
timeout_in_minutes: 35
|
||||
timeout_in_minutes: 15
|
||||
- wait
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf"
|
||||
|
@@ -1,10 +1,6 @@
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/rust-version.sh to pick up the new image tag
|
||||
FROM rust:1.36.0
|
||||
|
||||
# Add Google Protocol Buffers for Libra's metrics library.
|
||||
ENV PROTOC_VERSION 3.8.0
|
||||
ENV PROTOC_ZIP protoc-$PROTOC_VERSION-linux-x86_64.zip
|
||||
FROM rust:1.35.0
|
||||
|
||||
RUN set -x \
|
||||
&& apt update \
|
||||
@@ -24,8 +20,6 @@ RUN set -x \
|
||||
mscgen \
|
||||
rsync \
|
||||
sudo \
|
||||
golang \
|
||||
unzip \
|
||||
\
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& rustup component add rustfmt \
|
||||
@@ -34,8 +28,4 @@ RUN set -x \
|
||||
&& cargo install svgbob_cli \
|
||||
&& cargo install mdbook \
|
||||
&& rustc --version \
|
||||
&& cargo --version \
|
||||
&& curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
|
||||
&& unzip -o $PROTOC_ZIP -d /usr/local bin/protoc \
|
||||
&& unzip -o $PROTOC_ZIP -d /usr/local include/* \
|
||||
&& rm -f $PROTOC_ZIP
|
||||
&& cargo --version
|
||||
|
@@ -5,6 +5,7 @@ skipSetup=false
|
||||
iterations=1
|
||||
restartInterval=never
|
||||
rollingRestart=false
|
||||
maybeNoLeaderRotation=
|
||||
extraNodes=0
|
||||
walletRpcPort=:8899
|
||||
|
||||
@@ -53,6 +54,9 @@ while getopts "ch?i:k:brxR" opt; do
|
||||
k)
|
||||
restartInterval=$OPTARG
|
||||
;;
|
||||
b)
|
||||
maybeNoLeaderRotation="--stake 0"
|
||||
;;
|
||||
x)
|
||||
extraNodes=$((extraNodes + 1))
|
||||
;;
|
||||
@@ -78,6 +82,7 @@ nodes=(
|
||||
--no-restart \
|
||||
--init-complete-file init-complete-node1.log"
|
||||
"multinode-demo/validator.sh \
|
||||
$maybeNoLeaderRotation \
|
||||
--enable-rpc-exit \
|
||||
--no-restart \
|
||||
--init-complete-file init-complete-node2.log \
|
||||
@@ -89,7 +94,8 @@ for i in $(seq 1 $extraNodes); do
|
||||
"multinode-demo/validator.sh \
|
||||
--no-restart \
|
||||
--label dyn$i \
|
||||
--init-complete-file init-complete-node$((2 + i)).log"
|
||||
--init-complete-file init-complete-node$((2 + i)).log \
|
||||
$maybeNoLeaderRotation"
|
||||
)
|
||||
done
|
||||
numNodes=$((2 + extraNodes))
|
||||
@@ -119,26 +125,21 @@ startNode() {
|
||||
echo "log: $log"
|
||||
}
|
||||
|
||||
waitForNodeToInit() {
|
||||
declare initCompleteFile=$1
|
||||
while [[ ! -r $initCompleteFile ]]; do
|
||||
if [[ $SECONDS -ge 240 ]]; then
|
||||
echo "^^^ +++"
|
||||
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
||||
exit 1
|
||||
fi
|
||||
echo "Waiting for $initCompleteFile ($SECONDS)..."
|
||||
sleep 2
|
||||
done
|
||||
echo "Found $initCompleteFile"
|
||||
}
|
||||
|
||||
initCompleteFiles=()
|
||||
waitForAllNodesToInit() {
|
||||
echo "--- ${#initCompleteFiles[@]} nodes booting"
|
||||
SECONDS=
|
||||
for initCompleteFile in "${initCompleteFiles[@]}"; do
|
||||
waitForNodeToInit "$initCompleteFile"
|
||||
while [[ ! -r $initCompleteFile ]]; do
|
||||
if [[ $SECONDS -ge 240 ]]; then
|
||||
echo "^^^ +++"
|
||||
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
||||
exit 1
|
||||
fi
|
||||
echo "Waiting for $initCompleteFile ($SECONDS)..."
|
||||
sleep 2
|
||||
done
|
||||
echo "Found $initCompleteFile"
|
||||
done
|
||||
echo "All nodes finished booting in $SECONDS seconds"
|
||||
}
|
||||
@@ -161,13 +162,6 @@ startNodes() {
|
||||
if $addLogs; then
|
||||
logs+=("$(getNodeLogFile "$i" "$cmd")")
|
||||
fi
|
||||
|
||||
# 1 == bootstrap leader, wait until it boots before starting
|
||||
# other validators
|
||||
if [[ "$i" -eq 1 ]]; then
|
||||
SECONDS=
|
||||
waitForNodeToInit "$initCompleteFile"
|
||||
fi
|
||||
done
|
||||
|
||||
waitForAllNodesToInit
|
||||
@@ -306,6 +300,7 @@ else
|
||||
fi
|
||||
startNodes
|
||||
lastTransactionCount=
|
||||
enforceTransactionCountAdvance=true
|
||||
while [[ $iteration -le $iterations ]]; do
|
||||
echo "--- Node count ($iteration)"
|
||||
(
|
||||
@@ -341,20 +336,36 @@ while [[ $iteration -le $iterations ]]; do
|
||||
transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
|
||||
if [[ -n $lastTransactionCount ]]; then
|
||||
echo "--- Transaction count check: $lastTransactionCount < $transactionCount"
|
||||
if [[ $lastTransactionCount -ge $transactionCount ]]; then
|
||||
echo "Error: Transaction count is not advancing"
|
||||
echo "* lastTransactionCount: $lastTransactionCount"
|
||||
echo "* transactionCount: $transactionCount"
|
||||
flag_error
|
||||
if $enforceTransactionCountAdvance; then
|
||||
if [[ $lastTransactionCount -ge $transactionCount ]]; then
|
||||
echo "Error: Transaction count is not advancing"
|
||||
echo "* lastTransactionCount: $lastTransactionCount"
|
||||
echo "* transactionCount: $transactionCount"
|
||||
flag_error
|
||||
fi
|
||||
else
|
||||
echo "enforceTransactionCountAdvance=false"
|
||||
fi
|
||||
enforceTransactionCountAdvance=true
|
||||
fi
|
||||
lastTransactionCount=$transactionCount
|
||||
|
||||
echo "--- Wallet sanity ($iteration)"
|
||||
flag_error_if_no_leader_rotation() {
|
||||
# TODO: Stop ignoring wallet sanity failures when leader rotation is enabled
|
||||
# once https://github.com/solana-labs/solana/issues/2474 is fixed
|
||||
if [[ -n $maybeNoLeaderRotation ]]; then
|
||||
flag_error
|
||||
else
|
||||
# Wallet error occurred (and was ignored) so transactionCount may not
|
||||
# advance on the next iteration
|
||||
enforceTransactionCountAdvance=false
|
||||
fi
|
||||
}
|
||||
(
|
||||
set -x
|
||||
timeout 60s scripts/wallet-sanity.sh --url http://127.0.0.1"$walletRpcPort"
|
||||
) || flag_error
|
||||
) || flag_error_if_no_leader_rotation
|
||||
|
||||
iteration=$((iteration + 1))
|
||||
|
||||
|
10
ci/nits.sh
10
ci/nits.sh
@@ -46,22 +46,16 @@ if _ git --no-pager grep -n 'Default::default()' -- '*.rs'; then
|
||||
fi
|
||||
|
||||
# Let's keep a .gitignore for every crate, ensure it's got
|
||||
# /target/ and /farf/ in it
|
||||
# /target/ in it
|
||||
declare gitignores_ok=true
|
||||
for i in $(git --no-pager ls-files \*/Cargo.toml ); do
|
||||
dir=$(dirname "$i")
|
||||
if [[ ! -f $dir/.gitignore ]]; then
|
||||
echo 'error: nits.sh .gitignore missing for crate '"$dir" >&2
|
||||
gitignores_ok=false
|
||||
else
|
||||
if ! grep -q -e '^/target/$' "$dir"/.gitignore; then
|
||||
elif ! grep -q -e '^/target/$' "$dir"/.gitignore; then
|
||||
echo 'error: nits.sh "/target/" apparently missing from '"$dir"'/.gitignore' >&2
|
||||
gitignores_ok=false
|
||||
fi
|
||||
if ! grep -q -e '^/farf/$' "$dir"/.gitignore ; then
|
||||
echo 'error: nits.sh "/farf/" apparently missing from '"$dir"'/.gitignore' >&2
|
||||
gitignores_ok=false
|
||||
fi
|
||||
fi
|
||||
done
|
||||
"$gitignores_ok"
|
||||
|
@@ -13,8 +13,8 @@
|
||||
# $ source ci/rust-version.sh
|
||||
#
|
||||
|
||||
stable_version=1.36.0
|
||||
nightly_version=2019-07-19
|
||||
stable_version=1.35.0
|
||||
nightly_version=2019-06-20
|
||||
|
||||
export rust_stable="$stable_version"
|
||||
export rust_stable_docker_image=solanalabs/rust:"$stable_version"
|
||||
|
@@ -12,25 +12,16 @@ export RUSTFLAGS="-D warnings"
|
||||
|
||||
do_bpf_check() {
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
_ cargo +"$rust_nightly" test --all
|
||||
_ cargo +"$rust_nightly" clippy --version
|
||||
_ cargo +"$rust_nightly" clippy --all -- --version
|
||||
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
|
||||
_ cargo +"$rust_stable" audit
|
||||
# _ cargo +"$rust_stable" audit
|
||||
}
|
||||
|
||||
(
|
||||
(
|
||||
cd sdk/bpf/rust/rust-no-std
|
||||
do_bpf_check
|
||||
)
|
||||
(
|
||||
cd sdk/bpf/rust/rust-utils
|
||||
do_bpf_check
|
||||
)
|
||||
(
|
||||
cd sdk/bpf/rust/rust-test
|
||||
do_bpf_check
|
||||
)
|
||||
for project in programs/bpf/rust/*/ ; do
|
||||
(
|
||||
cd "$project"
|
||||
@@ -40,10 +31,9 @@ do_bpf_check() {
|
||||
)
|
||||
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
_ cargo +"$rust_stable" clippy --version
|
||||
_ cargo +"$rust_stable" clippy --all -- --version
|
||||
_ cargo +"$rust_stable" clippy --all -- --deny=warnings
|
||||
_ cargo +"$rust_stable" audit --version
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0011 # https://github.com/solana-labs/solana/issues/5207
|
||||
#_ cargo +"$rust_stable" audit
|
||||
_ ci/nits.sh
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ book/build.sh
|
||||
|
@@ -33,7 +33,7 @@ test-stable)
|
||||
echo "Executing $testName"
|
||||
|
||||
_ cargo +"$rust_stable" build --all ${V:+--verbose}
|
||||
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture
|
||||
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
;;
|
||||
test-stable-perf)
|
||||
echo "Executing $testName"
|
||||
@@ -77,7 +77,7 @@ test-stable-perf)
|
||||
|
||||
# Run root package library tests
|
||||
_ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
|
||||
_ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture
|
||||
_ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown test: $testName"
|
||||
|
1
client/.gitignore
vendored
1
client/.gitignore
vendored
@@ -1,2 +1 @@
|
||||
/target/
|
||||
/farf/
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "0.17.0"
|
||||
version = "0.16.7"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -11,18 +11,18 @@ edition = "2018"
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
bs58 = "0.2.0"
|
||||
jsonrpc-core = "12.1.0"
|
||||
log = "0.4.7"
|
||||
jsonrpc-core = "12.0.0"
|
||||
log = "0.4.2"
|
||||
rand = "0.6.5"
|
||||
rayon = "1.1.0"
|
||||
reqwest = "0.9.19"
|
||||
serde = "1.0.97"
|
||||
serde_derive = "1.0.97"
|
||||
serde_json = "1.0.40"
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.17.0" }
|
||||
reqwest = "0.9.18"
|
||||
serde = "1.0.92"
|
||||
serde_derive = "1.0.92"
|
||||
serde_json = "1.0.39"
|
||||
solana-netutil = { path = "../netutil", version = "0.16.7" }
|
||||
solana-sdk = { path = "../sdk", version = "0.16.7" }
|
||||
|
||||
[dev-dependencies]
|
||||
jsonrpc-core = "12.1.0"
|
||||
jsonrpc-http-server = "12.1.0"
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
jsonrpc-core = "12.0.0"
|
||||
jsonrpc-http-server = "12.0.0"
|
||||
solana-logger = { path = "../logger", version = "0.16.7" }
|
||||
|
@@ -476,7 +476,7 @@ impl RpcClient {
|
||||
&self,
|
||||
signature: &Signature,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> io::Result<usize> {
|
||||
) -> io::Result<()> {
|
||||
let mut now = Instant::now();
|
||||
let mut confirmed_blocks = 0;
|
||||
loop {
|
||||
@@ -485,11 +485,8 @@ impl RpcClient {
|
||||
Ok(count) => {
|
||||
if confirmed_blocks != count {
|
||||
info!(
|
||||
"signature {} confirmed {} out of {} after {} ms",
|
||||
signature,
|
||||
count,
|
||||
min_confirmed_blocks,
|
||||
now.elapsed().as_millis()
|
||||
"signature {} confirmed {} out of {}",
|
||||
signature, count, min_confirmed_blocks
|
||||
);
|
||||
now = Instant::now();
|
||||
confirmed_blocks = count;
|
||||
@@ -503,23 +500,12 @@ impl RpcClient {
|
||||
}
|
||||
};
|
||||
if now.elapsed().as_secs() > 15 {
|
||||
info!(
|
||||
"signature {} confirmed {} out of {} failed after {} ms",
|
||||
signature,
|
||||
confirmed_blocks,
|
||||
min_confirmed_blocks,
|
||||
now.elapsed().as_millis()
|
||||
);
|
||||
if confirmed_blocks > 0 {
|
||||
return Ok(confirmed_blocks);
|
||||
} else {
|
||||
// TODO: Return a better error.
|
||||
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
|
||||
}
|
||||
// TODO: Return a better error.
|
||||
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
|
||||
}
|
||||
sleep(Duration::from_secs(1));
|
||||
sleep(Duration::from_millis(250));
|
||||
}
|
||||
Ok(confirmed_blocks)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_num_blocks_since_signature_confirmation(
|
||||
|
@@ -16,9 +16,8 @@ pub enum RpcRequest {
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
GetEpochVoteAccounts,
|
||||
GetStorageTurn,
|
||||
GetStorageTurnRate,
|
||||
GetSlotsPerSegment,
|
||||
GetStorageBlockhash,
|
||||
GetStorageSlot,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetTransactionCount,
|
||||
RegisterNode,
|
||||
@@ -46,9 +45,8 @@ impl RpcRequest {
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts",
|
||||
RpcRequest::GetStorageTurn => "getStorageTurn",
|
||||
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
|
||||
RpcRequest::GetStorageSlot => "getStorageSlot",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::RegisterNode => "registerNode",
|
||||
|
@@ -16,7 +16,7 @@ use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_instruction;
use solana_sdk::timing::{duration_as_ms, MAX_PROCESSING_AGE};
use solana_sdk::timing::duration_as_ms;
use solana_sdk::transaction::{self, Transaction};
use solana_sdk::transport::Result as TransportResult;
use std::io;
@@ -203,39 +203,20 @@ impl ThinClient {
keypairs: &[&Keypair],
transaction: &mut Transaction,
tries: usize,
pending_confirmations: usize,
min_confirmed_blocks: usize,
) -> io::Result<Signature> {
for x in 0..tries {
let now = Instant::now();
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed");
// resend the same transaction until the transaction has no chance of succeeding
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
// Send the transaction if there has been no confirmation (e.g. the first time)
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?;
}

if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
&transaction.signatures[0],
pending_confirmations,
) {
num_confirmed = confirmed_blocks;
if confirmed_blocks >= pending_confirmations {
return Ok(transaction.signatures[0]);
}
// Since the network has seen the transaction, wait longer to receive
// all pending confirmations. Resending the transaction could result in
// extra transaction fees
wait_time = wait_time.max(
MAX_PROCESSING_AGE * pending_confirmations.saturating_sub(num_confirmed),
);
}
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?;
if self
.poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks)
.is_ok()
{
return Ok(transaction.signatures[0]);
}
info!(
"{} tries failed transfer to {}",
@@ -397,7 +378,7 @@ impl SyncClient for ThinClient {
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> TransportResult<usize> {
) -> TransportResult<()> {
Ok(self
.rpc_client()
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
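The removed side of the ThinClient hunk stretches the wait window once the network has seen the transaction, instead of resending it and risking a second fee. A sketch of that back-off rule, assuming MAX_PROCESSING_AGE is the slot-age bound from solana_sdk::timing (the value here is illustrative):

    // Illustrative stand-in for solana_sdk::timing::MAX_PROCESSING_AGE.
    const MAX_PROCESSING_AGE: usize = 75;

    fn next_wait_time(wait_time: usize, pending_confirmations: usize, num_confirmed: usize) -> usize {
        // Once the network has seen the transaction, wait longer rather than
        // resending it: a resend could be charged a second fee.
        wait_time.max(MAX_PROCESSING_AGE * pending_confirmations.saturating_sub(num_confirmed))
    }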
core/.gitignore (vendored)
@@ -1,2 +1 @@
/target/
/farf/
@@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.17.0"
version = "0.16.7"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -21,61 +21,55 @@ kvstore = ["solana-kvstore"]
bincode = "1.1.4"
bs58 = "0.2.0"
byteorder = "1.3.2"
chrono = { version = "0.4.7", features = ["serde"] }
chrono = { version = "0.4.0", features = ["serde"] }
core_affinity = "0.5.9"
crc = { version = "1.8.1", optional = true }
crossbeam-channel = "0.3"
hashbrown = "0.2.0"
indexmap = "1.0"
itertools = "0.8.0"
jsonrpc-core = "12.1.0"
jsonrpc-derive = "12.1.0"
jsonrpc-http-server = "12.1.0"
jsonrpc-core = "12.0.0"
jsonrpc-derive = "12.0.0"
jsonrpc-http-server = "12.0.0"
jsonrpc-pubsub = "12.0.0"
jsonrpc-ws-server = "12.1.0"
jsonrpc-ws-server = "12.0.0"
libc = "0.2.58"
log = "0.4.7"
log = "0.4.2"
memmap = { version = "0.7.0", optional = true }
nix = "0.14.1"
num-traits = "0.2"
rand = "0.6.5"
rand_chacha = "0.1.1"
rayon = "1.1.0"
reqwest = "0.9.19"
reqwest = "0.9.18"
rocksdb = "0.11.0"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
solana-budget-api = { path = "../programs/budget_api", version = "0.17.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.17.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-config-program = { path = "../programs/config_program", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.16.7" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.7" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.7" }
solana-client = { path = "../client", version = "0.16.7" }
solana-config-program = { path = "../programs/config_program", version = "0.16.7" }
solana-drone = { path = "../drone", version = "0.16.7" }
solana-ed25519-dalek = "0.2.0"
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
solana-kvstore = { path = "../kvstore", version = "0.17.0", optional = true }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-measure = { path = "../measure", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.17.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.17.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.17.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.17.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.17.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.17.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.17.0" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.17.0" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.17.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.17.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.7" }
solana-kvstore = { path = "../kvstore", version = "0.16.7", optional = true }
solana-logger = { path = "../logger", version = "0.16.7" }
solana-metrics = { path = "../metrics", version = "0.16.7" }
solana-netutil = { path = "../netutil", version = "0.16.7" }
solana-runtime = { path = "../runtime", version = "0.16.7" }
solana-sdk = { path = "../sdk", version = "0.16.7" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.7" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.7" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.7" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.7" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.7" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.7" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.7" }
sys-info = "0.5.7"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.7.0"
untrusted = "0.6.2"

# reed-solomon-erasure's simd_c feature fails to build for x86_64-pc-windows-msvc, use pure-rust
[target.'cfg(windows)'.dependencies]
@@ -86,8 +80,6 @@ reed-solomon-erasure = "3.1.1"
[dev-dependencies]
hex-literal = "0.2.0"
matches = "0.1.6"
serial_test = "0.2.0"
serial_test_derive = "0.2.0"

[[bench]]
name = "banking_stage"
@@ -4,7 +4,6 @@ extern crate test;
#[macro_use]
extern crate solana;

use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
@@ -18,18 +17,16 @@ use solana::poh_recorder::WorkingBankEntries;
use solana::service::Service;
use solana::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::KeypairUtil;
use solana_sdk::signature::Signature;
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use solana_sdk::timing::{
duration_as_us, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES,
};
use std::iter;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use test::Bencher;
@@ -79,12 +76,8 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
// This tests the performance of buffering packets.
// If the packet buffers are copied, performance will be poor.
bencher.iter(move || {
let _ignored = BankingStage::consume_buffered_packets(
&my_pubkey,
&poh_recorder,
&mut packets,
10_000,
);
let _ignored =
BankingStage::consume_buffered_packets(&my_pubkey, &poh_recorder, &mut packets);
});

exit.store(true, Ordering::Relaxed);
@@ -93,52 +86,13 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let _unused = Blocktree::destroy(&ledger_path);
}

fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
new.message.account_keys[1] = Pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect()
}

fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let progs = 4;
(0..txes)
.into_iter()
.map(|_| {
let mut instructions = vec![];
let from_key = Keypair::new();
for _ in 1..progs {
let to_key = Pubkey::new_rand();
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
}
let mut new = Transaction::new_unsigned_instructions(instructions);
new.sign(&[&from_key], hash);
new
})
.collect()
}

enum TransactionType {
Accounts,
Programs,
}

fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
solana_logger::setup();
let num_threads = BankingStage::num_threads() as usize;
// a multiple of packet chunk duplicates to avoid races
const CHUNKS: usize = 8;
const PACKETS_PER_BATCH: usize = 192;
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
// a multiple of packet chunk 2X duplicates to avoid races
const CHUNKS: usize = 32;
let txes = 192 * num_threads * CHUNKS;
let mint_total = 1_000_000_000_000;
let GenesisBlockInfo {
mut genesis_block,
@@ -150,17 +104,25 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
// during the benchmark
genesis_block.ticks_per_slot = 10_000;

let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (verified_sender, verified_receiver) = channel();
let (vote_sender, vote_receiver) = channel();
let bank = Arc::new(Bank::new(&genesis_block));

debug!("threads: {} txs: {}", num_threads, txes);

let transactions = match tx_type {
TransactionType::Accounts => make_accounts_txs(txes, &mint_keypair, genesis_block.hash()),
TransactionType::Programs => make_programs_txs(txes, genesis_block.hash()),
};

let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(&mint_keypair, &to_pubkey, 1, genesis_block.hash());
trace!("txs: {}", txes);
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
@@ -184,7 +146,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH)
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
.into_iter()
.map(|x| {
let len = x.packets.len();
@@ -220,13 +182,12 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let now = Instant::now();
let mut sent = 0;

for v in verified[start..start + chunk_len].chunks(chunk_len / num_threads) {
debug!(
"sending... {}..{} {} v.len: {}",
for v in verified[start..start + chunk_len].chunks(verified.len() / num_threads) {
trace!(
"sending... {}..{} {}",
start,
start + chunk_len,
timestamp(),
v.len(),
timestamp()
);
for xv in v {
sent += xv.0.packets.len();
@@ -236,7 +197,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
check_txs(&signal_receiver2, txes / CHUNKS);

// This signature clear may not actually clear the signatures
// in this chunk, but since we rotate between CHUNKS then
// in this chunk, but since we rotate between 32 chunks then
// we should clear them by the time we come around again to re-use that chunk.
bank.clear_signatures();
trace!(
@@ -256,11 +217,124 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
}

#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
bench_banking(bencher, TransactionType::Accounts);
}

#[bench]
#[ignore]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
bench_banking(bencher, TransactionType::Programs);
let progs = 4;
let num_threads = BankingStage::num_threads() as usize;
// a multiple of packet chunk 2X duplicates to avoid races
let txes = 96 * 100 * num_threads * 2;
let mint_total = 1_000_000_000_000;
let GenesisBlockInfo {
genesis_block,
mint_keypair,
..
} = create_genesis_block(mint_total);

let (verified_sender, verified_receiver) = channel();
let (vote_sender, vote_receiver) = channel();
let bank = Arc::new(Bank::new(&genesis_block));
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(&mint_keypair, &to_pubkey, 1, genesis_block.hash());
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.message.instructions[0].clone();
for i in 1..progs {
//generate programs that spend to random keys
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let to_key = Pubkey::new(&to[0..32]);
new.message.account_keys.push(to_key);
assert_eq!(new.message.account_keys.len(), i + 2);
new.message.instructions.push(prog.clone());
assert_eq!(new.message.instructions.len(), i + 1);
new.message.instructions[i].accounts[1] = 1 + i as u8;
assert_eq!(new.key(i, 1), Some(&to_key));
assert_eq!(
new.message.account_keys[new.message.instructions[i].accounts[1] as usize],
to_key
);
}
assert_eq!(new.message.instructions.len(), progs);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
&mint_keypair,
&tx.message.account_keys[0],
mint_total / txes as u64,
genesis_block.hash(),
);
bank.process_transaction(&fund).unwrap();
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
.into_iter()
.map(|x| {
let len = x.packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();

let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
);
poh_recorder.lock().unwrap().set_bank(&bank);

let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}

let half_len = verified.len() / 2;
let mut start = 0;
let signal_receiver = Arc::new(signal_receiver);
let signal_receiver2 = signal_receiver.clone();
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver2, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
drop(vote_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
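The bench sizes its transaction buffer as a multiple of (batch size * threads * CHUNKS) and walks it in fixed windows, so a window is not re-entered before its signatures are cleared. A small sketch of that rotation, with illustrative sizes:

    const CHUNKS: usize = 8;

    // Walk a buffer in CHUNKS fixed windows, wrapping at the end.
    fn windows(len: usize) -> impl Iterator<Item = (usize, usize)> {
        let chunk_len = len / CHUNKS;
        (0usize..).map(move |i| {
            let start = (i * chunk_len) % len;
            (start, start + chunk_len)
        })
    }

    fn main() {
        for (start, end) in windows(1600).take(12) {
            // process items[start..end] here, then clear signatures for the chunk
            let _ = (start, end);
        }
    }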
@@ -3,7 +3,6 @@
extern crate test;

use solana::packet::to_packets;
use solana::recycler::Recycler;
use solana::sigverify;
use solana::test_tx::test_tx;
use test::Bencher;
@@ -15,10 +14,8 @@ fn bench_sigverify(bencher: &mut Bencher) {
// generate packet vector
let batches = to_packets(&vec![tx; 128]);

let recycler = Recycler::default();
let recycler_out = Recycler::default();
// verify packets
bencher.iter(|| {
let _ans = sigverify::ed25519_verify(&batches, &recycler, &recycler_out);
let _ans = sigverify::ed25519_verify(&batches);
})
}
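The removed side threads two Recycler handles into ed25519_verify; a Recycler is essentially a shared free-list that hands buffers back out instead of reallocating them. A minimal sketch of that idea (this is the concept, not the crate's actual Recycler type):

    use std::sync::{Arc, Mutex};

    #[derive(Clone, Default)]
    struct Recycler {
        pool: Arc<Mutex<Vec<Vec<u8>>>>,
    }

    impl Recycler {
        // Reuse a pooled buffer when one is available, otherwise allocate.
        fn allocate(&self, size: usize) -> Vec<u8> {
            let mut pool = self.pool.lock().unwrap();
            let mut buf = pool.pop().unwrap_or_default();
            buf.clear();
            buf.resize(size, 0);
            buf
        }
        // Return a buffer to the pool for later reuse.
        fn recycle(&self, buf: Vec<u8>) {
            self.pool.lock().unwrap().push(buf);
        }
    }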
@@ -3,7 +3,6 @@
extern crate solana;
extern crate test;

use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use solana::packet::to_packets_chunked;
@@ -22,7 +21,7 @@ use test::Bencher;
fn bench_sigverify_stage(bencher: &mut Bencher) {
solana_logger::setup();
let (packet_s, packet_r) = channel();
let (verified_s, verified_r) = unbounded();
let (verified_s, verified_r) = channel();
let sigverify_disabled = false;
let stage = SigVerifyStage::new(packet_r, sigverify_disabled, verified_s);
@@ -20,39 +20,6 @@ pub struct BankForks {
root: u64,
slots: HashSet<u64>,
snapshot_path: Option<String>,
confidence: HashMap<u64, Confidence>,
}

#[derive(Debug, Default, PartialEq)]
pub struct Confidence {
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
stake_weighted_lockouts: u128,
}

impl Confidence {
pub fn new(fork_stakes: u64, epoch_stakes: u64, lockouts: u64) -> Self {
Self {
fork_stakes,
epoch_stakes,
lockouts,
stake_weighted_lockouts: 0,
}
}
pub fn new_with_stake_weighted(
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
stake_weighted_lockouts: u128,
) -> Self {
Self {
fork_stakes,
epoch_stakes,
lockouts,
stake_weighted_lockouts,
}
}
}

impl Index<u64> for BankForks {
@@ -73,7 +40,6 @@ impl BankForks {
root: 0,
slots: HashSet::new(),
snapshot_path: None,
confidence: HashMap::new(),
}
}

@@ -138,17 +104,15 @@ impl BankForks {
working_bank,
slots: HashSet::new(),
snapshot_path: None,
confidence: HashMap::new(),
}
}

pub fn insert(&mut self, bank: Bank) -> Arc<Bank> {
pub fn insert(&mut self, bank: Bank) {
let bank = Arc::new(bank);
let prev = self.banks.insert(bank.slot(), bank.clone());
assert!(prev.is_none());

self.working_bank = bank.clone();
bank
}

// TODO: really want to kill this...
@@ -196,8 +160,6 @@ impl BankForks {
let descendants = self.descendants();
self.banks
.retain(|slot, _| descendants[&root].contains(slot));
self.confidence
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
if self.snapshot_path.is_some() {
let diff: HashSet<_> = slots.symmetric_difference(&self.slots).collect();
trace!("prune non root {} - {:?}", root, diff);
@@ -212,41 +174,6 @@ impl BankForks {
self.slots = slots.clone();
}

pub fn cache_fork_confidence(
&mut self,
fork: u64,
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
) {
self.confidence
.entry(fork)
.and_modify(|entry| {
entry.fork_stakes = fork_stakes;
entry.epoch_stakes = epoch_stakes;
entry.lockouts = lockouts;
})
.or_insert_with(|| Confidence::new(fork_stakes, epoch_stakes, lockouts));
}

pub fn cache_stake_weighted_lockouts(&mut self, fork: u64, stake_weighted_lockouts: u128) {
self.confidence
.entry(fork)
.and_modify(|entry| {
entry.stake_weighted_lockouts = stake_weighted_lockouts;
})
.or_insert(Confidence {
fork_stakes: 0,
epoch_stakes: 0,
lockouts: 0,
stake_weighted_lockouts,
});
}

pub fn get_fork_confidence(&self, fork: u64) -> Option<&Confidence> {
self.confidence.get(&fork)
}

fn get_io_error(error: &str) -> Error {
warn!("BankForks error: {:?}", error);
Error::new(ErrorKind::Other, error)
@@ -428,7 +355,6 @@ impl BankForks {
root,
slots,
snapshot_path: snapshot_path.clone(),
confidence: HashMap::new(),
})
}
}
@@ -512,59 +438,10 @@ mod tests {
assert_eq!(bank_forks.active_banks(), vec![1]);
}

#[test]
fn test_bank_forks_confidence_cache() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let fork = bank.slot();
let mut bank_forks = BankForks::new(0, bank);
assert!(bank_forks.confidence.get(&fork).is_none());
bank_forks.cache_fork_confidence(fork, 11, 12, 13);
assert_eq!(
bank_forks.confidence.get(&fork).unwrap(),
&Confidence {
fork_stakes: 11,
epoch_stakes: 12,
lockouts: 13,
stake_weighted_lockouts: 0,
}
);
// Ensure that {fork_stakes, epoch_stakes, lockouts} and stake_weighted_lockouts
// can be updated separately
bank_forks.cache_stake_weighted_lockouts(fork, 20);
assert_eq!(
bank_forks.confidence.get(&fork).unwrap(),
&Confidence {
fork_stakes: 11,
epoch_stakes: 12,
lockouts: 13,
stake_weighted_lockouts: 20,
}
);
bank_forks.cache_fork_confidence(fork, 21, 22, 23);
assert_eq!(
bank_forks
.confidence
.get(&fork)
.unwrap()
.stake_weighted_lockouts,
20,
);
}

struct TempPaths {
pub paths: String,
}

impl TempPaths {
fn remove_all(&self) {
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
paths.iter().for_each(|p| {
let _ignored = remove_dir_all(p);
});
}
}

#[macro_export]
macro_rules! tmp_bank_accounts_name {
() => {
@@ -581,7 +458,10 @@ mod tests {

impl Drop for TempPaths {
fn drop(&mut self) {
self.remove_all()
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
paths.iter().for_each(|p| {
let _ignored = remove_dir_all(p);
});
}
}

@@ -590,7 +470,7 @@ mod tests {
}

fn get_tmp_snapshots_path() -> TempPaths {
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let path = format!("{}/snapshots", out_dir);
TempPaths {
paths: path.to_string(),
@@ -599,7 +479,7 @@ mod tests {

fn get_tmp_bank_accounts_path(paths: &str) -> TempPaths {
let vpaths = get_paths_vec(paths);
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let vpaths: Vec<_> = vpaths
.iter()
.map(|path| format!("{}/{}", out_dir, path))
@@ -641,8 +521,6 @@ mod tests {
mint_keypair,
..
} = create_genesis_block(10_000);
path.remove_all();
spath.remove_all();
for index in 0..10 {
let bank0 = Bank::new_with_paths(&genesis_block, Some(path.paths.clone()));
bank0.freeze();
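The removed cache_fork_confidence uses the HashMap entry API to update an existing record or insert a fresh one in a single lookup. A self-contained sketch of that shape:

    use std::collections::HashMap;

    #[derive(Debug, Default, PartialEq)]
    struct Confidence {
        fork_stakes: u64,
        epoch_stakes: u64,
        lockouts: u64,
        stake_weighted_lockouts: u128,
    }

    // Update three fields if the fork is already cached, otherwise insert a
    // fresh entry; the same shape as cache_fork_confidence above.
    fn cache_fork_confidence(
        confidence: &mut HashMap<u64, Confidence>,
        fork: u64,
        fork_stakes: u64,
        epoch_stakes: u64,
        lockouts: u64,
    ) {
        confidence
            .entry(fork)
            .and_modify(|e| {
                e.fork_stakes = fork_stakes;
                e.epoch_stakes = epoch_stakes;
                e.lockouts = lockouts;
            })
            .or_insert_with(|| Confidence {
                fork_stakes,
                epoch_stakes,
                lockouts,
                stake_weighted_lockouts: 0,
            });
    }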
(Two file diffs suppressed because they are too large.)
@@ -5,7 +5,6 @@ use bincode::{deserialize, serialize};
use serde::de::DeserializeOwned;
use serde::Serialize;

use solana_sdk::timing::Slot;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::marker::PhantomData;
@@ -40,10 +39,6 @@ pub mod columns {
#[derive(Debug)]
/// The root column
pub struct Root;

#[derive(Debug)]
/// The index column
pub struct Index;
}

pub trait Backend: Sized + Send + Sync {
@@ -87,8 +82,6 @@

fn key(index: Self::Index) -> B::OwnedKey;
fn index(key: &B::Key) -> Self::Index;
fn slot(index: Self::Index) -> Slot;
fn as_index(slot: Slot) -> Self::Index;
}

pub trait DbCursor<B>
@@ -412,29 +405,14 @@
Ok(iter.map(|(key, value)| (C::index(&key), value)))
}

pub fn delete_slot(
&self,
batch: &mut WriteBatch<B>,
from: Option<Slot>,
to: Option<Slot>,
) -> Result<bool>
where
C::Index: PartialOrd + Copy,
{
let mut end = true;
let iter = self.iter(from.map(C::as_index))?;
for (index, _) in iter {
if let Some(to) = to {
if C::slot(index) > to {
end = false;
break;
}
};
if let Err(e) = batch.delete::<C>(index) {
error!("Error: {:?} while adding delete to batch {:?}", e, C::NAME)
}
}
Ok(end)
//TODO add a delete_until that goes the other way
pub fn force_delete_all(&self, start_from: Option<C::Index>) -> Result<()> {
let iter = self.iter(start_from)?;
iter.for_each(|(index, _)| match self.delete(index) {
Ok(_) => (),
Err(e) => error!("Error: {:?} while deleting {:?}", e, C::NAME),
});
Ok(())
}

#[inline]
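The new delete_slot walks keys from an optional lower bound, stops once a key passes the optional upper bound, and reports whether the iterator was exhausted. The same shape with a BTreeMap standing in for the column store:

    use std::collections::BTreeMap;

    // Delete entries in [from, to], returning true if the column was walked
    // to the end (no key beyond `to` was seen).
    fn delete_slot(col: &mut BTreeMap<u64, Vec<u8>>, from: Option<u64>, to: Option<u64>) -> bool {
        let start = from.unwrap_or(0);
        let keys: Vec<u64> = col.range(start..).map(|(k, _)| *k).collect();
        let mut end = true;
        for key in keys {
            if let Some(to) = to {
                if key > to {
                    end = false;
                    break;
                }
            }
            col.remove(&key);
        }
        end
    }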
@@ -100,25 +100,6 @@ impl Column<Kvs> for cf::Data {
}
}

impl Column<Kvs> for cf::Index {
const NAME: &'static str = super::INDEX_CF;
type Index = u64;

fn key(slot: u64) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
key
}

fn index(key: &Key) -> u64 {
BigEndian::read_u64(&key.0[8..16])
}
}

impl TypedColumn<Kvs> for cf::Index {
type Type = crate::blocktree::meta::Index;
}

impl Column<Kvs> for cf::DeadSlots {
const NAME: &'static str = super::DEAD_SLOTS;
type Index = u64;
@@ -1,6 +1,6 @@
use crate::erasure::ErasureConfig;
use crate::erasure::{NUM_CODING, NUM_DATA};
use solana_metrics::datapoint;
use std::{collections::BTreeSet, ops::RangeBounds};
use std::borrow::Borrow;

#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
// The Meta column family
@@ -27,118 +27,6 @@ pub struct SlotMeta {
pub is_connected: bool,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Index recording presence/absence of blobs
pub struct Index {
pub slot: u64,
data: DataIndex,
coding: CodingIndex,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
pub struct DataIndex {
/// Map representing presence/absence of data blobs
index: BTreeSet<u64>,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Erasure coding information
pub struct CodingIndex {
/// Map from set index, to hashmap from blob index to presence bool
index: BTreeSet<u64>,
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// Size of shards in this erasure set
pub size: usize,
/// Erasure configuration for this erasure set
config: ErasureConfig,
}

#[derive(Debug, PartialEq)]
pub enum ErasureMetaStatus {
CanRecover,
DataFull,
StillNeed(usize),
}

impl Index {
pub(in crate::blocktree) fn new(slot: u64) -> Self {
Index {
slot,
data: DataIndex::default(),
coding: CodingIndex::default(),
}
}

pub fn data(&self) -> &DataIndex {
&self.data
}
pub fn coding(&self) -> &CodingIndex {
&self.coding
}

pub fn data_mut(&mut self) -> &mut DataIndex {
&mut self.data
}
pub fn coding_mut(&mut self) -> &mut CodingIndex {
&mut self.coding
}
}

/// TODO: Mark: Change this when coding
impl CodingIndex {
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}

pub fn is_present(&self, index: u64) -> bool {
self.index.contains(&index)
}

pub fn set_present(&mut self, index: u64, presence: bool) {
if presence {
self.index.insert(index);
} else {
self.index.remove(&index);
}
}

pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
for (idx, present) in presence.into_iter() {
self.set_present(idx, present);
}
}
}

impl DataIndex {
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}

pub fn is_present(&self, index: u64) -> bool {
self.index.contains(&index)
}

pub fn set_present(&mut self, index: u64, presence: bool) {
if presence {
self.index.insert(index);
} else {
self.index.remove(&index);
}
}

pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
for (idx, present) in presence.into_iter() {
self.set_present(idx, present);
}
}
}

impl SlotMeta {
pub fn is_full(&self) -> bool {
// last_index is std::u64::MAX when it has no information about how
@@ -184,37 +72,62 @@ impl SlotMeta {
}
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// Size of shards in this erasure set
pub size: usize,
/// Bitfield representing presence/absence of data blobs
data: u64,
/// Bitfield representing presence/absence of coding blobs
coding: u64,
}

#[derive(Debug, PartialEq)]
pub enum ErasureMetaStatus {
CanRecover,
DataFull,
StillNeed(usize),
}

impl ErasureMeta {
pub fn new(set_index: u64, config: &ErasureConfig) -> ErasureMeta {
pub fn new(set_index: u64) -> ErasureMeta {
ErasureMeta {
set_index,
size: 0,
config: *config,
data: 0,
coding: 0,
}
}

pub fn status(&self, index: &Index) -> ErasureMetaStatus {
use ErasureMetaStatus::*;

let start_idx = self.start_index();
let (data_end_idx, coding_end_idx) = self.end_indexes();

let num_coding = index.coding().present_in_bounds(start_idx..coding_end_idx);
let num_data = index.data().present_in_bounds(start_idx..data_end_idx);

let (data_missing, coding_missing) = (
self.config.num_data() - num_data,
self.config.num_coding() - num_coding,
);

let total_missing = data_missing + coding_missing;

if data_missing > 0 && total_missing <= self.config.num_coding() {
CanRecover
pub fn status(&self) -> ErasureMetaStatus {
let (data_missing, coding_missing) =
(NUM_DATA - self.num_data(), NUM_CODING - self.num_coding());
if data_missing > 0 && data_missing + coding_missing <= NUM_CODING {
assert!(self.size != 0);
ErasureMetaStatus::CanRecover
} else if data_missing == 0 {
DataFull
ErasureMetaStatus::DataFull
} else {
StillNeed(total_missing - self.config.num_coding())
ErasureMetaStatus::StillNeed(data_missing + coding_missing - NUM_CODING)
}
}

pub fn num_coding(&self) -> usize {
self.coding.count_ones() as usize
}

pub fn num_data(&self) -> usize {
self.data.count_ones() as usize
}

pub fn is_coding_present(&self, index: u64) -> bool {
if let Some(position) = self.data_index_in_set(index) {
self.coding & (1 << position) != 0
} else {
false
}
}

@@ -226,78 +139,207 @@ impl ErasureMeta {
self.size
}

pub fn set_index_for(index: u64, num_data: usize) -> u64 {
index / num_data as u64
pub fn set_coding_present(&mut self, index: u64, present: bool) {
if let Some(position) = self.data_index_in_set(index) {
if present {
self.coding |= 1 << position;
} else {
self.coding &= !(1 << position);
}
}
}

pub fn is_data_present(&self, index: u64) -> bool {
if let Some(position) = self.data_index_in_set(index) {
self.data & (1 << position) != 0
} else {
false
}
}

pub fn set_data_present(&mut self, index: u64, present: bool) {
if let Some(position) = self.data_index_in_set(index) {
if present {
self.data |= 1 << position;
} else {
self.data &= !(1 << position);
}
}
}

pub fn set_data_multi<I, Idx>(&mut self, indexes: I, present: bool)
where
I: IntoIterator<Item = Idx>,
Idx: Borrow<u64>,
{
for index in indexes.into_iter() {
self.set_data_present(*index.borrow(), present);
}
}

pub fn set_coding_multi<I, Idx>(&mut self, indexes: I, present: bool)
where
I: IntoIterator<Item = Idx>,
Idx: Borrow<u64>,
{
for index in indexes.into_iter() {
self.set_coding_present(*index.borrow(), present);
}
}

pub fn set_index_for(index: u64) -> u64 {
index / NUM_DATA as u64
}

pub fn data_index_in_set(&self, index: u64) -> Option<u64> {
let set_index = Self::set_index_for(index);

if set_index == self.set_index {
Some(index - self.start_index())
} else {
None
}
}

pub fn coding_index_in_set(&self, index: u64) -> Option<u64> {
self.data_index_in_set(index).map(|i| i + NUM_DATA as u64)
}

pub fn start_index(&self) -> u64 {
self.set_index * self.config.num_data() as u64
self.set_index * NUM_DATA as u64
}

/// returns a tuple of (data_end, coding_end)
pub fn end_indexes(&self) -> (u64, u64) {
let start = self.start_index();
(
start + self.config.num_data() as u64,
start + self.config.num_coding() as u64,
)
(start + NUM_DATA as u64, start + NUM_CODING as u64)
}
}

#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_meta_indexes() {
use rand::{thread_rng, Rng};
// to avoid casts everywhere
const NUM_DATA: u64 = crate::erasure::NUM_DATA as u64;

let mut rng = thread_rng();

for _ in 0..100 {
let set_index = rng.gen_range(0, 1_000);
let blob_index = (set_index * NUM_DATA) + rng.gen_range(0, NUM_DATA);

assert_eq!(set_index, ErasureMeta::set_index_for(blob_index));
let e_meta = ErasureMeta::new(set_index);

assert_eq!(e_meta.start_index(), set_index * NUM_DATA);
let (data_end_idx, coding_end_idx) = e_meta.end_indexes();
assert_eq!(data_end_idx, (set_index + 1) * NUM_DATA);
assert_eq!(coding_end_idx, set_index * NUM_DATA + NUM_CODING as u64);
}

let mut e_meta = ErasureMeta::new(0);

assert_eq!(e_meta.data_index_in_set(0), Some(0));
assert_eq!(e_meta.data_index_in_set(NUM_DATA / 2), Some(NUM_DATA / 2));
assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), Some(NUM_DATA - 1));
assert_eq!(e_meta.data_index_in_set(NUM_DATA), None);
assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);

e_meta.set_index = 1;

assert_eq!(e_meta.data_index_in_set(0), None);
assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), None);
assert_eq!(e_meta.data_index_in_set(NUM_DATA), Some(0));
assert_eq!(
e_meta.data_index_in_set(NUM_DATA * 2 - 1),
Some(NUM_DATA - 1)
);
assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);
}

#[test]
fn test_meta_coding_present() {
let mut e_meta = ErasureMeta::default();

e_meta.set_coding_multi(0..NUM_CODING as u64, true);
for i in 0..NUM_CODING as u64 {
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in NUM_CODING as u64..NUM_DATA as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}

e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 17) as u64);
let start_idx = e_meta.start_index();
e_meta.set_coding_multi(start_idx..start_idx + NUM_CODING as u64, true);

for i in start_idx..start_idx + NUM_CODING as u64 {
e_meta.set_coding_present(i, true);
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in start_idx + NUM_CODING as u64..start_idx + NUM_DATA as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}
}

#[test]
fn test_erasure_meta_status() {
use rand::{seq::SliceRandom, thread_rng};
use std::iter::repeat;
// Local constants just used to avoid repetitive casts
const N_DATA: u64 = crate::erasure::NUM_DATA as u64;
const N_CODING: u64 = crate::erasure::NUM_CODING as u64;

#[test]
fn test_erasure_meta_status() {
use ErasureMetaStatus::*;
let mut e_meta = ErasureMeta::default();
let mut rng = thread_rng();
let data_indexes: Vec<u64> = (0..N_DATA).collect();
let coding_indexes: Vec<u64> = (0..N_CODING).collect();

let set_index = 0;
let erasure_config = ErasureConfig::default();
assert_eq!(e_meta.status(), ErasureMetaStatus::StillNeed(NUM_DATA));

let mut e_meta = ErasureMeta::new(set_index, &erasure_config);
let mut rng = thread_rng();
let mut index = Index::new(0);
e_meta.size = 1;
e_meta.set_data_multi(0..N_DATA, true);

let data_indexes = 0..erasure_config.num_data() as u64;
let coding_indexes = 0..erasure_config.num_coding() as u64;
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);

assert_eq!(e_meta.status(&index), StillNeed(erasure_config.num_data()));
e_meta.size = 1;
e_meta.set_coding_multi(0..N_CODING, true);

index
.data_mut()
.set_many_present(data_indexes.clone().zip(repeat(true)));
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);

assert_eq!(e_meta.status(&index), DataFull);
for &idx in data_indexes.choose_multiple(&mut rng, NUM_CODING) {
e_meta.set_data_present(idx, false);

index
.coding_mut()
.set_many_present(coding_indexes.clone().zip(repeat(true)));
assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
}

for &idx in data_indexes
.clone()
.collect::<Vec<_>>()
.choose_multiple(&mut rng, erasure_config.num_data())
{
index.data_mut().set_present(idx, false);
e_meta.set_data_multi(0..N_DATA, true);

assert_eq!(e_meta.status(&index), CanRecover);
}
for &idx in coding_indexes.choose_multiple(&mut rng, NUM_CODING) {
e_meta.set_coding_present(idx, false);

index
.data_mut()
.set_many_present(data_indexes.zip(repeat(true)));

for &idx in coding_indexes
.collect::<Vec<_>>()
.choose_multiple(&mut rng, erasure_config.num_coding())
{
index.coding_mut().set_present(idx, false);

assert_eq!(e_meta.status(&index), DataFull);
}
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
}
}

#[test]
fn test_meta_data_present() {
let mut e_meta = ErasureMeta::default();

e_meta.set_data_multi(0..NUM_DATA as u64, true);
for i in 0..NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), true);
}
for i in NUM_DATA as u64..2 * NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), false);
}

e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 23) as u64);
let start_idx = e_meta.start_index();
e_meta.set_data_multi(start_idx..start_idx + NUM_DATA as u64, true);

for i in start_idx..start_idx + NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), true);
}
for i in start_idx - NUM_DATA as u64..start_idx {
assert_eq!(e_meta.is_data_present(i), false);
}
}
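The 0.16 side of this hunk tracks blob presence in a u64 bitfield (which caps an erasure set at 64 entries), where the 0.17 side uses a BTreeSet that can grow. A minimal sketch of the bitfield half, mirroring the set/clear/count operations above:

    #[derive(Default)]
    struct PresenceBits(u64);

    impl PresenceBits {
        // Set or clear one bit; positions past 63 would silently alias, hence the assert.
        fn set(&mut self, position: u64, present: bool) {
            assert!(position < 64);
            if present {
                self.0 |= 1 << position;
            } else {
                self.0 &= !(1 << position);
            }
        }
        fn is_present(&self, position: u64) -> bool {
            self.0 & (1 << position) != 0
        }
        // Population count gives the number of present blobs in O(1).
        fn count(&self) -> usize {
            self.0.count_ones() as usize
        }
    }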
@@ -2,7 +2,6 @@ use crate::blocktree::db::columns as cf;
use crate::blocktree::db::{Backend, Column, DbCursor, IWriteBatch, TypedColumn};
use crate::blocktree::BlocktreeError;
use crate::result::{Error, Result};
use solana_sdk::timing::Slot;

use byteorder::{BigEndian, ByteOrder};

@@ -16,8 +15,7 @@ use std::path::Path;

// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB
const MAX_WRITE_BUFFER_SIZE: usize = 512 * 1024 * 1024;

#[derive(Debug)]
pub struct Rocks(rocksdb::DB);
@@ -32,9 +30,7 @@ impl Backend for Rocks {
type Error = rocksdb::Error;

fn open(path: &Path) -> Result<Rocks> {
use crate::blocktree::db::columns::{
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
};
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};

fs::create_dir_all(&path)?;

@@ -42,22 +38,14 @@ impl Backend for Rocks {
let db_options = get_db_options();

// Column family names
let meta_cf_descriptor =
ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
let data_cf_descriptor =
ColumnFamilyDescriptor::new(Data::NAME, get_cf_options(Data::NAME));
let dead_slots_cf_descriptor =
ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
let erasure_cf_descriptor =
ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options(Coding::NAME));
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
let erasure_meta_cf_descriptor =
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
let orphans_cf_descriptor =
ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
let root_cf_descriptor =
ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
let index_cf_descriptor =
ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
let root_cf_descriptor = ColumnFamilyDescriptor::new(Root::NAME, get_cf_options());

let cfs = vec![
meta_cf_descriptor,
@@ -67,7 +55,6 @@ impl Backend for Rocks {
erasure_meta_cf_descriptor,
orphans_cf_descriptor,
root_cf_descriptor,
index_cf_descriptor,
];

// Open the database
@@ -77,16 +64,13 @@ impl Backend for Rocks {
}

fn columns(&self) -> Vec<&'static str> {
use crate::blocktree::db::columns::{
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
};
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};

vec![
Coding::NAME,
ErasureMeta::NAME,
DeadSlots::NAME,
Data::NAME,
Index::NAME,
Orphans::NAME,
Root::NAME,
SlotMeta::NAME,
@@ -160,14 +144,6 @@ impl Column<Rocks> for cf::Coding {
fn index(key: &[u8]) -> (u64, u64) {
cf::Data::index(key)
}

fn slot(index: Self::Index) -> Slot {
index.0
}

fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}

impl Column<Rocks> for cf::Data {
@@ -186,41 +162,6 @@ impl Column<Rocks> for cf::Data {
let index = BigEndian::read_u64(&key[8..16]);
(slot, index)
}

fn slot(index: Self::Index) -> Slot {
index.0
}

fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}

impl Column<Rocks> for cf::Index {
const NAME: &'static str = super::INDEX_CF;
type Index = u64;

fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0; 8];
BigEndian::write_u64(&mut key[..], slot);
key
}

fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}

fn slot(index: Self::Index) -> Slot {
index
}

fn as_index(slot: Slot) -> Self::Index {
slot
}
}

impl TypedColumn<Rocks> for cf::Index {
type Type = crate::blocktree::meta::Index;
}

impl Column<Rocks> for cf::DeadSlots {
@@ -236,14 +177,6 @@ impl Column<Rocks> for cf::DeadSlots {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}

fn slot(index: Self::Index) -> Slot {
index
}

fn as_index(slot: Slot) -> Self::Index {
slot
}
}

impl TypedColumn<Rocks> for cf::DeadSlots {
@@ -263,14 +196,6 @@ impl Column<Rocks> for cf::Orphans {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}

fn slot(index: Self::Index) -> Slot {
index
}

fn as_index(slot: Slot) -> Self::Index {
slot
}
}

impl TypedColumn<Rocks> for cf::Orphans {
@@ -290,14 +215,6 @@ impl Column<Rocks> for cf::Root {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}

fn slot(index: Self::Index) -> Slot {
index
}

fn as_index(slot: Slot) -> Self::Index {
slot
}
}

impl TypedColumn<Rocks> for cf::Root {
@@ -317,14 +234,6 @@ impl Column<Rocks> for cf::SlotMeta {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}

fn slot(index: Self::Index) -> Slot {
index
}

fn as_index(slot: Slot) -> Self::Index {
slot
}
}

impl TypedColumn<Rocks> for cf::SlotMeta {
@@ -348,14 +257,6 @@ impl Column<Rocks> for cf::ErasureMeta {
BigEndian::write_u64(&mut key[8..], set_index);
key
}

fn slot(index: Self::Index) -> Slot {
index.0
}

fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}

impl TypedColumn<Rocks> for cf::ErasureMeta {
@@ -406,27 +307,11 @@ impl std::convert::From<rocksdb::Error> for Error {
}
}

fn get_cf_options(name: &'static str) -> Options {
use crate::blocktree::db::columns::{Coding, Data};

fn get_cf_options() -> Options {
let mut options = Options::default();
match name {
Coding::NAME | Data::NAME => {
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
}
_ => {
// We want smaller CFs to flush faster. This results in more WAL files but lowers
// overall WAL space utilization and increases flush frequency
options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
options.set_level_zero_file_num_compaction_trigger(1);
}
}
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}

@@ -437,5 +322,8 @@ fn get_db_options() -> Options {
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}
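The removed get_cf_options(name) gives the large Data/Coding columns big write buffers and the small columns tiny, fast-flushing ones. A standalone sketch of per-column-family tuning against the rocksdb crate pinned above (0.11.0); the column names and buffer sizes here are illustrative, and DB::open_cf_descriptors is assumed from that crate:

    use rocksdb::{ColumnFamilyDescriptor, Options, DB};

    fn open_tuned(path: &str) -> Result<DB, rocksdb::Error> {
        // Big buffers for the bulky data column...
        let mut big = Options::default();
        big.set_max_write_buffer_number(8);
        big.set_write_buffer_size(512 * 1024 * 1024);

        // ...and small, eagerly-flushed buffers for metadata columns.
        let mut small = Options::default();
        small.set_write_buffer_size(64 * 1024);
        small.set_level_zero_file_num_compaction_trigger(1);

        let mut db_options = Options::default();
        db_options.create_if_missing(true);
        db_options.create_missing_column_families(true);

        DB::open_cf_descriptors(
            &db_options,
            path,
            vec![
                ColumnFamilyDescriptor::new("data", big),
                ColumnFamilyDescriptor::new("meta", small),
            ],
        )
    }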
@@ -167,8 +167,7 @@ pub fn process_blocktree(

blocktree.set_roots(&[0]).expect("Couldn't set first root");

let leader_schedule_cache =
LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), &pending_slots[0].2);
let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);

let mut fork_info = vec![];
let mut last_status_report = Instant::now();
@@ -227,7 +226,7 @@ pub fn process_blocktree(

if blocktree.is_root(slot) {
root = slot;
leader_schedule_cache.set_root(&bank);
leader_schedule_cache.set_root(slot);
bank.squash();
pending_slots.clear();
fork_info.clear();
@@ -306,7 +305,6 @@ pub mod tests {
use crate::genesis_utils::{
create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
};
use rand::{thread_rng, Rng};
use solana_runtime::epoch_schedule::EpochSchedule;
use solana_sdk::hash::Hash;
use solana_sdk::instruction::InstructionError;
@@ -1267,7 +1265,7 @@ pub mod tests {
} = create_genesis_block(1_000_000_000);
let mut bank = Bank::new(&genesis_block);

const NUM_TRANSFERS: usize = 128;
const NUM_TRANSFERS: usize = 100;
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();

// give everybody one lamport
@@ -1278,7 +1276,6 @@ pub mod tests {

let mut i = 0;
let mut hash = bank.last_blockhash();
let mut root: Option<Arc<Bank>> = None;
loop {
let entries: Vec<_> = (0..NUM_TRANSFERS)
.map(|i| {
@@ -1324,19 +1321,9 @@ pub mod tests {
)
.expect("process ticks failed");

let parent = Arc::new(bank);

if i % 16 == 0 {
root.map(|old_root| old_root.squash());
root = Some(parent.clone());
}
i += 1;

bank = Bank::new_from_parent(
&parent,
&Pubkey::default(),
parent.slot() + thread_rng().gen_range(1, 3),
);
bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), i as u64);
bank.squash();
}
}
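The removed test keeps a rolling root and squashes it every sixteenth bank instead of squashing each new bank. A sketch of that interval pattern, with Bank as a hypothetical stand-in:

    struct Bank {
        slot: u64,
    }

    impl Bank {
        fn squash(&self) { /* flatten ancestor state into this bank */ }
    }

    fn main() {
        let mut root: Option<Bank> = None;
        for i in 0..64u64 {
            let bank = Bank { slot: i };
            if i % 16 == 0 {
                // squash the previous root before replacing it
                if let Some(old_root) = root.take() {
                    old_root.squash();
                }
                root = Some(bank);
            }
        }
    }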
@@ -1,11 +1,9 @@
//! A stage to broadcast data from a leader node to validators
use self::broadcast_bad_blob_sizes::BroadcastBadBlobSizes;
use self::broadcast_fake_blobs_run::BroadcastFakeBlobsRun;
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
use self::standard_broadcast_run::StandardBroadcastRun;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::erasure::{CodingGenerator, ErasureConfig};
use crate::erasure::CodingGenerator;
use crate::poh_recorder::WorkingBankEntries;
use crate::result::{Error, Result};
use crate::service::Service;
@@ -22,8 +20,6 @@ use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Instant;

mod broadcast_bad_blob_sizes;
mod broadcast_fake_blobs_run;
mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
@@ -39,8 +35,6 @@ pub enum BroadcastStageReturnType {
pub enum BroadcastStageType {
Standard,
FailEntryVerification,
BroadcastFakeBlobs,
BroadcastBadBlobSizes,
}

impl BroadcastStageType {
@@ -51,7 +45,6 @@ impl BroadcastStageType {
receiver: Receiver<WorkingBankEntries>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
erasure_config: &ErasureConfig,
) -> BroadcastStage {
match self {
BroadcastStageType::Standard => BroadcastStage::new(
@@ -61,7 +54,6 @@ impl BroadcastStageType {
exit_sender,
blocktree,
StandardBroadcastRun::new(),
erasure_config,
),

BroadcastStageType::FailEntryVerification => BroadcastStage::new(
@@ -71,27 +63,6 @@ impl BroadcastStageType {
exit_sender,
blocktree,
FailEntryVerificationBroadcastRun::new(),
erasure_config,
),

BroadcastStageType::BroadcastFakeBlobs => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
BroadcastFakeBlobsRun::new(0),
erasure_config,
),

BroadcastStageType::BroadcastBadBlobSizes => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
BroadcastBadBlobSizes::new(),
erasure_config,
),
}
}
@@ -143,9 +114,8 @@ impl BroadcastStage {
receiver: &Receiver<WorkingBankEntries>,
blocktree: &Arc<Blocktree>,
mut broadcast_stage_run: impl BroadcastRun,
erasure_config: &ErasureConfig,
) -> BroadcastStageReturnType {
let coding_generator = CodingGenerator::new_from_config(erasure_config);
let coding_generator = CodingGenerator::default();

let mut broadcast = Broadcast {
coding_generator,
@@ -197,11 +167,9 @@ impl BroadcastStage {
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
broadcast_stage_run: impl BroadcastRun + Send + 'static,
erasure_config: &ErasureConfig,
) -> Self {
let blocktree = blocktree.clone();
let exit_sender = exit_sender.clone();
let erasure_config = *erasure_config;
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
@@ -212,7 +180,6 @@ impl BroadcastStage {
&receiver,
&blocktree,
broadcast_stage_run,
&erasure_config,
)
})
.unwrap();
@@ -286,7 +253,6 @@ mod test {
&exit_sender,
&blocktree,
StandardBroadcastRun::new(),
&ErasureConfig::default(),
);

MockBroadcastStage {
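The `BroadcastStageType` match above is a strategy selection: each variant hands the stage a different `BroadcastRun` implementation. A minimal sketch of that shape (trait and types are stand-ins, not the Solana definitions):

```rust
trait BroadcastRun {
    fn run(&mut self) -> Result<(), ()>;
}

struct StandardRun;
impl BroadcastRun for StandardRun {
    fn run(&mut self) -> Result<(), ()> { Ok(()) }
}

struct FailEntryVerificationRun;
impl BroadcastRun for FailEntryVerificationRun {
    fn run(&mut self) -> Result<(), ()> { Ok(()) }
}

enum BroadcastStageType {
    Standard,
    FailEntryVerification,
}

impl BroadcastStageType {
    // Pick the behavior once, up front; the stage loop then stays generic.
    fn new_run(&self) -> Box<dyn BroadcastRun + Send> {
        match self {
            BroadcastStageType::Standard => Box::new(StandardRun),
            BroadcastStageType::FailEntryVerification => Box::new(FailEntryVerificationRun),
        }
    }
}
```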
@@ -1,84 +0,0 @@
use super::*;
use crate::packet::BLOB_HEADER_SIZE;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Signable;

pub(super) struct BroadcastBadBlobSizes {}

impl BroadcastBadBlobSizes {
pub(super) fn new() -> Self {
Self {}
}
}

impl BroadcastRun for BroadcastBadBlobSizes {
fn run(
&mut self,
broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
let bank = receive_results.bank.clone();
let last_tick = receive_results.last_tick;

// 2) Convert entries to blobs + generate coding blobs. Set a garbage PoH on the last entry
// in the slot to make verification fail on validators
if last_tick == bank.max_tick_height() {
let mut last_entry = receive_results
.ventries
.last_mut()
.unwrap()
.last_mut()
.unwrap();
last_entry.0.hash = Hash::default();
}

let keypair = &cluster_info.read().unwrap().keypair.clone();
let latest_blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);

let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
receive_results.ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);

for b in data_blobs.iter().chain(coding_blobs.iter()) {
let mut w_b = b.write().unwrap();
let real_size = w_b.meta.size;
// corrupt the size in the header
w_b.set_size(std::usize::MAX - BLOB_HEADER_SIZE);
// resign the blob
w_b.sign(&keypair);
// don't corrupt the size in the meta so that broadcast will still work
w_b.meta.size = real_size;
}

blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;

// 3) Start broadcast step
let bank_epoch = bank.get_stakers_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

// Broadcast data + erasures
cluster_info.read().unwrap().broadcast(
sock,
data_blobs.iter().chain(coding_blobs.iter()),
stakes.as_ref(),
)?;

Ok(())
}
}
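The trick this deleted test-run exercised: the size field inside the signed header can disagree with the transport metadata actually used to send the packet. A toy sketch of that invariant violation (stand-in `Blob`; the "signature" here is a placeholder XOR, not the real ed25519 scheme):

```rust
struct Blob {
    header_size: usize, // the size field covered by the signature
    meta_size: usize,   // the transport size used to send bytes
    sig: u64,           // toy signature stand-in
}

impl Blob {
    fn sign(&mut self, key: u64) {
        // toy stand-in for signing the header contents
        self.sig = key ^ self.header_size as u64;
    }
}

fn corrupt_size(blob: &mut Blob, key: u64) {
    let real_size = blob.meta_size;
    blob.header_size = usize::MAX; // garbage size in the signed header
    blob.sign(key);                // re-sign so the header still verifies
    blob.meta_size = real_size;    // keep the real size so broadcast still works
}
```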
@@ -1,167 +0,0 @@
use super::*;
use crate::entry::Entry;
use solana_sdk::hash::Hash;

pub(super) struct BroadcastFakeBlobsRun {
last_blockhash: Hash,
partition: usize,
}

impl BroadcastFakeBlobsRun {
pub(super) fn new(partition: usize) -> Self {
Self {
last_blockhash: Hash::default(),
partition,
}
}
}

impl BroadcastRun for BroadcastFakeBlobsRun {
fn run(
&mut self,
broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
let bank = receive_results.bank.clone();
let last_tick = receive_results.last_tick;

let keypair = &cluster_info.read().unwrap().keypair.clone();
let latest_blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);

let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
receive_results.ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);

// If the last blockhash is default, a new block is being created
// So grab the last blockhash from the parent bank
if self.last_blockhash == Hash::default() {
self.last_blockhash = bank.parent().unwrap().last_blockhash();
}

let fake_ventries: Vec<_> = (0..receive_results.num_entries)
.map(|_| vec![(Entry::new(&self.last_blockhash, 0, vec![]), 0)])
.collect();

let (fake_data_blobs, fake_coding_blobs) = broadcast_utils::entries_to_blobs(
fake_ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);

// If it's the last tick, reset the last block hash to default
// this will cause next run to grab last bank's blockhash
if last_tick == bank.max_tick_height() {
self.last_blockhash = Hash::default();
}

blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;

// Set the forwarded flag to true, so that the blobs won't be forwarded to peers
data_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
coding_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
fake_data_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
fake_coding_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));

// 3) Start broadcast step
let peers = cluster_info.read().unwrap().tvu_peers();
peers.iter().enumerate().for_each(|(i, peer)| {
if i <= self.partition {
// Send fake blobs to the first N peers
fake_data_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
fake_coding_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
} else {
data_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
coding_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
}
});

Ok(())
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

#[test]
fn test_tvu_peers_ordering() {
let mut cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 3)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 4)),
8080,
)));

let tvu_peers1 = cluster.tvu_peers();
(0..5).for_each(|_| {
cluster
.tvu_peers()
.iter()
.zip(tvu_peers1.iter())
.for_each(|(v1, v2)| {
assert_eq!(v1, v2);
});
});
}
}
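The partition logic in the deleted run is inclusive (`i <= self.partition`), so the first `partition + 1` peers receive the fake stream and the rest receive the real one. The same split in one self-contained helper:

```rust
// Returns (fake-stream peers, real-stream peers); `partition` is inclusive,
// matching the `i <= self.partition` check above.
fn split_targets<T>(peers: &[T], partition: usize) -> (&[T], &[T]) {
    let cut = (partition + 1).min(peers.len());
    peers.split_at(cut)
}
```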
@@ -52,8 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&mut broadcast.coding_generator,
);

blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;
blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;

// 3) Start broadcast step
let bank_epoch = bank.get_stakers_epoch(bank.slot());
@@ -82,9 +82,7 @@ impl BroadcastRun for StandardBroadcastRun {
&mut broadcast.coding_generator,
);

blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;

blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;
let to_blobs_elapsed = to_blobs_start.elapsed();

// 3) Start broadcast step
@@ -1,4 +1,5 @@
use crate::blocktree::Blocktree;
use solana_storage_api::SLOTS_PER_SEGMENT;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
@@ -13,7 +14,6 @@ pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>,
slice: u64,
slots_per_segment: u64,
out_path: &Path,
ivec: &mut [u8; CHACHA_BLOCK_SIZE],
) -> io::Result<usize> {
@@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut entry = slice;

loop {
match blocktree.read_blobs_bytes(0, slots_per_segment - total_entries, &mut buffer, entry) {
match blocktree.read_blobs_bytes(0, SLOTS_PER_SEGMENT - total_entries, &mut buffer, entry) {
Ok((num_entries, entry_len)) => {
debug!(
"chacha: encrypting slice: {} num_entries: {} entry_len: {}",
@@ -113,11 +113,10 @@ mod tests {
let ledger_dir = "chacha_test_encrypt_file";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let slots_per_segment = 32;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");

let entries = make_tiny_deterministic_test_entries(slots_per_segment);
let entries = make_tiny_deterministic_test_entries(32);
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
@@ -126,8 +125,7 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, out_path, &mut key)
.unwrap();
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut key).unwrap();
let mut out_file = File::open(out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();
@@ -135,7 +133,7 @@ mod tests {
hasher.hash(&buf[..size]);

// golden needs to be updated if blob stuff changes....
let golden: Hash = "7hgFLHveuv9zvHpp6qpco9AHAJKyczdgxiktEMkeghDQ"
let golden: Hash = "37YzrTgiFRGQG1EoMZVecnGqxEK7UGxEQeBSdGMJcKqp"
.parse()
.unwrap();
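The read loop in `chacha_cbc_encrypt_ledger` keeps pulling blob bytes until a full segment's worth of entries has been consumed. The control flow, abstracted away from the blocktree (the `read` closure stands in for `read_blobs_bytes`; the constant value is illustrative):

```rust
const SLOTS_PER_SEGMENT: u64 = 16; // illustrative; the real constant lives in solana_storage_api

// `read(entry, max)` returns (entries consumed, bytes produced), or None when
// the ledger is exhausted. Returns total bytes that would be encrypted.
fn encrypt_segment(
    mut read: impl FnMut(u64, u64) -> Option<(u64, usize)>,
    start_entry: u64,
) -> usize {
    let mut total_entries = 0u64;
    let mut entry = start_entry;
    let mut total_bytes = 0usize;
    while total_entries < SLOTS_PER_SEGMENT {
        match read(entry, SLOTS_PER_SEGMENT - total_entries) {
            Some((num_entries, entry_len)) if num_entries > 0 => {
                total_entries += num_entries;
                entry += num_entries;
                total_bytes += entry_len; // the real code CBC-encrypts this buffer here
            }
            _ => break, // no more data for this segment
        }
    }
    total_bytes
}
```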
@@ -7,6 +7,7 @@ use crate::sigverify::{
chacha_cbc_encrypt_many_sample, chacha_end_sha_state, chacha_init_sha_state,
};
use solana_sdk::hash::Hash;
use solana_storage_api::SLOTS_PER_SEGMENT;
use std::io;
use std::mem::size_of;
use std::sync::Arc;
@@ -18,7 +19,6 @@ use std::sync::Arc;
pub fn chacha_cbc_encrypt_file_many_keys(
blocktree: &Arc<Blocktree>,
segment: u64,
slots_per_segment: u64,
ivecs: &mut [u8],
samples: &[u64],
) -> io::Result<Vec<Hash>> {
@@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
chacha_init_sha_state(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match blocktree.read_blobs_bytes(entry, slots_per_segment - total_entries, &mut buffer, 0) {
match blocktree.read_blobs_bytes(entry, SLOTS_PER_SEGMENT - total_entries, &mut buffer, 0) {
Ok((num_entries, entry_len)) => {
debug!(
"chacha_cuda: encrypting segment: {} num_entries: {} entry_len: {}",
@@ -76,9 +76,9 @@ pub fn chacha_cbc_encrypt_file_many_keys(
entry += num_entries;
debug!(
"total entries: {} entry: {} segment: {} entries_per_segment: {}",
total_entries, entry, segment, slots_per_segment
total_entries, entry, segment, SLOTS_PER_SEGMENT
);
if (entry - segment) >= slots_per_segment {
if (entry - segment) >= SLOTS_PER_SEGMENT {
break;
}
}
@@ -113,7 +113,6 @@ mod tests {
use crate::entry::make_tiny_test_entries;
use crate::replicator::sample_file;
use solana_sdk::hash::Hash;
use solana_sdk::timing::DEFAULT_SLOTS_PER_SEGMENT;
use std::fs::{remove_dir_all, remove_file};
use std::path::Path;
use std::sync::Arc;
@@ -122,8 +121,7 @@ mod tests {
fn test_encrypt_file_many_keys_single() {
solana_logger::setup();

let slots_per_segment = 32;
let entries = make_tiny_test_entries(slots_per_segment);
let entries = make_tiny_test_entries(32);
let ledger_dir = "test_encrypt_file_many_keys_single";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
@@ -142,25 +140,12 @@ mod tests {
);

let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger(
&blocktree,
0,
slots_per_segment as u64,
out_path,
&mut cpu_iv,
)
.unwrap();
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut cpu_iv).unwrap();

let ref_hash = sample_file(&out_path, &samples).unwrap();

let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
slots_per_segment as u64,
&mut ivecs,
&samples,
)
.unwrap();
let hashes =
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();

assert_eq!(hashes[0], ref_hash);

@@ -193,14 +178,7 @@ mod tests {
);
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger(
&blocktree.clone(),
0,
DEFAULT_SLOTS_PER_SEGMENT,
out_path,
&mut ivec,
)
.unwrap();
chacha_cbc_encrypt_ledger(&blocktree.clone(), 0, out_path, &mut ivec).unwrap();

ref_hashes.push(sample_file(&out_path, &samples).unwrap());
info!(
@@ -211,14 +189,8 @@ mod tests {
);
}

let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut ivecs,
&samples,
)
.unwrap();
let hashes =
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();

assert_eq!(hashes, ref_hashes);

@@ -233,13 +205,6 @@ mod tests {
let ledger_path = get_tmp_ledger_path(ledger_dir);
let samples = [0];
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut keys,
&samples,
)
.is_err());
assert!(chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut keys, &samples,).is_err());
}
}
@@ -47,7 +47,7 @@ use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp::min;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::collections::{BTreeSet, HashMap};
use std::fmt;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
@@ -78,6 +78,9 @@ pub struct ClusterInfo {
pub gossip: CrdsGossip,
/// set the keypair that will be used to sign crds values generated. It is unset only in tests.
pub(crate) keypair: Arc<Keypair>,
// TODO: remove gossip_leader_pubkey once all usage of `set_leader()` and `leader_data()` is
// purged
gossip_leader_pubkey: Pubkey,
/// The network entrypoint
entrypoint: Option<ContactInfo>,
}
@@ -178,6 +181,7 @@ impl ClusterInfo {
let mut me = Self {
gossip: CrdsGossip::default(),
keypair,
gossip_leader_pubkey: Pubkey::default(),
entrypoint: None,
};
let id = contact_info.id;
@@ -202,8 +206,7 @@ impl ClusterInfo {
let mut entry = CrdsValue::ContactInfo(my_data);
entry.sign(&self.keypair);
self.gossip.refresh_push_active_set(stakes);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
self.gossip.process_push_message(vec![entry], now);
}

// TODO kill insert_info, only used by tests
@@ -233,6 +236,15 @@ impl ClusterInfo {
self.lookup(&self.id()).cloned().unwrap()
}

// Deprecated: don't use leader_data().
pub fn leader_data(&self) -> Option<&ContactInfo> {
let leader_pubkey = self.gossip_leader_pubkey;
if leader_pubkey == Pubkey::default() {
return None;
}
self.lookup(&leader_pubkey)
}

pub fn contact_info_trace(&self) -> String {
let now = timestamp();
let mut spy_nodes = 0;
@@ -289,12 +301,22 @@ impl ClusterInfo {
)
}

/// Record the id of the current leader for use by `leader_tpu_via_blobs()`
pub fn set_leader(&mut self, leader_pubkey: &Pubkey) {
if *leader_pubkey != self.gossip_leader_pubkey {
warn!(
"{}: LEADER_UPDATE TO {} from {}",
self.gossip.id, leader_pubkey, self.gossip_leader_pubkey,
);
self.gossip_leader_pubkey = *leader_pubkey;
}
}

pub fn push_epoch_slots(&mut self, id: Pubkey, root: u64, slots: BTreeSet<u64>) {
let now = timestamp();
let mut entry = CrdsValue::EpochSlots(EpochSlots::new(id, root, slots, now));
entry.sign(&self.keypair);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
self.gossip.process_push_message(vec![entry], now);
}

pub fn push_vote(&mut self, vote: Transaction) {
@@ -302,8 +324,7 @@ impl ClusterInfo {
let vote = Vote::new(&self.id(), vote, now);
let mut entry = CrdsValue::Vote(vote);
entry.sign(&self.keypair);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
self.gossip.process_push_message(vec![entry], now);
}

/// Get votes in the crds
@@ -1050,13 +1071,12 @@ impl ClusterInfo {
fn handle_blob(
obj: &Arc<RwLock<Self>>,
blocktree: Option<&Arc<Blocktree>>,
stakes: &HashMap<Pubkey, u64>,
blob: &Blob,
) -> Vec<SharedBlob> {
deserialize(&blob.data[..blob.meta.size])
.into_iter()
.flat_map(|request| {
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), blocktree, stakes, request)
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), blocktree, request)
})
.collect()
}
@@ -1095,7 +1115,6 @@ impl ClusterInfo {
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
}

fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
let len = data.len();
let now = Instant::now();
@@ -1110,52 +1129,40 @@ impl ClusterInfo {

report_time_spent("ReceiveUpdates", &now.elapsed(), &format!(" len: {}", len));
}

fn handle_push_message(
me: &Arc<RwLock<Self>>,
from: &Pubkey,
data: Vec<CrdsValue>,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<SharedBlob> {
let self_id = me.read().unwrap().gossip.id;
inc_new_counter_debug!("cluster_info-push_message", 1, 0, 1000);

let updated: Vec<_> =
me.write()
.unwrap()
.gossip
.process_push_message(from, data, timestamp());

let updated_labels: Vec<_> = updated.into_iter().map(|u| u.value.label()).collect();
let prunes_map: HashMap<Pubkey, HashSet<Pubkey>> = me
let prunes: Vec<_> = me
.write()
.unwrap()
.gossip
.prune_received_cache(updated_labels, stakes);
.process_push_message(data, timestamp());

let mut rsp: Vec<_> = prunes_map
.into_iter()
.map(|(from, prune_set)| {
inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
me.read().unwrap().lookup(&from).cloned().and_then(|ci| {
if !prunes.is_empty() {
inc_new_counter_debug!("cluster_info-push_message-prunes", prunes.len());
let ci = me.read().unwrap().lookup(from).cloned();
let pushes: Vec<_> = me.write().unwrap().new_push_requests();
inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());
let mut rsp: Vec<_> = ci
.and_then(|ci| {
let mut prune_msg = PruneData {
pubkey: self_id,
prunes: prune_set.into_iter().collect(),
prunes,
signature: Signature::default(),
destination: from,
destination: *from,
wallclock: timestamp(),
};
prune_msg.sign(&me.read().unwrap().keypair);
let rsp = Protocol::PruneMessage(self_id, prune_msg);
to_shared_blob(rsp, ci.gossip).ok()
})
})
.flatten()
.collect();

if !rsp.is_empty() {
let pushes: Vec<_> = me.write().unwrap().new_push_requests();
inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());
.into_iter()
.collect();
let mut blobs: Vec<_> = pushes
.into_iter()
.filter_map(|(remote_gossip_addr, req)| {
@@ -1257,7 +1264,6 @@ impl ClusterInfo {
me: &Arc<RwLock<Self>>,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
stakes: &HashMap<Pubkey, u64>,
request: Protocol,
) -> Vec<SharedBlob> {
match request {
@@ -1289,7 +1295,7 @@ impl ClusterInfo {
}
ret
});
Self::handle_push_message(me, &from, data, stakes)
Self::handle_push_message(me, &from, data)
}
Protocol::PruneMessage(from, data) => {
if data.verify() {
@@ -1324,7 +1330,6 @@ impl ClusterInfo {
fn run_listen(
obj: &Arc<RwLock<Self>>,
blocktree: Option<&Arc<Blocktree>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &BlobReceiver,
response_sender: &BlobSender,
) -> Result<()> {
@@ -1335,16 +1340,8 @@ impl ClusterInfo {
reqs.append(&mut more);
}
let mut resps = Vec::new();

let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
}
None => HashMap::new(),
};

for req in reqs {
let mut resp = Self::handle_blob(obj, blocktree, &stakes, &req.read().unwrap());
let mut resp = Self::handle_blob(obj, blocktree, &req.read().unwrap());
resps.append(&mut resp);
}
response_sender.send(resps)?;
@@ -1353,7 +1350,6 @@ impl ClusterInfo {
pub fn listen(
me: Arc<RwLock<Self>>,
blocktree: Option<Arc<Blocktree>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: BlobReceiver,
response_sender: BlobSender,
exit: &Arc<AtomicBool>,
@@ -1365,7 +1361,6 @@ impl ClusterInfo {
let e = Self::run_listen(
&me,
blocktree.as_ref(),
bank_forks.as_ref(),
&requests_receiver,
&response_sender,
);
@@ -1652,7 +1647,6 @@ mod tests {
use crate::blocktree::tests::make_many_slot_entries;
use crate::blocktree::Blocktree;
use crate::crds_value::CrdsValueLabel;
use crate::erasure::ErasureConfig;
use crate::packet::BLOB_HEADER_SIZE;
use crate::repair_service::RepairType;
use crate::result::Error;
@@ -1817,7 +1811,6 @@ mod tests {
w_blob.set_size(data_size);
w_blob.set_index(1);
w_blob.set_slot(2);
w_blob.set_erasure_config(&ErasureConfig::default());
w_blob.meta.size = data_size + BLOB_HEADER_SIZE;
}

@@ -1862,7 +1855,6 @@ mod tests {
blob.set_size(data_size);
blob.set_index(i);
blob.set_slot(2);
blob.set_erasure_config(&ErasureConfig::default());
blob.meta.size = data_size + BLOB_HEADER_SIZE;
blob
})
@@ -1928,6 +1920,17 @@ mod tests {
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

#[test]
fn test_default_leader() {
solana_logger::setup();
let contact_info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let network_entry_point =
ContactInfo::new_gossip_entry_point(&socketaddr!("127.0.0.1:1239"));
cluster_info.insert_info(network_entry_point);
assert!(cluster_info.leader_data().is_none());
}

fn assert_in_range(x: u16, range: (u16, u16)) {
assert!(x >= range.0);
assert!(x < range.1);
@@ -2011,9 +2014,12 @@ mod tests {
//create new cluster info, leader, and peer
let keypair = Keypair::new();
let peer_keypair = Keypair::new();
let leader_keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let leader = ContactInfo::new_localhost(&leader_keypair.pubkey(), 0);
let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0);
let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
cluster_info.set_leader(&leader.id);
cluster_info.insert_info(peer.clone());
cluster_info.gossip.refresh_push_active_set(&HashMap::new());
//check that all types of gossip messages are signed correctly
@@ -4,9 +4,9 @@ use crate::result::Result;
use crate::service::Service;
use crate::sigverify_stage::VerifiedPackets;
use crate::{packet, sigverify};
use crossbeam_channel::Sender as CrossbeamSender;
use solana_metrics::inc_new_counter_debug;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
@@ -20,7 +20,7 @@ impl ClusterInfoVoteListener {
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
sender: CrossbeamSender<VerifiedPackets>,
sender: Sender<VerifiedPackets>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> Self {
let exit = exit.clone();
@@ -45,7 +45,7 @@ impl ClusterInfoVoteListener {
exit: Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
sender: &CrossbeamSender<VerifiedPackets>,
sender: &Sender<VerifiedPackets>,
poh_recorder: Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let mut last_ts = 0;
@@ -54,7 +54,7 @@ impl ClusterInfoVoteListener {
return Ok(());
}
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
if poh_recorder.lock().unwrap().has_bank() {
if poh_recorder.lock().unwrap().bank().is_some() {
last_ts = new_ts;
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
let msgs = packet::to_packets(&votes);
@@ -85,7 +85,7 @@ impl Service for ClusterInfoVoteListener {

#[cfg(test)]
mod tests {
use crate::consensus::MAX_RECENT_VOTES;
use crate::locktower::MAX_RECENT_VOTES;
use crate::packet;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -4,10 +4,10 @@ use crate::blocktree::Blocktree;
/// All tests must start from an entry point and a funding keypair and
/// discover the rest of the network.
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::consensus::VOTE_THRESHOLD_DEPTH;
use crate::contact_info::ContactInfo;
use crate::entry::{Entry, EntrySlice};
use crate::gossip_service::discover_cluster;
use crate::locktower::VOTE_THRESHOLD_DEPTH;
use hashbrown::HashSet;
use solana_client::thin_client::create_client;
use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
@@ -147,7 +147,7 @@ impl ContactInfo {
}

#[cfg(test)]
pub(crate) fn new_with_pubkey_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
fn new_with_pubkey_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
nxt_addr.set_port(addr.port() + nxt);
@@ -3,16 +3,15 @@
//! designed to run with a simulator or over a UDP network connection with messages up to a
//! packet::BLOB_DATA_SIZE size.

use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds::Crds;
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::CrdsGossipPull;
use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use crate::crds_value::CrdsValue;
use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signable;
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;

///The min size for bloom filters
pub const CRDS_GOSSIP_BLOOM_SIZE: usize = 1000;
@@ -40,24 +39,25 @@ impl CrdsGossip {
pub fn set_self(&mut self, id: &Pubkey) {
self.id = *id;
}

/// process a push message to the network
pub fn process_push_message(
&mut self,
from: &Pubkey,
values: Vec<CrdsValue>,
now: u64,
) -> Vec<VersionedCrdsValue> {
values
pub fn process_push_message(&mut self, values: Vec<CrdsValue>, now: u64) -> Vec<Pubkey> {
let labels: Vec<_> = values.iter().map(CrdsValue::label).collect();

let results: Vec<_> = values
.into_iter()
.filter_map(|val| {
let res = self
.push
.process_push_message(&mut self.crds, from, val, now);
if let Ok(Some(val)) = res {
.map(|val| self.push.process_push_message(&mut self.crds, val, now))
.collect();

results
.into_iter()
.zip(labels)
.filter_map(|(r, d)| {
if r == Err(CrdsGossipError::PushMessagePrune) {
Some(d.pubkey())
} else if let Ok(Some(val)) = r {
self.pull
.record_old_hash(val.value_hash, val.local_timestamp);
Some(val)
None
} else {
None
}
@@ -65,31 +65,6 @@ impl CrdsGossip {
.collect()
}

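The new `process_push_message` pairs each insert result with the value's label so that duplicate pushes can be reported back as prune origins. The zip-and-filter shape in isolation (plain types; `Err(())` plays the `PushMessagePrune` role from the diff):

```rust
// K is the value label type, V the stored value; returns labels whose pushes
// were duplicates and should trigger a prune back to the sender.
fn prune_origins<K, V>(labels: Vec<K>, results: Vec<Result<Option<V>, ()>>) -> Vec<K> {
    results
        .into_iter()
        .zip(labels)
        .filter_map(|(result, label)| match result {
            Err(()) => Some(label), // duplicate push: report this origin
            _ => None,              // new or stale values produce no prune
        })
        .collect()
}
```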
/// remove redundant paths in the network
pub fn prune_received_cache(
&mut self,
labels: Vec<CrdsValueLabel>,
stakes: &HashMap<Pubkey, u64>,
) -> HashMap<Pubkey, HashSet<Pubkey>> {
let id = &self.id;
let crds = &self.crds;
let push = &mut self.push;
let versioned = labels
.into_iter()
.filter_map(|label| crds.lookup_versioned(&label));

let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
for val in versioned {
let origin = val.value.pubkey();
let hash = val.value_hash;
let peers = push.prune_received_cache(id, &origin, hash, stakes);
for from in peers {
prune_map.entry(from).or_default().insert(origin);
}
}
prune_map
}

pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
let push_messages = self.push.new_push_messages(&self.crds, now);
(self.id, push_messages)
@@ -172,7 +147,7 @@ impl CrdsGossip {
}
if now > 5 * self.push.msg_timeout {
let min = now - 5 * self.push.msg_timeout;
self.push.purge_old_received_cache(min);
self.push.purge_old_pushed_once_messages(min);
}
if now > self.pull.crds_timeout {
let min = now - self.pull.crds_timeout;
@@ -2,7 +2,7 @@
pub enum CrdsGossipError {
NoPeers,
PushMessageTimeout,
PushMessageAlreadyReceived,
PushMessagePrune,
PushMessageOldVersion,
BadPruneDestination,
PruneMessageTimeout,
@@ -27,13 +27,12 @@ use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::collections::HashMap;

pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;

#[derive(Clone)]
pub struct CrdsGossipPush {
@@ -43,8 +42,7 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
pushed_once: HashMap<Hash, u64>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
@@ -57,7 +55,7 @@ impl Default for CrdsGossipPush {
max_bytes: BLOB_DATA_SIZE,
active_set: IndexMap::new(),
push_messages: HashMap::new(),
received_cache: HashMap::new(),
pushed_once: HashMap::new(),
num_active: CRDS_GOSSIP_NUM_ACTIVE,
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
@@ -69,69 +67,10 @@ impl CrdsGossipPush {
pub fn num_pending(&self) -> usize {
self.push_messages.len()
}

fn prune_stake_threshold(self_stake: u64, origin_stake: u64) -> u64 {
let min_path_stake = self_stake.min(origin_stake);
((CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT * min_path_stake as f64).round() as u64).max(1)
}

pub fn prune_received_cache(
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
if cache.is_none() {
return Vec::new();
}

let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
}

let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|(_, s)| *s > 0)
.collect();

let mut seed = [0; 32];
seed[0..8].copy_from_slice(&thread_rng().next_u64().to_le_bytes());
let shuffle = weighted_shuffle(
staked_peers.iter().map(|(_, stake)| *stake).collect_vec(),
ChaChaRng::from_seed(seed),
);

let mut keep = HashSet::new();
let mut peer_stake_sum = 0;
for next in shuffle {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
break;
}
}

peers
.iter()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
}

/// process a push message to the network
pub fn process_push_message(
&mut self,
crds: &mut Crds,
from: &Pubkey,
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
@@ -142,20 +81,18 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();

let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
if self.pushed_once.get(&value_hash).is_some() {
return Err(CrdsGossipError::PushMessagePrune);
}
let old = crds.insert_versioned(new_value);
if old.is_err() {
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
self.pushed_once.insert(value_hash, now);
Ok(old.ok().and_then(|opt| opt))
}

@@ -321,17 +258,16 @@ impl CrdsGossipPush {
self.push_messages.remove(&k);
}
}

/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
/// purge old pushed_once messages
pub fn purge_old_pushed_once_messages(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.received_cache
.pushed_once
.iter()
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.filter_map(|(k, v)| if *v < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.received_cache.remove(&k);
self.pushed_once.remove(&k);
}
}
}
@@ -342,55 +278,6 @@ mod test {
use crate::contact_info::ContactInfo;
use solana_sdk::signature::Signable;

#[test]
fn test_prune() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new();

let self_id = Pubkey::new_rand();
let origin = Pubkey::new_rand();
stakes.insert(self_id, 100);
stakes.insert(origin, 100);

let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&origin, 0));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
let _ = push.process_push_message(&mut crds, &p, value.clone(), 0);
low_staked_set.insert(p);
stakes.insert(p, 1);
});

let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
);

let high_staked_peer = Pubkey::new_rand();
let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10;
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);

let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
);
pruned.iter().for_each(|p| {
assert!(
low_staked_set.contains(p),
"only low staked peers should be pruned"
);
});
}

#[test]
fn test_process_push() {
let mut crds = Crds::default();
@@ -399,15 +286,15 @@ mod test {
let label = value.label();
// push a new message
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
push.process_push_message(&mut crds, value.clone(), 0),
Ok(None)
);
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
push.process_push_message(&mut crds, value.clone(), 0),
Err(CrdsGossipError::PushMessagePrune)
);
}
#[test]
@@ -419,16 +306,13 @@ mod test {
let value = CrdsValue::ContactInfo(ci.clone());

// push a new message
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Ok(None)
);
assert_eq!(push.process_push_message(&mut crds, value, 0), Ok(None));

// push an old version
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
push.process_push_message(&mut crds, value, 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}
@@ -443,7 +327,7 @@ mod test {
ci.wallclock = timeout + 1;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
push.process_push_message(&mut crds, value, 0),
Err(CrdsGossipError::PushMessageTimeout)
);

@@ -451,7 +335,7 @@ mod test {
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, timeout + 1),
push.process_push_message(&mut crds, value, timeout + 1),
Err(CrdsGossipError::PushMessageTimeout)
);
}
@@ -465,7 +349,7 @@ mod test {

// push a new message
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value_old.clone(), 0),
push.process_push_message(&mut crds, value_old.clone(), 0),
Ok(None)
);

@@ -473,7 +357,7 @@ mod test {
ci.wallclock = 1;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0)
push.process_push_message(&mut crds, value, 0)
.unwrap()
.unwrap()
.value,
@@ -549,10 +433,7 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let mut expected = HashMap::new();
expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg, 0),
Ok(None)
);
assert_eq!(push.process_push_message(&mut crds, new_msg, 0), Ok(None));
assert_eq!(push.active_set.len(), 1);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
@@ -566,7 +447,7 @@ mod test {
assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
push.process_push_message(&mut crds, peer_3.clone(), 0),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
@@ -590,7 +471,7 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
push.process_push_message(&mut crds, new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
@@ -609,7 +490,7 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ci.clone());
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1),
push.process_push_message(&mut crds, new_msg.clone(), 1),
Ok(None)
);
push.purge_old_pending_push_messages(&crds, 0);
@@ -617,7 +498,7 @@ mod test {
}

#[test]
fn test_purge_old_received_cache() {
fn test_purge_old_pushed_once_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
@@ -626,23 +507,23 @@ mod test {
let label = value.label();
// push a new message
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
push.process_push_message(&mut crds, value.clone(), 0),
Ok(None)
);
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
push.process_push_message(&mut crds, value.clone(), 0),
Err(CrdsGossipError::PushMessagePrune)
);

// purge the old pushed
push.purge_old_received_cache(1);
push.purge_old_pushed_once_messages(1);

// push it again
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
push.process_push_message(&mut crds, value.clone(), 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}
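The stake math from the removed `prune_received_cache` is easy to check in isolation: a path is kept until the cumulative stake of kept peers reaches 15% of min(self, origin) stake, with a floor of 1. The threshold function, extracted verbatim in shape:

```rust
// Same formula as the removed CrdsGossipPush::prune_stake_threshold.
const PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;

fn prune_stake_threshold(self_stake: u64, origin_stake: u64) -> u64 {
    let min_path_stake = self_stake.min(origin_stake);
    ((PRUNE_STAKE_THRESHOLD_PCT * min_path_stake as f64).round() as u64).max(1)
}

fn main() {
    assert_eq!(prune_stake_threshold(100, 100), 15);
    // The floor guarantees at least one path is always kept.
    assert_eq!(prune_stake_threshold(0, 100), 1);
}
```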
@@ -1,297 +0,0 @@
|
||||
// Module for cuda-related helper functions and wrappers.
|
||||
//
|
||||
// cudaHostRegister/cudaHostUnregister -
|
||||
// apis for page-pinning memory. Cuda driver/hardware cannot overlap
|
||||
// copies from host memory to GPU memory unless the memory is page-pinned and
|
||||
// cannot be paged to disk. The cuda driver provides these interfaces to pin and unpin memory.
|
||||
|
||||
use crate::recycler::Reset;
|
||||
#[cfg(feature = "cuda")]
|
||||
use crate::sigverify::{cuda_host_register, cuda_host_unregister};
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
use std::mem::size_of;
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
use core::ffi::c_void;
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
use std::os::raw::c_int;
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
const CUDA_SUCCESS: c_int = 0;
|
||||
|
||||
pub fn pin<T>(_mem: &mut Vec<T>) {
|
||||
#[cfg(feature = "cuda")]
|
||||
unsafe {
|
||||
let err = cuda_host_register(
|
||||
_mem.as_mut_ptr() as *mut c_void,
|
||||
_mem.capacity() * size_of::<T>(),
|
||||
0,
|
||||
);
|
||||
if err != CUDA_SUCCESS {
|
||||
error!(
|
||||
"cudaHostRegister error: {} ptr: {:?} bytes: {}",
|
||||
err,
|
||||
_mem.as_ptr(),
|
||||
_mem.capacity() * size_of::<T>()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unpin<T>(_mem: *mut T) {
|
||||
#[cfg(feature = "cuda")]
|
||||
unsafe {
|
||||
let err = cuda_host_unregister(_mem as *mut c_void);
|
||||
if err != CUDA_SUCCESS {
|
||||
error!("cudaHostUnregister returned: {} ptr: {:?}", err, _mem);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A vector wrapper where the underlying memory can be
|
||||
// page-pinned. Controlled by flags in case user only wants
|
||||
// to pin in certain circumstances.
|
||||
#[derive(Debug)]
|
||||
pub struct PinnedVec<T> {
|
||||
x: Vec<T>,
|
||||
pinned: bool,
|
||||
pinnable: bool,
|
||||
}
|
||||
|
||||
impl Reset for PinnedVec<u8> {
|
||||
fn reset(&mut self) {
|
||||
self.resize(0, 0u8);
|
||||
}
|
||||
}
|
||||
|
||||
impl Reset for PinnedVec<u32> {
|
||||
fn reset(&mut self) {
|
||||
self.resize(0, 0u32);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Default for PinnedVec<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
x: Vec::new(),
|
||||
pinned: false,
|
||||
pinnable: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for PinnedVec<T> {
|
||||
type Target = Vec<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.x
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for PinnedVec<T> {
|
||||
fn deref_mut(&mut self) -> &mut Vec<T> {
|
||||
&mut self.x
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PinnedIter<'a, T>(std::slice::Iter<'a, T>);
|
||||
|
||||
pub struct PinnedIterMut<'a, T>(std::slice::IterMut<'a, T>);
|
||||
|
||||
impl<'a, T> Iterator for PinnedIter<'a, T> {
|
||||
type Item = &'a T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Iterator for PinnedIterMut<'a, T> {
|
||||
type Item = &'a mut T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> IntoIterator for &'a mut PinnedVec<T> {
|
||||
type Item = &'a T;
|
||||
type IntoIter = PinnedIter<'a, T>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
PinnedIter(self.iter())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> IntoIterator for &'a PinnedVec<T> {
    type Item = &'a T;
    type IntoIter = PinnedIter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        PinnedIter(self.iter())
    }
}

impl<T: Clone> PinnedVec<T> {
    pub fn reserve_and_pin(&mut self, size: usize) {
        if self.x.capacity() < size {
            if self.pinned {
                unpin(&mut self.x);
                self.pinned = false;
            }
            self.x.reserve(size);
        }
        self.set_pinnable();
        if !self.pinned {
            pin(&mut self.x);
            self.pinned = true;
        }
    }

    pub fn set_pinnable(&mut self) {
        self.pinnable = true;
    }

    pub fn from_vec(source: Vec<T>) -> Self {
        Self {
            x: source,
            pinned: false,
            pinnable: false,
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        let x = Vec::with_capacity(capacity);
        Self {
            x,
            pinned: false,
            pinnable: false,
        }
    }

    pub fn iter(&self) -> PinnedIter<T> {
        PinnedIter(self.x.iter())
    }

    pub fn iter_mut(&mut self) -> PinnedIterMut<T> {
        PinnedIterMut(self.x.iter_mut())
    }

    pub fn is_empty(&self) -> bool {
        self.x.is_empty()
    }

    pub fn len(&self) -> usize {
        self.x.len()
    }

    #[cfg(feature = "cuda")]
    pub fn as_ptr(&self) -> *const T {
        self.x.as_ptr()
    }

    #[cfg(feature = "cuda")]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.x.as_mut_ptr()
    }

    pub fn push(&mut self, x: T) {
        let old_ptr = self.x.as_mut_ptr();
        let old_capacity = self.x.capacity();
        // Predict realloc and unpin
        if self.pinned && self.x.capacity() == self.x.len() {
            unpin(old_ptr);
            self.pinned = false;
        }
        self.x.push(x);
        self.check_ptr(old_ptr, old_capacity, "push");
    }

    pub fn resize(&mut self, size: usize, elem: T) {
        let old_ptr = self.x.as_mut_ptr();
        let old_capacity = self.x.capacity();
        // Predict realloc and unpin.
        if self.pinned && self.x.capacity() < size {
            unpin(old_ptr);
            self.pinned = false;
        }
        self.x.resize(size, elem);
        self.check_ptr(old_ptr, old_capacity, "resize");
    }

    fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) {
        #[cfg(feature = "cuda")]
        {
            if self.pinnable && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity)
            {
                if self.pinned {
                    unpin(_old_ptr);
                }

                trace!(
                    "pinning from check_ptr old: {} size: {} from: {}",
                    _old_capacity,
                    self.x.capacity(),
                    _from
                );
                pin(&mut self.x);
                self.pinned = true;
            }
        }
    }
}

impl<T: Clone> Clone for PinnedVec<T> {
    fn clone(&self) -> Self {
        let mut x = self.x.clone();
        let pinned = if self.pinned {
            pin(&mut x);
            true
        } else {
            false
        };
        debug!(
            "clone PinnedVec: size: {} pinned?: {} pinnable?: {}",
            self.x.capacity(),
            self.pinned,
            self.pinnable
        );
        Self {
            x,
            pinned,
            pinnable: self.pinnable,
        }
    }
}

impl<T> Drop for PinnedVec<T> {
    fn drop(&mut self) {
        if self.pinned {
            unpin(self.x.as_mut_ptr());
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pinned_vec() {
        let mut mem = PinnedVec::with_capacity(10);
        mem.set_pinnable();
        mem.push(50);
        mem.resize(2, 10);
        assert_eq!(mem[0], 50);
        assert_eq!(mem[1], 10);
        assert_eq!(mem.len(), 2);
        assert_eq!(mem.is_empty(), false);
        let mut iter = mem.iter();
        assert_eq!(*iter.next().unwrap(), 50);
        assert_eq!(*iter.next().unwrap(), 10);
        assert_eq!(iter.next(), None);
    }
}
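For readers following the `push`/`resize` logic above: `Vec` reallocates exactly when `len == capacity`, so the buffer must be unpinned before it can move and re-pinned afterwards. A minimal standalone sketch of that pattern; the `pin`/`unpin` stubs here are hypothetical stand-ins for the host-memory registration the real crate performs:

```rust
// Stand-ins for the real pin/unpin, which register host memory with the
// device driver so DMA transfers can skip a staging copy.
fn pin<T>(_ptr: *const T) {}
fn unpin<T>(_ptr: *const T) {}

struct Pinnable<T> {
    x: Vec<T>,
    pinned: bool,
}

impl<T> Pinnable<T> {
    fn push(&mut self, item: T) {
        // Predict the realloc: Vec::push grows exactly when len == capacity,
        // so unpin before the buffer can move.
        if self.pinned && self.x.len() == self.x.capacity() {
            unpin(self.x.as_ptr());
            self.pinned = false;
        }
        self.x.push(item);
        // Re-pin whatever buffer we ended up with.
        if !self.pinned {
            pin(self.x.as_ptr());
            self.pinned = true;
        }
    }
}

fn main() {
    let mut v = Pinnable { x: Vec::with_capacity(1), pinned: false };
    v.push(1);
    v.push(2); // capacity exhausted: unpin the old buffer, pin the new one
    assert_eq!(v.x, vec![1, 2]);
}
```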
@@ -10,9 +10,8 @@ use chrono::prelude::Utc;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_budget_api::budget_instruction;
use solana_merkle_tree::MerkleTree;
use solana_metrics::inc_new_counter_warn;
use solana_sdk::hash::Hash;
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;

@@ -173,16 +172,13 @@ impl Entry {

pub fn hash_transactions(transactions: &[Transaction]) -> Hash {
    // a hash of a slice of transactions only needs to hash the signatures
    let signatures: Vec<_> = transactions
        .iter()
        .flat_map(|tx| tx.signatures.iter())
        .collect();
    let merkle_tree = MerkleTree::new(&signatures);
    if let Some(root_hash) = merkle_tree.get_root() {
        *root_hash
    } else {
        Hash::default()
    }
    let mut hasher = Hasher::default();
    transactions.iter().for_each(|tx| {
        if !tx.signatures.is_empty() {
            hasher.hash(&tx.signatures[0].as_ref());
        }
    });
    hasher.result()
}

/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
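The two `hash_transactions` bodies above differ in strategy: one takes the Merkle root over every signature, the other feeds only each transaction's first signature into a running hasher. A toy standalone sketch of the second scheme, with `std`'s `DefaultHasher` standing in for the SDK's SHA-256 `Hasher` and a bare signature list standing in for `Transaction`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the real type: a transaction here is just its signature list.
struct Tx {
    signatures: Vec<[u8; 4]>,
}

// Hash only the first signature of each transaction, skipping unsigned ones.
fn hash_transactions(transactions: &[Tx]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for tx in transactions {
        if let Some(first) = tx.signatures.first() {
            first.hash(&mut hasher);
        }
    }
    hasher.finish()
}

fn main() {
    let txs = vec![
        Tx { signatures: vec![[1, 2, 3, 4]] },
        Tx { signatures: vec![] }, // contributes nothing to the hash
    ];
    println!("{:x}", hash_transactions(&txs));
}
```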
@@ -55,38 +55,6 @@ pub const NUM_CODING: usize = 8;
/// Total number of blobs in an erasure set; includes data and coding blobs
pub const ERASURE_SET_SIZE: usize = NUM_DATA + NUM_CODING;

#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct ErasureConfig {
    num_data: usize,
    num_coding: usize,
}

impl Default for ErasureConfig {
    fn default() -> ErasureConfig {
        ErasureConfig {
            num_data: NUM_DATA,
            num_coding: NUM_CODING,
        }
    }
}

impl ErasureConfig {
    pub fn new(num_data: usize, num_coding: usize) -> ErasureConfig {
        ErasureConfig {
            num_data,
            num_coding,
        }
    }

    pub fn num_data(self) -> usize {
        self.num_data
    }

    pub fn num_coding(self) -> usize {
        self.num_coding
    }
}
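As a worked illustration of the data/coding split these constants and `ErasureConfig` describe: with `NUM_DATA` data blobs and `NUM_CODING` coding blobs per set, up to `NUM_CODING` blobs of the set may be lost. The toy below shows the single-parity case with plain XOR; the real `reed_solomon_erasure` session generalizes this to an arbitrary `num_coding`:

```rust
// Toy single-parity erasure sketch: XOR parity can rebuild any one missing
// shard from the survivors.
fn xor_parity(data: &[&[u8]]) -> Vec<u8> {
    let len = data[0].len();
    let mut parity = vec![0u8; len];
    for shard in data {
        for (p, b) in parity.iter_mut().zip(shard.iter()) {
            *p ^= b;
        }
    }
    parity
}

fn main() {
    let shards: Vec<&[u8]> = vec![&[1, 2], &[3, 4], &[5, 6]];
    let parity = xor_parity(&shards);
    // Lose shard 1; recover it from the surviving shards plus parity.
    let recovered = xor_parity(&[shards[0], shards[2], &parity[..]]);
    assert_eq!(recovered, vec![3, 4]);
}
```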
type Result<T> = std::result::Result<T, reed_solomon_erasure::Error>;

/// Represents an erasure "session" with a particular configuration and number of data and coding

@@ -109,12 +77,6 @@ impl Session {
        Ok(Session(rs))
    }

    pub fn new_from_config(config: &ErasureConfig) -> Result<Session> {
        let rs = ReedSolomon::new(config.num_data, config.num_coding)?;

        Ok(Session(rs))
    }

    /// Create coding blocks by overwriting `parity`
    pub fn encode(&self, data: &[&[u8]], parity: &mut [&mut [u8]]) -> Result<()> {
        self.0.encode_sep(data, parity)?;

@@ -174,27 +136,25 @@ impl Session {
        let idx;
        let first_byte;

        if n < self.0.data_shard_count() {
        if n < NUM_DATA {
            let mut blob = Blob::new(&blocks[n]);
            blob.meta.size = blob.data_size() as usize;

            data_size = blob.data_size() as usize;
            data_size = blob.data_size() as usize - BLOB_HEADER_SIZE;
            idx = n as u64 + block_start_idx;
            first_byte = blob.data[0];

            blob.set_size(data_size);
            recovered_data.push(blob);
        } else {
            let mut blob = Blob::default();
            blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + size].copy_from_slice(&blocks[n]);
            blob.meta.size = size;

            blob.data_mut()[..size].copy_from_slice(&blocks[n]);
            data_size = size;
            idx = n as u64 + block_start_idx - NUM_DATA as u64;
            idx = (n as u64 + block_start_idx) - NUM_DATA as u64;
            first_byte = blob.data[0];

            blob.set_slot(slot);
            blob.set_index(idx);
            blob.set_coding();
            blob.set_size(data_size);
            recovered_coding.push(blob);
        }

@@ -219,13 +179,6 @@ impl CodingGenerator {
        }
    }

    pub fn new_from_config(config: &ErasureConfig) -> Self {
        CodingGenerator {
            leftover: Vec::with_capacity(config.num_data),
            session: Arc::new(Session::new_from_config(config).unwrap()),
        }
    }

    /// Yields next set of coding blobs, if any.
    /// Must be called with consecutive data blobs within a slot.
    ///

@@ -282,7 +235,6 @@ impl CodingGenerator {
            coding_blob.set_version(version);
            coding_blob.set_size(max_data_size);
            coding_blob.set_coding();
            coding_blob.set_erasure_config(&data_blob.erasure_config());

            coding_blobs.push(coding_blob);
        }

@@ -290,7 +242,7 @@ impl CodingGenerator {
        if {
            let mut coding_ptrs: Vec<_> = coding_blobs
                .iter_mut()
                .map(|blob| &mut blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + max_data_size])
                .map(|blob| &mut blob.data_mut()[..max_data_size])
                .collect();

            self.session.encode(&data_ptrs, coding_ptrs.as_mut_slice())

@@ -477,7 +429,7 @@ pub mod test {
    }

    #[test]
    fn test_generate_coding() {
    fn test_erasure_generate_coding() {
        solana_logger::setup();

        // trivial case

@@ -499,10 +451,10 @@ pub mod test {
        assert_eq!(coding_blobs.len(), NUM_CODING);

        for j in 0..NUM_CODING {
            let coding_blob = coding_blobs[j].read().unwrap();

            //assert_eq!(coding_blob.index(), (i * NUM_DATA + j % NUM_CODING) as u64);
            assert!(coding_blob.is_coding());
            assert_eq!(
                coding_blobs[j].read().unwrap().index(),
                ((i / NUM_DATA) * NUM_DATA + j) as u64
            );
        }
        test_toss_and_recover(
            &coding_generator.session,

@@ -704,8 +656,6 @@ pub mod test {
    S: Borrow<SlotSpec>,
{
    let mut coding_generator = CodingGenerator::default();
    let keypair = Keypair::new();
    let bytes = keypair.to_bytes();

    specs.into_iter().map(move |spec| {
        let spec = spec.borrow();

@@ -718,14 +668,14 @@ pub mod test {
        let set_index = erasure_spec.set_index as usize;
        let start_index = set_index * NUM_DATA;

        let mut blobs = generate_test_blobs(start_index, NUM_DATA);
        let keypair = Keypair::from_bytes(&bytes).unwrap();
        index_blobs(&blobs, &keypair.pubkey(), start_index as u64, slot, 0);

        // Signing has to be deferred until all data/header fields are set correctly
        blobs.iter().for_each(|blob| {
            blob.write().unwrap().sign(&keypair);
        });
        let mut blobs = generate_test_blobs(0, NUM_DATA);
        index_blobs(
            &blobs,
            &Keypair::new().pubkey(),
            start_index as u64,
            slot,
            0,
        );

        let mut coding_blobs = coding_generator.next(&blobs);

@@ -790,9 +740,9 @@ pub mod test {
        .into_iter()
        .map(|_| {
            let mut blob = Blob::default();
            blob.data_mut()[..].copy_from_slice(&data);
            blob.set_size(BLOB_DATA_SIZE);
            blob.set_erasure_config(&ErasureConfig::default());
            blob.data_mut()[..data.len()].copy_from_slice(&data);
            blob.set_size(data.len());
            blob.sign(&Keypair::new());
            Arc::new(RwLock::new(blob))
        })
        .collect();

@@ -823,7 +773,7 @@ pub mod test {
        if i < NUM_DATA {
            &mut blob.data[..size]
        } else {
            &mut blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + size]
            &mut blob.data_mut()[..size]
        }
    })
    .collect();
@@ -1,8 +1,6 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

use crate::banking_stage::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET;
use crate::poh_recorder::PohRecorder;
use crate::recycler::Recycler;
use crate::result::{Error, Result};
use crate::service::Service;
use crate::streamer::{self, PacketReceiver, PacketSender};

@@ -63,11 +61,11 @@ impl FetchStage {
            batch.push(more);
        }

        if poh_recorder.lock().unwrap().would_be_leader(
            FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET
                .saturating_add(1)
                .saturating_mul(DEFAULT_TICKS_PER_SLOT),
        ) {
        if poh_recorder
            .lock()
            .unwrap()
            .would_be_leader(DEFAULT_TICKS_PER_SLOT * 2)
        {
            inc_new_counter_debug!("fetch_stage-honor_forwards", len);
            for packets in batch {
                if sendr.send(packets).is_err() {

@@ -88,16 +86,9 @@ impl FetchStage {
        sender: &PacketSender,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
    ) -> Self {
        let recycler = Recycler::default();
        let tpu_threads = sockets.into_iter().map(|socket| {
            streamer::receiver(
                socket,
                &exit,
                sender.clone(),
                recycler.clone(),
                "fetch_stage",
            )
        });
        let tpu_threads = sockets
            .into_iter()
            .map(|socket| streamer::receiver(socket, &exit, sender.clone()));

        let (forward_sender, forward_receiver) = channel();
        let tpu_via_blobs_threads = tpu_via_blobs_sockets
@@ -45,7 +45,6 @@ impl GossipService {
        let t_listen = ClusterInfo::listen(
            cluster_info.clone(),
            blocktree,
            bank_forks.clone(),
            request_receiver,
            response_sender.clone(),
            exit,
@@ -21,42 +21,22 @@ pub struct LeaderScheduleCache {

impl LeaderScheduleCache {
    pub fn new_from_bank(bank: &Bank) -> Self {
        Self::new(*bank.epoch_schedule(), bank)
        Self::new(*bank.epoch_schedule(), bank.slot())
    }

    pub fn new(epoch_schedule: EpochSchedule, root_bank: &Bank) -> Self {
    pub fn new(epoch_schedule: EpochSchedule, root: u64) -> Self {
        let cache = Self {
            cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())),
            epoch_schedule,
            max_epoch: RwLock::new(0),
        };

        // This sets the root and calculates the schedule at stakers_epoch(root)
        cache.set_root(root_bank);

        // Calculate the schedule for all epochs between 0 and stakers_epoch(root)
        let stakers_epoch = epoch_schedule.get_stakers_epoch(root_bank.slot());
        for epoch in 0..stakers_epoch {
            let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
            cache.slot_leader_at(first_slot_in_epoch, Some(root_bank));
        }
        cache.set_root(root);
        cache
    }

    pub fn set_root(&self, root_bank: &Bank) {
        let new_max_epoch = self.epoch_schedule.get_stakers_epoch(root_bank.slot());
        let old_max_epoch = {
            let mut max_epoch = self.max_epoch.write().unwrap();
            let old_max_epoch = *max_epoch;
            *max_epoch = new_max_epoch;
            assert!(new_max_epoch >= old_max_epoch);
            old_max_epoch
        };

        // Calculate the epoch as soon as it's rooted
        if new_max_epoch > old_max_epoch {
            self.compute_epoch_schedule(new_max_epoch, root_bank);
        }
    pub fn set_root(&self, root: u64) {
        *self.max_epoch.write().unwrap() = self.epoch_schedule.get_stakers_epoch(root);
    }

    pub fn slot_leader_at(&self, slot: u64, bank: Option<&Bank>) -> Option<Pubkey> {

@@ -67,17 +47,15 @@ impl LeaderScheduleCache {
        }
    }

    /// Return the (next slot, last slot) after the given current_slot that the given node will be leader
    /// Return the next slot after the given current_slot that the given node will be leader
    pub fn next_leader_slot(
        &self,
        pubkey: &Pubkey,
        mut current_slot: u64,
        bank: &Bank,
        blocktree: Option<&Blocktree>,
    ) -> Option<(u64, u64)> {
    ) -> Option<u64> {
        let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
        let mut first_slot = None;
        let mut last_slot = current_slot;
        while let Some(leader_schedule) = self.get_epoch_schedule_else_compute(epoch, bank) {
            // clippy thinks I should do this:
            // for (i, <item>) in leader_schedule

@@ -100,19 +78,14 @@ impl LeaderScheduleCache {
                }
            }

            if first_slot.is_none() {
                first_slot = Some(current_slot);
            }
            last_slot = current_slot;
        } else if first_slot.is_some() {
            return Some((first_slot.unwrap(), last_slot));
            return Some(current_slot);
        }
    }

            epoch += 1;
            start_index = 0;
        }
        first_slot.and_then(|slot| Some((slot, last_slot)))
        None
    }
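The signature change above narrows the contract: instead of the `(first, last)` bounds of the node's next leader run, `next_leader_slot` now reports only the first such slot. A minimal sketch of that simplified contract over a toy schedule (one leader id per slot; this is not the cache's actual lookup path, just the shape of the result):

```rust
// Scan forward from current_slot + 1 and return the first slot where `me`
// is leader, or None if the schedule never names it again.
fn next_leader_slot(me: u8, current_slot: u64, schedule: &[u8]) -> Option<u64> {
    ((current_slot + 1)..schedule.len() as u64).find(|&s| schedule[s as usize] == me)
}

fn main() {
    let schedule = [0, 1, 1, 0, 1]; // leader id per slot
    assert_eq!(next_leader_slot(1, 0, &schedule), Some(1)); // not (1, 2)
    assert_eq!(next_leader_slot(1, 2, &schedule), Some(4));
    assert_eq!(next_leader_slot(2, 0, &schedule), None);
}
```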
    fn slot_leader_at_no_compute(&self, slot: u64) -> Option<Pubkey> {

@@ -207,38 +180,18 @@ mod tests {
    use crate::blocktree::get_tmp_ledger_path;

    #[test]
    fn test_new_cache() {
    fn test_slot_leader_at() {
        let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
        let bank = Bank::new(&genesis_block);
        let cache = LeaderScheduleCache::new_from_bank(&bank);
        assert_eq!(bank.slot(), 0);

        // Epoch schedule for all epochs in the range:
        // [0, stakers_epoch(bank.slot())] should
        // be calculated by constructor
        let epoch_schedule = bank.epoch_schedule();
        let stakers_epoch = bank.get_stakers_epoch(bank.slot());
        for epoch in 0..=stakers_epoch {
            let first_slot_in_stakers_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
            let last_slot_in_stakers_epoch = epoch_schedule.get_last_slot_in_epoch(epoch);
            assert!(cache
                .slot_leader_at(first_slot_in_stakers_epoch, None)
                .is_some());
            assert!(cache
                .slot_leader_at(last_slot_in_stakers_epoch, None)
                .is_some());
            if epoch == stakers_epoch {
                assert!(cache
                    .slot_leader_at(last_slot_in_stakers_epoch + 1, None)
                    .is_none());
            }
        }
        // Nothing in the cache, should return None
        assert!(cache.slot_leader_at(bank.slot(), None).is_none());

        // Should be a schedule for every epoch just checked
        assert_eq!(
            cache.cached_schedules.read().unwrap().0.len() as u64,
            stakers_epoch + 1
        );
        // Add something to the cache
        assert!(cache.slot_leader_at(bank.slot(), Some(&bank)).is_some());
        assert!(cache.slot_leader_at(bank.slot(), None).is_some());
        assert_eq!(cache.cached_schedules.read().unwrap().0.len(), 1);
    }

    #[test]

@@ -272,7 +225,7 @@ mod tests {
        let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
        let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
        let bank = Arc::new(Bank::new(&genesis_block));
        let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, &bank));
        let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, bank.slot()));

        let num_threads = 10;
        let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads)

@@ -324,14 +277,8 @@ mod tests {
            cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
            pubkey
        );
        assert_eq!(
            cache.next_leader_slot(&pubkey, 0, &bank, None),
            Some((1, 16383))
        );
        assert_eq!(
            cache.next_leader_slot(&pubkey, 1, &bank, None),
            Some((2, 16383))
        );
        assert_eq!(cache.next_leader_slot(&pubkey, 0, &bank, None), Some(1));
        assert_eq!(cache.next_leader_slot(&pubkey, 1, &bank, None), Some(2));
        assert_eq!(
            cache.next_leader_slot(
                &pubkey,

@@ -378,11 +325,8 @@ mod tests {
        );
        // Check that the next leader slot after 0 is slot 1
        assert_eq!(
            cache
                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                .unwrap()
                .0,
            1
            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
            Some(1)
        );

        // Write a blob into slot 2 that chains to slot 1,

@@ -390,11 +334,8 @@ mod tests {
        let (blobs, _) = make_slot_entries(2, 1, 1);
        blocktree.write_blobs(&blobs[..]).unwrap();
        assert_eq!(
            cache
                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                .unwrap()
                .0,
            1
            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
            Some(1)
        );

        // Write a blob into slot 1

@@ -403,11 +344,8 @@ mod tests {
        // Check that slot 1 and 2 are skipped
        blocktree.write_blobs(&blobs[..]).unwrap();
        assert_eq!(
            cache
                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
                .unwrap()
                .0,
            3
            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
            Some(3)
        );

        // Integrity checks

@@ -481,11 +419,8 @@ mod tests {
        expected_slot += index;

        assert_eq!(
            cache
                .next_leader_slot(&node_pubkey, 0, &bank, None)
                .unwrap()
                .0,
            expected_slot
            cache.next_leader_slot(&node_pubkey, 0, &bank, None),
            Some(expected_slot),
        );
    }

@@ -511,7 +446,7 @@ mod tests {
        assert!(bank2.epoch_vote_accounts(2).is_some());

        // Set root for a slot in epoch 1, so that epoch 2 is now confirmed
        cache.set_root(&bank2);
        cache.set_root(95);
        assert_eq!(*cache.max_epoch.read().unwrap(), 2);
        assert!(cache.slot_leader_at(96, Some(&bank2)).is_some());
        assert_eq!(bank2.get_epoch_and_slot_index(223).0, 2);
@@ -33,6 +33,10 @@ pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
    bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
}

pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
    tick_height / ticks_per_slot
}

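`tick_height_to_slot` is plain integer division; a quick check of the arithmetic:

```rust
// With 8 ticks per slot, ticks 0..=7 fall in slot 0, ticks 8..=15 in
// slot 1, and tick 25 in slot 3.
fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
    tick_height / ticks_per_slot
}

fn main() {
    assert_eq!(tick_height_to_slot(8, 7), 0);
    assert_eq!(tick_height_to_slot(8, 25), 3);
}
```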
fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
    // Sort first by stake. If stakes are the same, sort by pubkey to ensure a
    // deterministic result.

@@ -1,104 +0,0 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage

use crate::blocktree::Blocktree;
use crate::result::{Error, Result};
use crate::service::Service;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;

pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 3 * DEFAULT_SLOTS_PER_EPOCH;

pub struct LedgerCleanupService {
    t_cleanup: JoinHandle<()>,
}

impl LedgerCleanupService {
    pub fn new(
        slot_full_receiver: Receiver<(u64, Pubkey)>,
        blocktree: Arc<Blocktree>,
        max_ledger_slots: u64,
        exit: &Arc<AtomicBool>,
    ) -> Self {
        info!(
            "LedgerCleanupService active. Max Ledger Slots {}",
            max_ledger_slots
        );
        let exit = exit.clone();
        let t_cleanup = Builder::new()
            .name("solana-ledger-cleanup".to_string())
            .spawn(move || loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
                if let Err(e) =
                    Self::cleanup_ledger(&slot_full_receiver, &blocktree, max_ledger_slots)
                {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => info!("Error from cleanup_ledger: {:?}", e),
                    }
                }
            })
            .unwrap();
        Self { t_cleanup }
    }

    fn cleanup_ledger(
        slot_full_receiver: &Receiver<(u64, Pubkey)>,
        blocktree: &Arc<Blocktree>,
        max_ledger_slots: u64,
    ) -> Result<()> {
        let (slot, _) = slot_full_receiver.recv_timeout(Duration::from_secs(1))?;
        if slot > max_ledger_slots {
            //cleanup
            blocktree.purge_slots(0, Some(slot - max_ledger_slots));
        }
        Ok(())
    }
}
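The purge rule in `cleanup_ledger` reduces to a little arithmetic; a standalone check, mirroring the test below where a full slot 50 and a 10-slot budget purge slots 0 through 40:

```rust
// Once the newest full slot exceeds max_ledger_slots, everything up to
// slot - max_ledger_slots (inclusive) is eligible for purging.
fn purge_upper_bound(slot: u64, max_ledger_slots: u64) -> Option<u64> {
    if slot > max_ledger_slots {
        Some(slot - max_ledger_slots)
    } else {
        None
    }
}

fn main() {
    assert_eq!(purge_upper_bound(50, 10), Some(40)); // slots 0..=40 go
    assert_eq!(purge_upper_bound(5, 10), None); // ledger still under budget
}
```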
impl Service for LedgerCleanupService {
    type JoinReturnType = ();

    fn join(self) -> thread::Result<()> {
        self.t_cleanup.join()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::blocktree::get_tmp_ledger_path;
    use crate::blocktree::tests::make_many_slot_entries;
    use std::sync::mpsc::channel;

    #[test]
    fn test_cleanup() {
        let blocktree_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&blocktree_path).unwrap();
        let (blobs, _) = make_many_slot_entries(0, 50, 5);
        blocktree.write_blobs(blobs).unwrap();
        let blocktree = Arc::new(blocktree);
        let (sender, receiver) = channel();

        //send a signal to kill slots 0-40
        sender.send((50, Pubkey::default())).unwrap();
        LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10).unwrap();

        //check that 0-40 don't exist
        blocktree
            .slot_meta_iterator(0)
            .unwrap()
            .for_each(|(slot, _)| assert!(slot > 40));

        drop(blocktree);
        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
    }
}
@@ -13,7 +13,6 @@ pub mod chacha;
#[cfg(cuda)]
pub mod chacha_cuda;
pub mod cluster_info_vote_listener;
pub mod recycler;
#[macro_use]
pub mod contact_info;
pub mod crds;

@@ -31,8 +30,6 @@ pub mod cluster;
pub mod cluster_info;
pub mod cluster_info_repair_listener;
pub mod cluster_tests;
pub mod consensus;
pub mod cuda_runtime;
pub mod entry;
pub mod erasure;
pub mod fetch_stage;

@@ -42,9 +39,9 @@ pub mod gossip_service;
pub mod leader_schedule;
pub mod leader_schedule_cache;
pub mod leader_schedule_utils;
pub mod ledger_cleanup_service;
pub mod local_cluster;
pub mod local_vote_signer_service;
pub mod locktower;
pub mod packet;
pub mod poh;
pub mod poh_recorder;

@@ -99,8 +96,3 @@ extern crate solana_metrics;
#[cfg(test)]
#[macro_use]
extern crate matches;

extern crate crossbeam_channel;

#[macro_use]
extern crate solana_move_loader_program;
@@ -1,4 +1,4 @@
use crate::blocktree::create_new_tmp_ledger;
use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
use crate::cluster::Cluster;
use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
use crate::contact_info::ContactInfo;

@@ -16,8 +16,8 @@ use solana_sdk::poh_config::PohConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_SLOTS_PER_SEGMENT};
use solana_sdk::transaction::Transaction;
use solana_stake_api::stake_instruction;
use solana_storage_api::storage_contract;

@@ -29,9 +29,6 @@ use std::fs::remove_dir_all;
use std::io::{Error, ErrorKind, Result};
use std::sync::Arc;

use solana_librapay_api::librapay_transaction;
use solana_move_loader_api;

pub struct ValidatorInfo {
    pub keypair: Arc<Keypair>,
    pub voting_keypair: Arc<Keypair>,

@@ -83,7 +80,6 @@ pub struct ClusterConfig {
    pub cluster_lamports: u64,
    pub ticks_per_slot: u64,
    pub slots_per_epoch: u64,
    pub slots_per_segment: u64,
    pub stakers_slot_offset: u64,
    pub native_instruction_processors: Vec<(String, Pubkey)>,
    pub poh_config: PohConfig,

@@ -99,7 +95,6 @@ impl Default for ClusterConfig {
            cluster_lamports: 0,
            ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
            slots_per_epoch: DEFAULT_SLOTS_PER_EPOCH,
            slots_per_segment: DEFAULT_SLOTS_PER_SEGMENT,
            stakers_slot_offset: DEFAULT_SLOTS_PER_EPOCH,
            native_instruction_processors: vec![],
            poh_config: PohConfig::default(),

@@ -115,10 +110,10 @@ pub struct LocalCluster {
    pub fullnode_infos: HashMap<Pubkey, ClusterValidatorInfo>,
    pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
    fullnodes: HashMap<Pubkey, Validator>,
    genesis_ledger_path: String,
    pub genesis_block: GenesisBlock,
    replicators: Vec<Replicator>,
    pub replicator_infos: HashMap<Pubkey, ReplicatorInfo>,
    pub libra_mint_keypair: Arc<Keypair>,
}

impl LocalCluster {

@@ -153,7 +148,6 @@ impl LocalCluster {
        );
        genesis_block.ticks_per_slot = config.ticks_per_slot;
        genesis_block.slots_per_epoch = config.slots_per_epoch;
        genesis_block.slots_per_segment = config.slots_per_segment;
        genesis_block.stakers_slot_offset = config.stakers_slot_offset;
        genesis_block.poh_config = config.poh_config.clone();
        genesis_block

@@ -165,22 +159,14 @@ impl LocalCluster {
            storage_keypair.pubkey(),
            storage_contract::create_validator_storage_account(leader_pubkey, 1),
        ));
        let libra_mint_keypair = Keypair::new();
        genesis_block.accounts.push((
            libra_mint_keypair.pubkey(),
            librapay_transaction::create_libra_genesis_account(10_000),
        ));
        genesis_block
            .native_instruction_processors
            .push(solana_storage_program!());
        genesis_block
            .native_instruction_processors
            .push(solana_move_loader_program!());

        let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
        let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
        let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
        let leader_contact_info = leader_node.info.clone();
        let leader_storage_keypair = Arc::new(storage_keypair);
        let libra_mint_keypair = Arc::new(libra_mint_keypair);
        let leader_voting_keypair = Arc::new(voting_keypair);
        let leader_server = Validator::new(
            leader_node,

@@ -215,11 +201,11 @@ impl LocalCluster {
            entry_point_info: leader_contact_info,
            fullnodes,
            replicators: vec![],
            genesis_ledger_path,
            genesis_block,
            fullnode_infos,
            replicator_infos: HashMap::new(),
            listener_infos: HashMap::new(),
            libra_mint_keypair,
        };

        for (stake, validator_config) in (&config.node_stakes[1..])

@@ -284,7 +270,7 @@ impl LocalCluster {
        let validator_pubkey = validator_keypair.pubkey();
        let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
        let contact_info = validator_node.info.clone();
        let (ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_block);
        let ledger_path = tmp_copy_blocktree!(&self.genesis_ledger_path);

        if validator_config.voting_disabled {
            // setup as a listener

@@ -594,7 +580,7 @@ impl Drop for LocalCluster {
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage_stage::SLOTS_PER_TURN_TEST;
    use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
    use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;

    #[test]

@@ -611,7 +597,7 @@ mod test {
    solana_logger::setup();
    let mut validator_config = ValidatorConfig::default();
    validator_config.rpc_config.enable_fullnode_exit = true;
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    validator_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
    const NUM_NODES: usize = 1;
    let num_replicators = 1;
    let config = ClusterConfig {
@@ -28,17 +28,8 @@ pub struct StakeLockout {
    stake: u64,
}

impl StakeLockout {
    pub fn lockout(&self) -> u64 {
        self.lockout
    }
    pub fn stake(&self) -> u64 {
        self.stake
    }
}

#[derive(Default)]
pub struct Tower {
pub struct Locktower {
    epoch_stakes: EpochStakes,
    threshold_depth: usize,
    threshold_size: f64,

@@ -77,7 +68,7 @@ impl EpochStakes {
    }
}

impl Tower {
impl Locktower {
    pub fn new_from_forks(bank_forks: &BankForks, my_pubkey: &Pubkey) -> Self {
        let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect();
        frozen_banks.sort_by_key(|b| (b.parents().len(), b.slot()));

@@ -89,7 +80,7 @@ impl Tower {
            }
        };

        let mut tower = Self {
        let mut locktower = Self {
            epoch_stakes,
            threshold_depth: VOTE_THRESHOLD_DEPTH,
            threshold_size: VOTE_THRESHOLD_SIZE,

@@ -97,9 +88,10 @@ impl Tower {
            recent_votes: VecDeque::default(),
        };

        let bank = tower.find_heaviest_bank(bank_forks).unwrap();
        tower.lockouts = Self::initialize_lockouts_from_bank(&bank, tower.epoch_stakes.epoch);
        tower
        let bank = locktower.find_heaviest_bank(bank_forks).unwrap();
        locktower.lockouts =
            Self::initialize_lockouts_from_bank(&bank, locktower.epoch_stakes.epoch);
        locktower
    }
    pub fn new(epoch_stakes: EpochStakes, threshold_depth: usize, threshold_size: f64) -> Self {
        Self {

@@ -128,7 +120,7 @@ impl Tower {
        let vote_state = VoteState::from(&account);
        if vote_state.is_none() {
            datapoint_warn!(
                "tower_warn",
                "locktower_warn",
                (
                    "warn",
                    format!("Unable to get vote_state from account {}", key),

@@ -149,7 +141,7 @@ impl Tower {
        );
        debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
        datapoint_info!(
            "tower-observed",
            "locktower-observed",
            (
                "slot",
                vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0),

@@ -231,14 +223,14 @@ impl Tower {
                "epoch_stakes cannot move backwards"
            );
            info!(
                "Tower updated epoch bank slot: {} epoch: {}",
                "Locktower updated epoch bank slot: {} epoch: {}",
                bank.slot(),
                self.epoch_stakes.epoch
            );
            self.epoch_stakes =
                EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_pubkey);
            datapoint_info!(
                "tower-epoch",
                "locktower-epoch",
                ("epoch", self.epoch_stakes.epoch, i64),
                ("self_staked", self.epoch_stakes.self_staked, i64),
                ("total_staked", self.epoch_stakes.total_staked, i64)

@@ -264,7 +256,7 @@ impl Tower {
            .retain(|vote| slots.iter().any(|slot| vote.slot == *slot));

        datapoint_info!(
            "tower-vote",
            "locktower-vote",
            ("latest", slot, i64),
            ("root", self.lockouts.root_slot.unwrap_or(0), i64)
        );

@@ -283,10 +275,6 @@ impl Tower {
        self.lockouts.root_slot
    }

    pub fn total_epoch_stakes(&self) -> u64 {
        self.epoch_stakes.total_staked
    }

    pub fn calculate_weight(&self, stake_lockouts: &HashMap<u64, StakeLockout>) -> u128 {
        let mut sum = 0u128;
        let root_slot = self.lockouts.root_slot.unwrap_or(0);

@@ -346,27 +334,6 @@ impl Tower {
        }
    }

    pub fn aggregate_stake_lockouts(
        root: Option<u64>,
        ancestors: &HashMap<u64, HashSet<u64>>,
        stake_lockouts: HashMap<u64, StakeLockout>,
    ) -> HashMap<u64, u128> {
        let mut stake_weighted_lockouts: HashMap<u64, u128> = HashMap::new();
        for (fork, lockout) in stake_lockouts.iter() {
            if root.is_none() || *fork >= root.unwrap() {
                let mut slot_with_ancestors = vec![*fork];
                slot_with_ancestors.extend(ancestors.get(&fork).unwrap_or(&HashSet::new()));
                for slot in slot_with_ancestors {
                    if root.is_none() || slot >= root.unwrap() {
                        let entry = stake_weighted_lockouts.entry(slot).or_default();
                        *entry += u128::from(lockout.lockout) * u128::from(lockout.stake);
                    }
                }
            }
        }
        stake_weighted_lockouts
    }
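The removed `aggregate_stake_lockouts` is self-contained enough to port to plain `std` types. A sketch using `(stake, lockout)` tuples in place of `StakeLockout`, reproducing the numbers asserted by the removed `test_aggregate_stake_lockouts` further down:

```rust
use std::collections::{HashMap, HashSet};

// Each fork's stake * lockout product is credited to the fork itself and
// every ancestor at or above the root, mirroring the removed method.
fn aggregate(
    root: Option<u64>,
    ancestors: &HashMap<u64, HashSet<u64>>,
    stake_lockouts: &HashMap<u64, (u64, u64)>, // fork -> (stake, lockout)
) -> HashMap<u64, u128> {
    let mut weighted: HashMap<u64, u128> = HashMap::new();
    for (&fork, &(stake, lockout)) in stake_lockouts {
        if root.map_or(true, |r| fork >= r) {
            let mut slots = vec![fork];
            if let Some(a) = ancestors.get(&fork) {
                slots.extend(a.iter().cloned());
            }
            for slot in slots {
                if root.map_or(true, |r| slot >= r) {
                    *weighted.entry(slot).or_default() +=
                        u128::from(lockout) * u128::from(stake);
                }
            }
        }
    }
    weighted
}

fn main() {
    let ancestors: HashMap<u64, HashSet<u64>> = vec![
        (0, HashSet::new()),
        (1, vec![0].into_iter().collect()),
        (2, vec![0, 1].into_iter().collect()),
        (3, vec![0, 1, 2].into_iter().collect()),
    ]
    .into_iter()
    .collect();
    let stakes: HashMap<u64, (u64, u64)> =
        vec![(0, (1, 32)), (1, (1, 24)), (2, (1, 16)), (3, (1, 8))]
            .into_iter()
            .collect();
    let w = aggregate(Some(1), &ancestors, &stakes);
    assert_eq!(w.get(&0), None); // slot 0 is below the root
    assert_eq!(w[&1], 8 + 16 + 24);
    assert_eq!(w[&2], 8 + 16);
    assert_eq!(w[&3], 8);
}
```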
    /// Update lockouts for all the ancestors
    fn update_ancestor_lockouts(
        stake_lockouts: &mut HashMap<u64, StakeLockout>,

@@ -462,13 +429,12 @@ mod test {
    fn test_collect_vote_lockouts_no_epoch_stakes() {
        let accounts = gen_stakes(&[(1, &[0])]);
        let epoch_stakes = EpochStakes::new_for_tests(2);
        let tower = Tower::new(epoch_stakes, 0, 0.67);
        let locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
            .into_iter()
            .collect();
        let staked_lockouts = tower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        assert!(staked_lockouts.is_empty());
        assert_eq!(tower.epoch_stakes.total_staked, 2);
    }

    #[test]

@@ -476,14 +442,13 @@ mod test {
        //two accounts voting for slot 0 with 1 token staked
        let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
        let epoch_stakes = EpochStakes::new_from_stakes(0, &accounts);
        let tower = Tower::new(epoch_stakes, 0, 0.67);
        let locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
            .into_iter()
            .collect();
        let staked_lockouts = tower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        assert_eq!(staked_lockouts[&0].stake, 2);
        assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
        assert_eq!(tower.epoch_stakes.total_staked, 2);
    }
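The `2 + 2 + 4 + 4` expected above appears to follow the tower BFT lockout doubling rule, where a vote's lockout is `2^confirmation_count`: each of the two accounts contributes a lockout of 2 for its newest vote and 4 for the vote stacked beneath it. A one-line check of that reading:

```rust
// Lockout doubles with each confirmation: 2^confirmation_count.
fn lockout(confirmation_count: u32) -> u64 {
    2u64.pow(confirmation_count)
}

fn main() {
    // Two accounts, each contributing confirmation counts 1 and 2 on slot 0:
    // (2 + 4) per account = 2 + 2 + 4 + 4 = 12 total.
    assert_eq!(2 * (lockout(1) + lockout(2)), 2 + 2 + 4 + 4);
}
```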
    #[test]

@@ -492,14 +457,14 @@ mod test {
        //two accounts voting for slot 0 with 1 token staked
        let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
        let epoch_stakes = EpochStakes::new_from_stakes(0, &accounts);
        let mut tower = Tower::new(epoch_stakes, 0, 0.67);
        let mut locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let mut ancestors = HashMap::new();
        for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
            tower.record_vote(i as u64, Hash::default());
            locktower.record_vote(i as u64, Hash::default());
            ancestors.insert(i as u64, (0..i as u64).into_iter().collect());
        }
        assert_eq!(tower.lockouts.root_slot, Some(0));
        let staked_lockouts = tower.collect_vote_lockouts(
        assert_eq!(locktower.lockouts.root_slot, Some(0));
        let staked_lockouts = locktower.collect_vote_lockouts(
            MAX_LOCKOUT_HISTORY as u64,
            accounts.into_iter(),
            &ancestors,

@@ -513,8 +478,8 @@ mod test {

    #[test]
    fn test_calculate_weight_skips_root() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        tower.lockouts.root_slot = Some(1);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        locktower.lockouts.root_slot = Some(1);
        let stakes = vec![
            (
                0,

@@ -533,12 +498,12 @@ mod test {
        ]
        .into_iter()
        .collect();
        assert_eq!(tower.calculate_weight(&stakes), 0u128);
        assert_eq!(locktower.calculate_weight(&stakes), 0u128);
    }

    #[test]
    fn test_calculate_weight() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -548,12 +513,12 @@ mod test {
        )]
        .into_iter()
        .collect();
        assert_eq!(tower.calculate_weight(&stakes), 8u128);
        assert_eq!(locktower.calculate_weight(&stakes), 8u128);
    }

    #[test]
    fn test_check_vote_threshold_without_votes() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -563,65 +528,12 @@ mod test {
        )]
        .into_iter()
        .collect();
        assert!(tower.check_vote_stake_threshold(0, &stakes));
    }

    #[test]
    fn test_aggregate_stake_lockouts() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        tower.lockouts.root_slot = Some(1);
        let stakes = vec![
            (
                0,
                StakeLockout {
                    stake: 1,
                    lockout: 32,
                },
            ),
            (
                1,
                StakeLockout {
                    stake: 1,
                    lockout: 24,
                },
            ),
            (
                2,
                StakeLockout {
                    stake: 1,
                    lockout: 16,
                },
            ),
            (
                3,
                StakeLockout {
                    stake: 1,
                    lockout: 8,
                },
            ),
        ]
        .into_iter()
        .collect();

        let ancestors = vec![
            (0, HashSet::new()),
            (1, vec![0].into_iter().collect()),
            (2, vec![0, 1].into_iter().collect()),
            (3, vec![0, 1, 2].into_iter().collect()),
        ]
        .into_iter()
        .collect();
        let stake_weighted_lockouts =
            Tower::aggregate_stake_lockouts(tower.root(), &ancestors, stakes);
        assert!(stake_weighted_lockouts.get(&0).is_none());
        assert_eq!(*stake_weighted_lockouts.get(&1).unwrap(), 8 + 16 + 24);
        assert_eq!(*stake_weighted_lockouts.get(&2).unwrap(), 8 + 16);
        assert_eq!(*stake_weighted_lockouts.get(&3).unwrap(), 8);
        assert!(locktower.check_vote_stake_threshold(0, &stakes));
    }

    #[test]
    fn test_is_slot_confirmed_not_enough_stake_failure() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -631,19 +543,19 @@ mod test {
        )]
        .into_iter()
        .collect();
        assert!(!tower.is_slot_confirmed(0, &stakes));
        assert!(!locktower.is_slot_confirmed(0, &stakes));
    }

    #[test]
    fn test_is_slot_confirmed_unknown_slot() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = HashMap::new();
        assert!(!tower.is_slot_confirmed(0, &stakes));
        assert!(!locktower.is_slot_confirmed(0, &stakes));
    }

    #[test]
    fn test_is_slot_confirmed_pass() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -653,68 +565,68 @@ mod test {
        )]
        .into_iter()
        .collect();
        assert!(tower.is_slot_confirmed(0, &stakes));
        assert!(locktower.is_slot_confirmed(0, &stakes));
    }

    #[test]
    fn test_is_locked_out_empty() {
        let tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = HashMap::new();
        assert!(!tower.is_locked_out(0, &descendants));
        assert!(!locktower.is_locked_out(0, &descendants));
    }

    #[test]
    fn test_is_locked_out_root_slot_child_pass() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        tower.lockouts.root_slot = Some(0);
        assert!(!tower.is_locked_out(1, &descendants));
        locktower.lockouts.root_slot = Some(0);
        assert!(!locktower.is_locked_out(1, &descendants));
    }

    #[test]
    fn test_is_locked_out_root_slot_sibling_fail() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        tower.lockouts.root_slot = Some(0);
        assert!(tower.is_locked_out(2, &descendants));
        locktower.lockouts.root_slot = Some(0);
        assert!(locktower.is_locked_out(2, &descendants));
    }

    #[test]
    fn test_check_already_voted() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        tower.record_vote(0, Hash::default());
        assert!(tower.has_voted(0));
        assert!(!tower.has_voted(1));
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        locktower.record_vote(0, Hash::default());
        assert!(locktower.has_voted(0));
        assert!(!locktower.has_voted(1));
    }

    #[test]
    fn test_is_locked_out_double_vote() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect()), (1, HashSet::new())]
            .into_iter()
            .collect();
        tower.record_vote(0, Hash::default());
        tower.record_vote(1, Hash::default());
        assert!(tower.is_locked_out(0, &descendants));
        locktower.record_vote(0, Hash::default());
        locktower.record_vote(1, Hash::default());
        assert!(locktower.is_locked_out(0, &descendants));
    }

    #[test]
    fn test_is_locked_out_child() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        tower.record_vote(0, Hash::default());
        assert!(!tower.is_locked_out(1, &descendants));
        locktower.record_vote(0, Hash::default());
        assert!(!locktower.is_locked_out(1, &descendants));
    }

    #[test]
    fn test_is_locked_out_sibling() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![
            (0, vec![1, 2].into_iter().collect()),
            (1, HashSet::new()),

@@ -722,30 +634,30 @@ mod test {
        ]
        .into_iter()
        .collect();
        tower.record_vote(0, Hash::default());
        tower.record_vote(1, Hash::default());
        assert!(tower.is_locked_out(2, &descendants));
        locktower.record_vote(0, Hash::default());
        locktower.record_vote(1, Hash::default());
        assert!(locktower.is_locked_out(2, &descendants));
    }

    #[test]
    fn test_is_locked_out_last_vote_expired() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1, 4].into_iter().collect()), (1, HashSet::new())]
            .into_iter()
            .collect();
        tower.record_vote(0, Hash::default());
        tower.record_vote(1, Hash::default());
        assert!(!tower.is_locked_out(4, &descendants));
        tower.record_vote(4, Hash::default());
        assert_eq!(tower.lockouts.votes[0].slot, 0);
        assert_eq!(tower.lockouts.votes[0].confirmation_count, 2);
        assert_eq!(tower.lockouts.votes[1].slot, 4);
        assert_eq!(tower.lockouts.votes[1].confirmation_count, 1);
        locktower.record_vote(0, Hash::default());
        locktower.record_vote(1, Hash::default());
        assert!(!locktower.is_locked_out(4, &descendants));
        locktower.record_vote(4, Hash::default());
        assert_eq!(locktower.lockouts.votes[0].slot, 0);
        assert_eq!(locktower.lockouts.votes[0].confirmation_count, 2);
        assert_eq!(locktower.lockouts.votes[1].slot, 4);
        assert_eq!(locktower.lockouts.votes[1].confirmation_count, 1);
    }

    #[test]
    fn test_check_vote_threshold_below_threshold() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -755,12 +667,12 @@ mod test {
        )]
        .into_iter()
        .collect();
        tower.record_vote(0, Hash::default());
        assert!(!tower.check_vote_stake_threshold(1, &stakes));
        locktower.record_vote(0, Hash::default());
        assert!(!locktower.check_vote_stake_threshold(1, &stakes));
    }
    #[test]
    fn test_check_vote_threshold_above_threshold() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -770,13 +682,13 @@ mod test {
        )]
        .into_iter()
        .collect();
        tower.record_vote(0, Hash::default());
        assert!(tower.check_vote_stake_threshold(1, &stakes));
        locktower.record_vote(0, Hash::default());
        assert!(locktower.check_vote_stake_threshold(1, &stakes));
    }

    #[test]
    fn test_check_vote_threshold_above_threshold_after_pop() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {

@@ -786,18 +698,18 @@ mod test {
        )]
        .into_iter()
        .collect();
        tower.record_vote(0, Hash::default());
        tower.record_vote(1, Hash::default());
        tower.record_vote(2, Hash::default());
        assert!(tower.check_vote_stake_threshold(6, &stakes));
        locktower.record_vote(0, Hash::default());
        locktower.record_vote(1, Hash::default());
        locktower.record_vote(2, Hash::default());
        assert!(locktower.check_vote_stake_threshold(6, &stakes));
    }

    #[test]
    fn test_check_vote_threshold_above_threshold_no_stake() {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = HashMap::new();
        tower.record_vote(0, Hash::default());
        assert!(!tower.check_vote_stake_threshold(1, &stakes));
        locktower.record_vote(0, Hash::default());
        assert!(!locktower.check_vote_stake_threshold(1, &stakes));
    }

    #[test]

@@ -812,7 +724,7 @@ mod test {
        ancestors.insert(2, set);
        let set: HashSet<u64> = vec![0u64].into_iter().collect();
        ancestors.insert(1, set);
        Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        assert_eq!(stake_lockouts[&0].lockout, 2);
        assert_eq!(stake_lockouts[&1].lockout, 2);
        assert_eq!(stake_lockouts[&2].lockout, 2);

@@ -830,12 +742,12 @@ mod test {
            slot: 2,
            confirmation_count: 1,
        };
        Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        let vote = Lockout {
            slot: 1,
            confirmation_count: 2,
        };
        Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
        assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
        assert_eq!(stake_lockouts[&2].lockout, 2);

@@ -848,7 +760,7 @@ mod test {
        account.lamports = 1;
        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].into_iter().cloned().collect();
        Tower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
        Locktower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
        assert_eq!(stake_lockouts[&0].stake, 1);
        assert_eq!(stake_lockouts[&1].stake, 1);
        assert_eq!(stake_lockouts[&2].stake, 1);

@@ -870,48 +782,51 @@ mod test {
        let total_stake = 4;
        let threshold_size = 0.67;
        let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
        let tower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
        let locktower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
        let accounts = gen_stakes(&[
            (threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
            (total_stake - threshold_stake, &tower_votes[..]),
            (total_stake - threshold_stake, &locktower_votes[..]),
        ]);

        // Initialize tower
        // Initialize locktower
        let stakes: HashMap<_, _> = accounts.iter().map(|(pk, (s, _))| (*pk, *s)).collect();
        let epoch_stakes = EpochStakes::new(0, stakes, &Pubkey::default());
        let mut tower = Tower::new(epoch_stakes, VOTE_THRESHOLD_DEPTH, threshold_size);
        let mut locktower = Locktower::new(epoch_stakes, VOTE_THRESHOLD_DEPTH, threshold_size);

        // CASE 1: Record the first VOTE_THRESHOLD tower votes for fork 2. We want to
        // CASE 1: Record the first VOTE_THRESHOLD locktower votes for fork 2. We want to
        // evaluate a vote on slot VOTE_THRESHOLD_DEPTH. The nth most recent vote should be
        // for slot 0, which is common to all account vote states, so we should pass the
        // threshold check
        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
        for vote in &tower_votes {
            tower.record_vote(*vote, Hash::default());
        for vote in &locktower_votes {
            locktower.record_vote(*vote, Hash::default());
        }
        let staked_lockouts =
            tower.collect_vote_lockouts(vote_to_evaluate, accounts.clone().into_iter(), &ancestors);
        assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts));
        let stakes_lockouts = locktower.collect_vote_lockouts(
            vote_to_evaluate,
            accounts.clone().into_iter(),
            &ancestors,
        );
        assert!(locktower.check_vote_stake_threshold(vote_to_evaluate, &stakes_lockouts));

        // CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
        // will expire the vote in one of the vote accounts, so we should have insufficient
        // stake to pass the threshold
        let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
        let staked_lockouts =
            tower.collect_vote_lockouts(vote_to_evaluate, accounts.into_iter(), &ancestors);
        assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts));
        let stakes_lockouts =
            locktower.collect_vote_lockouts(vote_to_evaluate, accounts.into_iter(), &ancestors);
        assert!(!locktower.check_vote_stake_threshold(vote_to_evaluate, &stakes_lockouts));
    }

    fn vote_and_check_recent(num_votes: usize) {
        let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let start = num_votes.saturating_sub(MAX_RECENT_VOTES);
        let expected: Vec<_> = (start..num_votes)
            .map(|i| Vote::new(i as u64, Hash::default()))
            .collect();
        for i in 0..num_votes {
            tower.record_vote(i as u64, Hash::default());
            locktower.record_vote(i as u64, Hash::default());
        }
        assert_eq!(expected, tower.recent_votes())
        assert_eq!(expected, locktower.recent_votes())
    }

    #[test]
@@ -1,8 +1,5 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use crate::cuda_runtime::PinnedVec;
use crate::erasure::ErasureConfig;
use crate::recvmmsg::{recv_mmsg, NUM_RCVMMSGS};
use crate::recycler::{Recycler, Reset};
use crate::result::{Error, Result};
use bincode;
use byteorder::{ByteOrder, LittleEndian};

@@ -19,7 +16,6 @@ use std::fmt;
use std::io;
use std::io::Cursor;
use std::io::Write;
use std::mem;
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::ops::{Deref, DerefMut};

@@ -34,7 +30,6 @@ pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should b
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - (BLOB_HEADER_SIZE * 2);
pub const BLOB_DATA_ALIGN: usize = 16; // safe for erasure input pointers, gf.c needs 16byte-aligned buffers
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
pub const PACKETS_PER_BLOB: usize = 256; // reasonable estimate for payment packets per blob based on ~200b transaction size

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
#[repr(C)]

@@ -129,61 +124,21 @@ impl Meta {

#[derive(Debug, Clone)]
pub struct Packets {
    pub packets: PinnedVec<Packet>,

    recycler: Option<PacketsRecycler>,
}

impl Drop for Packets {
    fn drop(&mut self) {
        if let Some(ref recycler) = self.recycler {
            let old = mem::replace(&mut self.packets, PinnedVec::default());
            recycler.recycle(old)
        }
    }
}

impl Reset for Packets {
    fn reset(&mut self) {
        self.packets.resize(0, Packet::default());
    }
}

impl Reset for PinnedVec<Packet> {
    fn reset(&mut self) {
        self.resize(0, Packet::default());
    }
    pub packets: Vec<Packet>,
}

//auto derive doesn't support large arrays
impl Default for Packets {
    fn default() -> Packets {
        let packets = PinnedVec::with_capacity(NUM_RCVMMSGS);
        Packets {
            packets,
            recycler: None,
            packets: Vec::with_capacity(NUM_RCVMMSGS),
        }
    }
}

pub type PacketsRecycler = Recycler<PinnedVec<Packet>>;

impl Packets {
    pub fn new(packets: Vec<Packet>) -> Self {
        let packets = PinnedVec::from_vec(packets);
        Self {
            packets,
            recycler: None,
        }
    }

    pub fn new_with_recycler(recycler: PacketsRecycler, size: usize, name: &'static str) -> Self {
        let mut packets = recycler.allocate(name);
        packets.reserve_and_pin(size);
        Packets {
            packets,
            recycler: Some(recycler),
        }
        Self { packets }
    }
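For context on what the `recycler` field and `Drop` impl above buy on one side of this diff: buffers return to a shared pool when a `Packets` is dropped, so later allocations reuse them instead of hitting the allocator (and, in the CUDA build, re-pinning). A minimal sketch of that pattern with a plain `Vec` pool, not the crate's actual `Recycler` API:

```rust
use std::sync::{Arc, Mutex};

// A Drop-time pool: recycle() returns allocations, allocate() reuses them.
#[derive(Clone, Default)]
struct Recycler<T: Default> {
    pool: Arc<Mutex<Vec<T>>>,
}

impl<T: Default> Recycler<T> {
    fn allocate(&self) -> T {
        self.pool.lock().unwrap().pop().unwrap_or_default()
    }
    fn recycle(&self, item: T) {
        self.pool.lock().unwrap().push(item);
    }
}

fn main() {
    let recycler: Recycler<Vec<u8>> = Recycler::default();
    let mut buf = recycler.allocate();
    buf.resize(1024, 0);
    buf.clear(); // like Reset: drop contents, keep capacity
    recycler.recycle(buf);
    let reused = recycler.allocate();
    assert!(reused.capacity() >= 1024); // allocation was reused, not freed
}
```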
    pub fn set_addr(&mut self, addr: &SocketAddr) {

@@ -391,8 +346,7 @@ const SLOT_RANGE: std::ops::Range<usize> = range!(VERSION_RANGE.end, u64);
const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64);
const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey);
const FLAGS_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, u32);
const ERASURE_CONFIG_RANGE: std::ops::Range<usize> = range!(FLAGS_RANGE.end, ErasureConfig);
const SIZE_RANGE: std::ops::Range<usize> = range!(ERASURE_CONFIG_RANGE.end, u64);
const SIZE_RANGE: std::ops::Range<usize> = range!(FLAGS_RANGE.end, u64);
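The `range!` chain above lays header fields out back to back, so dropping `ERASURE_CONFIG_RANGE` simply shifts `SIZE_RANGE` down to start at `FLAGS_RANGE.end`. A standalone sketch of the same offset arithmetic (the 32-byte `Pubkey` width and the starting offset are assumptions made for the illustration):

```rust
use std::mem::size_of;
use std::ops::Range;

// Each field starts where the previous one ends, mirroring range! chaining.
fn field(start: usize, len: usize) -> Range<usize> {
    start..start + len
}

fn main() {
    // Offsets are relative here; in packet.rs VERSION_RANGE itself chains
    // off earlier header fields.
    let version = field(0, size_of::<u64>());
    let slot = field(version.end, size_of::<u64>());
    let index = field(slot.end, size_of::<u64>());
    let id = field(index.end, 32); // Pubkey assumed to be 32 bytes
    let flags = field(id.end, size_of::<u32>());
    // With the erasure-config field removed, size follows flags directly.
    let size = field(flags.end, size_of::<u64>());
    println!("size field occupies byte range {:?}", size);
}
```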
macro_rules! align {
|
||||
($x:expr, $align:expr) => {
|
||||
@@ -417,7 +371,7 @@ impl Blob {
|
||||
|
||||
let bytes = &data[..data_len];
|
||||
blob.data[..data_len].copy_from_slice(bytes);
|
||||
blob.meta.size = data_len;
|
||||
blob.meta.size = blob.data_size() as usize;
|
||||
blob
|
||||
}
|
||||
|
||||
@@ -429,7 +383,6 @@ impl Blob {
|
||||
out.position() as usize
|
||||
};
|
||||
blob.set_size(pos);
|
||||
blob.set_erasure_config(&ErasureConfig::default());
|
||||
blob
|
||||
}
|
||||
|
||||
@@ -458,14 +411,6 @@ impl Blob {
|
||||
LittleEndian::write_u64(&mut self.data[INDEX_RANGE], ix);
|
||||
}
|
||||
|
||||
pub fn set_erasure_config(&mut self, config: &ErasureConfig) {
|
||||
self.data[ERASURE_CONFIG_RANGE].copy_from_slice(&bincode::serialize(config).unwrap())
|
||||
}
|
||||
|
||||
pub fn erasure_config(&self) -> ErasureConfig {
|
||||
bincode::deserialize(&self.data[ERASURE_CONFIG_RANGE]).unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn seed(&self) -> [u8; 32] {
|
||||
let mut seed = [0; 32];
|
||||
let seed_len = seed.len();
|
||||
@@ -522,10 +467,7 @@ impl Blob {
|
||||
}
|
||||
|
||||
pub fn data_size(&self) -> u64 {
|
||||
cmp::min(
|
||||
LittleEndian::read_u64(&self.data[SIZE_RANGE]),
|
||||
BLOB_SIZE as u64,
|
||||
)
|
||||
LittleEndian::read_u64(&self.data[SIZE_RANGE])
|
||||
}
|
||||
|
||||
pub fn set_data_size(&mut self, size: u64) {
|
||||
@@ -533,10 +475,10 @@ impl Blob {
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + BLOB_DATA_SIZE]
|
||||
&self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn data_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + BLOB_DATA_SIZE]
|
||||
&mut self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn size(&self) -> usize {
|
||||
let size = self.data_size() as usize;
|
||||
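The two `data_size` bodies in this hunk differ in one important way: the clamped variant caps a length field read out of the wire buffer at the buffer's real capacity, so a corrupt or hostile value can never be used to index past the end. A free-standing sketch of that guard:

```rust
use byteorder::{ByteOrder, LittleEndian};
use std::cmp;

const BLOB_SIZE: u64 = 64 * 1024 - 128;

// A size field from an untrusted buffer can claim anything; clamp it.
fn data_size_clamped(size_field: &[u8]) -> u64 {
    cmp::min(LittleEndian::read_u64(size_field), BLOB_SIZE)
}

fn main() {
    let mut field = [0u8; 8];
    LittleEndian::write_u64(&mut field, u64::max_value());
    // The hostile value is reduced to something safely indexable.
    assert_eq!(data_size_clamped(&field), BLOB_SIZE);
}
```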
@@ -581,8 +523,9 @@ impl Blob {
    }

    // other side of store_packets
    pub fn load_packets(&self, packets: &mut PinnedVec<Packet>) {
    pub fn load_packets(&self) -> Vec<Packet> {
        // rough estimate
        let mut packets: Vec<Packet> = Vec::with_capacity(self.size() / PACKET_DATA_SIZE);
        let mut pos = 0;
        let size_len = bincode::serialized_size(&0usize).unwrap() as usize;

@@ -602,6 +545,7 @@ impl Blob {
            pos += size;
            packets.push(packet);
        }
        packets
    }
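Both `load_packets` variants perform the same length-prefixed walk over the data region: read a bincode-encoded `usize`, then that many payload bytes, and repeat. A self-contained sketch of the walk over a hand-built buffer (layout illustrative):

```rust
// Two entries laid out as [len][bytes...][len][bytes...], then decoded.
fn main() {
    let mut buf = Vec::new();
    for payload in [&b"abc"[..], &b"hello"[..]].iter() {
        buf.extend_from_slice(&bincode::serialize(&payload.len()).unwrap());
        buf.extend_from_slice(payload);
    }

    let size_len = bincode::serialized_size(&0usize).unwrap() as usize;
    let mut pos = 0;
    let mut out: Vec<Vec<u8>> = Vec::new();
    while pos + size_len <= buf.len() {
        let size: usize = bincode::deserialize(&buf[pos..pos + size_len]).unwrap();
        pos += size_len;
        out.push(buf[pos..pos + size].to_vec());
        pos += size;
    }
    assert_eq!(out, vec![b"abc".to_vec(), b"hello".to_vec()]);
}
```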

pub fn recv_blob(socket: &UdpSocket, r: &SharedBlob) -> io::Result<()> {
@@ -715,7 +659,7 @@ mod tests {
        // test that the address is actually being updated
        let send_addr = socketaddr!([127, 0, 0, 1], 123);
        let packets = vec![Packet::default()];
        let mut msgs = Packets::new(packets);
        let mut msgs = Packets { packets };
        msgs.set_addr(&send_addr);
        assert_eq!(SocketAddr::from(msgs.packets[0].meta.addr()), send_addr);
    }
@@ -741,7 +685,7 @@ mod tests {

        assert_eq!(recvd, p.packets.len());

        for m in &p.packets {
        for m in p.packets {
            assert_eq!(m.meta.size, PACKET_DATA_SIZE);
            assert_eq!(m.meta.addr(), saddr);
        }
@@ -825,15 +769,6 @@ mod tests {
        assert!(!b.should_forward());
    }

    #[test]
    fn test_blob_erasure_config() {
        let mut b = Blob::default();
        let config = ErasureConfig::new(32, 16);
        b.set_erasure_config(&config);

        assert_eq!(config, b.erasure_config());
    }

    #[test]
    fn test_store_blobs_max() {
        let serialized_size_size = bincode::serialized_size(&0usize).unwrap() as usize;
@@ -882,12 +817,10 @@ mod tests {

        let blobs = packets_to_blobs(&packets[..]);

        let mut reconstructed_packets = PinnedVec::default();
        blobs
            .iter()
            .for_each(|b| b.load_packets(&mut reconstructed_packets));
        let reconstructed_packets: Vec<Packet> =
            blobs.iter().flat_map(|b| b.load_packets()).collect();

        assert_eq!(reconstructed_packets[..], packets[..]);
        assert_eq!(reconstructed_packets, packets);
    }
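The assertion above is a round-trip property: packing packets into blobs and loading them back must return the original packets, and both `load_packets` signatures have to preserve it. A toy free-standing version of the same property, with an arbitrary frame size standing in for the blob data capacity:

```rust
// Split a byte stream into fixed-size frames, reassemble, and compare.
fn pack(data: &[u8], frame: usize) -> Vec<Vec<u8>> {
    data.chunks(frame).map(|c| c.to_vec()).collect()
}

fn unpack(frames: &[Vec<u8>]) -> Vec<u8> {
    frames.iter().flat_map(|f| f.iter().copied()).collect()
}

fn main() {
    let data: Vec<u8> = (0u8..=255).collect();
    assert_eq!(unpack(&pack(&data, 17)), data);
}
```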

    #[test]
@@ -937,15 +870,6 @@ mod tests {
        assert!(b.verify());
    }

    #[test]
    fn test_packets_reset() {
        let mut packets = Packets::default();
        packets.packets.resize(10, Packet::default());
        assert_eq!(packets.packets.len(), 10);
        packets.reset();
        assert_eq!(packets.packets.len(), 0);
    }

    #[test]
    fn test_version() {
        let mut b = Blob::default();
@@ -13,6 +13,7 @@
use crate::blocktree::Blocktree;
use crate::entry::Entry;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::leader_schedule_utils;
use crate::poh::Poh;
use crate::result::{Error, Result};
use solana_runtime::bank::Bank;
@@ -20,16 +21,12 @@ use solana_sdk::hash::Hash;
use solana_sdk::poh_config::PohConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing;
pub use solana_sdk::timing::Slot;
use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
use solana_sdk::transaction::Transaction;
use std::cmp;
use std::sync::mpsc::{channel, Receiver, Sender, SyncSender};
use std::sync::{Arc, Mutex};
use std::time::Instant;

const GRACE_TICKS_FACTOR: u64 = 2;
const MAX_GRACE_TICKS: u64 = 12;
const MAX_LAST_LEADER_GRACE_TICKS_FACTOR: u64 = 2;

#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PohRecorderError {
@@ -51,14 +48,14 @@ pub struct PohRecorder {
    pub poh: Arc<Mutex<Poh>>,
    tick_height: u64,
    clear_bank_signal: Option<SyncSender<bool>>,
    start_slot: Slot, // parent slot
    start_tick: u64,  // first tick this recorder will observe
    start_slot: u64,
    start_tick: u64,
    tick_cache: Vec<(Entry, u64)>,
    working_bank: Option<WorkingBank>,
    sender: Sender<WorkingBankEntries>,
    start_leader_at_tick: Option<u64>,
    last_leader_tick: u64, // zero if none
    grace_ticks: u64,
    last_leader_tick: Option<u64>,
    max_last_leader_grace_ticks: u64,
    id: Pubkey,
    blocktree: Arc<Blocktree>,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
@@ -76,10 +73,11 @@ impl PohRecorder {
            &bank,
            Some(&self.blocktree),
        );
        assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
            Self::compute_leader_slot_ticks(next_leader_slot, self.ticks_per_slot);
        self.grace_ticks = grace_ticks;
        let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
            &next_leader_slot,
            bank.ticks_per_slot(),
            self.max_last_leader_grace_ticks,
        );
        self.start_leader_at_tick = start_leader_at_tick;
        self.last_leader_tick = last_leader_tick;
    }
@@ -89,141 +87,133 @@ impl PohRecorder {
    }

    pub fn would_be_leader(&self, within_next_n_ticks: u64) -> bool {
        self.has_bank()
            || self.start_leader_at_tick.map_or(false, |leader_tick| {
                let ideal_leader_tick = leader_tick.saturating_sub(self.grace_ticks);
        let close_to_leader_tick = self.start_leader_at_tick.map_or(false, |leader_tick| {
            let leader_pubkeyeal_start_tick =
                leader_tick.saturating_sub(self.max_last_leader_grace_ticks);

                self.tick_height <= self.last_leader_tick
                    && self.tick_height >= ideal_leader_tick.saturating_sub(within_next_n_ticks)
            })
    }
            self.tick_height() <= self.last_leader_tick.unwrap_or(0)
                && self.tick_height()
                    >= leader_pubkeyeal_start_tick.saturating_sub(within_next_n_ticks)
        });

    pub fn leader_after_slots(&self, slots: u64) -> Option<Pubkey> {
        self.leader_schedule_cache
            .slot_leader_at(self.tick_height / self.ticks_per_slot + slots, None)
        self.working_bank.is_some() || close_to_leader_tick
    }

    pub fn next_slot_leader(&self) -> Option<Pubkey> {
        self.leader_after_slots(1)
        let slot =
            leader_schedule_utils::tick_height_to_slot(self.ticks_per_slot, self.tick_height());
        self.leader_schedule_cache.slot_leader_at(slot + 1, None)
    }

    pub fn start_slot(&self) -> u64 {
        self.start_slot
    }

    pub fn bank(&self) -> Option<Arc<Bank>> {
        self.working_bank.clone().map(|w| w.bank)
    }

    pub fn has_bank(&self) -> bool {
        self.working_bank.is_some()
    }

    pub fn tick_height(&self) -> u64 {
        self.tick_height
    }

    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }

    /// returns if leader tick has reached, how many grace ticks were afforded,
    /// imputed leader_slot and self.start_slot
    /// reached_leader_tick() == true means "ready for a bank"
    pub fn reached_leader_tick(&self) -> (bool, u64, Slot, Slot) {
        trace!(
            "tick_height {}, start_tick {}, start_leader_at_tick {:?}, grace {}, has_bank {}",
            self.tick_height,
            self.start_tick,
            self.start_leader_at_tick,
            self.grace_ticks,
            self.has_bank()
        );

        let next_tick = self.tick_height + 1;

        if let Some(target_tick) = self.start_leader_at_tick {
            // we've reached target_tick OR poh was reset to run immediately
            if next_tick >= target_tick || self.start_tick + self.grace_ticks == target_tick {
                assert!(next_tick >= self.start_tick);
                let ideal_target_tick = target_tick.saturating_sub(self.grace_ticks);

                return (
                    true,
                    next_tick.saturating_sub(ideal_target_tick),
                    next_tick / self.ticks_per_slot,
                    self.start_slot,
    // returns if leader tick has reached, and how many grace ticks were afforded
    pub fn reached_leader_tick(&self) -> (bool, u64) {
        self.start_leader_at_tick
            .map(|target_tick| {
                debug!(
                    "Current tick {}, start tick {} target {}, grace {}",
                    self.tick_height(),
                    self.start_tick,
                    target_tick,
                    self.max_last_leader_grace_ticks
                );
            }
        }
        (false, 0, next_tick / self.ticks_per_slot, self.start_slot)
    }

    // returns (start_leader_at_tick, last_leader_tick, grace_ticks) given the next slot this
    // recorder will lead
    fn compute_leader_slot_ticks(
        next_leader_slot: Option<(Slot, Slot)>,
        ticks_per_slot: u64,
    ) -> (Option<u64>, u64, u64) {
        next_leader_slot
            .map(|(first, last)| {
                let first_tick = first * ticks_per_slot;
                let last_tick = (last + 1) * ticks_per_slot - 1;
                let grace_ticks = cmp::min(
                    MAX_GRACE_TICKS,
                    (last_tick - first_tick + 1) / GRACE_TICKS_FACTOR,
                );
                (Some(first_tick + grace_ticks), last_tick, grace_ticks)
                let leader_pubkeyeal_start_tick =
                    target_tick.saturating_sub(self.max_last_leader_grace_ticks);
                // Is the current tick in the same slot as the target tick?
                // Check if either grace period has expired,
                // or target tick is = grace period (i.e. poh recorder was just reset)
                if self.tick_height() <= self.last_leader_tick.unwrap_or(0)
                    && (self.tick_height() >= target_tick
                        || self.max_last_leader_grace_ticks
                            >= target_tick.saturating_sub(self.start_tick))
                {
                    return (
                        true,
                        self.tick_height()
                            .saturating_sub(leader_pubkeyeal_start_tick),
                    );
                }

                (false, 0)
            })
            .unwrap_or((
                None,
                0,
                cmp::min(
                    MAX_GRACE_TICKS,
                    ticks_per_slot * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR,
                ),
            ))
            .unwrap_or((false, 0))
    }

    fn compute_leader_slot_ticks(
        next_leader_slot: &Option<u64>,
        ticks_per_slot: u64,
        grace_ticks: u64,
    ) -> (Option<u64>, Option<u64>) {
        next_leader_slot
            .map(|slot| {
                (
                    Some(slot * ticks_per_slot + grace_ticks),
                    Some((slot + 1) * ticks_per_slot - 1),
                )
            })
            .unwrap_or((None, None))
    }
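The tuple-of-slots variant of `compute_leader_slot_ticks` turns a slot range into tick targets and a grace allowance. A worked example using the same values as `test_compute_leader_slots` later in this diff (leader for slots 4..=4 at 8 ticks per slot), with the constants mirrored from the top of the file:

```rust
use std::cmp;

const GRACE_TICKS_FACTOR: u64 = 2;
const MAX_GRACE_TICKS: u64 = 12;

fn main() {
    let (first, last, ticks_per_slot) = (4u64, 4u64, 8u64);
    let first_tick = first * ticks_per_slot; // 32
    let last_tick = (last + 1) * ticks_per_slot - 1; // 39
    let grace_ticks = cmp::min(
        MAX_GRACE_TICKS,
        (last_tick - first_tick + 1) / GRACE_TICKS_FACTOR, // 8 / 2 = 4
    );
    // The recorder waits out the grace ticks before producing: 32 + 4 = 36.
    assert_eq!((first_tick + grace_ticks, last_tick, grace_ticks), (36, 39, 4));
}
```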

    // synchronize PoH with a bank
    pub fn reset(
        &mut self,
        tick_height: u64,
        blockhash: Hash,
        start_slot: Slot,
        next_leader_slot: Option<(Slot, Slot)>,
        start_slot: u64,
        my_next_leader_slot: Option<u64>,
        ticks_per_slot: u64,
    ) {
        self.clear_bank();
        let mut cache = vec![];
        {
            let mut poh = self.poh.lock().unwrap();
            info!(
                "reset poh from: {},{},{} to: {},{}",
                poh.hash, self.tick_height, self.start_slot, blockhash, start_slot
                "reset poh from: {},{} to: {},{}",
                poh.hash, self.tick_height, blockhash, tick_height,
            );
            poh.reset(blockhash, self.poh_config.hashes_per_tick);
        }

        std::mem::swap(&mut cache, &mut self.tick_cache);

        self.start_slot = start_slot;
        self.start_tick = (start_slot + 1) * self.ticks_per_slot;
        self.tick_height = self.start_tick - 1;

        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
            Self::compute_leader_slot_ticks(next_leader_slot, self.ticks_per_slot);
        self.grace_ticks = grace_ticks;
        self.start_tick = tick_height + 1;
        self.tick_height = tick_height;
        self.max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
        let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
            &my_next_leader_slot,
            ticks_per_slot,
            self.max_last_leader_grace_ticks,
        );
        self.start_leader_at_tick = start_leader_at_tick;
        self.last_leader_tick = last_leader_tick;
        self.ticks_per_slot = ticks_per_slot;
    }
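The slot-based variant of `reset` derives its tick bookkeeping from the parent slot: resetting onto slot N leaves the recorder one tick before the first tick of slot N + 1. A sketch of that arithmetic with plain functions standing in for the recorder's fields:

```rust
fn first_tick_of_next_slot(parent_slot: u64, ticks_per_slot: u64) -> u64 {
    (parent_slot + 1) * ticks_per_slot
}

fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
    tick_height / ticks_per_slot
}

fn main() {
    let ticks_per_slot = 4;
    let start_tick = first_tick_of_next_slot(0, ticks_per_slot); // 4
    let tick_height = start_tick - 1; // 3, the last tick of slot 0
    // This is the "parent slot 0 implies tick_height of 3" relationship
    // noted in test_reset_to_new_value below.
    assert_eq!(tick_height_to_slot(ticks_per_slot, tick_height), 0);
    assert_eq!(tick_height_to_slot(ticks_per_slot, start_tick), 1);
}
```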

    pub fn set_working_bank(&mut self, working_bank: WorkingBank) {
        trace!("new working bank");
        assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot());
        self.working_bank = Some(working_bank);
        let _ = self.flush_cache(false);
    }
    pub fn set_bank(&mut self, bank: &Arc<Bank>) {
        let max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
        let working_bank = WorkingBank {
            bank: bank.clone(),
            min_tick_height: bank.tick_height(),
            max_tick_height: bank.max_tick_height(),
            max_tick_height,
        };
        self.ticks_per_slot = bank.ticks_per_slot();
        self.set_working_bank(working_bank);
    }

@@ -255,7 +245,7 @@ impl PohRecorder {
            .take_while(|x| x.1 <= working_bank.max_tick_height)
            .count();
        let send_result = if entry_count > 0 {
            trace!(
            debug!(
                "flush_cache: bank_slot: {} tick_height: {} max: {} sending: {}",
                working_bank.bank.slot(),
                working_bank.bank.tick_height(),
@@ -273,12 +263,11 @@ impl PohRecorder {
        };
        if self.tick_height >= working_bank.max_tick_height {
            info!(
                "poh_record: max_tick_height {} reached, clearing working_bank {}",
                working_bank.max_tick_height,
                "poh_record: max_tick_height reached, setting working bank {} to None",
                working_bank.bank.slot()
            );
            self.start_slot = working_bank.max_tick_height / self.ticks_per_slot;
            self.start_tick = (self.start_slot + 1) * self.ticks_per_slot;
            self.start_slot = working_bank.max_tick_height / working_bank.bank.ticks_per_slot();
            self.start_tick = (self.start_slot + 1) * working_bank.bank.ticks_per_slot();
            self.clear_bank();
        }
        if send_result.is_err() {
@@ -336,7 +325,7 @@ impl PohRecorder {

    pub fn record(
        &mut self,
        bank_slot: Slot,
        bank_slot: u64,
        mixin: Hash,
        transactions: Vec<Transaction>,
    ) -> Result<()> {
@@ -381,8 +370,8 @@ impl PohRecorder {
    pub fn new_with_clear_signal(
        tick_height: u64,
        last_entry_hash: Hash,
        start_slot: Slot,
        next_leader_slot: Option<(Slot, Slot)>,
        start_slot: u64,
        my_leader_slot_index: Option<u64>,
        ticks_per_slot: u64,
        id: &Pubkey,
        blocktree: &Arc<Blocktree>,
@@ -395,8 +384,12 @@ impl PohRecorder {
            poh_config.hashes_per_tick,
        )));
        let (sender, receiver) = channel();
        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
            Self::compute_leader_slot_ticks(next_leader_slot, ticks_per_slot);
        let max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
        let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
            &my_leader_slot_index,
            ticks_per_slot,
            max_last_leader_grace_ticks,
        );
        (
            Self {
                poh,
@@ -409,7 +402,7 @@ impl PohRecorder {
                start_tick: tick_height + 1,
                start_leader_at_tick,
                last_leader_tick,
                grace_ticks,
                max_last_leader_grace_ticks,
                id: *id,
                blocktree: blocktree.clone(),
                leader_schedule_cache: leader_schedule_cache.clone(),
@@ -426,8 +419,8 @@ impl PohRecorder {
    pub fn new(
        tick_height: u64,
        last_entry_hash: Hash,
        start_slot: Slot,
        next_leader_slot: Option<(Slot, Slot)>,
        start_slot: u64,
        my_leader_slot_index: Option<u64>,
        ticks_per_slot: u64,
        id: &Pubkey,
        blocktree: &Arc<Blocktree>,
@@ -438,7 +431,7 @@ impl PohRecorder {
            tick_height,
            last_entry_hash,
            start_slot,
            next_leader_slot,
            my_leader_slot_index,
            ticks_per_slot,
            id,
            blocktree,
@@ -471,7 +464,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -498,7 +491,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -524,7 +517,7 @@ mod tests {
            0,
            Hash::default(),
            0,
            Some((4, 4)),
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -533,7 +526,7 @@ mod tests {
        );
        poh_recorder.tick();
        assert_eq!(poh_recorder.tick_cache.len(), 1);
        poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
        poh_recorder.reset(0, Hash::default(), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
        assert_eq!(poh_recorder.tick_cache.len(), 0);
    }
    Blocktree::destroy(&ledger_path).unwrap();
@@ -552,7 +545,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -586,7 +579,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -632,7 +625,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -676,7 +669,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -714,7 +707,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -754,7 +747,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -801,7 +794,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -845,7 +838,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -880,7 +873,7 @@ mod tests {
            0,
            Hash::default(),
            0,
            Some((4, 4)),
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -891,7 +884,13 @@ mod tests {
        poh_recorder.tick();
        assert_eq!(poh_recorder.tick_cache.len(), 2);
        let hash = poh_recorder.poh.lock().unwrap().hash;
        poh_recorder.reset(hash, 0, Some((4, 4)));
        poh_recorder.reset(
            poh_recorder.tick_height,
            hash,
            0,
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
        );
        assert_eq!(poh_recorder.tick_cache.len(), 0);
    }
    Blocktree::destroy(&ledger_path).unwrap();
@@ -907,7 +906,7 @@ mod tests {
            0,
            Hash::default(),
            0,
            Some((4, 4)),
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -917,7 +916,13 @@ mod tests {
        poh_recorder.tick();
        poh_recorder.tick();
        assert_eq!(poh_recorder.tick_cache.len(), 2);
        poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
        poh_recorder.reset(
            poh_recorder.tick_cache[0].1,
            poh_recorder.tick_cache[0].0.hash,
            0,
            Some(4),
            DEFAULT_TICKS_PER_SLOT,
        );
        assert_eq!(poh_recorder.tick_cache.len(), 0);
    }
    Blocktree::destroy(&ledger_path).unwrap();
@@ -925,8 +930,6 @@ mod tests {

    #[test]
    fn test_reset_to_new_value() {
        solana_logger::setup();

        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree =
@@ -935,7 +938,7 @@ mod tests {
                0,
                Hash::default(),
                0,
                Some((4, 4)),
                Some(4),
                DEFAULT_TICKS_PER_SLOT,
                &Pubkey::default(),
                &Arc::new(blocktree),
@@ -947,10 +950,10 @@ mod tests {
            poh_recorder.tick();
            assert_eq!(poh_recorder.tick_cache.len(), 3);
            assert_eq!(poh_recorder.tick_height, 3);
            poh_recorder.reset(hash(b"hello"), 0, Some((4, 4))); // parent slot 0 implies tick_height of 3
            poh_recorder.reset(1, hash(b"hello"), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
            assert_eq!(poh_recorder.tick_cache.len(), 0);
            poh_recorder.tick();
            assert_eq!(poh_recorder.tick_height, 4);
            assert_eq!(poh_recorder.tick_height, 2);
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }
@@ -967,20 +970,21 @@ mod tests {
            0,
            Hash::default(),
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &Arc::new(PohConfig::default()),
        );
        let ticks_per_slot = bank.ticks_per_slot();
        let working_bank = WorkingBank {
            bank,
            min_tick_height: 2,
            max_tick_height: 3,
        };
        poh_recorder.set_working_bank(working_bank);
        poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
        poh_recorder.reset(1, hash(b"hello"), 0, Some(4), ticks_per_slot);
        assert!(poh_recorder.working_bank.is_none());
    }
    Blocktree::destroy(&ledger_path).unwrap();
@@ -1032,7 +1036,7 @@ mod tests {
            0,
            prev_hash,
            0,
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -1060,15 +1064,13 @@ mod tests {
            .is_err());
        assert!(poh_recorder.working_bank.is_none());
        // Make sure the starting slot is updated
        assert_eq!(poh_recorder.start_slot, end_slot);
        assert_eq!(poh_recorder.start_slot(), end_slot);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}

#[test]
fn test_reached_leader_tick() {
    solana_logger::setup();

    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
@@ -1091,13 +1093,25 @@ mod tests {
        // Test that with no leader slot, we don't reach the leader tick
        assert_eq!(poh_recorder.reached_leader_tick().0, false);

        poh_recorder.reset(bank.last_blockhash(), 0, None);
        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            0,
            None,
            bank.ticks_per_slot(),
        );

        // Test that with no leader slot in reset(), we don't reach the leader tick
        assert_eq!(poh_recorder.reached_leader_tick().0, false);

        // Provide a leader slot 1 slot down
        poh_recorder.reset(bank.last_blockhash(), 0, Some((2, 2)));
        poh_recorder.reset(
            bank.ticks_per_slot(),
            bank.last_blockhash(),
            0,
            Some(2),
            bank.ticks_per_slot(),
        );

        let init_ticks = poh_recorder.tick_height();

@@ -1115,16 +1129,29 @@ mod tests {
        // Test that we don't reach the leader tick because of grace ticks
        assert_eq!(poh_recorder.reached_leader_tick().0, false);

        // reset poh now. we should immediately be leader
        poh_recorder.reset(bank.last_blockhash(), 1, Some((2, 2)));
        // reset poh now. it should discard the grace ticks wait
        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            1,
            Some(2),
            bank.ticks_per_slot(),
        );
        // without sending more ticks, we should be leader now
        assert_eq!(poh_recorder.reached_leader_tick().0, true);
        assert_eq!(poh_recorder.reached_leader_tick().1, 0);

        // Now test that with grace ticks we can reach leader ticks
        // Set the leader slot 1 slot down
        poh_recorder.reset(bank.last_blockhash(), 1, Some((3, 3)));
        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            2,
            Some(3),
            bank.ticks_per_slot(),
        );

        // Send one slot worth of ticks ("skips" slot 2)
        // Send one slot worth of ticks
        for _ in 0..bank.ticks_per_slot() {
            poh_recorder.tick();
        }
@@ -1133,46 +1160,73 @@ mod tests {
        assert_eq!(poh_recorder.reached_leader_tick().0, false);

        // Send 1 less tick than the grace ticks
        for _ in 0..bank.ticks_per_slot() * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR {
        for _ in 0..bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR - 1 {
            poh_recorder.tick();
        }
        // We are still not the leader
        assert_eq!(poh_recorder.reached_leader_tick().0, false);

        // Send one more tick
        poh_recorder.tick();

        // We should be the leader now
        assert_eq!(poh_recorder.reached_leader_tick().0, true);
        assert_eq!(
            poh_recorder.reached_leader_tick().1,
            bank.ticks_per_slot() * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR
            bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR
        );

        // Let's test that correct grace ticks are reported
        // Set the leader slot 1 slot down
        poh_recorder.reset(bank.last_blockhash(), 2, Some((4, 4)));
        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            3,
            Some(4),
            bank.ticks_per_slot(),
        );

        // send ticks for a slot
        for _ in 0..bank.ticks_per_slot() {
        // Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
        for _ in
            bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR..bank.ticks_per_slot()
        {
            poh_recorder.tick();
        }

        // Send one extra tick before resetting (so that there's one grace tick)
        poh_recorder.tick();

        // We are not the leader yet, as expected
        assert_eq!(poh_recorder.reached_leader_tick().0, false);
        poh_recorder.reset(bank.last_blockhash(), 3, Some((4, 4)));

        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            3,
            Some(4),
            bank.ticks_per_slot(),
        );
        // without sending more ticks, we should be leader now
        assert_eq!(poh_recorder.reached_leader_tick().0, true);
        assert_eq!(poh_recorder.reached_leader_tick().1, 0);
        assert_eq!(poh_recorder.reached_leader_tick().1, 1);

        // Let's test that if a node overshoots the ticks for its target
        // leader slot, reached_leader_tick() will return true, because it's overdue
        // leader slot, reached_leader_tick() will return false
        // Set the leader slot 1 slot down
        poh_recorder.reset(bank.last_blockhash(), 4, Some((5, 5)));
        poh_recorder.reset(
            poh_recorder.tick_height(),
            bank.last_blockhash(),
            4,
            Some(5),
            bank.ticks_per_slot(),
        );

        // Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
        for _ in 0..4 * bank.ticks_per_slot() {
            poh_recorder.tick();
        }

        // We are overdue to lead
        assert_eq!(poh_recorder.reached_leader_tick().0, true);
        // We are not the leader, as expected
        assert_eq!(poh_recorder.reached_leader_tick().0, false);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
@@ -1204,7 +1258,13 @@ mod tests {
        false
    );

    poh_recorder.reset(bank.last_blockhash(), 0, None);
    poh_recorder.reset(
        poh_recorder.tick_height(),
        bank.last_blockhash(),
        0,
        None,
        bank.ticks_per_slot(),
    );

    assert_eq!(
        poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
@@ -1212,8 +1272,13 @@ mod tests {
    );

    // We reset with leader slot after 3 slots
    let bank_slot = bank.slot() + 3;
    poh_recorder.reset(bank.last_blockhash(), 0, Some((bank_slot, bank_slot)));
    poh_recorder.reset(
        poh_recorder.tick_height(),
        bank.last_blockhash(),
        0,
        Some(bank.slot() + 3),
        bank.ticks_per_slot(),
    );

    // Test that the node won't be leader in next 2 slots
    assert_eq!(
@@ -1258,7 +1323,7 @@ mod tests {
        0,
        bank.last_blockhash(),
        0,
        Some((2, 2)),
        Some(2),
        bank.ticks_per_slot(),
        &Pubkey::default(),
        &Arc::new(blocktree),
@@ -1275,32 +1340,4 @@ mod tests {
        assert!(!bank.check_hash_age(&genesis_blockhash, 1));
    }
}

#[test]
fn test_compute_leader_slots() {
    assert_eq!(
        PohRecorder::compute_leader_slot_ticks(None, 0),
        (None, 0, 0)
    );

    assert_eq!(
        PohRecorder::compute_leader_slot_ticks(Some((4, 4)), 8),
        (Some(36), 39, 4)
    );

    assert_eq!(
        PohRecorder::compute_leader_slot_ticks(Some((4, 7)), 8),
        (Some(44), 63, MAX_GRACE_TICKS)
    );

    assert_eq!(
        PohRecorder::compute_leader_slot_ticks(Some((6, 7)), 8),
        (Some(56), 63, 8)
    );

    assert_eq!(
        PohRecorder::compute_leader_slot_ticks(Some((6, 7)), 4),
        (Some(28), 31, 4)
    );
}
}
@@ -113,7 +113,7 @@ mod tests {
            bank.tick_height(),
            prev_hash,
            bank.slot(),
            Some((4, 4)),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
@@ -1,111 +0,0 @@
use rand::{thread_rng, Rng};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

#[derive(Debug, Default)]
struct RecyclerStats {
    total: AtomicUsize,
    reuse: AtomicUsize,
    max_gc: AtomicUsize,
}

#[derive(Debug)]
pub struct Recycler<T> {
    gc: Arc<Mutex<Vec<T>>>,
    stats: Arc<RecyclerStats>,
    id: usize,
}

impl<T: Default> Default for Recycler<T> {
    fn default() -> Recycler<T> {
        let id = thread_rng().gen_range(0, 1000);
        trace!("new recycler..{}", id);
        Recycler {
            gc: Arc::new(Mutex::new(vec![])),
            stats: Arc::new(RecyclerStats::default()),
            id,
        }
    }
}

impl<T: Default> Clone for Recycler<T> {
    fn clone(&self) -> Recycler<T> {
        Recycler {
            gc: self.gc.clone(),
            stats: self.stats.clone(),
            id: self.id,
        }
    }
}

pub trait Reset {
    fn reset(&mut self);
}

impl<T: Default + Reset> Recycler<T> {
    pub fn allocate(&self, name: &'static str) -> T {
        let new = self
            .gc
            .lock()
            .expect("recycler lock in pb fn allocate")
            .pop();

        if let Some(mut x) = new {
            self.stats.reuse.fetch_add(1, Ordering::Relaxed);
            x.reset();
            return x;
        }

        trace!(
            "allocating new: total {} {:?} id: {} reuse: {} max_gc: {}",
            self.stats.total.fetch_add(1, Ordering::Relaxed),
            name,
            self.id,
            self.stats.reuse.load(Ordering::Relaxed),
            self.stats.max_gc.load(Ordering::Relaxed),
        );

        T::default()
    }

    pub fn recycle(&self, x: T) {
        let len = {
            let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
            gc.push(x);
            gc.len()
        };

        let max_gc = self.stats.max_gc.load(Ordering::Relaxed);
        if len > max_gc {
            // this is not completely accurate, but for most cases should be fine.
            self.stats
                .max_gc
                .compare_and_swap(max_gc, len, Ordering::Relaxed);
        }
    }
}
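The `max_gc` update above admits a race between the `load` and the `compare_and_swap`, which the code's own comment acknowledges. A retry loop keeps the running maximum exact; this is a sketch of the standard pattern, not a change this diff makes:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Retry until either our value is installed or someone else's is larger.
// (Newer Rust offers fetch_max, which does the same thing in one call.)
fn record_max(stat: &AtomicUsize, observed: usize) {
    let mut current = stat.load(Ordering::Relaxed);
    while observed > current {
        match stat.compare_exchange_weak(current, observed, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => break,
            Err(actual) => current = actual,
        }
    }
}

fn main() {
    let stat = AtomicUsize::new(0);
    record_max(&stat, 7);
    record_max(&stat, 3); // a lower value leaves the maximum untouched
    assert_eq!(stat.load(Ordering::Relaxed), 7);
}
```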

#[cfg(test)]
mod tests {
    use super::*;

    impl Reset for u64 {
        fn reset(&mut self) {
            *self = 10;
        }
    }

    #[test]
    fn test_recycler() {
        let recycler = Recycler::default();
        let mut y: u64 = recycler.allocate("test_recycler1");
        assert_eq!(y, 0);
        y = 20;
        let recycler2 = recycler.clone();
        recycler2.recycle(y);
        assert_eq!(recycler.gc.lock().unwrap().len(), 1);
        let z = recycler.allocate("test_recycler2");
        assert_eq!(z, 10);
        assert_eq!(recycler.gc.lock().unwrap().len(), 0);
    }
}
@@ -1,12 +1,13 @@
//! The `replay_stage` replays transactions broadcast by the leader.

use crate::bank_forks::BankForks;
use crate::blocktree::{Blocktree, BlocktreeError};
use crate::blocktree::Blocktree;
use crate::blocktree_processor;
use crate::cluster_info::ClusterInfo;
use crate::consensus::{StakeLockout, Tower};
use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::leader_schedule_utils;
use crate::locktower::{Locktower, StakeLockout};
use crate::packet::BlobError;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
@@ -50,7 +51,6 @@ impl Drop for Finalizer {

pub struct ReplayStage {
    t_replay: JoinHandle<Result<()>>,
    t_lockouts: JoinHandle<()>,
}

#[derive(Default)]
@@ -89,26 +89,24 @@ impl ReplayStage {
        subscriptions: &Arc<RpcSubscriptions>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        slot_full_senders: Vec<Sender<(u64, Pubkey)>>,
    ) -> (Self, Receiver<Vec<Arc<Bank>>>)
    ) -> (Self, Receiver<(u64, Pubkey)>, Receiver<Vec<Arc<Bank>>>)
    where
        T: 'static + KeypairUtil + Send + Sync,
    {
        let (root_bank_sender, root_bank_receiver) = channel();
        let (slot_full_sender, slot_full_receiver) = channel();
        trace!("replay stage");
        let exit_ = exit.clone();
        let subscriptions = subscriptions.clone();
        let bank_forks = bank_forks.clone();
        let poh_recorder = poh_recorder.clone();
        let my_pubkey = *my_pubkey;
        let mut tower = Tower::new_from_forks(&bank_forks.read().unwrap(), &my_pubkey);
        let mut ticks_per_slot = 0;
        let mut locktower = Locktower::new_from_forks(&bank_forks.read().unwrap(), &my_pubkey);
        // Start the replay stage loop
        let leader_schedule_cache = leader_schedule_cache.clone();
        let vote_account = *vote_account;
        let voting_keypair = voting_keypair.cloned();

        let (lockouts_sender, t_lockouts) = aggregate_stake_lockouts(exit);

        let t_replay = Builder::new()
            .name("solana-replay-stage".to_string())
            .spawn(move || {
@@ -128,20 +126,26 @@ impl ReplayStage {
                    &mut bank_forks.write().unwrap(),
                    &leader_schedule_cache,
                );

                let mut tpu_has_bank = poh_recorder.lock().unwrap().has_bank();

                let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
                let did_complete_bank = Self::replay_active_banks(
                    &blocktree,
                    &bank_forks,
                    &my_pubkey,
                    &mut ticks_per_slot,
                    &mut progress,
                    &slot_full_senders,
                    &slot_full_sender,
                );

                let votable = Self::generate_votable_banks(&bank_forks, &tower, &mut progress);
                if ticks_per_slot == 0 {
                    let frozen_banks = bank_forks.read().unwrap().frozen_banks();
                    let bank = frozen_banks.values().next().unwrap();
                    ticks_per_slot = bank.ticks_per_slot();
                }

                if let Some((_, bank, lockouts)) = votable.into_iter().last() {
                let votable =
                    Self::generate_votable_banks(&bank_forks, &locktower, &mut progress);

                if let Some((_, bank)) = votable.last() {
                    subscriptions.notify_subscribers(bank.slot(), &bank_forks);

                    if let Some(new_leader) =
@@ -158,7 +162,7 @@ impl ReplayStage {
                    Self::handle_votable_bank(
                        &bank,
                        &bank_forks,
                        &mut tower,
                        &mut locktower,
                        &mut progress,
                        &vote_account,
                        &voting_keypair,
@@ -166,8 +170,6 @@ impl ReplayStage {
                        &blocktree,
                        &leader_schedule_cache,
                        &root_bank_sender,
                        lockouts,
                        &lockouts_sender,
                    )?;

                    Self::reset_poh_recorder(
@@ -175,16 +177,35 @@ impl ReplayStage {
                        &blocktree,
                        &bank,
                        &poh_recorder,
                        ticks_per_slot,
                        &leader_schedule_cache,
                    );
                    tpu_has_bank = false;

                    is_tpu_bank_active = false;
                }

                if !tpu_has_bank {
                    Self::maybe_start_leader(
                let (reached_leader_tick, grace_ticks) = if !is_tpu_bank_active {
                    let poh = poh_recorder.lock().unwrap();
                    poh.reached_leader_tick()
                } else {
                    (false, 0)
                };

                if !is_tpu_bank_active {
                    assert!(ticks_per_slot > 0);
                    let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
                    let poh_slot = leader_schedule_utils::tick_height_to_slot(
                        ticks_per_slot,
                        poh_tick_height + 1,
                    );
                    Self::start_leader(
                        &my_pubkey,
                        &bank_forks,
                        &poh_recorder,
                        &cluster_info,
                        poh_slot,
                        reached_leader_tick,
                        grace_ticks,
                        &leader_schedule_cache,
                    );

@@ -217,13 +238,7 @@ impl ReplayStage {
                Ok(())
            })
            .unwrap();
        (
            Self {
                t_replay,
                t_lockouts,
            },
            root_bank_receiver,
        )
        (Self { t_replay }, slot_full_receiver, root_bank_receiver)
    }

    fn log_leader_change(
@@ -250,73 +265,57 @@ impl ReplayStage {
        current_leader.replace(new_leader.to_owned());
    }

    fn maybe_start_leader(
    pub fn start_leader(
        my_pubkey: &Pubkey,
        bank_forks: &Arc<RwLock<BankForks>>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        poh_slot: u64,
        reached_leader_tick: bool,
        grace_ticks: u64,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
    ) {
        // all the individual calls to poh_recorder.lock() are designed to
        // increase granularity, decrease contention
        trace!("{} checking poh slot {}", my_pubkey, poh_slot);
        if bank_forks.read().unwrap().get(poh_slot).is_none() {
            let parent_slot = poh_recorder.lock().unwrap().start_slot();
            let parent = {
                let r_bf = bank_forks.read().unwrap();
                r_bf.get(parent_slot)
                    .expect("start slot doesn't exist in bank forks")
                    .clone()
            };
            assert!(parent.is_frozen());

        assert!(!poh_recorder.lock().unwrap().has_bank());

        let (reached_leader_tick, grace_ticks, poh_slot, parent_slot) =
            poh_recorder.lock().unwrap().reached_leader_tick();

        if !reached_leader_tick {
            trace!("{} poh_recorder hasn't reached_leader_tick", my_pubkey);
            return;
        }
        trace!("{} reached_leader_tick", my_pubkey,);

        let parent = bank_forks
            .read()
            .unwrap()
            .get(parent_slot)
            .expect("parent_slot doesn't exist in bank forks")
            .clone();

        assert!(parent.is_frozen());

        if bank_forks.read().unwrap().get(poh_slot).is_some() {
            warn!("{} already have bank in forks at {}?", my_pubkey, poh_slot);
            return;
        }
        trace!(
            "{} poh_slot {} parent_slot {}",
            my_pubkey,
            poh_slot,
            parent_slot
        );

        if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) {
            trace!(
                "{} leader {} at poh slot: {}",
                my_pubkey,
                next_leader,
                poh_slot
            );

            // I guess I missed my slot
            if next_leader != *my_pubkey {
                return;
            }

            datapoint_warn!(
                "replay_stage-new_leader",
                ("count", poh_slot, i64),
                ("grace", grace_ticks, i64)
            );

            let tpu_bank = bank_forks
                .write()
                .unwrap()
                .insert(Bank::new_from_parent(&parent, my_pubkey, poh_slot));

            poh_recorder.lock().unwrap().set_bank(&tpu_bank);
        } else {
            error!("{} No next leader found", my_pubkey);
            leader_schedule_cache
                .slot_leader_at(poh_slot, Some(&parent))
                .map(|next_leader| {
                    debug!(
                        "me: {} leader {} at poh slot {}",
                        my_pubkey, next_leader, poh_slot
                    );
                    cluster_info.write().unwrap().set_leader(&next_leader);
                    if next_leader == *my_pubkey && reached_leader_tick {
                        debug!("{} starting tpu for slot {}", my_pubkey, poh_slot);
                        datapoint_warn!(
                            "replay_stage-new_leader",
                            ("count", poh_slot, i64),
                            ("grace", grace_ticks, i64)
                        );
                        let tpu_bank = Bank::new_from_parent(&parent, my_pubkey, poh_slot);
                        bank_forks.write().unwrap().insert(tpu_bank);
                        if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
                            assert_eq!(
                                bank_forks.read().unwrap().working_bank().slot(),
                                tpu_bank.slot()
                            );
                            poh_recorder.lock().unwrap().set_bank(&tpu_bank);
                        }
                    }
                })
                .or_else(|| {
                    warn!("{} No next leader found", my_pubkey);
                    None
                });
        }
    }
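Both versions of this function take short, separate locks on `bank_forks` (a read lock for the existence checks, a write lock only for the insert), which the code's own comment explains as a contention trade-off. A minimal sketch of that lock discipline with illustrative types:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Short read locks for checks, a write lock only for the insert. The
// re-check via is_none() covers the window between the two locks.
fn insert_if_absent(forks: &Arc<RwLock<HashMap<u64, String>>>, slot: u64, bank: String) -> bool {
    if forks.read().unwrap().contains_key(&slot) {
        return false; // someone already built this slot
    }
    forks.write().unwrap().insert(slot, bank).is_none()
}

fn main() {
    let forks = Arc::new(RwLock::new(HashMap::new()));
    assert!(insert_if_absent(&forks, 2, "bank for slot 2".to_string()));
    assert!(!insert_if_absent(&forks, 2, "duplicate".to_string()));
}
```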
|
||||
@@ -330,7 +329,6 @@ impl ReplayStage {
|
||||
!Bank::can_commit(&tx_error)
|
||||
}
|
||||
Err(Error::BlobError(BlobError::VerificationFailed)) => true,
|
||||
Err(Error::BlocktreeError(BlocktreeError::InvalidBlobData(_))) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
@@ -340,17 +338,10 @@ impl ReplayStage {
|
||||
blocktree: &Blocktree,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
) -> Result<()> {
|
||||
let result =
|
||||
Self::load_blocktree_entries(bank, blocktree, progress).and_then(|(entries, num)| {
|
||||
Self::replay_entries_into_bank(bank, entries, progress, num)
|
||||
});
|
||||
let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?;
|
||||
let result = Self::replay_entries_into_bank(bank, entries, progress, num);
|
||||
|
||||
if Self::is_replay_result_fatal(&result) {
|
||||
warn!(
|
||||
"Fatal replay result in slot: {}, result: {:?}",
|
||||
bank.slot(),
|
||||
result
|
||||
);
|
||||
Self::mark_dead_slot(bank.slot(), blocktree, progress);
|
||||
}
|
||||
|
||||
@@ -372,7 +363,7 @@ impl ReplayStage {
|
||||
fn handle_votable_bank<T>(
|
||||
bank: &Arc<Bank>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
tower: &mut Tower,
|
||||
locktower: &mut Locktower,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
vote_account: &Pubkey,
|
||||
voting_keypair: &Option<Arc<T>>,
|
||||
@@ -380,14 +371,11 @@ impl ReplayStage {
|
||||
blocktree: &Arc<Blocktree>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
|
||||
lockouts: HashMap<u64, StakeLockout>,
|
||||
lockouts_sender: &Sender<LockoutAggregationData>,
|
||||
) -> Result<()>
|
||||
where
|
||||
T: 'static + KeypairUtil + Send + Sync,
|
||||
{
|
||||
trace!("handle votable bank {}", bank.slot());
|
||||
if let Some(new_root) = tower.record_vote(bank.slot(), bank.hash()) {
|
||||
if let Some(new_root) = locktower.record_vote(bank.slot(), bank.hash()) {
|
||||
// get the root bank before squash
|
||||
let root_bank = bank_forks
|
||||
.read()
|
||||
@@ -404,17 +392,12 @@ impl ReplayStage {
|
||||
// Set root first in leader schedule_cache before bank_forks because bank_forks.root
|
||||
// is consumed by repair_service to update gossip, so we don't want to get blobs for
|
||||
// repair on gossip before we update leader schedule, otherwise they may get dropped.
|
||||
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
|
||||
leader_schedule_cache.set_root(new_root);
|
||||
bank_forks.write().unwrap().set_root(new_root);
|
||||
Self::handle_new_root(&bank_forks, progress);
|
||||
trace!("new root {}", new_root);
|
||||
if let Err(e) = root_bank_sender.send(rooted_banks) {
|
||||
trace!("root_bank_sender failed: {:?}", e);
|
||||
Err(e)?;
|
||||
}
|
||||
root_bank_sender.send(rooted_banks)?;
|
||||
}
|
||||
Self::update_confidence_cache(bank_forks, tower, lockouts, lockouts_sender);
|
||||
tower.update_epoch(&bank);
|
||||
locktower.update_epoch(&bank);
|
||||
if let Some(ref voting_keypair) = voting_keypair {
|
||||
let node_keypair = cluster_info.read().unwrap().keypair.clone();
|
||||
|
||||
@@ -422,11 +405,11 @@ impl ReplayStage {
|
||||
let vote_ix = vote_instruction::vote(
|
||||
&vote_account,
|
||||
&voting_keypair.pubkey(),
|
||||
tower.recent_votes(),
|
||||
locktower.recent_votes(),
|
||||
);
|
||||
|
||||
let mut vote_tx =
|
||||
Transaction::new_with_payer(vec![vote_ix], Some(&node_keypair.pubkey()));
|
||||
Transaction::new_with_payer(vec![vote_ix], Some(&node_keypair.pubkey()));;
|
||||
|
||||
let blockhash = bank.last_blockhash();
|
||||
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
|
||||
@@ -436,49 +419,26 @@ impl ReplayStage {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_confidence_cache(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
tower: &Tower,
|
||||
lockouts: HashMap<u64, StakeLockout>,
|
||||
lockouts_sender: &Sender<LockoutAggregationData>,
|
||||
) {
|
||||
let total_epoch_stakes = tower.total_epoch_stakes();
|
||||
let mut w_bank_forks = bank_forks.write().unwrap();
|
||||
for (fork, stake_lockout) in lockouts.iter() {
|
||||
if tower.root().is_none() || *fork >= tower.root().unwrap() {
|
||||
w_bank_forks.cache_fork_confidence(
|
||||
*fork,
|
||||
stake_lockout.stake(),
|
||||
total_epoch_stakes,
|
||||
stake_lockout.lockout(),
|
||||
);
|
||||
}
|
||||
}
|
||||
drop(w_bank_forks);
|
||||
let bank_forks_clone = bank_forks.clone();
|
||||
let root = tower.root();
|
||||
|
||||
if let Err(e) = lockouts_sender.send((lockouts, root, bank_forks_clone)) {
|
||||
trace!("lockouts_sender failed: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
fn reset_poh_recorder(
|
||||
my_pubkey: &Pubkey,
|
||||
blocktree: &Blocktree,
|
||||
bank: &Arc<Bank>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
ticks_per_slot: u64,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
) {
|
||||
let next_leader_slot =
|
||||
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree));
|
||||
poh_recorder
|
||||
.lock()
|
||||
.unwrap()
|
||||
.reset(bank.last_blockhash(), bank.slot(), next_leader_slot);
|
||||
poh_recorder.lock().unwrap().reset(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
next_leader_slot,
|
||||
ticks_per_slot,
|
||||
);
|
||||
|
||||
let next_leader_msg = if let Some(next_leader_slot) = next_leader_slot {
|
||||
format!("My next leader slot is #{}", next_leader_slot.0)
|
||||
format!("My next leader slot is #{}", next_leader_slot)
|
||||
} else {
|
||||
"I am not in the upcoming leader schedule yet".to_owned()
|
||||
};
|
||||
@@ -495,8 +455,9 @@ impl ReplayStage {
|
||||
blocktree: &Arc<Blocktree>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
my_pubkey: &Pubkey,
|
||||
ticks_per_slot: &mut u64,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
||||
slot_full_sender: &Sender<(u64, Pubkey)>,
|
||||
) -> bool {
|
||||
let mut did_complete_bank = false;
|
||||
let active_banks = bank_forks.read().unwrap().active_banks();
|
||||
@@ -509,27 +470,20 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
|
||||
*ticks_per_slot = bank.ticks_per_slot();
|
||||
if bank.collector_id() != my_pubkey
|
||||
&& Self::is_replay_result_fatal(&Self::replay_blocktree_into_bank(
|
||||
&bank, &blocktree, progress,
|
||||
))
|
||||
{
|
||||
trace!("replay_result_fatal slot {}", bank_slot);
|
||||
// If the bank was corrupted, don't try to run the below logic to check if the
|
||||
// bank is completed
|
||||
continue;
|
||||
}
|
||||
assert_eq!(*bank_slot, bank.slot());
|
||||
if bank.tick_height() == bank.max_tick_height() {
|
||||
let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
|
||||
if bank.tick_height() == max_tick_height {
|
||||
did_complete_bank = true;
|
||||
Self::process_completed_bank(my_pubkey, bank, slot_full_senders);
|
||||
} else {
|
||||
trace!(
|
||||
"bank {} not completed tick_height: {}, max_tick_height: {}",
|
||||
bank.slot(),
|
||||
bank.tick_height(),
|
||||
bank.max_tick_height()
|
||||
);
|
||||
Self::process_completed_bank(my_pubkey, bank, slot_full_sender);
|
||||
}
|
||||
}
|
||||
did_complete_bank
|
||||
@@ -537,17 +491,17 @@ impl ReplayStage {
|
||||
|
||||
fn generate_votable_banks(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
tower: &Tower,
|
||||
locktower: &Locktower,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
) -> Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>)> {
|
||||
let tower_start = Instant::now();
|
||||
// Tower voting
|
||||
) -> Vec<(u128, Arc<Bank>)> {
|
||||
let locktower_start = Instant::now();
|
||||
// Locktower voting
|
||||
let descendants = bank_forks.read().unwrap().descendants();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
|
||||
|
||||
trace!("frozen_banks {}", frozen_banks.len());
|
||||
let mut votable: Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>)> = frozen_banks
|
||||
let mut votable: Vec<(u128, Arc<Bank>)> = frozen_banks
|
||||
.values()
|
||||
.filter(|b| {
|
||||
let is_votable = b.is_votable();
|
||||
@@ -555,24 +509,24 @@ impl ReplayStage {
|
||||
is_votable
|
||||
})
|
||||
.filter(|b| {
|
||||
let is_recent_epoch = tower.is_recent_epoch(b);
|
||||
let is_recent_epoch = locktower.is_recent_epoch(b);
|
||||
trace!("bank is is_recent_epoch: {} {}", b.slot(), is_recent_epoch);
|
||||
is_recent_epoch
|
||||
})
|
||||
.filter(|b| {
|
||||
let has_voted = tower.has_voted(b.slot());
|
||||
let has_voted = locktower.has_voted(b.slot());
|
||||
trace!("bank is has_voted: {} {}", b.slot(), has_voted);
|
||||
!has_voted
|
||||
})
|
||||
.filter(|b| {
|
||||
let is_locked_out = tower.is_locked_out(b.slot(), &descendants);
|
||||
let is_locked_out = locktower.is_locked_out(b.slot(), &descendants);
|
||||
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
|
||||
!is_locked_out
|
||||
})
|
||||
.map(|bank| {
|
||||
(
|
||||
bank,
|
||||
tower.collect_vote_lockouts(
|
||||
locktower.collect_vote_lockouts(
|
||||
bank.slot(),
|
||||
bank.vote_accounts().into_iter(),
|
||||
&ancestors,
|
||||
@@ -580,48 +534,43 @@ impl ReplayStage {
|
||||
)
|
||||
})
|
||||
.filter(|(b, stake_lockouts)| {
|
||||
let vote_threshold = tower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
|
||||
Self::confirm_forks(tower, stake_lockouts, progress, bank_forks);
|
||||
let vote_threshold =
|
||||
locktower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
|
||||
Self::confirm_forks(locktower, stake_lockouts, progress, bank_forks);
|
||||
debug!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
|
||||
vote_threshold
|
||||
})
|
||||
.map(|(b, stake_lockouts)| {
|
||||
(
|
||||
tower.calculate_weight(&stake_lockouts),
|
||||
b.clone(),
|
||||
stake_lockouts,
|
||||
)
|
||||
})
|
||||
.map(|(b, stake_lockouts)| (locktower.calculate_weight(&stake_lockouts), b.clone()))
|
||||
.collect();
|
||||
|
||||
votable.sort_by_key(|b| b.0);
|
||||
let ms = timing::duration_as_ms(&tower_start.elapsed());
|
||||
let ms = timing::duration_as_ms(&locktower_start.elapsed());
|
||||
|
||||
trace!("votable_banks {}", votable.len());
|
||||
if !votable.is_empty() {
|
||||
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
|
||||
info!(
|
||||
"@{:?} tower duration: {:?} len: {} weights: {:?}",
|
||||
"@{:?} locktower duration: {:?} len: {} weights: {:?}",
|
||||
timing::timestamp(),
|
||||
ms,
|
||||
votable.len(),
|
||||
weights
|
||||
);
|
||||
}
|
||||
inc_new_counter_info!("replay_stage-tower_duration", ms as usize);
|
||||
inc_new_counter_info!("replay_stage-locktower_duration", ms as usize);
|
||||
|
||||
votable
|
||||
}
|
||||
|
||||
fn confirm_forks(
|
||||
tower: &Tower,
|
||||
locktower: &Locktower,
|
||||
stake_lockouts: &HashMap<u64, StakeLockout>,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
) {
|
||||
progress.retain(|slot, prog| {
|
||||
let duration = timing::timestamp() - prog.started_ms;
|
||||
if tower.is_slot_confirmed(*slot, stake_lockouts)
|
||||
if locktower.is_slot_confirmed(*slot, stake_lockouts)
|
||||
&& bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
@@ -711,15 +660,13 @@ impl ReplayStage {
|
||||
fn process_completed_bank(
|
||||
my_pubkey: &Pubkey,
|
||||
bank: Arc<Bank>,
|
||||
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
||||
slot_full_sender: &Sender<(u64, Pubkey)>,
|
||||
) {
|
||||
bank.freeze();
|
||||
info!("bank frozen {}", bank.slot());
|
||||
slot_full_senders.iter().for_each(|sender| {
|
||||
if let Err(e) = sender.send((bank.slot(), *bank.collector_id())) {
|
||||
trace!("{} slot_full alert failed: {:?}", my_pubkey, e);
|
||||
}
|
||||
});
|
||||
if let Err(e) = slot_full_sender.send((bank.slot(), *bank.collector_id())) {
|
||||
trace!("{} slot_full alert failed: {:?}", my_pubkey, e);
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_new_bank_forks(
|
||||
@@ -730,15 +677,12 @@ impl ReplayStage {
|
||||
// Find the next slot that chains to the old slot
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
||||
trace!("frozen_banks {:?}", frozen_bank_slots);
|
||||
let next_slots = blocktree
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
// Filter out what we've already seen
|
||||
trace!("generate new forks {:?}", {
|
||||
let mut next_slots = next_slots.iter().collect::<Vec<_>>();
|
||||
next_slots.sort();
|
||||
next_slots
|
||||
});
|
||||
trace!("generate new forks {:?}", next_slots);
|
||||
for (parent_id, children) in next_slots {
|
||||
let parent_bank = frozen_banks
|
||||
.get(&parent_id)
|
||||
@@ -763,67 +707,24 @@ impl Service for ReplayStage {
     type JoinReturnType = ();

     fn join(self) -> thread::Result<()> {
-        self.t_lockouts.join()?;
         self.t_replay.join().map(|_| ())
     }
 }

-type LockoutAggregationData = (
-    HashMap<u64, StakeLockout>, // lockouts
-    Option<u64>,                // root
-    Arc<RwLock<BankForks>>,     // bank_forks
-);
-
-fn aggregate_stake_lockouts(
-    exit: &Arc<AtomicBool>,
-) -> (Sender<LockoutAggregationData>, JoinHandle<()>) {
-    let (lockouts_sender, lockouts_receiver): (
-        Sender<LockoutAggregationData>,
-        Receiver<LockoutAggregationData>,
-    ) = channel();
-    let exit_ = exit.clone();
-    (
-        lockouts_sender,
-        Builder::new()
-            .name("solana-aggregate-stake-lockouts".to_string())
-            .spawn(move || loop {
-                if exit_.load(Ordering::Relaxed) {
-                    break;
-                }
-                if let Ok((lockouts, root, bank_forks)) = lockouts_receiver.try_recv() {
-                    let ancestors = bank_forks.read().unwrap().ancestors();
-                    let stake_weighted_lockouts =
-                        Tower::aggregate_stake_lockouts(root, &ancestors, lockouts);
-                    let mut w_bank_forks = bank_forks.write().unwrap();
-                    for (fork, stake_weighted_lockout) in stake_weighted_lockouts.iter() {
-                        if root.is_none() || *fork >= root.unwrap() {
-                            w_bank_forks
-                                .cache_stake_weighted_lockouts(*fork, *stake_weighted_lockout)
-                        }
-                    }
-                    drop(w_bank_forks);
-                }
-            })
-            .unwrap(),
-    )
-}

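The removed `aggregate_stake_lockouts` follows a common worker-thread shape: a named thread that polls a channel with `try_recv` and breaks when a shared `AtomicBool` exit flag flips. Stripped of the Solana types, the skeleton looks roughly like this (a sketch; `spawn_worker` and its generic payload are illustrative, not crate APIs):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};

// Generic worker: applies `work` to each message until `exit` is set.
fn spawn_worker<T, F>(exit: &Arc<AtomicBool>, work: F) -> (Sender<T>, JoinHandle<()>)
where
    T: Send + 'static,
    F: Fn(T) + Send + 'static,
{
    let (tx, rx) = channel::<T>();
    let exit = exit.clone();
    let handle = Builder::new()
        .name("worker".to_string())
        .spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            // Non-blocking receive, as in the stage above.
            if let Ok(msg) = rx.try_recv() {
                work(msg);
            }
        })
        .unwrap();
    (tx, handle)
}

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let (tx, handle) = spawn_worker(&exit, |n: u64| println!("got {}", n));
    tx.send(7).unwrap();
    std::thread::sleep(std::time::Duration::from_millis(50));
    exit.store(true, Ordering::Relaxed);
    handle.join().unwrap();
}
```

Note the busy-wait: `try_recv` never blocks, which keeps exit-flag checks prompt at the cost of a spinning thread; a `recv_timeout` would make the opposite trade.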
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::bank_forks::Confidence;
     use crate::blocktree::get_tmp_ledger_path;
     use crate::entry;
-    use crate::erasure::ErasureConfig;
-    use crate::genesis_utils::{create_genesis_block, create_genesis_block_with_leader};
-    use crate::packet::{Blob, BLOB_HEADER_SIZE};
+    use crate::genesis_utils::create_genesis_block;
+    use crate::packet::Blob;
     use crate::replay_stage::ReplayStage;
     use solana_runtime::genesis_utils::GenesisBlockInfo;
-    use solana_sdk::hash::{hash, Hash};
+    use solana_sdk::hash::hash;
+    use solana_sdk::hash::Hash;
     use solana_sdk::signature::{Keypair, KeypairUtil};
     use solana_sdk::system_transaction;
     use solana_sdk::transaction::TransactionError;
-    use solana_vote_api::vote_state::VoteState;
     use std::fs::remove_dir_all;
     use std::sync::{Arc, RwLock};

@@ -845,7 +746,6 @@ mod test {
         let mut blob_slot_1 = Blob::default();
         blob_slot_1.set_slot(1);
         blob_slot_1.set_parent(0);
-        blob_slot_1.set_erasure_config(&ErasureConfig::default());
         blocktree.insert_data_blobs(&vec![blob_slot_1]).unwrap();
         assert!(bank_forks.get(1).is_none());
         ReplayStage::generate_new_bank_forks(
@@ -859,7 +759,6 @@ mod test {
         let mut blob_slot_2 = Blob::default();
         blob_slot_2.set_slot(2);
         blob_slot_2.set_parent(0);
-        blob_slot_2.set_erasure_config(&ErasureConfig::default());
         blocktree.insert_data_blobs(&vec![blob_slot_2]).unwrap();
         assert!(bank_forks.get(2).is_none());
         ReplayStage::generate_new_bank_forks(
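Both test snippets above lean on `get_slots_since`: given the frozen slots, return the children recorded for each, so that inserting blobs with `set_parent(0)` makes slots 1 and 2 surface as candidate forks. A toy model of that lookup, under the assumption that it is essentially a parent-to-children map filter (the real Blocktree query reads column families, not a HashMap):

```rust
use std::collections::HashMap;

// Toy model of the parent -> children lookup the tests exercise.
fn slots_since(frozen: &[u64], children: &HashMap<u64, Vec<u64>>) -> HashMap<u64, Vec<u64>> {
    frozen
        .iter()
        .filter_map(|slot| children.get(slot).map(|c| (*slot, c.clone())))
        .collect()
}

fn main() {
    let mut children = HashMap::new();
    // blob_slot_1 and blob_slot_2 both chain to parent slot 0.
    children.insert(0u64, vec![1u64, 2]);
    let next = slots_since(&[0], &children);
    assert_eq!(next[&0], vec![1, 2]);
}
```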
@@ -886,110 +785,53 @@ mod test {
     }

     #[test]
-    fn test_dead_fork_transaction_error() {
-        let keypair1 = Keypair::new();
-        let keypair2 = Keypair::new();
-        let missing_keypair = Keypair::new();
-        let missing_keypair2 = Keypair::new();
+    fn test_dead_forks() {
+        let ledger_path = get_tmp_ledger_path!();
+        {
+            let blocktree = Arc::new(
+                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+            );
+            let GenesisBlockInfo {
+                genesis_block,
+                mint_keypair,
+                ..
+            } = create_genesis_block(1000);
+            let bank0 = Arc::new(Bank::new(&genesis_block));
+            let mut progress = HashMap::new();
+            progress.insert(bank0.slot(), ForkProgress::new(bank0.last_blockhash()));

-        let res = check_dead_fork(|blockhash| {
-            entry::next_entry(
-                blockhash,
+            let keypair1 = Keypair::new();
+            let keypair2 = Keypair::new();
+            let missing_keypair = Keypair::new();

+            // Insert entry with TransactionError::AccountNotFound error
+            let account_not_found_blob = entry::next_entry(
+                &bank0.last_blockhash(),
                 1,
                 vec![
                     system_transaction::create_user_account(
                         &keypair1,
                         &keypair2.pubkey(),
                         2,
-                        *blockhash,
+                        bank0.last_blockhash(),
                     ), // should be fine,
                     system_transaction::transfer(
                         &missing_keypair,
-                        &missing_keypair2.pubkey(),
+                        &mint_keypair.pubkey(),
                         2,
-                        *blockhash,
+                        bank0.last_blockhash(),
                     ), // should cause AccountNotFound error
                 ],
             )
-            .to_blob()
-        });
+            .to_blob();

-        assert_matches!(
-            res,
-            Err(Error::TransactionError(TransactionError::AccountNotFound))
-        );
-    }
-
-    #[test]
-    fn test_dead_fork_entry_verification_failure() {
-        let keypair1 = Keypair::new();
-        let keypair2 = Keypair::new();
-        let res = check_dead_fork(|blockhash| {
-            let bad_hash = hash(&[2; 30]);
-            entry::next_entry(
-                // Use wrong blockhash so that the entry causes an entry verification failure
-                &bad_hash,
-                1,
-                vec![system_transaction::create_user_account(
-                    &keypair1,
-                    &keypair2.pubkey(),
-                    2,
-                    *blockhash,
-                )],
-            )
-            .to_blob()
-        });
-
-        assert_matches!(res, Err(Error::BlobError(BlobError::VerificationFailed)));
-    }
-
-    #[test]
-    fn test_dead_fork_blob_deserialize_failure() {
-        let keypair1 = Keypair::new();
-        let keypair2 = Keypair::new();
-        // Insert entry that causes blob deserialization failure
-
-        let res = check_dead_fork(|blockhash| {
-            let mut b = entry::next_entry(
-                &blockhash,
-                1,
-                vec![system_transaction::create_user_account(
-                    &keypair1,
-                    &keypair2.pubkey(),
-                    2,
-                    *blockhash,
-                )],
-            )
-            .to_blob();
-            b.set_size(BLOB_HEADER_SIZE);
-            b
-        });
-
-        assert_matches!(
-            res,
-            Err(Error::BlocktreeError(BlocktreeError::InvalidBlobData(_)))
-        );
-    }
-
-    // Given a blob and a fatal expected error, check that replaying that blob causes the fork to be
-    // marked as dead. Returns the error for caller to verify.
-    fn check_dead_fork<F>(blob_to_insert: F) -> Result<()>
-    where
-        F: Fn(&Hash) -> Blob,
-    {
-        let ledger_path = get_tmp_ledger_path!();
-        let res = {
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
-            );
+            blocktree
+                .insert_data_blobs(&[account_not_found_blob])
+                .unwrap();
+            assert_matches!(
+                ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress),
+                Err(Error::TransactionError(TransactionError::AccountNotFound))
+            );
-            let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(1000);
-            let bank0 = Arc::new(Bank::new(&genesis_block));
-            let mut progress = HashMap::new();
-            let last_blockhash = bank0.last_blockhash();
-            progress.insert(bank0.slot(), ForkProgress::new(last_blockhash));
-            let blob = blob_to_insert(&last_blockhash);
-            blocktree.insert_data_blobs(&[blob]).unwrap();
-            let res = ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress);

             // Check that the erroring bank was marked as dead in the progress map
             assert!(progress
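The removed `check_dead_fork` is a closure-parameterized test fixture: each test supplies only the offending blob, while the helper owns ledger setup, replay, the shared dead-fork assertions, and teardown. The pattern in miniature (every type here is a stand-in, and the toy replay rule merely illustrates the shape):

```rust
// Sketch of a closure-parameterized fixture in the style of
// check_dead_fork: the fixture owns setup and the shared assertion,
// and returns the error for the caller to verify.
#[derive(Debug, PartialEq)]
enum ReplayError {
    VerificationFailed,
}

struct Ledger {
    dead: bool,
}

impl Ledger {
    fn replay(&mut self, blob: &[u8]) -> Result<(), ReplayError> {
        // Toy rule: an empty blob "fails verification".
        if blob.is_empty() {
            self.dead = true;
            return Err(ReplayError::VerificationFailed);
        }
        Ok(())
    }
}

fn check_dead_fork<F>(blob_to_insert: F) -> Result<(), ReplayError>
where
    F: Fn(u64) -> Vec<u8>, // takes a blockhash stand-in
{
    let mut ledger = Ledger { dead: false };
    let blob = blob_to_insert(42);
    let res = ledger.replay(&blob);
    if res.is_err() {
        // Shared assertion: every fatal error must mark the fork dead.
        assert!(ledger.dead);
    }
    res
}

fn main() {
    assert_eq!(
        check_dead_fork(|_hash| Vec::new()),
        Err(ReplayError::VerificationFailed)
    );
}
```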
@@ -999,102 +841,46 @@ mod test {

             // Check that the erroring bank was marked as dead in blocktree
             assert!(blocktree.is_dead(bank0.slot()));
-            res
-        };

+            // Create new bank
+            let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), bank0.slot() + 1);
+            progress.insert(bank1.slot(), ForkProgress::new(bank0.last_blockhash()));
+            let bad_hash = hash(&[2; 30]);

+            // Insert entry that causes verification failure
+            let mut verifcation_failure_blob = entry::next_entry(
+                // use wrong blockhash
+                &bad_hash,
+                1,
+                vec![system_transaction::create_user_account(
+                    &keypair1,
+                    &keypair2.pubkey(),
+                    2,
+                    bank1.last_blockhash(),
+                )],
+            )
+            .to_blob();
+            verifcation_failure_blob.set_slot(1);
+            verifcation_failure_blob.set_index(0);
+            verifcation_failure_blob.set_parent(bank0.slot());

+            blocktree
+                .insert_data_blobs(&[verifcation_failure_blob])
+                .unwrap();
+            assert_matches!(
+                ReplayStage::replay_blocktree_into_bank(&bank1, &blocktree, &mut progress),
+                Err(Error::BlobError(BlobError::VerificationFailed))
+            );
+            // Check that the erroring bank was marked as dead in the progress map
+            assert!(progress
+                .get(&bank1.slot())
+                .map(|b| b.is_dead)
+                .unwrap_or(false));

+            // Check that the erroring bank was marked as dead in blocktree
+            assert!(blocktree.is_dead(bank1.slot()));
+        }

         let _ignored = remove_dir_all(&ledger_path);
-        res
     }

-    #[test]
-    fn test_replay_confidence_cache() {
-        fn leader_vote(bank: &Arc<Bank>, pubkey: &Pubkey) {
-            let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
-            let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
-            vote_state.process_slot_vote_unchecked(bank.slot());
-            vote_state.to(&mut leader_vote_account).unwrap();
-            bank.store_account(&pubkey, &leader_vote_account);
-        }
-
-        let (lockouts_sender, _) = aggregate_stake_lockouts(&Arc::new(AtomicBool::new(false)));
-
-        let leader_pubkey = Pubkey::new_rand();
-        let leader_lamports = 3;
-        let genesis_block_info =
-            create_genesis_block_with_leader(50, &leader_pubkey, leader_lamports);
-        let mut genesis_block = genesis_block_info.genesis_block;
-        let leader_voting_pubkey = genesis_block_info.voting_keypair.pubkey();
-        genesis_block.epoch_warmup = false;
-        genesis_block.ticks_per_slot = 4;
-        let bank0 = Bank::new(&genesis_block);
-        for _ in 1..genesis_block.ticks_per_slot {
-            bank0.register_tick(&Hash::default());
-        }
-        bank0.freeze();
-        let arc_bank0 = Arc::new(bank0);
-        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
-            &[arc_bank0.clone()],
-            0,
-        )));
-        let pubkey = Pubkey::new_rand();
-        let mut tower = Tower::new_from_forks(&bank_forks.read().unwrap(), &pubkey);
-        let mut progress = HashMap::new();
-
-        leader_vote(&arc_bank0, &leader_voting_pubkey);
-        let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
-        if let Some((_, _, lockouts)) = votable.into_iter().last() {
-            ReplayStage::update_confidence_cache(&bank_forks, &tower, lockouts, &lockouts_sender);
-        }
-
-        assert_eq!(
-            bank_forks.read().unwrap().get_fork_confidence(0).unwrap(),
-            &Confidence::new(0, 1, 2)
-        );
-        assert!(bank_forks.read().unwrap().get_fork_confidence(1).is_none());
-
-        tower.record_vote(arc_bank0.slot(), arc_bank0.hash());
-
-        let bank1 = Bank::new_from_parent(&arc_bank0, &Pubkey::default(), arc_bank0.slot() + 1);
-        let _res = bank1.transfer(10, &genesis_block_info.mint_keypair, &Pubkey::new_rand());
-        for _ in 0..genesis_block.ticks_per_slot {
-            bank1.register_tick(&Hash::default());
-        }
-        bank1.freeze();
-        bank_forks.write().unwrap().insert(bank1);
-        let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
-        leader_vote(&arc_bank1, &leader_voting_pubkey);
-        let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
-        if let Some((_, _, lockouts)) = votable.into_iter().last() {
-            ReplayStage::update_confidence_cache(&bank_forks, &tower, lockouts, &lockouts_sender);
-        }
-
-        tower.record_vote(arc_bank1.slot(), arc_bank1.hash());
-
-        let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
-        let _res = bank2.transfer(10, &genesis_block_info.mint_keypair, &Pubkey::new_rand());
-        for _ in 0..genesis_block.ticks_per_slot {
-            bank2.register_tick(&Hash::default());
-        }
-        bank2.freeze();
-        bank_forks.write().unwrap().insert(bank2);
-        let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
-        leader_vote(&arc_bank2, &leader_voting_pubkey);
-        let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
-        if let Some((_, _, lockouts)) = votable.into_iter().last() {
-            ReplayStage::update_confidence_cache(&bank_forks, &tower, lockouts, &lockouts_sender);
-        }
-        thread::sleep(Duration::from_millis(200));
-
-        assert_eq!(
-            bank_forks.read().unwrap().get_fork_confidence(0).unwrap(),
-            &Confidence::new_with_stake_weighted(1, 1, 14, 20)
-        );
-        assert_eq!(
-            bank_forks.read().unwrap().get_fork_confidence(1).unwrap(),
-            &Confidence::new_with_stake_weighted(1, 1, 6, 6)
-        );
-        assert_eq!(
-            bank_forks.read().unwrap().get_fork_confidence(2).unwrap(),
-            &Confidence::new_with_stake_weighted(0, 1, 2, 0)
-        );
-    }
 }
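The removed `leader_vote` helper is a read-modify-write round trip on an account: load it, deserialize the vote state, record the slot, serialize, and store it back. A toy version of that round trip (a plain HashMap standing in for the bank's account store, and a stripped-down `VoteState` that is not the real solana_vote_api type):

```rust
use std::collections::HashMap;

#[derive(Default, Clone)]
struct VoteState {
    slots: Vec<u64>,
}

fn leader_vote(accounts: &mut HashMap<u64, VoteState>, pubkey: u64, slot: u64) {
    // Read-modify-write, mirroring get_account / from / to / store_account.
    let mut state = accounts.get(&pubkey).cloned().unwrap_or_default();
    state.slots.push(slot);
    accounts.insert(pubkey, state);
}

fn main() {
    let mut accounts = HashMap::new();
    leader_vote(&mut accounts, 1, 0);
    leader_vote(&mut accounts, 1, 1);
    assert_eq!(accounts[&1].slots, vec![0, 1]);
}
```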