Compare commits

...

37 Commits

SHA1 Message Date
a8b447f86c Revert "GitBook: [v0.18] 80 pages and 4 assets modified"
This reverts commit f1b7f00c30.
2019-09-17 20:41:57 -06:00
f1b7f00c30 GitBook: [v0.18] 80 pages and 4 assets modified 2019-09-18 02:22:21 +00:00
6b18db969d Add gitbook configuration 2019-09-17 15:20:47 -07:00
a9b79d6563 Bump blake2 from 0.8.0 to 0.8.1 (#5847)
automerge
2019-09-08 11:53:21 -07:00
b00ae72a31 Move appveyor off the system drive 2019-09-08 10:08:17 -07:00
87e677964f Update cargo files to 0.18.2 (#5805) 2019-09-06 15:57:14 -04:00
898dabba70 Update book install versions (#5802) 2019-09-05 14:36:56 -04:00
d90461f856 Pull in rbpf v0.1.15 (#5787) (#5794) 2019-09-05 07:40:49 -07:00
1e10c43abb Add libstd support to Rust BPF (bp #5788) (#5791) 2019-09-04 19:02:02 -07:00
9d0ed84caa Pull in LLVM with stack location fixes (#5732) (#5754) 2019-08-30 14:46:23 -07:00
f6a418aa9d Use LLVM's C builtins for BPF (#5717) (#5749) 2019-08-30 10:43:48 -07:00
a31475e22f Integrate shreds to the replicators (#5711) (#5720)
automerge
2019-08-29 13:14:21 -07:00
e11f38733a Ignore cargo audit advisory RUSTSEC-2019-0013 (#5713) (#5733) 2019-08-29 12:28:43 -07:00
f1f148bbd2 Adjust snapshot metrics layout 2019-08-27 20:57:18 -07:00
460c4f312d Remove extra call to serialize in shred verify (#5698) (#5703)
automerge
2019-08-27 20:14:39 -07:00
519e0c147e Install bzip2 in solana docker file (#5701) (#5702)
automerge
2019-08-27 19:43:31 -07:00
6c0ceb9067 Use serialize_into to fill in shreds instead of writing one byte at a time (#5695) (#5697)
automerge
2019-08-27 17:53:17 -07:00
d052de847e feat: getInflation() endpoint (#5681) (#5692)
(cherry picked from commit 34ab25a88b)
2019-08-27 15:56:46 -07:00
ad37dfb0a7 Disable LocalVoteSignerService. It's grabbing an TCP port that's causing CI to fail occasionally (#5690) (#5693)
(cherry picked from commit ffc748becb)
2019-08-27 15:36:07 -07:00
1087ca6b9a Don't unwrap get_balance immediately in bench-tps move mode (#5685) (#5689)
automerge

(cherry picked from commit 362a39a941)
2019-08-27 15:33:51 -07:00
b4a41fff6e Disable cargo caching. Travis is timing itself out as it updates the cache at the end of a build 2019-08-27 14:21:05 -07:00
925abbbf15 Add metrics for snapshot generation (#5677) (#5684)
automerge
2019-08-27 14:15:12 -07:00
9098f02f98 Ignore retransmit channel error (#5680) (#5683)
automerge

(cherry picked from commit f1d58f980b)
2019-08-27 13:58:10 -07:00
6a630ff156 [V0.18] Cherry pick erasure shreds from master (#5679)
automerge
2019-08-27 12:23:33 -07:00
8323309ccc remove replicode in run_purge_batch() (#5630) (#5675)
automerge
2019-08-27 10:31:36 -07:00
cb0a580b07 Re enable c tests (#5634) (#5672) 2019-08-27 09:28:13 -07:00
97488c0cd8 Bump version to 0.18.1 2019-08-27 08:40:58 -07:00
9c90e29a00 Add newline before cluster info log (#5671) (#5673)
(cherry picked from commit a29f0484dc)
2019-08-27 08:36:57 -07:00
c01789d2a8 Log bind error (#5666) (#5667)
automerge
2019-08-26 22:54:14 -07:00
a0f9d968fe Log contact info every 10 seconds (#5663) (#5664)
automerge
2019-08-26 19:08:53 -07:00
888072d4c2 Ignore flaky test_banking_stage_entryfication (#5659) (#5662)
automerge

(cherry picked from commit a0f3208828)
2019-08-26 18:33:04 -07:00
af1010cfd3 Add bigger buffers for shred column families in rocks (#5653) (#5658)
automerge
2019-08-26 16:34:56 -07:00
fe419db5b4 Add open file descriptor monitoring (#5655) (#5656)
automerge
2019-08-26 15:44:47 -07:00
a86dc44c96 Ignore flaky test_ledger_cleanup_service (#5649) (#5652)
(cherry picked from commit e1dd74f1bf)
2019-08-26 13:13:19 -07:00
ebda293dc4 Add warmup, cooldown to definitions (#5647) (#5648)
(cherry picked from commit 6512aced21)
2019-08-26 10:06:02 -07:00
6acfc2cf0f Bump version to v0.18.0 2019-08-25 23:08:55 -07:00
a863e82741 Add missing space 2019-08-25 23:03:48 -07:00
158 changed files with 2697 additions and 1581 deletions


@ -9,6 +9,8 @@ cache:
- '%USERPROFILE%\.cargo'
- '%APPVEYOR_BUILD_FOLDER%\target'
clone_folder: d:\projects\solana
build_script:
- bash ci/publish-tarball.sh

.gitbook.yaml (new file, 4 lines added)

@ -0,0 +1,4 @@
root: ./book/src
structure:
readme: introduction.md


@ -2,7 +2,6 @@ os:
- osx
language: rust
cache: cargo
rust:
- 1.37.0

Cargo.lock (generated, 568 changed lines)

File diff suppressed because it is too large


@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -24,17 +24,17 @@ serde_derive = "1.0.99"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
# solana-runtime = { path = "../solana/runtime"}
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-local-cluster = { path = "../local_cluster", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-drone = { path = "../drone", version = "0.18.0-pre2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.0-pre2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-metrics = { path = "../metrics", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-local-cluster = { path = "../local_cluster", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-drone = { path = "../drone", version = "0.18.2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-metrics = { path = "../metrics", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
untrusted = "0.7.0"
ws = "0.9.0"


@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
[features]
cuda = ["solana-core/cuda"]


@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,19 +16,19 @@ serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-local-cluster = { path = "../local_cluster", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-drone = { path = "../drone", version = "0.18.0-pre2" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-metrics = { path = "../metrics", version = "0.18.0-pre2" }
solana-measure = { path = "../measure", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.0-pre2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-local-cluster = { path = "../local_cluster", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-drone = { path = "../drone", version = "0.18.2" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-metrics = { path = "../metrics", version = "0.18.2" }
solana-measure = { path = "../measure", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.2" }
[features]
cuda = ["solana-core/cuda"]


@ -767,13 +767,14 @@ fn fund_move_keys<T: Client>(
client.send_message(&[funding_key], tx.message).unwrap();
let mut balance = 0;
for _ in 0..20 {
balance = client.get_balance(&funding_keys[0].pubkey()).unwrap();
if balance > 0 {
if let Ok(balance_) = client.get_balance(&funding_keys[0].pubkey()) {
if balance_ > 0 {
balance = balance_;
break;
} else {
sleep(Duration::from_millis(100));
}
}
sleep(Duration::from_millis(100));
}
assert!(balance > 0);
info!("funded multiple funding accounts.. {:?}", balance);


@ -53,7 +53,7 @@ software.
##### Linux and mac OS
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.1/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the


@ -58,6 +58,13 @@ with a ledger interpretation that matches the leader's.
A gossip network connecting all [nodes](#node) of a [cluster](#cluster).
#### cooldown period
Some number of epochs after stake has been deactivated while it progressively
becomes available for withdrawal. During this period, the stake is considered to
be "deactivating". More info about:
[warmup and cooldown](stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal)
#### credit
See [vote credit](#vote-credit).
@ -199,7 +206,7 @@ See [Proof of History](#proof-of-history).
#### point
A weighted [credit](#credit) in a rewards regime. In the validator (rewards regime)[staking-rewards.md], the number of points owed to a stake during redemption is the product of the [vote credits](#vote-credit) earned and the number of lamports staked.
A weighted [credit](#credit) in a rewards regime. In the validator [rewards regime](staking-rewards.md), the number of points owed to a stake during redemption is the product of the [vote credits](#vote-credit) earned and the number of lamports staked.
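As a concrete sketch of that product (the function and values here are illustrative, not part of the runtime API):

```rust
/// Points owed at redemption: vote credits earned times lamports staked.
fn points(vote_credits: u64, lamports_staked: u64) -> u128 {
    u128::from(vote_credits) * u128::from(lamports_staked)
}

fn main() {
    // A 1_000-lamport stake that earned 5 vote credits accrues 5_000 points.
    assert_eq!(points(5, 1_000), 5_000);
}
```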
#### program
@ -341,3 +348,10 @@ See [ledger vote](#ledger-vote).
A reward tally for validators. A vote credit is awarded to a validator in its
vote account when the validator reaches a [root](#root).
#### warmup period
Some number of epochs after stake has been delegated while it progressively
becomes effective. During this period, the stake is considered to be
"activating". More info about:
[warmup and cooldown](stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal)
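A toy model of that progressive activation (the 25%-per-epoch rate is a made-up illustration; the actual schedule is cluster-configured, and cooldown mirrors it in reverse):

```rust
/// Toy warmup: each epoch at most `rate` of the delegated amount becomes
/// newly effective, until the whole delegation is active.
fn effective_after(delegated: f64, rate: f64, epochs: u32) -> f64 {
    let mut effective = 0.0;
    for _ in 0..epochs {
        effective = (effective + delegated * rate).min(delegated);
    }
    effective
}

fn main() {
    // At a hypothetical 25% rate, 100 staked lamports become effective
    // as 25, 50, 75, then 100 over four epochs.
    assert_eq!(effective_after(100.0, 0.25, 4), 100.0);
}
```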


@ -27,16 +27,16 @@ commands in the following pages.
If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.1/install/solana-install-init.sh | sh -s - 0.18.1
```
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.1/install/solana-install-init.sh | sh -s - beta
```
Similarly, you can add this argument to the `solana-install` command if you've built the program from source:
```bash
$ solana-install init 0.18.0
$ solana-install init 0.18.1
```
If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.


@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@ -9,8 +9,9 @@ if [[ -n $APPVEYOR ]]; then
source ci/rust-version.sh
appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
export USERPROFILE="D:\\"
./rustup-init -yv --default-toolchain $rust_stable --default-host x86_64-pc-windows-msvc
export PATH="$PATH:$USERPROFILE/.cargo/bin"
export PATH="$PATH:/d/.cargo/bin"
rustc -vV
cargo -vV
fi


@ -19,12 +19,12 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
# _ cargo +"$rust_stable" audit --version ### cargo-audit stopped supporting --version?? https://github.com/RustSec/cargo-audit/issues/100
_ cargo +"$rust_stable" audit
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ book/build.sh
for project in sdk/bpf/rust/{rust-no-std,rust-utils,rust-test} programs/bpf/rust/*/ ; do
for project in sdk/bpf/rust/{rust-utils,rust-test} programs/bpf/rust/*/ ; do
echo "+++ do_bpf_check $project"
(
cd "$project"


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -25,22 +25,22 @@ serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana-budget-api = { path = "../programs/budget_api", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-drone = { path = "../drone", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0-pre2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0-pre2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0-pre2" }
solana-vote-signer = { path = "../vote-signer", version = "0.18.0-pre2" }
solana-budget-api = { path = "../programs/budget_api", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-drone = { path = "../drone", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.2" }
solana-vote-signer = { path = "../vote-signer", version = "0.18.2" }
url = "2.1.0"
[dev-dependencies]
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.2" }
[features]
cuda = []


@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = "0.9.20"
serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
[dev-dependencies]
jsonrpc-core = "13.0.0"
jsonrpc-http-server = "13.0.0"
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.2" }


@ -9,6 +9,7 @@ use serde_json::{json, Value};
use solana_sdk::account::Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash;
use solana_sdk::inflation::Inflation;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{KeypairUtil, Signature};
use solana_sdk::timing::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
@ -94,6 +95,25 @@ impl RpcClient {
})
}
pub fn get_inflation(&self) -> io::Result<Inflation> {
let response = self
.client
.send(&RpcRequest::GetInflation, None, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetInflation request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetInflation parse failure: {}", err),
)
})
}
pub fn get_version(&self) -> io::Result<String> {
let response = self
.client


@ -10,6 +10,7 @@ pub enum RpcRequest {
GetBalance,
GetClusterNodes,
GetGenesisBlockhash,
GetInflation,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
@ -40,6 +41,7 @@ impl RpcRequest {
RpcRequest::GetBalance => "getBalance",
RpcRequest::GetClusterNodes => "getClusterNodes",
RpcRequest::GetGenesisBlockhash => "getGenesisBlockhash",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
@ -110,6 +112,10 @@ mod tests {
let request = test_request.build_request_json(1, Some(addr));
assert_eq!(request["method"], "getBalance");
let test_request = RpcRequest::GetInflation;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getInflation");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getRecentBlockhash");


@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -47,25 +47,25 @@ reqwest = "0.9.20"
serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
solana-budget-api = { path = "../programs/budget_api", version = "0.18.0-pre2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.0-pre2" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-drone = { path = "../drone", version = "0.18.0-pre2" }
solana-budget-api = { path = "../programs/budget_api", version = "0.18.2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.2" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-drone = { path = "../drone", version = "0.18.2" }
solana-ed25519-dalek = "0.2.0"
solana-kvstore = { path = "../kvstore", version = "0.18.0-pre2", optional = true }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.18.0-pre2" }
solana-metrics = { path = "../metrics", version = "0.18.0-pre2" }
solana-measure = { path = "../measure", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0-pre2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0-pre2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.0-pre2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0-pre2" }
solana-vote-signer = { path = "../vote-signer", version = "0.18.0-pre2" }
solana-kvstore = { path = "../kvstore", version = "0.18.2", optional = true }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.18.2" }
solana-metrics = { path = "../metrics", version = "0.18.2" }
solana-measure = { path = "../measure", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.2" }
solana-vote-signer = { path = "../vote-signer", version = "0.18.2" }
symlink = "0.1.0"
sys-info = "0.5.7"
tar = "0.4.26"


@ -1157,6 +1157,7 @@ mod tests {
}
#[test]
#[ignore]
fn test_banking_stage_entryfication() {
solana_logger::setup();
// In this attack we'll demonstrate that a verifier can interpret the ledger


@ -161,7 +161,16 @@ mod test {
let expected_tick_heights = [5, 6, 7, 8, 8, 9];
blocktree
.write_entries_using_shreds(1, 0, 0, ticks_per_slot, None, true, &entries)
.write_entries_using_shreds(
1,
0,
0,
ticks_per_slot,
None,
true,
&Arc::new(Keypair::new()),
&entries,
)
.unwrap();
slot_full_sender.send((1, leader_pubkey)).unwrap();


@ -1,7 +1,6 @@
//! The `block_tree` module provides functions for parallel verification of the
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
use crate::broadcast_stage::broadcast_utils::entries_to_shreds;
use crate::entry::Entry;
use crate::erasure::{ErasureConfig, Session};
use crate::packet::{Blob, SharedBlob, BLOB_HEADER_SIZE};
@ -94,7 +93,7 @@ pub struct Blocktree {
orphans_cf: LedgerColumn<cf::Orphans>,
index_cf: LedgerColumn<cf::Index>,
data_shred_cf: LedgerColumn<cf::ShredData>,
_code_shred_cf: LedgerColumn<cf::ShredCode>,
code_shred_cf: LedgerColumn<cf::ShredCode>,
batch_processor: Arc<RwLock<BatchProcessor>>,
pub new_blobs_signals: Vec<SyncSender<bool>>,
pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>,
@ -166,7 +165,7 @@ impl Blocktree {
orphans_cf,
index_cf,
data_shred_cf,
_code_shred_cf: code_shred_cf,
code_shred_cf,
new_blobs_signals: vec![],
batch_processor,
completed_slots_senders: vec![],
@ -234,116 +233,56 @@ impl Blocktree {
// Returns whether or not all iterators have reached their end
fn run_purge_batch(&self, from_slot: Slot, batch_end: Slot) -> Result<bool> {
let mut end = true;
let from_slot = Some(from_slot);
let batch_end = Some(batch_end);
unsafe {
let mut batch_processor = self.db.batch_processor();
let mut write_batch = batch_processor
.batch()
.expect("Database Error: Failed to get write batch");
end &= match self
let end = self
.meta_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.data_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.erasure_meta_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.erasure_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.data_shred_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
.unwrap_or(false)
&& self
.code_shred_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
.unwrap_or(false)
&& self
.orphans_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.index_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
end &= match self
.unwrap_or(false)
&& self
.dead_slots_cf
.delete_slot(&mut write_batch, from_slot, batch_end)
{
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
let roots_cf = self.db.column::<cf::Root>();
end &= match roots_cf.delete_slot(&mut write_batch, from_slot, batch_end) {
Ok(finished) => finished,
Err(e) => {
error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, from_slot
);
false
}
};
.unwrap_or(false)
&& self
.db
.column::<cf::Root>()
.delete_slot(&mut write_batch, from_slot, batch_end)
.unwrap_or(false);
if let Err(e) = batch_processor.write(write_batch) {
error!(
"Error: {:?} while submitting write batch for slot {:?} retrying...",
@ -386,47 +325,173 @@ impl Blocktree {
Ok(slot_iterator.take_while(move |((blob_slot, _), _)| *blob_slot == slot))
}
pub fn insert_shreds(&self, shreds: &[Shred]) -> Result<()> {
fn try_shred_recovery(
db: &Database,
erasure_metas: &HashMap<(u64, u64), ErasureMeta>,
index_working_set: &HashMap<u64, Index>,
prev_inserted_datas: &mut HashMap<(u64, u64), Shred>,
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
) -> Vec<Shred> {
let data_cf = db.column::<cf::ShredData>();
let code_cf = db.column::<cf::ShredCode>();
let mut recovered_data_shreds = vec![];
// Recovery rules:
// 1. Only try recovery around indexes for which new data or coding shreds are received
// 2. For new data shreds, check if an erasure set exists. If not, don't try recovery
// 3. Before trying recovery, check whether enough shreds have been received
// 3a. Enough shreds: (#data + #coding shreds) > erasure.num_data
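// e.g. with a 4:2 erasure config (num_data = 4), recovery is attempted
// once more than 4 of the 6 shreds in the set are present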
for (&(slot, set_index), erasure_meta) in erasure_metas.iter() {
let submit_metrics = |attempted: bool, status: String| {
datapoint_info!(
"blocktree-erasure",
("slot", slot as i64, i64),
("start_index", set_index as i64, i64),
("end_index", erasure_meta.end_indexes().0 as i64, i64),
("recovery_attempted", attempted, bool),
("recovery_status", status, String),
);
};
let index = index_working_set.get(&slot).expect("Index");
match erasure_meta.status(&index) {
ErasureMetaStatus::CanRecover => {
// Find shreds for this erasure set and try recovery
let slot = index.slot;
let mut available_shreds = vec![];
(set_index..set_index + erasure_meta.config.num_data() as u64).for_each(|i| {
if index.data().is_present(i) {
if let Some(shred) =
prev_inserted_datas.remove(&(slot, i)).or_else(|| {
let some_data = data_cf
.get_bytes((slot, i))
.expect("Database failure, could not fetch data shred");
if let Some(data) = some_data {
bincode::deserialize(&data).ok()
} else {
warn!("Data shred deleted while reading for recovery");
None
}
})
{
available_shreds.push(shred);
}
}
});
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|i| {
if index.coding().is_present(i) {
if let Some(shred) =
prev_inserted_codes.remove(&(slot, i)).or_else(|| {
let some_code = code_cf
.get_bytes((slot, i))
.expect("Database failure, could not fetch code shred");
if let Some(code) = some_code {
bincode::deserialize(&code).ok()
} else {
warn!("Code shred deleted while reading for recovery");
None
}
})
{
available_shreds.push(shred);
}
}
},
);
if let Ok(mut result) = Shredder::try_recovery(
&available_shreds,
erasure_meta.config.num_data(),
erasure_meta.config.num_coding(),
set_index as usize,
slot,
) {
submit_metrics(true, "complete".into());
recovered_data_shreds.append(&mut result.recovered_data);
} else {
submit_metrics(true, "incomplete".into());
}
}
ErasureMetaStatus::DataFull => {
submit_metrics(false, "complete".into());
}
ErasureMetaStatus::StillNeed(needed) => {
submit_metrics(false, format!("still need: {}", needed));
}
};
}
recovered_data_shreds
}
pub fn insert_shreds(&self, shreds: Vec<Shred>) -> Result<()> {
let db = &*self.db;
let mut batch_processor = self.batch_processor.write().unwrap();
let mut write_batch = batch_processor.batch()?;
let mut just_inserted_data_indexes = HashMap::new();
let mut just_inserted_data_shreds = HashMap::new();
let mut just_inserted_coding_shreds = HashMap::new();
let mut erasure_metas = HashMap::new();
let mut slot_meta_working_set = HashMap::new();
let mut index_working_set = HashMap::new();
shreds.iter().for_each(|shred| {
shreds.into_iter().for_each(|shred| {
let slot = shred.slot();
let shred_index = u64::from(shred.index());
let _ = index_working_set.entry(slot).or_insert_with(|| {
let index_meta = index_working_set.entry(slot).or_insert_with(|| {
self.index_cf
.get(slot)
.unwrap()
.unwrap_or_else(|| Index::new(slot))
});
});
// Possibly do erasure recovery here
if let Shred::Coding(coding_shred) = &shred {
// This gives the index of first coding shred in this FEC block
// So, all coding shreds in a given FEC block will have the same set index
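// e.g. a coding shred at index 7 with position 3 belongs to the set starting at index 4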
let pos = u64::from(coding_shred.header.position);
if shred_index >= pos {
let set_index = shred_index - pos;
let dummy_data = vec![];
for shred in shreds {
let slot = shred.slot();
let index = u64::from(shred.index());
let inserted = Blocktree::insert_data_shred(
db,
&just_inserted_data_indexes,
&mut slot_meta_working_set,
&mut index_working_set,
self.insert_coding_shred(
set_index,
coding_shred.header.num_data_shreds as usize,
coding_shred.header.num_coding_shreds as usize,
&mut just_inserted_coding_shreds,
&mut erasure_metas,
index_meta,
shred,
&mut write_batch,
)?;
)
}
} else if Blocktree::insert_data_shred(
db,
&mut slot_meta_working_set,
&mut index_working_set,
&shred,
&mut write_batch,
)
.unwrap_or(false)
{
just_inserted_data_shreds.insert((slot, shred_index), shred);
}
});
if inserted {
just_inserted_data_indexes.insert((slot, index), &dummy_data);
}
}
let recovered_data = Self::try_shred_recovery(
&db,
&erasure_metas,
&index_working_set,
&mut just_inserted_data_shreds,
&mut just_inserted_coding_shreds,
);
recovered_data.into_iter().for_each(|shred| {
let _ = Blocktree::insert_data_shred(
db,
&mut slot_meta_working_set,
&mut index_working_set,
&shred,
&mut write_batch,
);
});
// Handle chaining for the working set
handle_chaining(&db, &mut write_batch, &slot_meta_working_set)?;
@ -437,6 +502,10 @@ impl Blocktree {
&mut write_batch,
)?;
for ((slot, set_index), erasure_meta) in erasure_metas {
write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?;
}
for (&slot, index) in index_working_set.iter() {
write_batch.put::<cf::Index>(slot, index)?;
}
@ -459,9 +528,53 @@ impl Blocktree {
Ok(())
}
fn insert_coding_shred(
&self,
set_index: u64,
num_data: usize,
num_coding: usize,
prev_inserted_coding_shreds: &mut HashMap<(u64, u64), Shred>,
erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>,
index_meta: &mut Index,
shred: Shred,
write_batch: &mut WriteBatch,
) {
let slot = shred.slot();
let shred_index = u64::from(shred.index());
let erasure_config = ErasureConfig::new(num_data, num_coding);
let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| {
self.erasure_meta_cf
.get((slot, set_index))
.expect("Expect database get to succeed")
.unwrap_or_else(|| ErasureMeta::new(set_index, &erasure_config))
});
if erasure_config != erasure_meta.config {
// ToDo: This is a potential slashing condition
warn!("Received multiple erasure configs for the same erasure set!!!");
warn!(
"Stored config: {:#?}, new config: {:#?}",
erasure_meta.config, erasure_config
);
}
let serialized_shred = bincode::serialize(&shred).unwrap();
let inserted =
write_batch.put_bytes::<cf::ShredCode>((slot, shred_index), &serialized_shred);
if inserted.is_ok() {
index_meta.coding_mut().set_present(shred_index, true);
// `or_insert_with` used to prevent stack overflow
prev_inserted_coding_shreds
.entry((slot, shred_index))
.or_insert_with(|| shred);
}
}
fn insert_data_shred(
db: &Database,
prev_inserted_data_indexes: &HashMap<(u64, u64), &[u8]>,
mut slot_meta_working_set: &mut HashMap<u64, (Rc<RefCell<SlotMeta>>, Option<SlotMeta>)>,
index_working_set: &mut HashMap<u64, Index>,
shred: &Shred,
@ -494,31 +607,30 @@ impl Blocktree {
.unwrap_or(false)
};
if should_insert(
slot_meta,
&prev_inserted_data_indexes,
index as u64,
slot,
last_in_slot,
check_data_cf,
) {
let new_consumed = compute_consume_index(
prev_inserted_data_indexes,
slot_meta,
index,
slot,
check_data_cf,
);
let index_meta = index_working_set
.get_mut(&slot)
.expect("Index must be present for all data blobs")
.data_mut();
if !index_meta.is_present(index)
&& should_insert(slot_meta, index, slot, last_in_slot, check_data_cf)
{
let new_consumed = if slot_meta.consumed == index {
let mut current_index = index + 1;
while index_meta.is_present(current_index) || check_data_cf(slot, current_index) {
current_index += 1;
}
current_index
} else {
slot_meta.consumed
};
let serialized_shred = bincode::serialize(shred).unwrap();
write_batch.put_bytes::<cf::ShredData>((slot, index), &serialized_shred)?;
update_slot_meta(last_in_slot, slot_meta, index, new_consumed);
index_working_set
.get_mut(&slot)
.expect("Index must be present for all data blobs")
.data_mut()
.set_present(index, true);
index_meta.set_present(index, true);
trace!("inserted shred into slot {:?} and index {:?}", slot, index);
Ok(true)
} else {
@ -531,6 +643,43 @@ impl Blocktree {
self.data_shred_cf.get_bytes((slot, index))
}
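/// Fills `buffer` with consecutive data shreds from `slot` starting at
/// `from_index`; returns the index of the last shred copied and the number
/// of bytes written (zero if the slot is not yet full).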
pub fn get_data_shreds(
&self,
slot: u64,
from_index: u64,
buffer: &mut [u8],
) -> Result<(u64, usize)> {
let meta_cf = self.db.column::<cf::SlotMeta>();
let mut buffer_offset = 0;
let mut last_index = 0;
if let Some(meta) = meta_cf.get(slot)? {
if !meta.is_full() {
warn!("The slot is not yet full. Will not return any shreds");
return Ok((last_index, buffer_offset));
}
for index in from_index..meta.consumed {
if let Some(shred_data) = self.get_data_shred(slot, index)? {
let shred_len = shred_data.len();
if buffer.len().saturating_sub(buffer_offset) >= shred_len {
buffer[buffer_offset..buffer_offset + shred_len]
.copy_from_slice(&shred_data[..shred_len]);
buffer_offset += shred_len;
last_index = index;
// All shreds are of the same length.
// Let's check if we have scope to accommodate another shred.
// If not, break right away; that saves one DB read.
if buffer.len().saturating_sub(buffer_offset) < shred_len {
break;
}
} else {
break;
}
}
}
}
Ok((last_index, buffer_offset))
}
/// Use this function to write data blobs to blocktree
pub fn write_shared_blobs<I>(&self, shared_blobs: I) -> Result<()>
where
@ -619,6 +768,7 @@ impl Blocktree {
ticks_per_slot: u64,
parent: Option<u64>,
is_full_slot: bool,
keypair: &Arc<Keypair>,
entries: I,
) -> Result<usize>
where
@ -641,7 +791,7 @@ impl Blocktree {
current_slot,
Some(parent_slot),
0.0,
&Arc::new(Keypair::new()),
keypair,
start_index as u32,
)
.expect("Failed to create entry shredder");
@ -673,12 +823,13 @@ impl Blocktree {
remaining_ticks_in_slot -= 1;
}
entries_to_shreds(
vec![vec![entry.borrow().clone()]],
ticks_per_slot - remaining_ticks_in_slot,
ticks_per_slot,
&mut shredder,
);
bincode::serialize_into(&mut shredder, &vec![entry.borrow().clone()])
.expect("Expect to write all entries to shreds");
if remaining_ticks_in_slot == 0 {
shredder.finalize_slot();
} else {
shredder.finalize_fec_block();
}
}
if is_full_slot && remaining_ticks_in_slot != 0 {
@ -692,7 +843,7 @@ impl Blocktree {
all_shreds.extend(shreds);
let num_shreds = all_shreds.len();
self.insert_shreds(&all_shreds)?;
self.insert_shreds(all_shreds)?;
Ok(num_shreds)
}
@ -1250,8 +1401,8 @@ impl Blocktree {
break;
}
if let Ok(deshred) = Shredder::deshred(&shred_chunk) {
let entries: Vec<Entry> = bincode::deserialize(&deshred.payload)?;
if let Ok(deshred_payload) = Shredder::deshred(&shred_chunk) {
let entries: Vec<Entry> = bincode::deserialize(&deshred_payload)?;
trace!("Found entries: {:#?}", entries);
all_entries.extend(entries);
num += shred_chunk.len();
@ -1610,19 +1761,12 @@ fn should_insert_blob(
.unwrap_or(false)
};
should_insert(
slot,
prev_inserted_blob_datas,
blob_index,
blob_slot,
last_in_slot,
check_data_cf,
)
!prev_inserted_blob_datas.contains_key(&(blob_slot, blob_index))
&& should_insert(slot, blob_index, blob_slot, last_in_slot, check_data_cf)
}
fn should_insert<F>(
slot_meta: &SlotMeta,
prev_inserted_blob_datas: &HashMap<(u64, u64), &[u8]>,
index: u64,
slot: u64,
last_in_slot: bool,
@ -1632,10 +1776,7 @@ where
F: Fn(u64, u64) -> bool,
{
// Check that the index doesn't already exist
if index < slot_meta.consumed
|| prev_inserted_blob_datas.contains_key(&(slot, index))
|| db_check(slot, index)
{
if index < slot_meta.consumed || db_check(slot, index) {
return false;
}
// Check that we do not receive index >= than the last_index
@ -2344,14 +2485,16 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_block: &GenesisBlock) -> Re
let mut shredder = Shredder::new(0, Some(0), 0.0, &Arc::new(Keypair::new()), 0)
.expect("Failed to create entry shredder");
let last_hash = entries.last().unwrap().hash;
entries_to_shreds(vec![entries], ticks_per_slot, ticks_per_slot, &mut shredder);
bincode::serialize_into(&mut shredder, &entries)
.expect("Expect to write all entries to shreds");
shredder.finalize_slot();
let shreds: Vec<Shred> = shredder
.shreds
.iter()
.map(|s| bincode::deserialize(s).unwrap())
.collect();
blocktree.insert_shreds(&shreds)?;
blocktree.insert_shreds(shreds)?;
Ok(last_hash)
}
@ -2469,6 +2612,7 @@ pub mod tests {
ticks_per_slot,
Some(i.saturating_sub(1)),
true,
&Arc::new(Keypair::new()),
new_ticks.clone(),
)
.unwrap() as u64;
@ -2670,7 +2814,7 @@ pub mod tests {
// Insert last blob, we're missing the other blobs, so no consecutive
// blobs starting from slot 0, index 0 should exist.
let last_shred = shreds.pop().unwrap();
ledger.insert_shreds(&[last_shred]).unwrap();
ledger.insert_shreds(vec![last_shred]).unwrap();
assert!(ledger.get_slot_entries(0, 0, None).unwrap().is_empty());
let meta = ledger
@ -2680,7 +2824,7 @@ pub mod tests {
assert!(meta.consumed == 0 && meta.received == num_shreds);
// Insert the other blobs, check for consecutive returned entries
ledger.insert_shreds(&shreds).unwrap();
ledger.insert_shreds(shreds).unwrap();
let result = ledger.get_slot_entries(0, 0, None).unwrap();
assert_eq!(result, entries);
@ -2713,7 +2857,7 @@ pub mod tests {
// Insert blobs in reverse, check for consecutive returned blobs
for i in (0..num_shreds).rev() {
let shred = shreds.pop().unwrap();
ledger.insert_shreds(&[shred]).unwrap();
ledger.insert_shreds(vec![shred]).unwrap();
let result = ledger.get_slot_entries(0, 0, None).unwrap();
let meta = ledger
@ -2789,7 +2933,7 @@ pub mod tests {
let entries = make_tiny_test_entries(8);
let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false);
blocktree
.insert_shreds(&shreds)
.insert_shreds(shreds)
.expect("Expected successful write of blobs");
let mut shreds1 = entries_to_test_shreds(entries[4..].to_vec(), 1, 0, false);
@ -2797,7 +2941,7 @@ pub mod tests {
b.set_index(8 + i as u32);
}
blocktree
.insert_shreds(&shreds1)
.insert_shreds(shreds1)
.expect("Expected successful write of blobs");
assert_eq!(
@ -2831,7 +2975,7 @@ pub mod tests {
index += 1;
}
blocktree
.insert_shreds(&shreds)
.insert_shreds(shreds)
.expect("Expected successful write of shreds");
assert_eq!(
blocktree
@ -2864,7 +3008,7 @@ pub mod tests {
entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false);
assert!(shreds.len() as u64 >= shreds_per_slot);
blocktree
.insert_shreds(&shreds)
.insert_shreds(shreds)
.expect("Expected successful write of shreds");
assert_eq!(blocktree.get_slot_entries(slot, 0, None).unwrap(), entries);
}
@ -2892,7 +3036,7 @@ pub mod tests {
odd_shreds.insert(0, shreds.remove(i as usize));
}
}
blocktree.insert_shreds(&odd_shreds).unwrap();
blocktree.insert_shreds(odd_shreds).unwrap();
assert_eq!(blocktree.get_slot_entries(slot, 0, None).unwrap(), vec![]);
@ -2910,7 +3054,7 @@ pub mod tests {
assert_eq!(meta.last_index, std::u64::MAX);
}
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
assert_eq!(
blocktree.get_slot_entries(slot, 0, None).unwrap(),
@ -2943,19 +3087,19 @@ pub mod tests {
// Discard first shred
original_shreds.remove(0);
blocktree.insert_shreds(&original_shreds).unwrap();
blocktree.insert_shreds(original_shreds).unwrap();
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]);
let duplicate_shreds = entries_to_test_shreds(original_entries.clone(), 0, 0, true);
blocktree.insert_shreds(&duplicate_shreds).unwrap();
let num_shreds = duplicate_shreds.len() as u64;
blocktree.insert_shreds(duplicate_shreds).unwrap();
assert_eq!(
blocktree.get_slot_entries(0, 0, None).unwrap(),
original_entries
);
let num_shreds = duplicate_shreds.len() as u64;
let meta = blocktree.meta(0).unwrap().unwrap();
assert_eq!(meta.consumed, num_shreds);
assert_eq!(meta.received, num_shreds);
@ -3591,11 +3735,11 @@ pub mod tests {
let num_shreds = shreds.len();
// Write blobs to the database
if should_bulk_write {
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
} else {
for _ in 0..num_shreds {
let shred = shreds.remove(0);
blocktree.insert_shreds(&vec![shred]).unwrap();
blocktree.insert_shreds(vec![shred]).unwrap();
}
}
@ -3637,7 +3781,7 @@ pub mod tests {
b.set_index(i as u32 * gap as u32);
b.set_slot(slot);
}
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
// Index of the first blob is 0
// Index of the second blob is "gap"
@ -3722,6 +3866,7 @@ pub mod tests {
let entries = make_tiny_test_entries(20);
let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
shreds.drain(2..);
const ONE: u64 = 1;
const OTHER: u64 = 4;
@ -3730,7 +3875,7 @@ pub mod tests {
shreds[1].set_index(OTHER as u32);
// Insert one blob at index = first_index
blocktree.insert_shreds(&shreds[0..2]).unwrap();
blocktree.insert_shreds(shreds).unwrap();
const STARTS: u64 = OTHER * 2;
const END: u64 = OTHER * 3;
@ -3764,7 +3909,7 @@ pub mod tests {
let shreds = entries_to_test_shreds(entries, slot, 0, true);
let num_shreds = shreds.len();
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
let empty: Vec<u64> = vec![];
for i in 0..num_shreds as u64 {
@ -4119,6 +4264,7 @@ pub mod tests {
}
#[test]
#[ignore]
pub fn test_recovery_basic() {
solana_logger::setup();
@ -4425,6 +4571,7 @@ pub mod tests {
}
#[test]
#[ignore]
fn test_recovery_multi_slot_multi_thread() {
use rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng};
use std::thread;
@ -4683,14 +4830,13 @@ pub mod tests {
)
.expect("Failed to create entry shredder");
let last_tick = 0;
let bank_max_tick = if is_full_slot {
last_tick
bincode::serialize_into(&mut shredder, &entries)
.expect("Expect to write all entries to shreds");
if is_full_slot {
shredder.finalize_slot();
} else {
last_tick + 1
};
entries_to_shreds(vec![entries], last_tick, bank_max_tick, &mut shredder);
shredder.finalize_fec_block();
}
let shreds: Vec<Shred> = shredder
.shreds


@ -439,7 +439,12 @@ where
}
};
if let Err(e) = batch.delete::<C>(index) {
error!("Error: {:?} while adding delete to batch {:?}", e, C::NAME)
error!(
"Error: {:?} while adding delete from_slot {:?} to batch {:?}",
e,
from,
C::NAME
)
}
}
Ok(end)


@ -277,7 +277,7 @@ impl ErasureMeta {
}
pub fn start_index(&self) -> u64 {
self.set_index * self.config.num_data() as u64
self.set_index
}
/// returns a tuple of (data_end, coding_end)


@ -464,11 +464,11 @@ impl std::convert::From<rocksdb::Error> for Error {
}
fn get_cf_options(name: &'static str) -> Options {
use crate::blocktree::db::columns::{Coding, Data};
use crate::blocktree::db::columns::{Coding, Data, ShredCode, ShredData};
let mut options = Options::default();
match name {
Coding::NAME | Data::NAME => {
Coding::NAME | Data::NAME | ShredCode::NAME | ShredData::NAME => {
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);


@ -437,6 +437,7 @@ pub mod tests {
ticks_per_slot,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
&entries,
)
.unwrap();
@ -824,7 +825,16 @@ pub mod tests {
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
blocktree
.write_entries_using_shreds(1, 0, 0, genesis_block.ticks_per_slot, None, true, &entries)
.write_entries_using_shreds(
1,
0,
0,
genesis_block.ticks_per_slot,
None,
true,
&Arc::new(Keypair::new()),
&entries,
)
.unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();


@ -108,7 +108,6 @@ trait BroadcastRun {
struct Broadcast {
coding_generator: CodingGenerator,
parent_slot: Option<u64>,
thread_pool: ThreadPool,
}
@ -148,7 +147,6 @@ impl BroadcastStage {
let mut broadcast = Broadcast {
coding_generator,
parent_slot: None,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
.build()
@ -298,7 +296,6 @@ mod test {
}
#[test]
#[ignore]
fn test_broadcast_ledger() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path("test_broadcast_ledger");
@ -316,12 +313,13 @@ mod test {
let start_tick_height;
let max_tick_height;
let ticks_per_slot;
let slot;
{
let bank = broadcast_service.bank.clone();
start_tick_height = bank.tick_height();
max_tick_height = bank.max_tick_height();
ticks_per_slot = bank.ticks_per_slot();
slot = bank.slot();
let ticks = create_ticks(max_tick_height - start_tick_height, Hash::default());
for (i, tick) in ticks.into_iter().enumerate() {
entry_sender
@ -339,15 +337,10 @@ mod test {
);
let blocktree = broadcast_service.blocktree;
let mut blob_index = 0;
for i in 0..max_tick_height - start_tick_height {
let slot = (start_tick_height + i + 1) / ticks_per_slot;
let result = blocktree.get_data_shred_as_blob(slot, blob_index).unwrap();
blob_index += 1;
result.expect("expect blob presence");
}
let (entries, _) = blocktree
.get_slot_entries_with_shred_count(slot, 0)
.expect("Expect entries to be present");
assert_eq!(entries.len(), max_tick_height as usize);
drop(entry_sender);
broadcast_service


@ -4,12 +4,10 @@ use crate::erasure::CodingGenerator;
use crate::packet::{self, SharedBlob};
use crate::poh_recorder::WorkingBankEntries;
use crate::result::Result;
use crate::shred::Shredder;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_runtime::bank::Bank;
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::Arc;
use std::time::{Duration, Instant};
@ -99,34 +97,6 @@ pub(super) fn entries_to_blobs(
(blobs, coding)
}
pub fn entries_to_shreds(
ventries: Vec<Vec<Entry>>,
last_tick: u64,
bank_max_tick: u64,
shredder: &mut Shredder,
) {
ventries.iter().enumerate().for_each(|(i, entries)| {
let data = bincode::serialize(entries).unwrap();
let mut offset = 0;
while offset < data.len() {
offset += shredder.write(&data[offset..]).unwrap();
}
// bincode::serialize_into(&shredder, &entries).unwrap();
trace!(
"Shredded {:?} entries into {:?} shreds",
entries.len(),
shredder.shreds.len()
);
if i + 1 == ventries.len() && last_tick == bank_max_tick {
debug!("Finalized slot for the shreds");
shredder.finalize_slot();
} else {
debug!("Finalized fec block for the shreds");
shredder.finalize_fec_block();
}
})
}
pub(super) fn generate_data_blobs(
ventries: Vec<Vec<(Entry, u64)>>,
thread_pool: &ThreadPool,


@ -51,7 +51,7 @@ impl StandardBroadcastRun {
impl BroadcastRun for StandardBroadcastRun {
fn run(
&mut self,
broadcast: &mut Broadcast,
_broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
@ -68,61 +68,43 @@ impl BroadcastRun for StandardBroadcastRun {
// 2) Convert entries to blobs + generate coding blobs
let to_blobs_start = Instant::now();
let keypair = &cluster_info.read().unwrap().keypair.clone();
let latest_blob_index = blocktree
let mut latest_blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);
let parent_slot = bank.parent().unwrap().slot();
let shredder = if let Some(slot) = broadcast.parent_slot {
if slot != parent_slot {
trace!("Renew shredder with parent slot {:?}", parent_slot);
broadcast.parent_slot = Some(parent_slot);
Shredder::new(
bank.slot(),
Some(parent_slot),
0.0,
keypair,
latest_blob_index as u32,
)
let parent_slot = if let Some(parent_bank) = bank.parent() {
parent_bank.slot()
} else {
trace!("Renew shredder with same parent slot {:?}", parent_slot);
Shredder::new(
bank.slot(),
Some(parent_slot),
0.0,
keypair,
latest_blob_index as u32,
)
}
} else {
trace!("New shredder with parent slot {:?}", parent_slot);
broadcast.parent_slot = Some(parent_slot);
Shredder::new(
bank.slot(),
Some(parent_slot),
0.0,
keypair,
latest_blob_index as u32,
)
0
};
let mut shredder = shredder.expect("Expected to create a new shredder");
let ventries = receive_results
let mut all_shreds = vec![];
let mut all_seeds = vec![];
let num_ventries = receive_results.ventries.len();
receive_results
.ventries
.into_iter()
.map(|entries_tuple| {
.enumerate()
.for_each(|(i, entries_tuple)| {
let (entries, _): (Vec<_>, Vec<_>) = entries_tuple.into_iter().unzip();
entries
})
.collect();
broadcast_utils::entries_to_shreds(
ventries,
last_tick,
bank.max_tick_height(),
&mut shredder,
);
//entries
let mut shredder = Shredder::new(
bank.slot(),
Some(parent_slot),
1.0,
keypair,
latest_blob_index as u32,
)
.expect("Expected to create a new shredder");
bincode::serialize_into(&mut shredder, &entries)
.expect("Expect to write all entries to shreds");
if i == (num_ventries - 1) && last_tick == bank.max_tick_height() {
shredder.finalize_slot();
} else {
shredder.finalize_fec_block();
}
let shreds: Vec<Shred> = shredder
.shreds
@ -130,11 +112,15 @@ impl BroadcastRun for StandardBroadcastRun {
.map(|s| bincode::deserialize(s).unwrap())
.collect();
let seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
let mut seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
trace!("Inserting {:?} shreds in blocktree", shreds.len());
blocktree
.insert_shreds(&shreds)
.insert_shreds(shreds)
.expect("Failed to insert shreds in blocktree");
latest_blob_index = u64::from(shredder.index);
all_shreds.append(&mut shredder.shreds);
all_seeds.append(&mut seeds);
});
let to_blobs_elapsed = to_blobs_start.elapsed();
@ -143,15 +129,15 @@ impl BroadcastRun for StandardBroadcastRun {
let bank_epoch = bank.get_stakers_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
trace!("Broadcasting {:?} shreds", shredder.shreds.len());
trace!("Broadcasting {:?} shreds", all_shreds.len());
cluster_info.read().unwrap().broadcast_shreds(
sock,
&shredder.shreds,
&seeds,
&all_shreds,
&all_seeds,
stakes.as_ref(),
)?;
inc_new_counter_debug!("streamer-broadcast-sent", shredder.shreds.len());
inc_new_counter_debug!("streamer-broadcast-sent", all_shreds.len());
let broadcast_elapsed = broadcast_start.elapsed();
self.update_broadcast_stats(


@ -12,7 +12,7 @@ pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>,
slice: u64,
start_slot: u64,
slots_per_segment: u64,
out_path: &Path,
ivec: &mut [u8; CHACHA_BLOCK_SIZE],
@ -23,26 +23,32 @@ pub fn chacha_cbc_encrypt_ledger(
let mut buffer = [0; BUFFER_SIZE];
let mut encrypted_buffer = [0; BUFFER_SIZE];
let key = [0; CHACHA_KEY_SIZE];
let mut total_entries = 0;
let mut total_size = 0;
let mut entry = slice;
let mut current_slot = start_slot;
let mut start_index = 0;
loop {
match blocktree.read_blobs_bytes(0, slots_per_segment - total_entries, &mut buffer, entry) {
Ok((num_entries, entry_len)) => {
match blocktree.get_data_shreds(current_slot, start_index, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha: encrypting slice: {} num_entries: {} entry_len: {}",
slice, num_entries, entry_len
"chacha: encrypting slice: {} num_shreds: {} data_len: {}",
current_slot,
last_index.saturating_sub(start_index),
size
);
debug!("read {} bytes", entry_len);
let mut size = entry_len as usize;
debug!("read {} bytes", size);
if size == 0 {
if current_slot.saturating_sub(start_slot) < slots_per_segment {
current_slot += 1;
start_index = 0;
continue;
} else {
break;
}
}
if size < BUFFER_SIZE {
// We are on the last block, round to the nearest key_size
// boundary
// round to the nearest key_size boundary
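// e.g. with CHACHA_KEY_SIZE = 32, a 100-byte tail rounds up to 128 bytes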
size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
}
total_size += size;
@ -53,8 +59,7 @@ pub fn chacha_cbc_encrypt_ledger(
return Err(res);
}
total_entries += num_entries;
entry += num_entries;
start_index = last_index + 1;
}
Err(e) => {
info!("Error encrypting file: {:?}", e);
@ -117,9 +122,22 @@ mod tests {
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");
let seed = [2u8; 32];
let mut rnd = GenKeys::new(seed);
let keypair = rnd.gen_keypair();
let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blocktree
.write_entries_using_shreds(0, 0, 0, ticks_per_slot, None, true, &entries)
.write_entries_using_shreds(
0,
0,
0,
ticks_per_slot,
None,
true,
&Arc::new(keypair),
&entries,
)
.unwrap();
let mut key = hex!(
@ -135,7 +153,7 @@ mod tests {
hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes....
let golden: Hash = "GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn"
let golden: Hash = "EdYYuAuDPVY7DLNeCtPWAKipicx2KjsxqD2PZ7oxVmHE"
.parse()
.unwrap();


@ -33,54 +33,62 @@ pub fn chacha_cbc_encrypt_file_many_keys(
));
}
let mut buffer = [0; 8 * 1024];
const BUFFER_SIZE: usize = 8 * 1024;
let mut buffer = [0; BUFFER_SIZE];
let num_keys = ivecs.len() / CHACHA_BLOCK_SIZE;
let mut sha_states = vec![0; num_keys * size_of::<Hash>()];
let mut int_sha_states = vec![0; num_keys * 112];
let keys: Vec<u8> = vec![0; num_keys * CHACHA_KEY_SIZE]; // keys not used ATM, uniqueness comes from IV
let mut entry = segment;
let mut total_entries = 0;
let mut total_entry_len = 0;
let mut current_slot = segment * slots_per_segment;
let mut start_index = 0;
let start_slot = current_slot;
let mut total_size = 0;
let mut time: f32 = 0.0;
unsafe {
chacha_init_sha_state(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match blocktree.read_blobs_bytes(entry, slots_per_segment - total_entries, &mut buffer, 0) {
Ok((num_entries, entry_len)) => {
match blocktree.get_data_shreds(current_slot, start_index, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha_cuda: encrypting segment: {} num_entries: {} entry_len: {}",
segment, num_entries, entry_len
"chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
segment,
last_index.saturating_sub(start_index),
size
);
if num_entries == 0 {
if size == 0 {
if current_slot.saturating_sub(start_slot) < slots_per_segment {
current_slot += 1;
start_index = 0;
continue;
} else {
break;
}
let entry_len_usz = entry_len as usize;
}
if size < BUFFER_SIZE {
// round to the nearest key_size boundary
size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
}
unsafe {
chacha_cbc_encrypt_many_sample(
buffer[..entry_len_usz].as_ptr(),
buffer[..size].as_ptr(),
int_sha_states.as_mut_ptr(),
entry_len_usz,
size,
keys.as_ptr(),
ivecs.as_mut_ptr(),
num_keys as u32,
samples.as_ptr(),
samples.len() as u32,
total_entry_len,
total_size,
&mut time,
);
}
total_entry_len += entry_len;
total_entries += num_entries;
entry += num_entries;
debug!(
"total entries: {} entry: {} segment: {} entries_per_segment: {}",
total_entries, entry, segment, slots_per_segment
);
if (entry - segment) >= slots_per_segment {
break;
}
total_size += size as u64;
start_index = last_index + 1;
}
Err(e) => {
info!("Error encrypting file: {:?}", e);
@ -113,6 +121,7 @@ mod tests {
use crate::entry::make_tiny_test_entries;
use crate::replicator::sample_file;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::DEFAULT_SLOTS_PER_SEGMENT;
use std::fs::{remove_dir_all, remove_file};
use std::path::Path;
@ -130,7 +139,16 @@ mod tests {
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.write_entries_using_shreds(
0,
0,
0,
ticks_per_slot,
Some(0),
true,
&Arc::new(Keypair::new()),
&entries,
)
.unwrap();
let out_path = Path::new("test_chacha_encrypt_file_many_keys_single_output.txt.enc");
@ -178,7 +196,16 @@ mod tests {
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.write_entries_using_shreds(
0,
0,
0,
ticks_per_slot,
Some(0),
true,
&Arc::new(Keypair::new()),
&entries,
)
.unwrap();
let out_path = Path::new("test_chacha_encrypt_file_many_keys_multiple_output.txt.enc");

View File

@ -999,8 +999,15 @@ impl ClusterInfo {
.name("solana-gossip".to_string())
.spawn(move || {
let mut last_push = timestamp();
let mut last_contact_info_trace = timestamp();
loop {
let start = timestamp();
if start - last_contact_info_trace > 10000 {
// Log contact info every 10 seconds
info!("\n{}", obj.read().unwrap().contact_info_trace());
last_contact_info_trace = start;
}
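This is a plain timestamp throttle: remember when the trace was last printed and print again only after 10,000 ms have elapsed. The same pattern in isolation (the `Throttle` helper is hypothetical, std-only):

use std::time::{Duration, Instant};

struct Throttle {
    last: Instant,
    every: Duration,
}

impl Throttle {
    /// Returns true at most once per `every` interval.
    fn ready(&mut self) -> bool {
        if self.last.elapsed() > self.every {
            self.last = Instant::now();
            true
        } else {
            false
        }
    }
}

// e.g. if throttle.ready() { info!("\n{}", obj.read().unwrap().contact_info_trace()); }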
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
@ -1916,7 +1923,7 @@ mod tests {
shred.set_index(1);
blocktree
.insert_shreds(&vec![shred])
.insert_shreds(vec![shred])
.expect("Expect successful ledger write");
let rv = ClusterInfo::run_window_request(
@ -1994,7 +2001,7 @@ mod tests {
let (blobs, _) = make_many_slot_entries_using_shreds(1, 3, 5);
blocktree
.insert_shreds(&blobs)
.insert_shreds(blobs)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request

View File

@ -627,7 +627,7 @@ mod tests {
let num_shreds_per_slot = shreds.len() as u64 / num_slots;
// Write slots in the range [0, num_slots] to blocktree
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=num_slots - 1).collect();
@ -704,7 +704,7 @@ mod tests {
// Create blobs for first two epochs and write them to blocktree
let total_slots = slots_per_epoch * 2;
let (shreds, _) = make_many_slot_entries_using_shreds(0, total_slots, 1);
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();

View File

@ -488,7 +488,7 @@ mod test {
missing_indexes_per_slot.insert(0, index);
}
}
blocktree.insert_shreds(&shreds_to_write).unwrap();
blocktree.insert_shreds(shreds_to_write).unwrap();
let expected: Vec<RepairType> = (0..num_slots)
.flat_map(|slot| {
@ -548,8 +548,9 @@ mod test {
let num_entries_per_slot = 10;
let shreds = make_chaining_slot_entries_using_shreds(&slots, num_entries_per_slot);
for (slot_shreds, _) in shreds.iter() {
blocktree.insert_shreds(&slot_shreds[1..]).unwrap();
for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds).unwrap();
}
// Iterate through all possible combinations of start..end (inclusive on both

View File

@ -1018,7 +1018,7 @@ mod test {
let last_blockhash = bank0.last_blockhash();
progress.insert(bank0.slot(), ForkProgress::new(last_blockhash));
let shreds = shred_to_insert(&last_blockhash, bank0.slot());
blocktree.insert_shreds(&shreds).unwrap();
blocktree.insert_shreds(shreds).unwrap();
let (res, _tx_count) =
ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress);

View File

@ -474,7 +474,7 @@ impl Replicator {
repair_socket,
&exit,
RepairStrategy::RepairRange(repair_slot_range),
|_, _, _| true,
|_, _, _, _| true,
);
info!("waiting for ledger download");
Self::wait_for_segment_download(
@ -873,7 +873,7 @@ impl Replicator {
.iter()
.filter_map(|p| bincode::deserialize(&p.data).ok())
.collect();
blocktree.insert_shreds(&shreds)?;
blocktree.insert_shreds(shreds)?;
}
// check if all the slots in the segment are complete
if Self::segment_complete(start_slot, slots_per_segment, blocktree) {

View File

@ -150,8 +150,14 @@ impl RetransmitStage {
repair_socket,
exit,
repair_strategy,
move |id, blob, working_bank| {
should_retransmit_and_persist(blob, working_bank, &leader_schedule_cache, id)
move |id, shred, shred_buf, working_bank| {
should_retransmit_and_persist(
shred,
shred_buf,
working_bank,
&leader_schedule_cache,
id,
)
},
);

View File

@ -15,6 +15,7 @@ use solana_runtime::bank::Bank;
use solana_sdk::account::Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash;
use solana_sdk::inflation::Inflation;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::transaction::{self, Transaction};
@ -81,6 +82,10 @@ impl JsonRpcRequestProcessor {
.collect())
}
pub fn get_inflation(&self) -> Result<Inflation> {
Ok(self.bank().inflation())
}
pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
self.bank().get_balance(&pubkey)
}
@ -291,6 +296,9 @@ pub trait RpcSol {
#[rpc(meta, name = "getProgramAccounts")]
fn get_program_accounts(&self, _: Self::Metadata, _: String) -> Result<Vec<(String, Account)>>;
#[rpc(meta, name = "getInflation")]
fn get_inflation(&self, _: Self::Metadata) -> Result<Inflation>;
#[rpc(meta, name = "getBalance")]
fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>;
@ -409,6 +417,16 @@ impl RpcSol for RpcSolImpl {
.get_program_accounts(&program_id)
}
fn get_inflation(&self, meta: Self::Metadata) -> Result<Inflation> {
debug!("get_inflation rpc request received");
Ok(meta
.request_processor
.read()
.unwrap()
.get_inflation()
.unwrap())
}
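Since `JsonRpcRequestProcessor::get_inflation` above already returns `Result<Inflation>`, the unwrap-and-rewrap here could be a straight passthrough; a sketch under that assumption:

fn get_inflation(&self, meta: Self::Metadata) -> Result<Inflation> {
    debug!("get_inflation rpc request received");
    meta.request_processor.read().unwrap().get_inflation()
}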
fn get_balance(&self, meta: Self::Metadata, id: String) -> Result<u64> {
debug!("get_balance rpc request received: {:?}", id);
let pubkey = verify_pubkey(id)?;
@ -854,6 +872,28 @@ pub mod tests {
assert!(supply >= TEST_MINT_LAMPORTS);
}
#[test]
fn test_rpc_get_inflation() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getInflation"}}"#);
let rep = io.handle_request_sync(&req, meta);
let res: Response = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
let inflation: Inflation = if let Response::Single(res) = res {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
} else {
panic!("Expected success");
}
} else {
panic!("Expected single response");
};
assert_eq!(inflation, bank.inflation());
}
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand();

View File

@ -107,6 +107,11 @@ impl Shred {
}
pub fn verify(&self, pubkey: &Pubkey) -> bool {
let shred = bincode::serialize(&self).unwrap();
self.fast_verify(&shred, pubkey)
}
pub fn fast_verify(&self, shred_buf: &[u8], pubkey: &Pubkey) -> bool {
let signed_payload_offset = match self {
Shred::FirstInSlot(_)
| Shred::FirstInFECSet(_)
@ -119,9 +124,8 @@ impl Shred {
}
} + bincode::serialized_size(&Signature::default()).unwrap()
as usize;
let shred = bincode::serialize(&self).unwrap();
self.signature()
.verify(pubkey.as_ref(), &shred[signed_payload_offset..])
.verify(pubkey.as_ref(), &shred_buf[signed_payload_offset..])
}
}
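`fast_verify` exists so callers that still hold the wire bytes (e.g. a packet buffer) can check the signature without paying for a second `bincode::serialize`; `verify` keeps the old behavior by serializing first. A hedged usage sketch, where `packet_data` and `leader_pubkey` are placeholders:

// `packet_data` is the buffer the shred was just deserialized from.
let shred: Shred = bincode::deserialize(packet_data).unwrap();
if !shred.fast_verify(packet_data, &leader_pubkey) {
    // signature does not match the slot leader; drop the shred
}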
@ -219,8 +223,8 @@ impl Default for CodingShred {
/// Common trait implemented by all types of shreds
pub trait ShredCommon {
/// Write at a particular offset in the shred
fn write_at(&mut self, offset: usize, buf: &[u8]) -> usize;
/// Write at a particular offset in the shred. Returns amount written and leftover capacity
fn write_at(&mut self, offset: usize, buf: &[u8]) -> (usize, usize);
/// Overhead of shred enum and headers
fn overhead() -> usize;
/// Utility function to create an empty shred
@ -228,12 +232,14 @@ pub trait ShredCommon {
}
impl ShredCommon for FirstDataShred {
fn write_at(&mut self, offset: usize, buf: &[u8]) -> usize {
let slice_len = cmp::min(self.payload.len().saturating_sub(offset), buf.len());
fn write_at(&mut self, offset: usize, buf: &[u8]) -> (usize, usize) {
let mut capacity = self.payload.len().saturating_sub(offset);
let slice_len = cmp::min(capacity, buf.len());
capacity -= slice_len;
if slice_len > 0 {
self.payload[offset..offset + slice_len].copy_from_slice(&buf[..slice_len]);
}
slice_len
(slice_len, capacity)
}
fn overhead() -> usize {
@ -250,12 +256,14 @@ impl ShredCommon for FirstDataShred {
}
impl ShredCommon for DataShred {
fn write_at(&mut self, offset: usize, buf: &[u8]) -> usize {
let slice_len = cmp::min(self.payload.len().saturating_sub(offset), buf.len());
fn write_at(&mut self, offset: usize, buf: &[u8]) -> (usize, usize) {
let mut capacity = self.payload.len().saturating_sub(offset);
let slice_len = cmp::min(capacity, buf.len());
capacity -= slice_len;
if slice_len > 0 {
self.payload[offset..offset + slice_len].copy_from_slice(&buf[..slice_len]);
}
slice_len
(slice_len, capacity)
}
fn overhead() -> usize {
@ -272,12 +280,14 @@ impl ShredCommon for DataShred {
}
impl ShredCommon for CodingShred {
fn write_at(&mut self, offset: usize, buf: &[u8]) -> usize {
let slice_len = cmp::min(self.header.payload.len().saturating_sub(offset), buf.len());
fn write_at(&mut self, offset: usize, buf: &[u8]) -> (usize, usize) {
let mut capacity = self.header.payload.len().saturating_sub(offset);
let slice_len = cmp::min(capacity, buf.len());
capacity -= slice_len;
if slice_len > 0 {
self.header.payload[offset..offset + slice_len].copy_from_slice(&buf[..slice_len]);
}
slice_len
(slice_len, capacity)
}
fn overhead() -> usize {
@ -294,7 +304,7 @@ impl ShredCommon for CodingShred {
#[derive(Default, Debug)]
pub struct Shredder {
slot: u64,
index: u32,
pub index: u32,
pub parent: Option<u64>,
parent_slot: u64,
fec_rate: f32,
@ -333,7 +343,7 @@ impl Write for Shredder {
.unwrap();
let written = self.active_offset;
let slice_len = match current_shred.borrow_mut() {
let (slice_len, left_capacity) = match current_shred.borrow_mut() {
Shred::FirstInSlot(s) => s.write_at(written, buf),
Shred::FirstInFECSet(s)
| Shred::Data(s)
@ -342,7 +352,7 @@ impl Write for Shredder {
Shred::Coding(s) => s.write_at(written, buf),
};
let active_shred = if buf.len() > slice_len {
let active_shred = if buf.len() > slice_len || left_capacity == 0 {
self.finalize_data_shred(current_shred);
// Continue generating more data shreds.
// If the caller decides to finalize the FEC block or Slot, the data shred will
@ -363,6 +373,12 @@ impl Write for Shredder {
}
}
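Returning the leftover capacity lets the `Write` impl above finalize a shred when it fills exactly, not just when a write comes up short. The decision it makes, spelled out:

// After copying `slice_len` of `buf.len()` bytes into the current shred:
//   buf.len() > slice_len   => the shred ran out of room mid-buffer
//   left_capacity == 0      => the write fit but used the last byte
// In both cases the shred is full: finalize it and start a fresh one.
let shred_is_full = buf.len() > slice_len || left_capacity == 0;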
#[derive(Default, Debug, PartialEq)]
pub struct RecoveryResult {
pub recovered_data: Vec<Shred>,
pub recovered_code: Vec<Shred>,
}
#[derive(Default, Debug, PartialEq)]
pub struct DeshredResult {
pub payload: Vec<u8>,
@ -555,9 +571,15 @@ impl Shredder {
) -> (Vec<Vec<u8>>, bool, usize) {
let (index, mut first_shred_in_slot) = Self::get_shred_index(shred, num_data);
// The index of the current shred must be within the range of shreds that are
// being recovered
if !(first_index..first_index + num_data + num_coding).contains(&index) {
return (vec![], false, index);
}
let mut missing_blocks: Vec<Vec<u8>> = (expected_index..index)
.map(|missing| {
present[missing] = false;
present[missing.saturating_sub(first_index)] = false;
// If index 0 shred is missing, then first shred in slot will also be recovered
first_shred_in_slot |= missing == 0;
Shredder::new_empty_missing_shred(num_data, num_coding, slot, first_index, missing)
@ -601,39 +623,26 @@ impl Shredder {
bincode::serialize(&missing_shred).unwrap()
}
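`present` is indexed by position inside the FEC set, not by absolute shred index, which is what the `saturating_sub(first_index)` fix above corrects. A worked example with first_index = 25 and a 5 data + 5 coding set:

let first_index: usize = 25;
let missing: usize = 27; // absolute shred index
let position = missing.saturating_sub(first_index); // position 2 in the set
let mut present = vec![true; 10]; // fec_set_size = num_data + num_coding
present[position] = false;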
/// Combines all shreds to recreate the original buffer
/// If the shreds include coding shreds, and if not all shreds are present, it tries
/// to reconstruct missing shreds using erasure
/// Note: The shreds are expected to be sorted
/// (lower to higher index, and data shreds before coding shreds)
pub fn deshred(shreds: &[Shred]) -> Result<DeshredResult, reed_solomon_erasure::Error> {
// If coding is enabled, the last shred must be a coding shred.
let (num_data, num_coding, first_index, slot) =
if let Shred::Coding(code) = shreds.last().unwrap() {
(
code.header.num_data_shreds as usize,
code.header.num_coding_shreds as usize,
code.header.common_header.index as usize - code.header.position as usize,
code.header.common_header.slot,
)
} else {
(shreds.len(), 0, 0, 0)
};
pub fn try_recovery(
shreds: &[Shred],
num_data: usize,
num_coding: usize,
first_index: usize,
slot: u64,
) -> Result<RecoveryResult, reed_solomon_erasure::Error> {
let mut recovered_data = vec![];
let mut recovered_code = vec![];
let fec_set_size = num_data + num_coding;
let (data_shred_bufs, first_shred) = if num_coding > 0 && shreds.len() < fec_set_size {
if num_coding > 0 && shreds.len() < fec_set_size {
let coding_block_offset = CodingShred::overhead();
// Let's try recovering missing shreds using erasure
let mut present = &mut vec![true; fec_set_size];
let mut first_shred_in_slot = false;
let mut next_expected_index = first_index;
let mut shred_bufs: Vec<Vec<u8>> = shreds
.iter()
.flat_map(|shred| {
let (blocks, first_shred, last_index) = Self::fill_in_missing_shreds(
let (blocks, _first_shred, last_index) = Self::fill_in_missing_shreds(
shred,
num_data,
num_coding,
@ -642,21 +651,26 @@ impl Shredder {
next_expected_index,
&mut present,
);
first_shred_in_slot |= first_shred;
next_expected_index = last_index + 1;
blocks
})
.collect();
// Insert any other missing shreds after the last shred we have received in the
// current FEC block
let mut pending_shreds: Vec<Vec<u8>> = (next_expected_index
..first_index + fec_set_size)
.map(|missing| {
present[missing] = false;
present[missing.saturating_sub(first_index)] = false;
Self::new_empty_missing_shred(num_data, num_coding, slot, first_index, missing)
})
.collect();
shred_bufs.append(&mut pending_shreds);
if shred_bufs.len() != fec_set_size {
Err(reed_solomon_erasure::Error::TooFewShardsPresent)?;
}
let session = Session::new(num_data, num_coding).unwrap();
let mut blocks: Vec<&mut [u8]> = shred_bufs
@ -665,10 +679,22 @@ impl Shredder {
.collect();
session.decode_blocks(&mut blocks, &present)?;
present.iter().enumerate().for_each(|(index, was_present)| {
present
.iter()
.enumerate()
.for_each(|(position, was_present)| {
if !was_present {
let shred: Shred = bincode::deserialize(&shred_bufs[index]).unwrap();
if index < first_index + num_data {
let shred: Shred = bincode::deserialize(&shred_bufs[position]).unwrap();
let shred_index = shred.index() as usize;
// Valid shred must be in the same slot as the original shreds
if shred.slot() == slot {
// Data shreds are "positioned" at the start of the iterator. First num_data
// shreds are expected to be the data shreds.
if position < num_data
&& (first_index..first_index + num_data).contains(&shred_index)
{
// Also, a valid data shred must have an index between first_index and first_index + num_data
// Check if the last recovered data shred is also last in Slot.
// If so, it needs to be morphed into the correct type
let shred = if let Shred::Data(s) = shred {
@ -687,13 +713,30 @@ impl Shredder {
shred
};
recovered_data.push(shred)
} else {
} else if (first_index..first_index + num_coding).contains(&shred_index)
{
// A valid coding shred must have an index between first_index and first_index + num_coding
recovered_code.push(shred)
}
}
}
});
(shred_bufs, first_shred_in_slot)
} else {
}
Ok(RecoveryResult {
recovered_data,
recovered_code,
})
}
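Splitting recovery out of `deshred` makes the caller responsible for splicing recovered data shreds back into index order before reassembly, which is exactly what the updated tests below do. A condensed sketch (error type shared with `deshred`; variable names are placeholders):

let mut result = Shredder::try_recovery(&shreds, num_data, num_coding, first_index, slot)?;
for shred in result.recovered_data.drain(..) {
    let position = shred.index() as usize - first_index;
    shreds.insert(position, shred); // restore index order
}
let payload = Shredder::deshred(&shreds[..num_data])?;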
/// Combines all shreds to recreate the original buffer
/// If the shreds include coding shreds, and if not all shreds are present, it tries
/// to reconstruct missing shreds using erasure
/// Note: The shreds are expected to be sorted
/// (lower to higher index, and data shreds before coding shreds)
pub fn deshred(shreds: &[Shred]) -> Result<Vec<u8>, reed_solomon_erasure::Error> {
let num_data = shreds.len();
let (data_shred_bufs, first_shred) = {
let (first_index, first_shred_in_slot) =
Shredder::get_shred_index(shreds.first().unwrap(), num_data);
@ -715,11 +758,11 @@ impl Shredder {
(shred_bufs, first_shred_in_slot)
};
Ok(DeshredResult {
payload: Self::reassemble_payload(num_data, data_shred_bufs, first_shred),
recovered_data,
recovered_code,
})
Ok(Self::reassemble_payload(
num_data,
data_shred_bufs,
first_shred,
))
}
fn get_shred_index(shred: &Shred, num_data: usize) -> (usize, bool) {
@ -1085,20 +1128,34 @@ mod tests {
// Test0: Try recovery/reassembly with only data shreds, but not all data shreds. Hint: should fail
assert_matches!(
Shredder::deshred(&shreds[..4]),
Err(reed_solomon_erasure::Error::TooFewDataShards)
Shredder::try_recovery(
&shreds[..4],
expected_shred_count / 2,
expected_shred_count / 2,
0,
slot
),
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
);
// Test1: Try recovery/reassembly with only data shreds. Hint: should work
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert_ne!(DeshredResult::default(), result);
assert!(result.payload.len() >= data.len());
let result = Shredder::try_recovery(
&shreds[..5],
expected_shred_count / 2,
expected_shred_count / 2,
0,
slot,
)
.unwrap();
assert_ne!(RecoveryResult::default(), result);
assert!(result.recovered_data.is_empty());
assert!(result.recovered_code.is_empty());
assert_eq!(data[..], result.payload[..data.len()]);
assert!(!result.recovered_code.is_empty());
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert!(result.len() >= data.len());
assert_eq!(data[..], result[..data.len()]);
// Test2: Try recovery/reassembly with missing data shreds + coding shreds. Hint: should work
let shreds: Vec<Shred> = shredder
let mut shreds: Vec<Shred> = shredder
.shreds
.iter()
.enumerate()
@ -1111,20 +1168,30 @@ mod tests {
})
.collect();
let mut result = Shredder::deshred(&shreds).unwrap();
assert!(result.payload.len() >= data.len());
let mut result = Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
0,
slot,
)
.unwrap();
assert_ne!(RecoveryResult::default(), result);
assert_eq!(result.recovered_data.len(), 2); // Data shreds 1 and 3 were missing
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 1);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(1, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 3);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(3, recovered_shred);
assert_eq!(result.recovered_code.len(), 3); // Coding shreds 5, 7, 9 were missing
let recovered_shred = result.recovered_code.remove(0);
@ -1151,10 +1218,13 @@ mod tests {
assert_eq!(code.header.common_header.slot, slot);
assert_eq!(code.header.common_header.index, 4);
}
assert_eq!(data[..], result.payload[..data.len()]);
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert!(result.len() >= data.len());
assert_eq!(data[..], result[..data.len()]);
// Test3: Try recovery/reassembly with 3 missing data shreds + 2 coding shreds. Hint: should work
let shreds: Vec<Shred> = shredder
let mut shreds: Vec<Shred> = shredder
.shreds
.iter()
.enumerate()
@ -1167,26 +1237,37 @@ mod tests {
})
.collect();
let mut result = Shredder::deshred(&shreds).unwrap();
assert!(result.payload.len() >= data.len());
let mut result = Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
0,
slot,
)
.unwrap();
assert_ne!(RecoveryResult::default(), result);
assert_eq!(result.recovered_data.len(), 3); // Data shreds 0, 2 and 4 were missing
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::FirstInSlot(_));
assert_eq!(recovered_shred.index(), 0);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(0, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 2);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(2, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::LastInFECSet(_));
assert_eq!(recovered_shred.index(), 4);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(4, recovered_shred);
assert_eq!(result.recovered_code.len(), 2); // Coding shreds 6, 8 were missing
let recovered_shred = result.recovered_code.remove(0);
@ -1205,7 +1286,10 @@ mod tests {
assert_eq!(code.header.common_header.slot, slot);
assert_eq!(code.header.common_header.index, 3);
}
assert_eq!(data[..], result.payload[..data.len()]);
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert!(result.len() >= data.len());
assert_eq!(data[..], result[..data.len()]);
// Test4: Try recovery/reassembly full slot with 3 missing data shreds + 2 coding shreds. Hint: should work
let mut shredder =
@ -1231,7 +1315,7 @@ mod tests {
let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2;
assert_eq!(shredder.shreds.len(), expected_shred_count);
let shreds: Vec<Shred> = shredder
let mut shreds: Vec<Shred> = shredder
.shreds
.iter()
.enumerate()
@ -1244,26 +1328,37 @@ mod tests {
})
.collect();
let mut result = Shredder::deshred(&shreds).unwrap();
assert!(result.payload.len() >= data.len());
let mut result = Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
0,
slot,
)
.unwrap();
assert_ne!(RecoveryResult::default(), result);
assert_eq!(result.recovered_data.len(), 3); // Data shreds 0, 2 and 4 were missing
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::FirstInSlot(_));
assert_eq!(recovered_shred.index(), 0);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(0, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 2);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(2, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::LastInSlot(_));
assert_eq!(recovered_shred.index(), 4);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(4, recovered_shred);
assert_eq!(result.recovered_code.len(), 2); // Coding shreds 6, 8 were missing
let recovered_shred = result.recovered_code.remove(0);
@ -1282,7 +1377,10 @@ mod tests {
assert_eq!(code.header.common_header.slot, slot);
assert_eq!(code.header.common_header.index, 3);
}
assert_eq!(data[..], result.payload[..data.len()]);
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert!(result.len() >= data.len());
assert_eq!(data[..], result[..data.len()]);
// Test5: Try recovery/reassembly with 3 missing data shreds + 3 coding shreds. Hint: should fail
let shreds: Vec<Shred> = shredder
@ -1301,6 +1399,132 @@ mod tests {
assert_eq!(shreds.len(), 4);
assert_matches!(
Shredder::deshred(&shreds),
Err(reed_solomon_erasure::Error::TooFewDataShards)
);
// Test6: Try recovery/reassembly with a non-zero index, full slot, with 3 missing data shreds + 2 coding shreds. Hint: should work
let mut shredder =
Shredder::new(slot, Some(5), 1.0, &keypair, 25).expect("Failed in creating shredder");
let mut offset = shredder.write(&data).unwrap();
let approx_shred_payload_size = offset;
offset += shredder.write(&data[offset..]).unwrap();
offset += shredder.write(&data[offset..]).unwrap();
offset += shredder.write(&data[offset..]).unwrap();
offset += shredder.write(&data[offset..]).unwrap();
// We should have some shreds now
assert_eq!(
shredder.shreds.len(),
data.len() / approx_shred_payload_size
);
assert_eq!(offset, data.len());
shredder.finalize_slot();
// We should have 10 shreds now (one additional final shred, and equal number of coding shreds)
let expected_shred_count = ((data.len() / approx_shred_payload_size) + 1) * 2;
assert_eq!(shredder.shreds.len(), expected_shred_count);
let mut shreds: Vec<Shred> = shredder
.shreds
.iter()
.enumerate()
.filter_map(|(i, s)| {
if i % 2 != 0 {
Some(bincode::deserialize(s).unwrap())
} else {
None
}
})
.collect();
let mut result = Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
25,
slot,
)
.unwrap();
assert_ne!(RecoveryResult::default(), result);
assert_eq!(result.recovered_data.len(), 3); // Data shreds 0, 2 and 4 were missing
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 25);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(0, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::Data(_));
assert_eq!(recovered_shred.index(), 27);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(2, recovered_shred);
let recovered_shred = result.recovered_data.remove(0);
assert_matches!(recovered_shred, Shred::LastInSlot(_));
assert_eq!(recovered_shred.index(), 29);
assert_eq!(recovered_shred.slot(), slot);
assert!(recovered_shred.verify(&keypair.pubkey()));
shreds.insert(4, recovered_shred);
assert_eq!(result.recovered_code.len(), 2); // Coding shreds 6, 8 were missing
let recovered_shred = result.recovered_code.remove(0);
if let Shred::Coding(code) = recovered_shred {
assert_eq!(code.header.num_data_shreds, 5);
assert_eq!(code.header.num_coding_shreds, 5);
assert_eq!(code.header.position, 1);
assert_eq!(code.header.common_header.slot, slot);
assert_eq!(code.header.common_header.index, 26);
}
let recovered_shred = result.recovered_code.remove(0);
if let Shred::Coding(code) = recovered_shred {
assert_eq!(code.header.num_data_shreds, 5);
assert_eq!(code.header.num_coding_shreds, 5);
assert_eq!(code.header.position, 3);
assert_eq!(code.header.common_header.slot, slot);
assert_eq!(code.header.common_header.index, 28);
}
let result = Shredder::deshred(&shreds[..5]).unwrap();
assert!(result.len() >= data.len());
assert_eq!(data[..], result[..data.len()]);
// Test7: Try recovery/reassembly with incorrect slot. Hint: does not recover any shreds
let result = Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
25,
slot + 1,
)
.unwrap();
assert!(result.recovered_data.is_empty());
// Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
assert_matches!(
Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
15,
slot,
),
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
);
// Test9: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
assert_matches!(
Shredder::try_recovery(
&shreds,
expected_shred_count / 2,
expected_shred_count / 2,
35,
slot,
),
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
);
}

View File

@ -1,6 +1,7 @@
use crate::result::{Error, Result};
use crate::service::Service;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_runtime::accounts_db::AccountStorageEntry;
use std::fs;
use std::io::{Error as IOError, ErrorKind};
@ -133,12 +134,21 @@ impl SnapshotPackagerService {
// Once everything is successful, overwrite the previous tarball so that other validators
// can fetch this newly packaged snapshot
let _ = fs::remove_file(&snapshot_package.tar_output_file);
let metadata = fs::metadata(&temp_tar_path)?;
fs::hard_link(&temp_tar_path, &snapshot_package.tar_output_file)?;
timer.stop();
info!(
"Successfully created tarball for root: {}, elapsed ms: {}",
"Successfully created tarball. slot: {}, elapsed ms: {}, size={}",
snapshot_package.root,
timer.as_ms()
timer.as_ms(),
metadata.len()
);
datapoint_info!(
"snapshot-package",
("slot", snapshot_package.root, i64),
("duration_ms", timer.as_ms(), i64),
("size", metadata.len(), i64)
);
Ok(())
}
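The tarball is written to a temp path first and only hard-linked into its public name once complete, so a validator fetching the snapshot never sees a partial file; the new `metadata` call simply records the finished size for the datapoint. The publish step in isolation:

use std::fs;
use std::io;
use std::path::Path;

fn publish_tarball(temp_tar: &Path, tar_output_file: &Path) -> io::Result<u64> {
    let _ = fs::remove_file(tar_output_file); // ok if it didn't exist yet
    let size = fs::metadata(temp_tar)?.len(); // reported via datapoint_info!
    fs::hard_link(temp_tar, tar_output_file)?; // the new name appears atomically
    Ok(size)
}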

View File

@ -690,7 +690,6 @@ mod tests {
}
#[test]
#[ignore]
fn test_storage_stage_process_banks() {
solana_logger::setup();
let keypair = Arc::new(Keypair::new());

View File

@ -23,7 +23,7 @@ use std::time::{Duration, Instant};
pub const NUM_THREADS: u32 = 10;
/// Process shreds: add them to the ledger window.
pub fn process_shreds(shreds: &[Shred], blocktree: &Arc<Blocktree>) -> Result<()> {
pub fn process_shreds(shreds: Vec<Shred>, blocktree: &Arc<Blocktree>) -> Result<()> {
blocktree.insert_shreds(shreds)
}
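`Blocktree::insert_shreds` now takes its shreds by value, so `process_shreds` does too; every `insert_shreds(&shreds)` call in this diff becomes a move. A call site that still needs the shreds afterwards has to clone explicitly, which keeps the copy visible:

// Typical case: hand ownership to the blocktree, no copy made.
blocktree.insert_shreds(shreds)?;

// If the shreds are reused afterwards, the clone is now explicit:
// blocktree.insert_shreds(shreds.clone())?;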
@ -31,6 +31,7 @@ pub fn process_shreds(shreds: &[Shred], blocktree: &Arc<Blocktree>) -> Result<()
/// blob's slot
pub fn should_retransmit_and_persist(
shred: &Shred,
shred_buf: &[u8],
bank: Option<Arc<Bank>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
my_pubkey: &Pubkey,
@ -44,7 +45,7 @@ pub fn should_retransmit_and_persist(
if leader_id == *my_pubkey {
inc_new_counter_debug!("streamer-recv_window-circular_transmission", 1);
false
} else if !shred.verify(&leader_id) {
} else if !shred.fast_verify(&shred_buf, &leader_id) {
inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
false
} else {
@ -64,7 +65,7 @@ fn recv_window<F>(
shred_filter: F,
) -> Result<()>
where
F: Fn(&Shred) -> bool,
F: Fn(&Shred, &[u8]) -> bool,
F: Sync,
{
let timer = Duration::from_millis(200);
@ -81,7 +82,7 @@ where
for (i, packet) in packets.packets.iter_mut().enumerate() {
if let Ok(s) = bincode::deserialize(&packet.data) {
let shred: Shred = s;
if shred_filter(&shred) {
if shred_filter(&shred, &packet.data) {
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
shreds.push(shred);
@ -106,13 +107,11 @@ where
);
if !packets.packets.is_empty() {
match retransmit.send(packets) {
Ok(_) => Ok(()),
Err(e) => Err(e),
}?;
// Ignore the send error, as the retransmit is optional (e.g. replicators don't retransmit)
let _ = retransmit.send(packets);
}
blocktree.insert_shreds(&shreds)?;
blocktree.insert_shreds(shreds)?;
trace!(
"Elapsed processing time in recv_window(): {}",
@ -159,7 +158,7 @@ impl WindowService {
) -> WindowService
where
F: 'static
+ Fn(&Pubkey, &Shred, Option<Arc<Bank>>) -> bool
+ Fn(&Pubkey, &Shred, &[u8], Option<Arc<Bank>>) -> bool
+ std::marker::Send
+ std::marker::Sync,
{
@ -198,10 +197,11 @@ impl WindowService {
&id,
&r,
&retransmit,
|shred| {
|shred, shred_buf| {
shred_filter(
&id,
shred,
shred_buf,
bank_forks
.as_ref()
.map(|bank_forks| bank_forks.read().unwrap().working_bank()),
@ -249,7 +249,6 @@ mod test {
use super::*;
use crate::bank_forks::BankForks;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::broadcast_stage::broadcast_utils::entries_to_shreds;
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::{make_consecutive_blobs, make_tiny_test_entries, Entry};
use crate::genesis_utils::create_genesis_block_with_leader;
@ -270,7 +269,9 @@ mod test {
fn local_entries_to_shred(entries: Vec<Entry>, keypair: &Arc<Keypair>) -> Vec<Shred> {
let mut shredder =
Shredder::new(0, Some(0), 0.0, keypair, 0).expect("Failed to create entry shredder");
entries_to_shreds(vec![entries], 0, 0, &mut shredder);
bincode::serialize_into(&mut shredder, &entries)
.expect("Expect to write all entries to shreds");
shredder.finalize_slot();
shredder
.shreds
.iter()
@ -287,7 +288,7 @@ mod test {
let shreds = local_entries_to_shred(original_entries.clone(), &Arc::new(Keypair::new()));
for shred in shreds.into_iter().rev() {
process_shreds(&[shred], &blocktree).expect("Expect successful processing of blob");
process_shreds(vec![shred], &blocktree).expect("Expect successful processing of shreds");
}
assert_eq!(
@ -311,10 +312,20 @@ mod test {
let entry = Entry::default();
let mut shreds = local_entries_to_shred(vec![entry], &Arc::new(leader_keypair));
let shred_bufs: Vec<_> = shreds
.iter()
.map(|s| bincode::serialize(s).unwrap())
.collect();
// with a Bank for slot 0, blob continues
assert_eq!(
should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id),
should_retransmit_and_persist(
&shreds[0],
&shred_bufs[0],
Some(bank.clone()),
&cache,
&me_id
),
true
);
@ -329,7 +340,7 @@ mod test {
// with a Bank and no idea who leader is, blob gets thrown out
shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
assert_eq!(
should_retransmit_and_persist(&shreds[0], Some(bank), &cache, &me_id),
should_retransmit_and_persist(&shreds[0], &shred_bufs[0], Some(bank), &cache, &me_id),
false
);
@ -389,7 +400,7 @@ mod test {
Arc::new(leader_node.sockets.repair),
&exit,
repair_strategy,
|_, _, _| true,
|_, _, _, _| true,
);
let t_responder = {
let (s_responder, r_responder) = channel();
@ -478,7 +489,7 @@ mod test {
Arc::new(leader_node.sockets.repair),
&exit,
repair_strategy,
|_, _, _| true,
|_, _, _, _| true,
);
let t_responder = {
let (s_responder, r_responder) = channel();

View File

@ -1,6 +1,6 @@
[package]
name = "solana-drone"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -20,9 +20,9 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.99"
serde_derive = "1.0.99"
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-metrics = { path = "../metrics", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-metrics = { path = "../metrics", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,12 +15,12 @@ serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0-pre2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0-pre2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.2" }
[features]
cuda = ["solana-core/cuda"]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,25 +9,25 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.18.0-pre2" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.18.0-pre2" }
solana-budget-api= { path = "../programs/budget_api", version = "0.18.0-pre0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.0-pre2" }
solana-config-api = { path = "../programs/config_api", version = "0.18.0-pre2" }
solana-config-program = { path = "../programs/config_program", version = "0.18.0-pre2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.0-pre2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.0-pre2" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.0-pre2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0-pre2" }
solana-stake-program = { path = "../programs/stake_program", version = "0.18.0-pre2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0-pre2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.0-pre2" }
solana-token-api = { path = "../programs/token_api", version = "0.18.0-pre2" }
solana-token-program = { path = "../programs/token_program", version = "0.18.0-pre2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0-pre2" }
solana-vote-program = { path = "../programs/vote_program", version = "0.18.0-pre2" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.18.2" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.18.2" }
solana-budget-api = { path = "../programs/budget_api", version = "0.18.2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.18.2" }
solana-config-api = { path = "../programs/config_api", version = "0.18.2" }
solana-config-program = { path = "../programs/config_program", version = "0.18.2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.2" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.2" }
solana-stake-program = { path = "../programs/stake_program", version = "0.18.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.2" }
solana-token-api = { path = "../programs/token_api", version = "0.18.2" }
solana-token-program = { path = "../programs/token_program", version = "0.18.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.2" }
solana-vote-program = { path = "../programs/vote_program", version = "0.18.2" }
[lib]
crate-type = ["lib"]

View File

@ -3,18 +3,18 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
[features]
cuda = []

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -32,10 +32,10 @@ serde = "1.0.99"
serde_derive = "1.0.99"
serde_yaml = "0.8.9"
sha2 = "0.8.0"
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-config-api = { path = "../programs/config_api", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-config-api = { path = "../programs/config_api", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "2.1.0"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ cuda = []
[dependencies]
clap = "2.33"
dirs = "2.0.2"
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
[[bin]]
name = "solana-keygen"

View File

@ -1,7 +1,7 @@
[package]
name = "solana-kvstore"
description = "Embedded Key-Value store for solana"
version = "0.18.0-pre2"
version = "0.18.2"
homepage = "https://solana.com/"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,10 +15,10 @@ serde = "1.0.99"
serde_derive = "1.0.99"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
[dev-dependencies]
assert_cmd = "0.11"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -11,15 +11,15 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-core = { path = "../core", version = "0.18.0-pre2" }
solana-client = { path = "../client", version = "0.18.0-pre2" }
solana-logger = { path = "../logger", version = "0.18.0-pre2" }
solana-runtime = { path = "../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0-pre2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0-pre2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.0-pre2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0-pre2" }
solana-core = { path = "../core", version = "0.18.2" }
solana-client = { path = "../client", version = "0.18.2" }
solana-logger = { path = "../logger", version = "0.18.2" }
solana-runtime = { path = "../runtime", version = "0.18.2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.18.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.18.2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.18.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.18.2" }
symlink = "0.1.0"
tempfile = "3.1.0"

View File

@ -25,6 +25,8 @@ use tempfile::TempDir;
#[test]
#[serial]
#[allow(unused_attributes)]
#[ignore]
fn test_ledger_cleanup_service() {
solana_logger::setup();
error!("test_ledger_cleanup_service");

View File

@ -74,13 +74,13 @@ fn run_replicator_startup_basic(num_nodes: usize, num_replicators: usize) {
}
#[test]
#[ignore]
#[serial]
fn test_replicator_startup_1_node() {
run_replicator_startup_basic(1, 1);
}
#[test]
#[ignore]
#[serial]
fn test_replicator_startup_2_nodes() {
run_replicator_startup_basic(2, 1);
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -11,4 +11,4 @@ license = "Apache-2.0"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }

View File

@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
[dev-dependencies]
hex = "0.3.2"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ influx_db_client = "0.3.6"
lazy_static = "1.3.0"
log = "0.4.8"
reqwest = "0.9.20"
solana-sdk = { path = "../sdk", version = "0.18.0-pre2" }
solana-sdk = { path = "../sdk", version = "0.18.2" }
sys-info = "0.5.7"
[dev-dependencies]

View File

@ -752,7 +752,7 @@ stopNode() {
PS4=\"$PS4\"
set -x
! tmux list-sessions || tmux kill-session
for pid in solana/{net-stats,oom-monitor}.pid; do
for pid in solana/{net-stats,fd-monitor,oom-monitor}.pid; do
pgid=\$(ps opgid= \$(cat \$pid) | tr -d '[:space:]')
if [[ -n \$pgid ]]; then
sudo kill -- -\$pgid

View File

@ -50,9 +50,13 @@ skip)
esac
(
sudo scripts/oom-monitor.sh
sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh
) > oom-monitor.log 2>&1 &
echo $! > oom-monitor.pid
scripts/fd-monitor.sh > fd-monitor.log 2>&1 &
echo $! > fd-monitor.pid
scripts/net-stats.sh > net-stats.log 2>&1 &
echo $! > net-stats.pid
! tmux list-sessions || tmux kill-session

View File

@ -93,6 +93,8 @@ local|tar|skip)
sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh
) > oom-monitor.log 2>&1 &
echo $! > oom-monitor.pid
scripts/fd-monitor.sh > fd-monitor.log 2>&1 &
echo $! > fd-monitor.pid
scripts/net-stats.sh > net-stats.log 2>&1 &
echo $! > net-stats.pid

View File

@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.18.0-pre2"
version = "0.18.2"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@ -22,11 +22,11 @@ walkdir = "2"
bincode = "1.1.4"
byteorder = "1.3.2"
elf = "0.0.10"
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.18.0-pre2" }
solana-logger = { path = "../../logger", version = "0.18.0-pre2" }
solana-runtime = { path = "../../runtime", version = "0.18.0-pre2" }
solana-sdk = { path = "../../sdk", version = "0.18.0-pre2" }
solana_rbpf = "=0.1.14"
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.18.2" }
solana-logger = { path = "../../logger", version = "0.18.2" }
solana-runtime = { path = "../../runtime", version = "0.18.2" }
solana-sdk = { path = "../../sdk", version = "0.18.2" }
solana_rbpf = "=0.1.15"
[[bench]]
name = "bpf_loader"

View File

@ -76,8 +76,7 @@ fn main() {
"external_spend",
"noop",
"panic",
// ISSUE: https://github.com/solana-labs/solana/issues/5602
// "stack_bug",
"param_passing",
"tick_height",
];
for program in rust_programs.iter() {

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,12 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.18.2" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.0-pre2" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,12 +1,6 @@
//! @brief Example Rust-based BPF program that tests 128-bit arithmetic
#![no_std]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::info;
#[no_mangle]
@ -24,10 +18,25 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> bool {
z -= 1;
assert_eq!(z, 340_282_366_920_938_463_463_374_607_431_768_211_454);
assert_eq!(u128::from(1u32.to_le()), 1);
assert_eq!(u128::from(1u32.to_be()), 0x1_000_000);
assert_eq!(solana_bpf_rust_128bit_dep::uadd(10, 20), 30u128);
assert_eq!(solana_bpf_rust_128bit_dep::usubtract(30, 20), 10u128);
assert_eq!(solana_bpf_rust_128bit_dep::umultiply(30, 20), 600u128);
assert_eq!(solana_bpf_rust_128bit_dep::udivide(20, 10), 2u128);
assert_eq!(solana_bpf_rust_128bit_dep::umodulo(20, 3), 2u128);
assert_eq!(solana_bpf_rust_128bit_dep::add(-10, -20), -30i128);
assert_eq!(solana_bpf_rust_128bit_dep::subtract(-30, -20), -10i128);
assert_eq!(solana_bpf_rust_128bit_dep::multiply(-30, -20), 600i128);
assert_eq!(solana_bpf_rust_128bit_dep::divide(-20, -10), 2i128);
assert_eq!(solana_bpf_rust_128bit_dep::modulo(-20, -3), -2i128);
let x = u64::max_value();
assert_eq!(u128::from(x) + u128::from(x), 36_893_488_147_419_103_230);
let x = solana_bpf_rust_128bit_dep::work(
let x = solana_bpf_rust_128bit_dep::uadd(
u128::from(u64::max_value()),
u128::from(u64::max_value()),
);

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit-dep"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.0-pre2" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,12 +1,38 @@
//! @brief Solana Rust-based BPF program utility functions and types
#![no_std]
extern crate solana_sdk_bpf_utils;
pub fn work(x: u128, y: u128) -> u128 {
pub fn uadd(x: u128, y: u128) -> u128 {
x + y
}
pub fn usubtract(x: u128, y: u128) -> u128 {
x - y
}
pub fn umultiply(x: u128, y: u128) -> u128 {
x * y
}
pub fn udivide(n: u128, d: u128) -> u128 {
n / d
}
pub fn umodulo(n: u128, d: u128) -> u128 {
n % d
}
pub fn add(x: i128, y: i128) -> i128 {
x + y
}
pub fn subtract(x: i128, y: i128) -> i128 {
x - y
}
pub fn multiply(x: i128, y: i128) -> i128 {
x * y
}
pub fn divide(n: i128, d: i128) -> i128 {
n / d
}
pub fn modulo(n: i128, d: i128) -> i128 {
n % d
}
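BPF has no native 128-bit instructions, so each helper forces LLVM to lower the operation to a compiler-builtins call (unsigned division, for example, becomes a `__udivti3`-style routine); covering every operator is what makes this a link-time test of those builtins. Sample assertions in the style of the entrypoint above:

assert_eq!(uadd(10, 20), 30u128);
assert_eq!(umultiply(u128::from(u64::max_value()), 2), 36_893_488_147_419_103_230);
assert_eq!(udivide(1u128 << 64, 2), 1u128 << 63);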
#[cfg(test)]
mod test {
@ -26,7 +52,7 @@ mod test {
}
#[test]
fn test_work() {
assert_eq!(3, work(1, 2));
fn test_add() {
assert_eq!(3, add(1, 2));
}
}

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-alloc"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF alloc program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,24 +1,18 @@
//! @brief Example Rust-based BPF program that tests dynamic memory allocation
#![no_std]
#![allow(unused_attributes)]
#[macro_use]
extern crate alloc;
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use alloc::vec::Vec;
use core::alloc::Layout;
use core::mem;
use solana_sdk_bpf_utils::info;
use std::alloc::Layout;
use std::mem;
#[no_mangle]
pub extern "C" fn entrypoint(_input: *mut u8) -> bool {
unsafe {
// Confirm large allocation fails
let layout = Layout::from_size_align(core::usize::MAX, mem::align_of::<u8>()).unwrap();
let layout = Layout::from_size_align(std::usize::MAX, mem::align_of::<u8>()).unwrap();
let ptr = alloc::alloc::alloc(layout);
if !ptr.is_null() {
info!("Error: Alloc of very larger buffer should fail");
@ -59,7 +53,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> bool {
alloc::alloc::dealloc(ptr, layout);
}
// // TODO not supported for system or bump allocator
// TODO not supported by the bump allocator
// unsafe {
// // Test alloc all bytes and one more (assumes heap size of 2048)
@ -93,7 +87,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> bool {
}
{
// TODO test Vec::new()
// test Vec::new()
const ITERS: usize = 100;
let mut v = Vec::new();

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dep-crate"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF dep-crate program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,8 +13,7 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,12 +1,6 @@
//! @brief Example Rust-based BPF program that tests dependent crates
#![no_std]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use byteorder::{ByteOrder, LittleEndian};
use solana_sdk_bpf_utils::info;
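
The body of this program is elided here, but byteorder's typical use is a fixed-endian round trip; a small sketch of that pattern, not necessarily what this test actually does:

use byteorder::{ByteOrder, LittleEndian};

fn round_trip() {
    // Write a u64 into a little-endian byte buffer and read it back
    let mut buf = [0u8; 8];
    LittleEndian::write_u64(&mut buf, 0x1122_3344_5566_7788);
    assert_eq!(0x1122_3344_5566_7788, LittleEndian::read_u64(&buf));
}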

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-external-spend"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF external spend program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,15 +1,8 @@
//! @brief Example Rust-based BPF program that moves a lamport from one account to another
#![no_std]
#![allow(unreachable_code)]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::entrypoint;
use solana_sdk_bpf_utils::entrypoint::*;
use solana_sdk_bpf_utils::{entrypoint, info};
entrypoint!(process_instruction);
fn process_instruction(ka: &mut [SolKeyedAccount], _info: &SolClusterInfo, _data: &[u8]) -> bool {
@ -18,6 +11,5 @@ fn process_instruction(ka: &mut [SolKeyedAccount], _info: &SolClusterInfo, _data
// is seen by the runtime and fails as expected
*ka[0].lamports -= 1;
info!("Success");
true
}
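
For contrast, a debit balanced by a matching credit passes the runtime's check; a minimal sketch using the same entrypoint conventions as above, assuming at least two accounts are passed in:

use solana_sdk_bpf_utils::entrypoint::*;
use solana_sdk_bpf_utils::{entrypoint, info};

entrypoint!(process_instruction);
fn process_instruction(ka: &mut [SolKeyedAccount], _info: &SolClusterInfo, _data: &[u8]) -> bool {
    // Debit the first account and credit the second; total lamports are
    // conserved, so the runtime accepts the change
    *ka[0].lamports -= 1;
    *ka[1].lamports += 1;
    info!("Success");
    true
}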

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-iter"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,12 +1,6 @@
//! @brief Example Rust-based BPF program that tests loop iteration
#![no_std]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::info;
#[no_mangle]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF many-args program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,9 +12,8 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,14 +1,7 @@
//! @brief Example Rust-based BPF program that tests calls with many arguments
#![no_std]
#![allow(unused_attributes)]
mod helper;
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::info;
#[no_mangle]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args-dep"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.0-pre2" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,9 +1,6 @@
//! @brief Solana Rust-based BPF program utility functions and types
#![no_std]
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::info;
pub fn many_args(

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-noop"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,13 +1,8 @@
//! @brief Example Rust-based BPF program that prints out the parameters passed to it
#![no_std]
#![allow(unreachable_code)]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
use solana_sdk_bpf_utils::entrypoint::*;
use solana_sdk_bpf_utils::log::*;
use solana_sdk_bpf_utils::{entrypoint, info};
@ -36,17 +31,11 @@ fn process_instruction(ka: &mut [SolKeyedAccount], info: &SolClusterInfo, data:
sol_log_params(ka, data);
{
// Test - arch config
#[cfg(not(target_arch = "bpf"))]
panic!();
}
{
// Test - use core methods, unwrap
// Test - use std methods, unwrap
// valid bytes, in a stack-allocated array
let sparkle_heart = [240, 159, 146, 150];
let result_str = core::str::from_utf8(&sparkle_heart).unwrap();
let result_str = std::str::from_utf8(&sparkle_heart).unwrap();
assert_eq!(4, result_str.len());
assert_eq!("💖", result_str);
info!(result_str);
@ -59,6 +48,12 @@ fn process_instruction(ka: &mut [SolKeyedAccount], info: &SolClusterInfo, data:
assert_eq!(s.x + s.y + s.z, 6);
}
{
// Test - arch config
#[cfg(not(target_arch = "bpf"))]
panic!();
}
info!("Success");
true
}
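
The same std API also surfaces failures; a small sketch of the inverse check (the invalid byte sequence here is an illustration, not part of the original test):

fn check_invalid_utf8() {
    // A lone UTF-16 surrogate (U+D800) encoded as bytes is not valid UTF-8
    let invalid = [0xED, 0xA0, 0x80];
    assert!(std::str::from_utf8(&invalid).is_err());
}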

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-panic"
version = "0.18.0-pre2"
version = "0.18.2"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[workspace]
members = []

View File

@ -1,6 +1,2 @@
[dependencies.compiler_builtins]
path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/compiler-builtins"
features = ["c", "mem"]
[target.bpfel-unknown-unknown.dependencies]
alloc = { path = "../../../../sdk/bpf/dependencies/rust-bpf-sysroot/src/liballoc" }
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -1,10 +1,5 @@
//! @brief Example Rust-based BPF program that panics
#![no_std]
#![allow(unused_attributes)]
#[cfg(not(test))]
extern crate solana_sdk_bpf_no_std;
extern crate solana_sdk_bpf_utils;
#[no_mangle]

View File

@ -2,9 +2,9 @@
# Note: This crate must be built using build.sh
[package]
name = "solana-bpf-rust-stack-bug"
version = "0.18.0-pre2"
description = "Solana BPF iter program written in Rust"
name = "solana-bpf-rust-param-passing"
version = "0.18.2"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
@ -12,16 +12,15 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-bpf-rust-stack-bug-dep = { path = "../stack_bug_dep", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.18.2" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.0-pre2" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.2" }
[workspace]
members = []
[lib]
crate-type = ["cdylib"]
name = "solana_bpf_rust_stack_bug"
name = "solana_bpf_rust_param_passing"

View File

@ -0,0 +1,2 @@
[target.bpfel-unknown-unknown.dependencies.std]
features = []

View File

@ -0,0 +1,22 @@
//! @brief Example Rust-based BPF program that tests parameter passing between crates
extern crate solana_sdk_bpf_utils;
use solana_bpf_rust_param_passing_dep::{Data, TestDep};
use solana_sdk_bpf_utils::info;
#[no_mangle]
pub extern "C" fn entrypoint(_input: *mut u8) -> bool {
let array = [0xA, 0xB, 0xC, 0xD, 0xE, 0xF];
let data = Data {
twentyone: 21u64,
twentytwo: 22u64,
twentythree: 23u64,
twentyfour: 24u64,
twentyfive: 25u32,
array: &array,
};
let test_dep = TestDep::new(&data, 1, 2, 3, 4, 5);
info!(0, 0, 0, 0, test_dep.thirty);
test_dep.thirty == 30
}
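
param_passing_dep itself is not shown in this diff; purely as an illustration, the shapes implied by the call site above could look like the following. The field layout matches the call site, but the arithmetic inside new() is a guess — anything producing thirty == 30 for these inputs would do:

// Hypothetical reconstruction of param_passing_dep; only the call site above
// constrains these definitions
pub struct Data<'a> {
    pub twentyone: u64,
    pub twentytwo: u64,
    pub twentythree: u64,
    pub twentyfour: u64,
    pub twentyfive: u32,
    pub array: &'a [u8],
}

pub struct TestDep {
    pub thirty: u64,
}

impl TestDep {
    pub fn new(data: &Data, _a: u64, _b: u64, _c: u64, _d: u64, e: u64) -> Self {
        // Guessed arithmetic: 25 + 5 == 30
        TestDep { thirty: data.twentyfive as u64 + e }
    }
}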

View File

@ -2,8 +2,8 @@
# Note: This crate must be built using build.sh
[package]
name = "solana-bpf-rust-stack-bug-dep"
version = "0.18.0-pre2"
name = "solana-bpf-rust-param-passing-dep"
version = "0.18.2"
description = "Solana BPF program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.0-pre2" }
solana-sdk-bpf-no-std = { path = "../../../../sdk/bpf/rust/rust-no-std", version = "0.18.0-pre2" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.18.2" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.0-pre2" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/rust-test", version = "0.18.2" }
[workspace]
members = []

View File

@ -0,0 +1,2 @@
[target.bpfel-unknown-unknown.dependencies.std]
features = []

Some files were not shown because too many files have changed in this diff.