# Compare commits

69 commits
| SHA1 |
| --- |
| 6f098e0145 |
| f90bc20a8b |
| 60074c9d36 |
| 5d9354fca7 |
| 0ea09d75ed |
| f475a46df6 |
| 5681a24896 |
| 214aba6d2f |
| fa551e5fc1 |
| d9a5a86d10 |
| 83ad921ad6 |
| 5753c719bd |
| 322e2e0c6a |
| 371fdc6495 |
| d23f2b5754 |
| a50a015542 |
| 353cfb1980 |
| 79d737e760 |
| 8745034cec |
| db979b30c4 |
| a92855c995 |
| 5b006eba57 |
| 32a728d585 |
| 1b3be91e3c |
| 2509002fe4 |
| 9c9a690d0d |
| 216cc34224 |
| 71f1459ef9 |
| f84bdb7d81 |
| ed59c58a72 |
| de941f4074 |
| b7fb050d09 |
| 9ee2e768d6 |
| d6d3a3c3d8 |
| 3e229b248f |
| 0470072436 |
| f74fa60c8b |
| c189767090 |
| c82c18353d |
| da58a272dd |
| 001f5fbb6b |
| 63cd452ab5 |
| 6ee77e9754 |
| cee22262fc |
| 0d13352916 |
| 78a9832f13 |
| 795cf14650 |
| 8c112e8bc4 |
| 8e6d213459 |
| b33df42640 |
| e0462e6933 |
| 1f5e30a366 |
| 633eeb1586 |
| c1148a6da3 |
| 713e86670d |
| c004c726e7 |
| 5ffb8631e0 |
| fd32a0280e |
| e76f202eb3 |
| ba4558cb92 |
| 74e5577dd4 |
| b878002cf5 |
| f111250e3b |
| 3d91f650db |
| 91a88cda6a |
| 2128c17ed0 |
| 7b819c9b74 |
| eec5c661af |
| 0398f6b87a |
`Cargo.lock` (generated, 602 changes): file diff suppressed because it is too large.
**solana-archiver** — `Cargo.toml`:

```diff
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-archiver"
-version = "0.22.0"
+version = "0.22.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 console = "0.9.1"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-core = { path = "../core", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-metrics = { path = "../metrics", version = "0.22.3" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
```
**solana-banking-bench** — `Cargo.toml`:

```diff
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "0.22.0"
+version = "0.22.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.2.0"
-solana-core = { path = "../core", version = "0.22.0" }
-solana-ledger = { path = "../ledger", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-measure = { path = "../measure", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
+solana-core = { path = "../core", version = "0.22.3" }
+solana-ledger = { path = "../ledger", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-runtime = { path = "../runtime", version = "0.22.3" }
+solana-measure = { path = "../measure", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
 rand = "0.6.5"
 crossbeam-channel = "0.3"
```
**solana-banking-bench** — `main.rs` (Blocktree renamed to Blockstore):

```diff
@@ -10,7 +10,7 @@ use solana_core::packet::to_packets_chunked;
 use solana_core::poh_recorder::PohRecorder;
 use solana_core::poh_recorder::WorkingBankEntry;
 use solana_ledger::bank_forks::BankForks;
-use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
 use solana_measure::measure::Measure;
 use solana_runtime::bank::Bank;
 use solana_sdk::hash::Hash;
@@ -139,11 +139,11 @@ fn main() {
     let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
     let ledger_path = get_tmp_ledger_path!();
     {
-        let blocktree = Arc::new(
-            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+        let blockstore = Arc::new(
+            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
         );
         let (exit, poh_recorder, poh_service, signal_receiver) =
-            create_test_recorder(&bank, &blocktree, None);
+            create_test_recorder(&bank, &blockstore, None);
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
         let cluster_info = Arc::new(RwLock::new(cluster_info));
         let banking_stage = BankingStage::new(
@@ -300,5 +300,5 @@ fn main() {
         sleep(Duration::from_secs(1));
         debug!("waited for poh_service");
     }
-    let _unused = Blocktree::destroy(&ledger_path);
+    let _unused = Blockstore::destroy(&ledger_path);
 }
```
**solana-bench-exchange** — `Cargo.toml`:

```diff
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.22.0"
+version = "0.22.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.104"
 serde_derive = "1.0.103"
 serde_json = "1.0.44"
 serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-genesis = { path = "../genesis", version = "0.22.0" }
-solana-client = { path = "../client", version = "0.22.0" }
-solana-faucet = { path = "../faucet", version = "0.22.0" }
-solana-exchange-program = { path = "../programs/exchange", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-core = { path = "../core", version = "0.22.3" }
+solana-genesis = { path = "../genesis", version = "0.22.3" }
+solana-client = { path = "../client", version = "0.22.3" }
+solana-faucet = { path = "../faucet", version = "0.22.3" }
+solana-exchange-program = { path = "../programs/exchange", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-metrics = { path = "../metrics", version = "0.22.3" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
+solana-runtime = { path = "../runtime", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
 untrusted = "0.7.0"
 ws = "0.9.1"
 
 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
+solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }
```
**solana-bench-streamer** — `Cargo.toml`:

```diff
@@ -2,14 +2,14 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.22.0"
+version = "0.22.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
 clap = "2.33.0"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-core = { path = "../core", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
```
**solana-bench-tps** — `Cargo.toml`:

```diff
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.22.0"
+version = "0.22.3"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.104"
 serde_derive = "1.0.103"
 serde_json = "1.0.44"
 serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-core = { path = "../core", version = "0.22.0" }
-solana-genesis = { path = "../genesis", version = "0.22.0" }
-solana-client = { path = "../client", version = "0.22.0" }
-solana-faucet = { path = "../faucet", version = "0.22.0" }
-solana-librapay = { path = "../programs/librapay", version = "0.22.0", optional = true }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-measure = { path = "../measure", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.0", optional = true }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-core = { path = "../core", version = "0.22.3" }
+solana-genesis = { path = "../genesis", version = "0.22.3" }
+solana-client = { path = "../client", version = "0.22.3" }
+solana-faucet = { path = "../faucet", version = "0.22.3" }
+solana-librapay = { path = "../programs/librapay", version = "0.22.3", optional = true }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-metrics = { path = "../metrics", version = "0.22.3" }
+solana-measure = { path = "../measure", version = "0.22.3" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
+solana-runtime = { path = "../runtime", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
+solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.3", optional = true }
 
 [dev-dependencies]
 serial_test = "0.3.2"
 serial_test_derive = "0.3.1"
-solana-local-cluster = { path = "../local-cluster", version = "0.22.0" }
+solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }
 
 [features]
 move = ["solana-librapay", "solana-move-loader-program"]
```
Docs — validator pipeline diagram (original column alignment not fully recoverable):

```diff
@@ -18,9 +18,9 @@
 | | `-------` `--------` `--+---------` | | | | |
 | | ^ ^ | | | `------------` |
 | | | | v | | |
-| | | .--+--------. | | |
-| | | | Blocktree | | | |
-| | | `-----------` | | .------------. |
+| | | .--+---------. | | |
+| | | | Blockstore | | | |
+| | | `------------` | | .------------. |
 | | | ^ | | | | |
 | | | | | | | Downstream | |
 | | .--+--. .-------+---. | | | Validators | |
```
Docs — `SUMMARY.md` (book table of contents):

```diff
@@ -21,7 +21,7 @@
 * [Anatomy of a Validator](validator/README.md)
   * [TPU](validator/tpu.md)
   * [TVU](validator/tvu/README.md)
-    * [Blocktree](validator/tvu/blocktree.md)
+    * [Blockstore](validator/tvu/blockstore.md)
   * [Gossip Service](validator/gossip.md)
   * [The Runtime](validator/runtime.md)
 * [Anatomy of a Transaction](transaction.md)
@@ -39,6 +39,7 @@
   * [Installation](paper-wallet/installation.md)
   * [Paper Wallet Usage](paper-wallet/usage.md)
 * [Offline Signing](offline-signing/README.md)
+  * [Durable Transaction Nonces](offline-signing/durable-nonce.md)
 * [API Reference](api-reference/README.md)
   * [Transaction](api-reference/transaction-api.md)
   * [Instruction](api-reference/instruction-api.md)
@@ -58,7 +59,7 @@
   * [Bankless Leader](proposals/bankless-leader.md)
   * [Slashing](proposals/slashing.md)
 * [Implemented Design Proposals](implemented-proposals/README.md)
-  * [Blocktree](implemented-proposals/blocktree.md)
+  * [Blockstore](implemented-proposals/blockstore.md)
   * [Cluster Software Installation and Updates](implemented-proposals/installer.md)
   * [Cluster Economics](implemented-proposals/ed_overview/README.md)
     * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)
```
Docs — `solana-cli` usage reference (help-text column alignment approximated):

````diff
@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
 ## Usage
 ### solana-cli
 ```text
-solana-cli 0.22.0 [channel=unknown commit=unknown]
+solana-cli 0.22.3 [channel=unknown commit=unknown]
 Blockchain, Rebuilt for Scale
 
 USAGE:
@@ -201,6 +201,7 @@ OPTIONS:
 SUBCOMMANDS:
     address                    Get your public key
     airdrop                    Request lamports
+    authorize-nonce-account    Assign account authority to a new entity
    balance                    Get your balance
    cancel                     Cancel a transfer
    catchup                    Wait for a validator to catch up to the cluster
@@ -305,6 +306,35 @@ ARGS:
     <UNIT>    Specify unit to use for request and balance display [possible values: SOL, lamports]
 ```
 
+#### solana-authorize-nonce-account
+```text
+solana-authorize-nonce-account
+Assign account authority to a new entity
+
+USAGE:
+    solana authorize-nonce-account [FLAGS] [OPTIONS] <NONCE_ACCOUNT> <NEW_AUTHORITY_PUBKEY>
+
+FLAGS:
+    -h, --help                           Prints help information
+        --skip-seed-phrase-validation    Skip validation of seed phrases. Use this if your phrase does not use the BIP39
+                                         official English word list
+    -V, --version                        Prints version information
+    -v, --verbose                        Show extra information header
+
+OPTIONS:
+        --ask-seed-phrase <KEYPAIR NAME>    Securely recover a keypair using a seed phrase and optional passphrase
+                                            [possible values: keypair]
+    -C, --config <PATH>                     Configuration file to use [default:
+                                            ~/.config/solana/cli/config.yml]
+    -u, --url <URL>                         JSON RPC URL for the solana cluster
+    -k, --keypair <PATH>                    /path/to/id.json
+        --nonce-authority <KEYPAIR>         Specify nonce authority if different from account
+
+ARGS:
+    <NONCE_ACCOUNT>            Address of the nonce account
+    <NEW_AUTHORITY_PUBKEY>     Account to be granted authority of the nonce account
+```
+
 #### solana-balance
 ```text
 solana-balance
@@ -664,14 +694,20 @@ FLAGS:
     -v, --verbose                        Show extra information header
 
 OPTIONS:
-        --ask-seed-phrase <KEYPAIR NAME>    Securely recover a keypair using a seed phrase and optional passphrase
-                                            [possible values: keypair]
-        --blockhash <BLOCKHASH>             Use the supplied blockhash
-    -C, --config <PATH>                     Configuration file to use [default:
-                                            ~/.config/solana/cli/config.yml]
-    -u, --url <URL>                         JSON RPC URL for the solana cluster
-    -k, --keypair <PATH>                    /path/to/id.json
-        --signer <PUBKEY=BASE58_SIG>...     Provide a public-key/signature pair for the transaction
+        --ask-seed-phrase <KEYPAIR NAME>       Securely recover a keypair using a seed phrase and optional passphrase
+                                               [possible values: keypair]
+        --blockhash <BLOCKHASH>                Use the supplied blockhash
+    -C, --config <PATH>                        Configuration file to use [default:
+                                               ~/.config/solana/cli/config.yml]
+    -u, --url <URL>                            JSON RPC URL for the solana cluster
+    -k, --keypair <PATH>                       /path/to/id.json
+        --nonce <PUBKEY>                       Provide the nonce account to use when creating a nonced
+                                               transaction. Nonced transactions are useful when a transaction
+                                               requires a lengthy signing process. Learn more about nonced
+                                               transactions at https://docs.solana.com/offline-signing/durable-nonce
+        --nonce-authority <nonce_authority>    Provide the nonce authority keypair to use when signing a nonced
+                                               transaction
+        --signer <PUBKEY=BASE58_SIG>...        Provide a public-key/signature pair for the transaction
 
 ARGS:
     <STAKE ACCOUNT>    Stake account to be deactivated.
@@ -694,14 +730,20 @@ FLAGS:
     -v, --verbose                        Show extra information header
 
 OPTIONS:
-        --ask-seed-phrase <KEYPAIR NAME>    Securely recover a keypair using a seed phrase and optional passphrase
-                                            [possible values: keypair]
-        --blockhash <BLOCKHASH>             Use the supplied blockhash
-    -C, --config <PATH>                     Configuration file to use [default:
-                                            ~/.config/solana/cli/config.yml]
-    -u, --url <URL>                         JSON RPC URL for the solana cluster
-    -k, --keypair <PATH>                    /path/to/id.json
-        --signer <PUBKEY=BASE58_SIG>...     Provide a public-key/signature pair for the transaction
+        --ask-seed-phrase <KEYPAIR NAME>       Securely recover a keypair using a seed phrase and optional passphrase
+                                               [possible values: keypair]
+        --blockhash <BLOCKHASH>                Use the supplied blockhash
+    -C, --config <PATH>                        Configuration file to use [default:
+                                               ~/.config/solana/cli/config.yml]
+    -u, --url <URL>                            JSON RPC URL for the solana cluster
+    -k, --keypair <PATH>                       /path/to/id.json
+        --nonce <PUBKEY>                       Provide the nonce account to use when creating a nonced
+                                               transaction. Nonced transactions are useful when a transaction
+                                               requires a lengthy signing process. Learn more about nonced
+                                               transactions at https://docs.solana.com/offline-signing/durable-nonce
+        --nonce-authority <nonce_authority>    Provide the nonce authority keypair to use when signing a nonced
+                                               transaction
+        --signer <PUBKEY=BASE58_SIG>...        Provide a public-key/signature pair for the transaction
 
 ARGS:
     <STAKE ACCOUNT>    Stake account to delegate
@@ -1004,6 +1046,12 @@ OPTIONS:
                                                ~/.config/solana/cli/config.yml]
    -u, --url <URL>                             JSON RPC URL for the solana cluster
    -k, --keypair <PATH>                        /path/to/id.json
+        --nonce <PUBKEY>                       Provide the nonce account to use when creating a nonced
+                                               transaction. Nonced transactions are useful when a transaction
+                                               requires a lengthy signing process. Learn more about nonced
+                                               transactions at https://docs.solana.com/offline-signing/durable-nonce
+        --nonce-authority <nonce_authority>    Provide the nonce authority keypair to use when signing a nonced
+                                               transaction
        --signer <PUBKEY=BASE58_SIG>...         Provide a public-key/signature pair for the transaction
        --after <DATETIME>                      A timestamp after which transaction will execute
        --require-timestamp-from <PUBKEY>       Require timestamp from this third party
````
Docs — JSON RPC API reference:

````diff
@@ -146,8 +146,8 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
 
 * `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
   * `lamports`, number of lamports assigned to this account, as a u64
-  * `owner`, array of 32 bytes representing the program this account has been assigned to
-  * `data`, array of bytes representing any data associated with the account
+  * `owner`, base-58 encoded pubkey of the program this account has been assigned to
+  * `data`, base-58 encoded data associated with the account
   * `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
 
 #### Example:
@@ -157,7 +157,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
+{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"}},"id":1}
 ```
 
 ### getBalance
````
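Clients consuming the updated `getAccountInfo` response now have to base-58 decode `owner` and `data` before use. A minimal Rust sketch; the third-party `bs58` crate is an assumption for illustration and is not part of this changeset:

```rust
// Sketch: decoding the base-58 `owner` field from the updated getAccountInfo
// response. Assumes the third-party `bs58` crate is available as a dependency.
fn main() {
    // The `owner` value from the example result above.
    let owner_b58 = "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM";
    let owner_bytes: Vec<u8> = bs58::decode(owner_b58)
        .into_vec()
        .expect("valid base-58");
    // A pubkey decodes to exactly 32 bytes; the `data` field decodes the same way.
    assert_eq!(owner_bytes.len(), 32);
    println!("owner: {} bytes", owner_bytes.len());
}
```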
````diff
@@ -193,13 +193,13 @@ Returns commitment for particular block
 
 #### Results:
 
-The result field will be an array with two fields:
+The result field will be a JSON object containing:
 
-* Commitment
+* `commitment` - commitment, comprising either:
   * `null` - Unknown block
   * `object` - BlockCommitment
     * `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
-* 'integer' - total active stake, in lamports, of the current epoch
+* `totalStake` - total active stake, in lamports, of the current epoch
 
 #### Example:
 
````
````diff
@@ -213,9 +213,17 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
 
 ### getBlockTime
 
-Returns the estimated production time of a block. Validators report their UTC
-time to the ledger on a regular interval. A block's time is calculated as an
-offset from the median value of the most recent validator time report.
+Returns the estimated production time of a block.
+
+Each validator reports their UTC time to the ledger on a regular interval by
+intermittently adding a timestamp to a Vote for a particular block. A requested
+block's time is calculated from the stake-weighted mean of the Vote timestamps
+in a set of recent blocks recorded on the ledger.
+
+Nodes that are booting from snapshot or limiting ledger size (by purging old
+slots) will return null timestamps for blocks below their lowest root +
+`TIMESTAMP_SLOT_RANGE`. Users interested in having this historical data must
+query a node that is built from genesis and retains the entire ledger.
 
 #### Parameters:
````
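The stake-weighted mean the new wording describes is straightforward arithmetic. A minimal Rust sketch with hypothetical `(stake, timestamp)` samples; the real implementation draws these from Vote timestamps on the ledger:

```rust
// Sketch: a stake-weighted mean of validator-reported timestamps, as used to
// estimate a block's production time. The sample values are hypothetical.
fn stake_weighted_mean(samples: &[(u64, i64)]) -> Option<i64> {
    // samples: (stake in lamports, reported unix timestamp)
    let total_stake: u64 = samples.iter().map(|(stake, _)| stake).sum();
    if total_stake == 0 {
        return None;
    }
    let weighted_sum: i128 = samples
        .iter()
        .map(|&(stake, ts)| stake as i128 * ts as i128)
        .sum();
    Some((weighted_sum / total_stake as i128) as i64)
}

fn main() {
    // 75% of stake reports t, 25% reports t + 4: the mean lands at t + 1.
    let samples = [(75, 1_577_836_800), (25, 1_577_836_804)];
    assert_eq!(stake_weighted_mean(&samples), Some(1_577_836_801));
}
```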
````diff
@@ -270,17 +278,18 @@ Returns identity and transaction information about a confirmed block in the ledg
 #### Parameters:
 
 * `integer` - slot, as u64 integer
+* `string` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
 
 #### Results:
 
 The result field will be an object with the following fields:
 
-* `blockhash` - the blockhash of this block
-* `previousBlockhash` - the blockhash of this block's parent
+* `blockhash` - the blockhash of this block, as base-58 encoded string
+* `previousBlockhash` - the blockhash of this block's parent, as base-58 encoded string
 * `parentSlot` - the slot index of this block's parent
-* `transactions` - an array of tuples containing:
-  * [Transaction](transaction-api.md) object, in JSON format
-  * Transaction status object, containing:
+* `transactions` - an array of JSON objects containing:
+  * `transaction` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
+  * `meta` - transaction status metadata object, containing `null` or:
   * `status` - Transaction status:
     * `"Ok": null` - Transaction was successful
     * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
@@ -292,10 +301,16 @@ The result field will be an object with the following fields:
 ```bash
 // Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430]}' localhost:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null},"preBalances":[499998337500,15298080,1,1,1],"postBalances":[499998237500,15298080,1,1,1]}]]},"id":1}
+{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[[{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
+
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[["81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ",{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1}
 ```
 
 ### getConfirmedBlocks
````
````diff
@@ -361,11 +376,11 @@ None
 
 The result field will be an object with the following fields:
 
-* `slots_per_epoch`, the maximum number of slots in each epoch
-* `leader_schedule_slot_offset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
+* `slotsPerEpoch`, the maximum number of slots in each epoch
+* `leaderScheduleSlotOffset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
 * `warmup`, whether epochs start short and grow
-* `first_normal_epoch`, first normal-length epoch, log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
-* `first_normal_slot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)
+* `firstNormalEpoch`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
+* `firstNormalSlot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1)
 
 #### Example:
 
@@ -374,7 +389,7 @@ The result field will be an object with the following fields:
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochSchedule"}' http://localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":{"first_normal_epoch":8,"first_normal_slot":8160,"leader_schedule_slot_offset":8192,"slots_per_epoch":8192,"warmup":true},"id":1}
+{"jsonrpc":"2.0","result":{"firstNormalEpoch":8,"firstNormalSlot":8160,"leaderScheduleSlotOffset":8192,"slotsPerEpoch":8192,"warmup":true},"id":1}
 ```
 
 ### getGenesisHash
````
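The snake_case to camelCase rename is a breaking change for typed clients. A hedged sketch of deserializing the new `getEpochSchedule` result shape; the `serde`/`serde_json` crates are an assumed choice, not part of this changeset:

```rust
use serde::Deserialize;

// Sketch: the camelCase field names in the new getEpochSchedule result map
// onto snake_case Rust fields via serde's rename_all attribute.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EpochSchedule {
    slots_per_epoch: u64,
    leader_schedule_slot_offset: u64,
    warmup: bool,
    first_normal_epoch: u64,
    first_normal_slot: u64,
}

fn main() {
    // The `result` object from the example above.
    let json = r#"{"firstNormalEpoch":8,"firstNormalSlot":8160,"leaderScheduleSlotOffset":8192,"slotsPerEpoch":8192,"warmup":true}"#;
    let schedule: EpochSchedule = serde_json::from_str(json).expect("valid JSON");
    assert_eq!(schedule.slots_per_epoch, 8192);
    println!("{:?}", schedule);
}
```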
````diff
@@ -485,18 +500,18 @@ The result field will be an array of arrays. Each sub array will contain:
 
 * `string` - the account Pubkey as base-58 encoded string and a JSON object, with the following sub fields:
   * `lamports`, number of lamports assigned to this account, as a u64
-  * `owner`, array of 32 bytes representing the program this account has been assigned to
-  * `data`, array of bytes representing any data associated with the account
+  * `owner`, base-58 encoded pubkey of the program this account has been assigned to
+  * `data`, base-58 encoded data associated with the account
   * `executable`, boolean indicating if the account contains a program \(and is strictly read-only\)
 
 #### Example:
 
 ```bash
 // Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T"]}' http://localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
+{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":1,"data":"", ["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":10,"data":[]]]},"id":1}
 ```
 
 ### getRecentBlockhash
````
````diff
@@ -509,11 +524,11 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used
 
 #### Results:
 
-An RpcResponse containing an array consisting of a string blockhash and FeeCalculator JSON object.
+An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object.
 
-* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to an array including:
-  * `string` - a Hash as base-58 encoded string
-  * `FeeCalculator object` - the fee schedule for this block hash
+* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to a JSON object including:
+  * `blockhash` - a Hash as base-58 encoded string
+  * `feeCalculator` - FeeCalculator object, the fee schedule for this block hash
 
 #### Example:
 
@@ -522,7 +537,7 @@ An RpcResponse containing an array consisting of a string blockhash and FeeCalcu
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}]},"id":1}
+{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","feeCalculator":{"lamportsPerSignature": 0}}},"id":1}
 ```
 
 ### getSignatureStatus
````
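Likewise, `getRecentBlockhash` now returns a named object where the `value` used to be a positional array. A hedged Rust sketch of parsing the new shape, again assuming `serde`/`serde_json`:

```rust
use serde::Deserialize;

// Sketch: the new getRecentBlockhash `value` is a named object, not an array.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct FeeCalculator {
    lamports_per_signature: u64,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct BlockhashValue {
    blockhash: String,
    fee_calculator: FeeCalculator,
}

#[derive(Deserialize)]
struct RpcResponse {
    value: BlockhashValue, // the `context` field is ignored by default
}

fn main() {
    // The `result` object from the example above.
    let json = r#"{"context":{"slot":1},"value":{"blockhash":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","feeCalculator":{"lamportsPerSignature":0}}}"#;
    let resp: RpcResponse = serde_json::from_str(json).expect("valid JSON");
    assert_eq!(resp.value.fee_calculator.lamports_per_signature, 0);
    println!("blockhash: {}", resp.value.blockhash);
}
```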
````diff
@@ -626,10 +641,10 @@ None
 
 #### Results:
 
-An array consisting of
+A JSON object consisting of
 
-* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
-* `u64` - the current storage turn slot
+* `blockhash` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
+* `slot` - the current storage turn slot
 
 #### Example:
 
@@ -637,7 +652,7 @@ An array consisting of
 // Request
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
 // Result
-{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
+{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": "2048"},"id":1}
 ```
 
 ### getStorageTurnRate
@@ -658,7 +673,7 @@ None
 // Request
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
 // Result
-{"jsonrpc":"2.0","result":"1024","id":1}
+{"jsonrpc":"2.0","result":1024,"id":1}
 ```
 
 ### getTransactionCount
````
````diff
@@ -855,7 +870,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
 
 #### Notification Format:
 
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"},"subscription":0}}
 ```
 
 ### accountUnsubscribe
@@ -913,7 +928,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
 * `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
 
 ```bash
-{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.22.0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
+{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{"pubkey": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","account":{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd"}},"subscription":0}}
 ```
 
 ### programUnsubscribe
````
Docs — Managing Forks:

```diff
@@ -1,6 +1,6 @@
 # Managing Forks
 
-The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the validator interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
+The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blockstore_. When the validator interprets the blockstore, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
 
 A validator selects a fork by submitting a vote to a slot leader on that fork. The vote commits the validator for a duration of time called a _lockout period_. The validator is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the validator has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs an operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a validator votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the validator will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint.
```
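Since each vote doubles the lockout, lockouts grow exponentially in the number of confirming votes. A small Rust sketch of the arithmetic; the 32-vote cap comes from the text above, while the base lockout of 2 slots is an illustrative assumption:

```rust
// Sketch: exponential vote lockouts. Each confirming vote doubles the lockout,
// reaching "max lockout" after 32 votes, per the text. The base of 2 slots is
// an assumption for illustration; the runtime's exact constants may differ.
const MAX_LOCKOUT_HISTORY: u32 = 32;
const INITIAL_LOCKOUT: u64 = 2;

fn lockout_slots(confirmation_count: u32) -> u64 {
    INITIAL_LOCKOUT.pow(confirmation_count.min(MAX_LOCKOUT_HISTORY))
}

fn main() {
    for votes in [1, 2, 8, MAX_LOCKOUT_HISTORY] {
        println!("{:>2} votes -> locked out for {} slots", votes, lockout_slots(votes));
    }
    // 32 votes locks the validator to the fork for 2^32 slots: effectively final.
}
```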
Docs — Blockstore chapter:

```diff
@@ -1,16 +1,16 @@
-# Blocktree
+# Blockstore
 
-After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
+After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
 
-The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
+The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
 
 Shreds are moved to a fork-able key space, the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them.
 
-Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
+Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
 
-## Functionalities of Blocktree
+## Functionalities of Blockstore
 
-1. Persistence: the Blocktree lives in the front of the nodes verification
+1. Persistence: the Blockstore lives in the front of the node's verification
 
    pipeline, right behind network receive and signature verification. If the
 
@@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 
 2. Repair: repair is the same as window repair above, but able to serve any
 
-   shred that's been received. Blocktree stores shreds with signatures,
+   shred that's been received. Blockstore stores shreds with signatures,
 
    preserving the chain of origination.
 
-3. Forks: Blocktree supports random access of shreds, so can support a
+3. Forks: Blockstore supports random access of shreds, so can support a
 
    validator's need to rollback and replay from a Bank checkpoint.
 
-4. Restart: with proper pruning/culling, the Blocktree can be replayed by
+4. Restart: with proper pruning/culling, the Blockstore can be replayed by
 
   ordered enumeration of entries from slot 0. The logic of the replay stage
 
   \(i.e. dealing with forks\) will have to be used for the most recent entries in
 
-  the Blocktree.
+  the Blockstore.
 
-## Blocktree Design
+## Blockstore Design
 
-1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
-2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
+1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
+2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
   * `slot_index` - The index of this slot
   * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
   * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
```
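A hedged sketch of the key scheme design item 1 describes: the concatenation of slot index and shred index. The big-endian encoding is an assumption chosen so byte-wise key order matches numeric order (as in a RocksDB-style store); Blockstore's actual column layout may differ:

```rust
// Sketch: a (slot, shred index) key as the concatenation of two u64s.
// Big-endian is assumed so lexicographic key order equals numeric order.
fn shred_key(slot: u64, shred_index: u64) -> [u8; 16] {
    let mut key = [0u8; 16];
    key[..8].copy_from_slice(&slot.to_be_bytes());
    key[8..].copy_from_slice(&shred_index.to_be_bytes());
    key
}

fn main() {
    // Shred indexes are slot-relative, so (5, 0) sorts before (5, 1),
    // and every key in slot 5 sorts before any key in slot 6.
    assert!(shred_key(5, 0) < shred_key(5, 1));
    assert!(shred_key(5, u64::MAX) < shred_key(6, 0));
}
```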
```diff
@@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 
    is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)
 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
-4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
-5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
+4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
+5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
 
-## Blocktree APIs
+## Blockstore APIs
 
-The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
+The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
 
 1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.
 
-Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
+Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
```
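A hedged sketch of how a ReplayStage-style consumer might drive the two subscription APIs quoted above. `Blockstore`, `SlotMeta`, and `Entry` here are illustrative stand-ins, not the real solana-ledger types, and the loop structure is illustrative only:

```rust
// Illustrative stand-ins for the real solana-ledger types.
struct Entry;
struct SlotMeta { slot: u64 }
struct Blockstore;

impl Blockstore {
    // Signatures mirror the subscription APIs quoted above.
    fn get_slots_since(&self, _slot_indexes: &[u64]) -> Vec<SlotMeta> {
        vec![SlotMeta { slot: 1 }]
    }
    fn get_slot_entries(&self, _slot: u64, _entry_start_index: usize, _max_entries: Option<u64>) -> Vec<Entry> {
        vec![]
    }
}

fn main() {
    let blockstore = Blockstore;
    let mut frontier = vec![0u64]; // slots we are currently replaying

    // Ask which new slots chain to our frontier, then pull their entries.
    for meta in blockstore.get_slots_since(&frontier) {
        let entries = blockstore.get_slot_entries(meta.slot, 0, Some(64));
        println!("slot {}: {} entries", meta.slot, entries.len());
        frontier.push(meta.slot); // the caller now owns chaining to the next slot
    }
}
```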
```diff
 ## Interfacing with Bank
 
@@ -80,11 +80,11 @@ The bank exposes to replay stage:
 
    be able to be chained below this vote
 
-Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
+Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
 
-## Pruning Blocktree
+## Pruning Blockstore
 
-Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.
+Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.
 
 Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.
```
Docs — Durable Transaction Nonces proposal:

```diff
@@ -26,10 +26,7 @@ account data. A transaction is now constructed in the normal way, but with the
 following additional requirements:
 
 1) The durable nonce value is used in the `recent_blockhash` field
-2) A `Nonce` instruction is issued (first?)
-3) The appropriate transaction flag is set, signaling that the usual
-hash age check should be skipped and the previous requirements enforced. This
-may be unnecessary, see [Runtime Support](#runtime-support) below
+2) A `NonceAdvance` instruction is the first issued in the transaction
 
 ### Contract Mechanics
```
````diff
@@ -66,21 +63,43 @@ WithdrawInstruction(to, lamports)
 success
 ```
 
-A client wishing to use this feature starts by creating a nonce account and
-depositing sufficient lamports as to make it rent-exempt. The resultant account
-will be in the `Uninitialized` state with no stored hash and thus unusable.
+A client wishing to use this feature starts by creating a nonce account under
+the system program. This account will be in the `Uninitialized` state with no
+stored hash, and thus unusable.
 
-The `Nonce` instruction is used to request that a new nonce be stored for the
-calling account. The first `Nonce` instruction run on a newly created account
-will drive the account's state to `Initialized`. As such, a `Nonce` instruction
-MUST be issued before the account can be used.
+To initialize a newly created account, a `NonceInitialize` instruction must be
+issued. This instruction takes one parameter, the `Pubkey` of the account's
+[authority](../offline-signing/durable-nonce.md#nonce-authority). Nonce accounts
+must be [rent-exempt](rent.md#two-tiered-rent-regime) to meet the data-persistence
+requirements of the feature, and as such, require that sufficient lamports be
+deposited before they can be initialized. Upon successful initialization, the
+cluster's most recent blockhash is stored along with the specified nonce authority
+`Pubkey`.
 
-To discard a `NonceAccount`, the client should issue a `Withdraw` instruction
-which withdraws all lamports, leaving a zero balance and making the account
-eligible for deletion.
+The `NonceAdvance` instruction is used to manage the account's stored nonce
+value. It stores the cluster's most recent blockhash in the account's state data,
+failing if that matches the value already stored there. This check prevents
+replaying transactions within the same block.
 
-`Nonce` and `Withdraw` instructions each will only succeed if the stored
-blockhash is no longer resident in sysvar.recent_blockhashes.
+Due to nonce accounts' [rent-exempt](rent.md#two-tiered-rent-regime) requirement,
+a custom withdraw instruction is used to move funds out of the account.
+The `NonceWithdraw` instruction takes a single argument, lamports to withdraw,
+and enforces rent-exemption by preventing the account's balance from falling
+below the rent-exempt minimum. An exception to this check is if the final balance
+would be zero lamports, which makes the account eligible for deletion. This
+account closure detail has an additional requirement that the stored nonce value
+must not match the cluster's most recent blockhash, as per `NonceAdvance`.
+
+The account's [nonce authority](../offline-signing/durable-nonce.md#nonce-authority)
+can be changed using the `NonceAuthorize` instruction. It takes one parameter,
+the `Pubkey` of the new authority. Executing this instruction grants full
+control over the account and its balance to the new authority.
+
+{% hint style="info" %}
+`NonceAdvance`, `NonceWithdraw` and `NonceAuthorize` all require the current
+[nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the
+account to sign the transaction.
+{% endhint %}
````
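A hedged Rust sketch of the state checks the instructions above enforce, using simplified stand-in types. The real logic lives in the system program; the names and shapes here are illustrative only, and authority-signature checks are omitted:

```rust
// Illustrative stand-ins; the real types and checks live in the system program.
type Hash = [u8; 32];

struct NonceAccount {
    stored_nonce: Hash,
    lamports: u64,
}

// NonceAdvance: store the newest blockhash, failing if it matches the value
// already stored. This is the check that prevents replay within one block.
fn nonce_advance(account: &mut NonceAccount, recent_blockhash: Hash) -> Result<(), &'static str> {
    if account.stored_nonce == recent_blockhash {
        return Err("nonce not advanced: blockhash unchanged");
    }
    account.stored_nonce = recent_blockhash;
    Ok(())
}

// NonceWithdraw: the balance may not fall below the rent-exempt minimum,
// except all the way to zero, which closes the account. (The text adds that a
// full withdrawal also requires the stored nonce not to match the cluster's
// most recent blockhash; that extra condition is elided here.)
fn nonce_withdraw(account: &mut NonceAccount, lamports: u64, rent_exempt_minimum: u64) -> Result<(), &'static str> {
    let remaining = account.lamports.checked_sub(lamports).ok_or("insufficient funds")?;
    if remaining != 0 && remaining < rent_exempt_minimum {
        return Err("balance would fall below the rent-exempt minimum");
    }
    account.lamports = remaining;
    Ok(())
}

fn main() {
    let mut acct = NonceAccount { stored_nonce: [0; 32], lamports: 5_000 };
    nonce_advance(&mut acct, [1; 32]).unwrap();
    assert!(nonce_advance(&mut acct, [1; 32]).is_err()); // same blockhash: rejected
    assert!(nonce_withdraw(&mut acct, 4_000, 2_000).is_err()); // 1_000 < minimum
    nonce_withdraw(&mut acct, 5_000, 2_000).unwrap(); // full withdrawal closes the account
}
```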
```diff
 ### Runtime Support
 
@@ -89,25 +108,11 @@ an extant `recent_blockhash` on the transaction and prevent fee theft via
 failed transaction replay, runtime modifications are necessary.
 
 Any transaction failing the usual `check_hash_age` validation will be tested
-for a Durable Transaction Nonce. This specifics of this test are undecided, some
-options:
+for a Durable Transaction Nonce. This is signaled by including a `NonceAdvance`
+instruction as the first instruction in the transaction.
 
-1) Require that the `Nonce` instruction be the first in the transaction
-   * + No ABI changes
-   * + Fast and simple
-   * - Sets a precedent that may lead to incompatible instruction combinations
-2) Blind search for a `Nonce` instruction over all instructions in the
-transaction
-   * + No ABI changes
-   * - Potentially slow
-3) [2], but guarded by a transaction flag
-   * - ABI changes
-   * - Wire size increase
-   * + We'll probably end up with some sort of flags eventually anyway
-
-Current prototyping will use [1]. If it is determined that a Durable Transaction
-Nonce is in use, the runtime will take the following actions to validate the
-transaction:
+If the runtime determines that a Durable Transaction Nonce is in use, it will
+take the following additional actions to validate the transaction:
 
 1) The `NonceAccount` specified in the `Nonce` instruction is loaded.
 2) The `NonceState` is deserialized from the `NonceAccount`'s data field and
@@ -118,6 +123,11 @@ one specified in the transaction's `recent_blockhash` field.
 
 If all three of the above checks succeed, the transaction is allowed to continue
 validation.
 
-### Open Questions
-
-* Should this feature be restricted in the number of uses per transaction?
+Since transactions that fail with an `InstructionError` are charged a fee and
+changes to their state rolled back, there is an opportunity for fee theft if a
+`NonceAdvance` instruction is reverted. A malicious validator could replay the
+failed transaction until the stored nonce is successfully advanced. Runtime
+changes prevent this behavior. When a durable nonce transaction fails with an
+`InstructionError` aside from the `NonceAdvance` instruction, the nonce account
+is rolled back to its pre-execution state as usual. Then the runtime advances
+its nonce value and the advanced nonce account is stored as if it succeeded.
```
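A hedged Rust sketch of the validation steps listed above, again with illustrative stand-in types rather than the runtime's actual code:

```rust
// Illustrative stand-ins for the runtime's durable-nonce validation steps.
type Hash = [u8; 32];

struct Instruction {
    is_nonce_advance: bool,
    nonce_account_data: Vec<u8>, // stands in for the loaded NonceAccount's data
}

struct Transaction {
    recent_blockhash: Hash,
    instructions: Vec<Instruction>,
}

enum NonceState {
    Uninitialized,
    Initialized(Hash),
}

// Stand-in for deserializing NonceState from the account's data field.
fn deserialize_nonce_state(data: &[u8]) -> Option<NonceState> {
    let (tag, rest) = data.split_first()?;
    Some(match (*tag, rest.len()) {
        (1, 32) => {
            let mut h = [0u8; 32];
            h.copy_from_slice(rest);
            NonceState::Initialized(h)
        }
        _ => NonceState::Uninitialized,
    })
}

fn check_nonce(tx: &Transaction) -> Result<(), &'static str> {
    // Durable-nonce use is signaled by a leading NonceAdvance instruction.
    let first = tx.instructions.first().ok_or("empty transaction")?;
    if !first.is_nonce_advance {
        return Err("not a durable nonce transaction");
    }
    // 1) + 2) Load the NonceAccount and deserialize its NonceState.
    let state = deserialize_nonce_state(&first.nonce_account_data).ok_or("bad account data")?;
    // 3) The stored hash must equal the transaction's recent_blockhash field.
    match state {
        NonceState::Initialized(stored) if stored == tx.recent_blockhash => Ok(()),
        NonceState::Initialized(_) => Err("stored nonce does not match recent_blockhash"),
        NonceState::Uninitialized => Err("nonce account uninitialized"),
    }
}

fn main() {
    let hash = [7u8; 32];
    let mut data = vec![1u8];
    data.extend_from_slice(&hash);
    let tx = Transaction {
        recent_blockhash: hash,
        instructions: vec![Instruction { is_nonce_advance: true, nonce_account_data: data }],
    };
    assert!(check_nonce(&tx).is_ok());
}
```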
Docs — Repair Service:

```diff
@@ -8,32 +8,32 @@ The RepairService is in charge of retrieving missing shreds that failed to be de
 
 1\) Validators can fail to receive particular shreds due to network failures
 
-2\) Consider a scenario where blocktree contains the set of slots {1, 3, 5}. Then Blocktree receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blocktree. However, there is no way to chain these slots to any of the existing banks in Blocktree, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
+2\) Consider a scenario where blockstore contains the set of slots {1, 3, 5}. Then Blockstore receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blockstore. However, there is no way to chain these slots to any of the existing banks in Blockstore, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
 
 3\) Validators that find themselves behind the cluster by an entire epoch struggle/fail to catch up because they do not have a leader schedule for future epochs. If nodes were to blindly accept repair shreds in these future epochs, this exposes nodes to spam.
 
 ## Repair Protocols
 
-The repair protocol makes best attempts to progress the forking structure of Blocktree.
+The repair protocol makes best attempts to progress the forking structure of Blockstore.
 
 The different protocol strategies to address the above challenges:
 
-1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blocktree tracks the latest root slot. RepairService will then periodically iterate every fork in blocktree starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair reqeusts per iteration.
+1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blockstore tracks the latest root slot. RepairService will then periodically iterate every fork in blockstore starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration.
 
    Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
 
 2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this protocol is to discover the chaining relationship of "orphan" slots that do not currently chain to any known fork.
-   * Blocktree will track the set of "orphan" slots in a separate column family.
-   * RepairService will periodically make `RequestOrphan` requests for each of the orphans in blocktree.
+   * Blockstore will track the set of "orphan" slots in a separate column family.
+   * RepairService will periodically make `RequestOrphan` requests for each of the orphans in blockstore.
 
   `RequestOrphan(orphan)` request - `orphan` is the orphan slot that the requestor wants to know the parents of `RequestOrphan(orphan)` response - The highest shreds for each of the first `N` parents of the requested `orphan`
 
   On receiving the responses `p`, where `p` is some shred in a parent slot, validators will:
 
-  * Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
+  * Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't already exist.
   * If `p.slot` does exist, update the parent of `p` based on `parents`
 
-  Note: that once these empty slots are added to blocktree, the `Shred Repair` protocol should attempt to fill those slots.
+  Note that once these empty slots are added to blockstore, the `Shred Repair` protocol should attempt to fill those slots.
 
   Note: Validators will only accept responses containing shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
```
|
||||
3. Repairmen \(Addresses Challenge \#3\): This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every shred that they receive against a known leader schedule.
|
||||
@ -45,5 +45,5 @@ The different protocol strategies to address the above challenges:
|
||||
|
||||
Observers of this gossip message with higher epochs \(repairmen\) send shreds to catch the lagging node up with the rest of the cluster. The repairmen are responsible for sending the slots within the epochs that are confrimed by the advertised `root` in gossip. The repairmen divide the responsibility of sending each of the missing slots in these epochs based on a random seed \(simple shred.index iteration by N, seeded with the repairman's node\_pubkey\). Ideally, each repairman in an N node cluster \(N nodes whose epochs are higher than that of the repairee\) sends 1/N of the missing shreds. Both data and coding shreds for missing slots are sent. Repairmen do not send shreds again to the same validator until they see the message in gossip updated, at which point they perform another iteration of this protocol.
|
||||
|
||||
Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blocktree and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blocktree, which holds the latest root.
|
||||
Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blockstore and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blockstore, which holds the latest root.
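
A sketch of the seeded division of labor described above; the names and seed
derivation are illustrative, not the actual RepairService implementation:

```rust
use solana_sdk::pubkey::Pubkey;

/// Which shred indices of a missing slot this repairman should send, given
/// `num_repairmen` peers sharing the work. Stepping by N from a pubkey-seeded
/// offset gives each repairman roughly 1/N of the shreds.
fn my_shred_indices(my_pubkey: &Pubkey, num_repairmen: u64, max_shred_index: u64) -> Vec<u64> {
    let offset = my_pubkey.as_ref()[0] as u64 % num_repairmen;
    (offset..max_shred_index)
        .step_by(num_repairmen as usize)
        .collect()
}
```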

@ -52,5 +52,5 @@ Solana's trustless sense of time and ordering provided by its PoH data structure

As discussed in the [Economic Design](../implemented-proposals/ed_overview/) section, annual validator interest rates are to be specified as a function of total percentage of circulating supply that has been staked. The cluster rewards validators who are online and actively participating in the validation process throughout the entirety of their _validation period_. For validators that go offline/fail to validate transactions during this period, their annual reward is effectively reduced.

Similarly, we may consider an algorithmic reduction in a validator's active staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered ‘active’ \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a super-majority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the ‘active’ amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.
Similarly, we may consider an algorithmic reduction in a validator's active staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered ‘active’ \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the ‘active’ amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.

@ -2,7 +2,7 @@

This design describes Solana's _Tower BFT_ algorithm. It addresses the following problems:

* Some forks may not end up accepted by the super-majority of the cluster, and voters need to recover from voting on such forks.
* Some forks may not end up accepted by the supermajority of the cluster, and voters need to recover from voting on such forks.
* Many forks may be votable by different voters, and each voter may see a different set of votable forks. The selected forks should eventually converge for the cluster.
* Reward based votes have an associated risk. Voters should have the ability to configure how much risk they take on.
* The [cost of rollback](tower-bft.md#cost-of-rollback) needs to be computable. It is important to clients that rely on some measurable form of Consistency. The costs to break consistency need to be computable, and increase super-linearly for older votes.

@ -84,7 +84,7 @@ let timestamp_slot = floor(current_slot / timestamp_interval);
```

Then the validator needs to gather all Vote WithTimestamp transactions from the
ledger that reference that slot, using `Blocktree::get_slot_entries()`. As these
ledger that reference that slot, using `Blockstore::get_slot_entries()`. As these
transactions could have taken some time to reach and be processed by the leader,
the validator needs to scan several completed blocks after the timestamp\_slot to
get a reasonable set of Timestamps. The exact number of slots will need to be

@ -75,3 +75,11 @@ Output
```text
4vC38p4bz7XyiXrk6HtaooUqwxTWKocf45cstASGtmrD398biNJnmTcUCVEojE7wVQvgdYbjHJqRFZPpzfCQpmUN
```

## Buying More Time to Sign

Typically a Solana transaction must be signed and accepted by the network within
a number of slots from the blockhash in its `recent_blockhash` field (~2min at
the time of this writing). If your signing procedure takes longer than this, a
[Durable Transaction Nonce](durable-nonce.md) can give you the extra time you
need.

263
book/src/offline-signing/durable-nonce.md
Normal file
@ -0,0 +1,263 @@
# Durable Transaction Nonces

Durable transaction nonces are a mechanism for getting around the typical
short lifetime of a transaction's [`recent_blockhash`](../transaction.md#recent-blockhash).
They are implemented as a Solana Program, the mechanics of which can be read
about in the [proposal](../implemented-proposals/durable-tx-nonces.md).
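
Under the hood, a durable nonce transaction carries the stored nonce in its
`recent_blockhash` field and leads with a nonce-advancing instruction. A
minimal SDK-level sketch, assuming the keypairs and nonce account below
already exist (the CLI commands in this guide do all of this for you):

```rust
use solana_sdk::{
    hash::Hash, pubkey::Pubkey, signature::Keypair, system_transaction,
    transaction::Transaction,
};

fn pay_with_nonce(
    from: &Keypair,
    to: &Pubkey,
    lamports: u64,
    nonce_account: &Pubkey,
    nonce_authority: &Keypair,
    stored_nonce: Hash, // the value reported by `solana get-nonce`
) -> Transaction {
    // Prepends the nonce-advancing instruction to an ordinary transfer
    system_transaction::nonced_transfer(from, to, lamports, nonce_account, nonce_authority, stored_nonce)
}
```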

## Usage Examples

Full usage details for durable nonce CLI commands can be found in the
[CLI reference](../api-reference/cli.md).

### Nonce Authority

Authority over a nonce account can optionally be assigned to another account. In
doing so the new authority inherits full control over the nonce account from the
previous authority, including the account creator. This feature enables the
creation of more complex account ownership arrangements and derived account
addresses not associated with a keypair. The `--nonce-authority <AUTHORITY_KEYPAIR>`
argument is used to specify this account and is supported by the following
commands
* `create-nonce-account`
* `new-nonce`
* `withdraw-from-nonce-account`
* `authorize-nonce-account`

### Nonce Account Creation

The durable transaction nonce feature uses an account to store the next nonce
value. Durable nonce accounts must be [rent-exempt](../implemented-proposals/rent.md#two-tiered-rent-regime),
so they must carry the minimum balance to achieve this.

A nonce account is created by first generating a new keypair, then creating the account on chain

- Command

```bash
solana-keygen new -o nonce-keypair.json
solana create-nonce-account nonce-keypair.json 1 SOL
```

- Output

```text
2SymGjGV4ksPdpbaqWFiDoBz8okvtiik4KE9cnMQgRHrRLySSdZ6jrEcpPifW4xUpp4z66XM9d9wM48sA7peG2XL
```

{% hint style="info" %}
To keep the keypair entirely offline, use the [Paper Wallet](../paper-wallet/README.md)
keypair generation [instructions](../paper-wallet/usage.md#seed-phrase-generation)
instead.
{% endhint %}

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-create-nonce-account)
{% endhint %}

### Querying the Stored Nonce Value

Creating a durable nonce transaction requires passing the stored nonce value as
the value to the `--blockhash` argument upon signing and submission. Obtain the
presently stored nonce value with

- Command

```bash
solana get-nonce nonce-keypair.json
```

- Output

```text
8GRipryfxcsxN8mAGjy8zbFo9ezaUsh47TsPzmZbuytU
```

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-get-nonce)
{% endhint %}

### Advancing the Stored Nonce Value

While not typically needed outside a more useful transaction, the stored nonce
value can be advanced by

- Command

```bash
solana new-nonce nonce-keypair.json
```

- Output

```text
44jYe1yPKrjuYDmoFTdgPjg8LFpYyh1PFKJqm5SC1PiSyAL8iw1bhadcAX1SL7KDmREEkmHpYvreKoNv6fZgfvUK
```

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-new-nonce)
{% endhint %}

### Display Nonce Account

Inspect a nonce account in a more human-friendly format with

- Command

```bash
solana show-nonce-account nonce-keypair.json
```

- Output

```text
balance: 0.5 SOL
minimum balance required: 0.00136416 SOL
nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS
```

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-show-nonce-account)
{% endhint %}

### Withdraw Funds from a Nonce Account

Withdraw funds from a nonce account with

- Command

```bash
solana withdraw-from-nonce-account nonce-keypair.json ~/.config/solana/id.json 0.5 SOL
```

- Output

```text
3foNy1SBqwXSsfSfTdmYKDuhnVheRnKXpoPySiUDBVeDEs6iMVokgqm7AqfTjbk7QBE8mqomvMUMNQhtdMvFLide
```

{% hint style="info" %}
Close a nonce account by withdrawing the full balance
{% endhint %}

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-withdraw-from-nonce-account)
{% endhint %}

### Assign a New Authority to a Nonce Account

Reassign the authority of a nonce account after creation with

- Command

```bash
solana authorize-nonce-account nonce-keypair.json nonce-authority.json
```

- Output

```text
3F9cg4zN9wHxLGx4c3cUKmqpej4oa67QbALmChsJbfxTgTffRiL3iUehVhR9wQmWgPua66jPuAYeL1K2pYYjbNoT
```

{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-authorize-nonce-account)
{% endhint %}

## Other Commands Supporting Durable Nonces

To make use of durable nonces with other CLI subcommands, two arguments must be
supported.
* `--nonce`, specifies the account storing the nonce value
* `--nonce-authority`, specifies an optional [nonce authority](#nonce-authority)

The following subcommands have received this treatment so far
* [`pay`](../api-reference/cli.md#solana-pay)
* [`delegate-stake`](../api-reference/cli.md#solana-delegate-stake)
* [`deactivate-stake`](../api-reference/cli.md#solana-deactivate-stake)

### Example Pay Using Durable Nonce

Here we demonstrate Alice paying Bob 1 SOL using a durable nonce. The procedure
is the same for all subcommands supporting durable nonces.

#### - Create accounts

First we need some accounts for Alice, Alice's nonce and Bob

```bash
$ solana-keygen new -o alice.json
$ solana-keygen new -o nonce.json
$ solana-keygen new -o bob.json
```

#### - Fund Alice's account

Alice will need some funds to create a nonce account and send to Bob. Airdrop
her some SOL

```bash
$ solana airdrop -k alice.json 10 SOL
10 SOL
```

#### - Create Alice's nonce account

Now Alice needs a nonce account. Create one

{% hint style="info" %}
Here, no separate [nonce authority](#nonce-authority) is employed, so `alice.json`
has full authority over the nonce account
{% endhint %}

```bash
$ solana create-nonce-account -k alice.json nonce.json 1 SOL
3KPZr96BTsL3hqera9up82KAU462Gz31xjqJ6eHUAjF935Yf8i1kmfEbo6SVbNaACKE5z6gySrNjVRvmS8DcPuwV
```

#### - A failed first attempt to pay Bob

Alice attempts to pay Bob, but takes too long to sign. The specified blockhash
expires and the transaction fails

```bash
$ solana pay -k alice.json --blockhash expiredDTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 bob.json 1 SOL
[2020-01-02T18:48:28.462911000Z ERROR solana_cli::cli] Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSPiGUAdtVg8zREWv4GiKjkcGNufgpcbFyRKRrA25NkgjZySEeKue5rawyeH5TzsV\" failed: None" })
Error: Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSPiGUAdtVg8zREWv4GiKjkcGNufgpcbFyRKRrA25NkgjZySEeKue5rawyeH5TzsV\" failed: None" })
```

#### - Nonce to the rescue!

Alice retries the transaction, this time specifying her nonce account and the
blockhash stored there

{% hint style="info" %}
Remember, `alice.json` is the [nonce authority](#nonce-authority) in this example
{% endhint %}

```bash
$ solana show-nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7
```
```bash
$ solana pay -k alice.json --blockhash F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 --nonce nonce.json bob.json 1 SOL
HR1368UKHVZyenmH7yVz5sBAijV6XAPeWbEiXEGVYQorRMcoijeNAbzZqEZiH8cDB8tk65ckqeegFjK8dHwNFgQ
```

#### - Success!

The transaction succeeds! Bob receives 1 SOL from Alice and Alice's stored
nonce advances to a new value

```bash
$ solana balance -k bob.json
1 SOL
```
```bash
$ solana show-nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: 6bjroqDcZgTv6Vavhqf81oBHTv3aMnX19UTB51YhAZnN
```

@ -28,17 +28,17 @@ slashing proof to punish this bad behavior.
2) Otherwise, we can simply mark the slot as dead and not playable. A slashing
proof may or may not be necessary depending on feasibility.

# Blocktree receiving shreds
# Blockstore receiving shreds

When blocktree receives a new shred `s`, there are two cases:
When blockstore receives a new shred `s`, there are the following cases:

1) `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred
`s'` in blocktree for that slot where `s'.index > s.index`. If so, together `s`
`s'` in blockstore for that slot where `s'.index > s.index`. If so, together `s`
and `s'` constitute a slashing proof.

2) Blocktree has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
with index `i`. If `s.index > i`, then together `s` and `s'` constitute a
slashing proof. In this case, blocktree will also not insert `s`.
slashing proof. In this case, blockstore will also not insert `s`.

3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for
the same index are a slashable condition. Details for this case are covered
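
A sketch of the checks in cases 1 and 2 above, with illustrative types
(`last_shred_index` stands in for whatever bookkeeping records a received
`LAST_SHRED_IN_SLOT` shred):

```rust
/// Returns true when the incoming shred, together with what blockstore has
/// already seen for the slot, constitutes a slashing proof.
fn is_slashing_proof(
    new_index: u64,
    new_is_last: bool,
    existing_indexes: &[u64],
    last_shred_index: Option<u64>,
) -> bool {
    // Case 1: the new shred claims to be last, but a higher index exists
    if new_is_last && existing_indexes.iter().any(|&i| i > new_index) {
        return true;
    }
    // Case 2: a LAST_SHRED_IN_SLOT shred was already recorded at index i,
    // and the new shred's index exceeds it
    if let Some(i) = last_shred_index {
        if new_index > i {
            return true;
        }
    }
    false
}
```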

@ -47,7 +47,7 @@ in the `Leader Duplicate Block Slashing` section.

# Replaying and validating ticks

1) Replay stage replays entries from blocktree, keeping track of the number of
1) Replay stage replays entries from blockstore, keeping track of the number of
ticks it has seen per slot, and verifying there are `hashes_per_tick` number of
hashes between ticks. After the tick from this last shred has been played,
replay stage then checks the total number of ticks.
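
A simplified sketch of that accounting, using the `Entry` shape from the
ledger crate (`num_hashes`, `is_tick()`); the real verification handles more
edge cases:

```rust
use solana_ledger::entry::Entry;

fn verify_slot_ticks(entries: &[Entry], hashes_per_tick: u64, ticks_per_slot: u64) -> bool {
    let mut hashes_since_tick = 0;
    let mut tick_count = 0;
    for entry in entries {
        hashes_since_tick += entry.num_hashes;
        if entry.is_tick() {
            // Exactly hashes_per_tick hashes must separate consecutive ticks
            if hashes_since_tick != hashes_per_tick {
                return false;
            }
            hashes_since_tick = 0;
            tick_count += 1;
        }
    }
    // After the last shred is played, the total tick count is checked
    tick_count == ticks_per_slot
}
```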

@ -21,7 +21,6 @@ We unwrap the many abstraction layers and build a single pipeline that can toggl
should forward transactions to the next leader.

* Hoist FetchStage and BroadcastStage out of TPU
* Blocktree renamed to Blockstore
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage

@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
Try running the following command to join the gossip network and view all the other nodes in the cluster:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
# Press ^C to exit
```

@ -146,7 +146,7 @@ solana-archiver --entrypoint testnet.solana.com:8001 --identity-keypair archiver
From another console, confirm the IP address and **identity pubkey** of your archiver are visible in the gossip network by running:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
```

Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:

@ -6,7 +6,7 @@ Confirm the IP address and **identity pubkey** of your validator is visible in
the gossip network by running:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
```

## Check Your Balance

@ -89,7 +89,7 @@ To monitor your validator during its warmup period:
* View your stake account, the delegation preference and details of your stake: `solana show-stake-account ~/validator-stake-keypair.json`
* `solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs
* `solana show-validators` displays the current active stake of all validators, including yours
* `solana show-show-stake-history ` shows the history of stake warming up and cooling down over recent epochs
* `solana show-stake-history` shows the history of stake warming up and cooling down over recent epochs
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`
* Once your stake is warmed up, you will see a stake balance listed for your validator on the [Solana Network Explorer](http://explorer.solana.com/validators)

@ -33,7 +33,7 @@ Try running following command to join the gossip network and view all the other
nodes in the cluster:

```bash
solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip spy --entrypoint testnet.solana.com:8001
# Press ^C to exit
```

@ -42,10 +42,6 @@ solana-gossip --entrypoint testnet.solana.com:8001 spy
If your machine has a GPU with CUDA installed \(Linux-only currently\), include
the `--cuda` argument to `solana-validator`.

```bash
export SOLANA_CUDA=1
```

When your validator is started, look for the following log message to indicate
that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`

@ -1,16 +1,16 @@
# Blocktree
# Blockstore

After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.

The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.

Shreds are moved to a fork-able key space, the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them.

Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.

## Functionalities of Blocktree
## Functionalities of Blockstore

1. Persistence: the Blocktree lives in the front of the node's verification
1. Persistence: the Blockstore lives in the front of the node's verification

pipeline, right behind network receive and signature verification. If the

@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out

2. Repair: repair is the same as window repair above, but able to serve any

shred that's been received. Blocktree stores shreds with signatures,
shred that's been received. Blockstore stores shreds with signatures,

preserving the chain of origination.

3. Forks: Blocktree supports random access of shreds, so can support a
3. Forks: Blockstore supports random access of shreds, so can support a

validator's need to rollback and replay from a Bank checkpoint.

4. Restart: with proper pruning/culling, the Blocktree can be replayed by
4. Restart: with proper pruning/culling, the Blockstore can be replayed by

ordered enumeration of entries from slot 0. The logic of the replay stage

\(i.e. dealing with forks\) will have to be used for the most recent entries in

the Blocktree.
the Blockstore.

## Blocktree Design
## Blockstore Design

1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
* `slot_index` - The index of this slot
* `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
* `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `m` \(i.e. the highest consecutive shred index\).
@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out

is\_rooted\(0\). is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
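
A sketch of the metadata described above, using the documented field names
(the real `SlotMeta` in the ledger crate carries additional bookkeeping, and
`is_rooted` is derived rather than stored):

```rust
struct SlotMeta {
    /// The index of this slot
    slot_index: u64,
    /// The number of blocks in the slot, used for chaining to a previous slot
    num_blocks: u64,
    /// The highest consecutive shred index received for the slot
    consumed: u64,
    /// Whether every slot from 0 up through this one is full
    is_rooted: bool,
}
```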

## Blocktree APIs
## Blockstore APIs

The Blocktree offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.

1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.

Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
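
A sketch of how a consumer might drive those two calls (the `Blockstore`
handle and the `replay` consumer are assumed to be in scope):

```rust
fn drain_new_entries(blockstore: &Blockstore, latest_slots: &[u64]) {
    // New slots connecting to any slot we already know about
    for meta in blockstore.get_slots_since(latest_slots) {
        // Pull entries from the start of each newly-connected slot, uncapped
        let entries = blockstore.get_slot_entries(meta.slot_index, 0, None);
        replay(&entries); // hypothetical consumer
    }
}
```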

## Interfacing with Bank

@ -80,11 +80,11 @@ The bank exposes to replay stage:

be able to be chained below this vote

Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.

## Pruning Blocktree
## Pruning Blockstore

Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.
Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.

Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.22.0"
version = "0.22.3"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@ -20,7 +20,7 @@ steps:
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 40
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"

@ -72,10 +72,14 @@ ARGS+=(
--env CI_JOB_ID
--env CI_PULL_REQUEST
--env CI_REPO_SLUG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
)

# Also propagate environment variables needed for codecov
# https://docs.codecov.io/docs/testing-with-docker#section-codecov-inside-docker
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CODECOV_ENVS=$(CI=true bash <(curl -s https://codecov.io/env))

if $INTERACTIVE; then
if [[ -n $1 ]]; then
echo
@ -83,8 +87,10 @@ if $INTERACTIVE; then
echo
fi
set -x
exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
# shellcheck disable=SC2086
exec docker run --interactive --tty "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" bash
fi

set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
# shellcheck disable=SC2086
exec docker run "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" "$@"

@ -5,7 +5,28 @@ cd "$(dirname "$0")/.."

me=$(basename "$0")

BOOK="book"
echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then
(

set -x
(
. ci/rust-version.sh stable
ci/docker-run.sh "$rust_stable_docker_image" make -Cbook -B svg
)
# make a local commit for the svgs
git add -A -f book/src/.gitbook/assets/.
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$me"
git commit -m "gitbook-cage update $(date -Is)"
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
# pop off the local commit
git reset --hard HEAD~
fi
)
fi


source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"
@ -31,6 +52,7 @@ EOF
exit 0
fi
repo=git@github.com:solana-labs/book.git
BOOK="book"
else
# book-edge and book-beta are published automatically on the tip of the branch
case $CHANNEL in
@ -73,27 +95,4 @@ echo "--- publish $BOOK"
fi
)

echo --- update gitbook-cage
(
if [[ -z $CI_BRANCH ]]; then
exit 0
fi

set -x
(
. ci/rust-version.sh
ci/docker-run.sh $rust_stable_docker_image make -Cbook -B svg
)
# make a local commit for the svgs
git add -A -f book/src/.gitbook/assets/.
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$me"
git commit -m "gitbook-cage update $(date -Is)"
git push -f git@github.com:solana-labs/solana-gitbook-cage.git HEAD:refs/heads/"$CI_BRANCH"
# pop off the local commit
git reset --hard HEAD~
fi
)

exit 0

@ -4,7 +4,12 @@ set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"

echo --- Creating tarball
if [[ -n "$CI_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi

(
set -x
sdk/bpf/scripts/package.sh
@ -12,7 +17,7 @@ echo --- Creating tarball
)

echo --- AWS S3 Store
if [[ -z $CHANNEL ]]; then
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Skipped
else
(
@ -24,7 +29,7 @@ else
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/bpf-sdk.tar.bz2 \
s3://solana-sdk/"$CHANNEL"/bpf-sdk.tar.bz2
s3://solana-sdk/"$CHANNEL_OR_TAG"/bpf-sdk.tar.bz2
)
fi

@ -53,7 +53,7 @@ windows)
;;
esac

echo --- Creating tarball
echo --- Creating release tarball
(
set -x
rm -rf solana-release/
@ -89,15 +89,21 @@ echo --- Creating tarball
)

# Metrics tarball is platform agnostic, only publish it from Linux
MAYBE_METRICS_TARBALL=
MAYBE_TARBALLS=
if [[ "$CI_OS_NAME" = linux ]]; then
metrics/create-metrics-tarball.sh
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]

)
MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2"
fi

source ci/upload-ci-artifact.sh

for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
upload-ci-artifact "$file"

if [[ -n $DO_NOT_PUBLISH_TAR ]]; then

@ -41,7 +41,8 @@ if [[ -z "$CODECOV_TOKEN" ]]; then
echo "^^^ +++"
echo CODECOV_TOKEN undefined, codecov.io upload skipped
else
bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CI=true bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info

annotate --style success --context codecov.io \
"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"

@ -111,7 +111,7 @@ test-move)
;;
test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
*)

@ -460,6 +460,14 @@ deploy() {
maybeGpu=(-G "${ENABLE_GPU}")
fi

if [[ -z $HASHES_PER_TICK ]]; then
maybeHashesPerTick="--hashes-per-tick auto"
elif [[ $HASHES_PER_TICK == skip ]]; then
maybeHashesPerTick=""
else
maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
fi

if [[ -z $DISABLE_AIRDROPS ]]; then
DISABLE_AIRDROPS="true"
fi
@ -470,6 +478,22 @@ deploy() {
maybeDisableAirdrops=""
fi

if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000" # 1 SOL
elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
maybeInternalNodesStakeLamports=""
else
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports ${INTERNAL_NODES_STAKE_LAMPORTS}"
fi

if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
maybeInternalNodesLamports="--internal-nodes-lamports 500000000000" # 500 SOL
elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
maybeInternalNodesLamports=""
else
maybeInternalNodesLamports="--internal-nodes-lamports ${INTERNAL_NODES_LAMPORTS}"
fi

EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/validators/all.yml
@ -503,11 +527,14 @@ deploy() {
--idle-clients \
-P -u \
-a tds-solana-com --letsencrypt tds.solana.com \
${maybeHashesPerTick} \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
${maybeDelete:+-D} \
${maybeDisableAirdrops} \
${maybeInternalNodesStakeLamports} \
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
--target-lamports-per-signature 0 \
--slots-per-epoch 4096 \

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "0.22.0"
version = "0.22.3"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ edition = "2018"
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
tiny-bip39 = "0.6.2"
url = "2.1.0"
chrono = "0.4"

@ -33,7 +33,10 @@ where
}
}

pub fn unix_timestamp_of(matches: &ArgMatches<'_>, name: &str) -> Option<UnixTimestamp> {
pub fn unix_timestamp_from_rfc3339_datetime(
matches: &ArgMatches<'_>,
name: &str,
) -> Option<UnixTimestamp> {
matches.value_of(name).and_then(|value| {
DateTime::parse_from_rfc3339(value)
.ok()

@ -1,4 +1,5 @@
use crate::keypair::ASK_KEYWORD;
use chrono::DateTime;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair_file, Signature};
@ -129,3 +130,9 @@ pub fn is_amount(amount: String) -> Result<(), String> {
))
}
}

pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
DateTime::parse_from_rfc3339(&value)
.map(|_| ())
.map_err(|e| format!("{:?}", e))
}
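
A sketch of wiring the new validator into a clap 2.x argument (the argument
name `timestamp` here is illustrative):

```rust
use clap::{App, Arg};

fn demo_app<'a, 'b>() -> App<'a, 'b> {
    App::new("demo").arg(
        Arg::with_name("timestamp")
            .long("timestamp")
            .takes_value(true)
            // clap hands the raw string to the validator before parsing proceeds
            .validator(is_rfc3339_datetime),
    )
}
```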

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.0"
version = "0.22.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -28,24 +28,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
solana-client = { path = "../client", version = "0.22.0" }
solana-config-program = { path = "../programs/config", version = "0.22.0" }
solana-faucet = { path = "../faucet", version = "0.22.0" }
solana-logger = { path = "../logger", version = "0.22.0" }
solana-net-utils = { path = "../net-utils", version = "0.22.0" }
solana-runtime = { path = "../runtime", version = "0.22.0" }
solana-sdk = { path = "../sdk", version = "0.22.0" }
solana-stake-program = { path = "../programs/stake", version = "0.22.0" }
solana-storage-program = { path = "../programs/storage", version = "0.22.0" }
solana-vote-program = { path = "../programs/vote", version = "0.22.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.0" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.3" }
url = "2.1.0"

[dev-dependencies]
solana-core = { path = "../core", version = "0.22.0" }
solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
solana-core = { path = "../core", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
tempfile = "3.1.0"

[[bin]]

481
cli/src/cli.rs
@ -72,6 +72,21 @@ impl std::ops::Deref for KeypairEq {
}
}

#[derive(Default, Debug, PartialEq)]
pub struct PayCommand {
pub lamports: u64,
pub to: Pubkey,
pub timestamp: Option<DateTime<Utc>>,
pub timestamp_pubkey: Option<Pubkey>,
pub witnesses: Option<Vec<Pubkey>>,
pub cancelable: bool,
pub sign_only: bool,
pub signers: Option<Vec<(Pubkey, Signature)>>,
pub blockhash: Option<Hash>,
pub nonce_account: Option<Pubkey>,
pub nonce_authority: Option<KeypairEq>,
}
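
Since `PayCommand` derives `Default`, call sites (and tests) can set only the
fields they care about; a sketch, assuming `to` is a `Pubkey` in scope:

```rust
let command = CliCommand::Pay(PayCommand {
    lamports: 42,
    to,
    ..PayCommand::default()
});
```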

#[derive(Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum CliCommand {
@ -117,18 +132,18 @@ pub enum CliCommand {
// Nonce commands
AuthorizeNonceAccount {
nonce_account: Pubkey,
nonce_authority: KeypairEq,
nonce_authority: Option<KeypairEq>,
new_authority: Pubkey,
},
CreateNonceAccount {
nonce_account: KeypairEq,
nonce_authority: Pubkey,
nonce_authority: Option<Pubkey>,
lamports: u64,
},
GetNonce(Pubkey),
NewNonce {
nonce_account: Pubkey,
nonce_authority: KeypairEq,
nonce_authority: Option<KeypairEq>,
},
ShowNonceAccount {
nonce_account_pubkey: Pubkey,
@ -136,7 +151,7 @@ pub enum CliCommand {
},
WithdrawFromNonceAccount {
nonce_account: Pubkey,
nonce_authority: KeypairEq,
nonce_authority: Option<KeypairEq>,
destination_account_pubkey: Pubkey,
lamports: u64,
},
@ -152,17 +167,23 @@ pub enum CliCommand {
},
DeactivateStake {
stake_account_pubkey: Pubkey,
stake_authority: Option<KeypairEq>,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<KeypairEq>,
},
DelegateStake {
stake_account_pubkey: Pubkey,
vote_account_pubkey: Pubkey,
stake_authority: Option<KeypairEq>,
force: bool,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<KeypairEq>,
},
RedeemVoteCredits(Pubkey, Pubkey),
ShowStakeHistory {
@ -172,8 +193,18 @@ pub enum CliCommand {
pubkey: Pubkey,
use_lamports_unit: bool,
},
StakeAuthorize(Pubkey, Pubkey, StakeAuthorize),
WithdrawStake(Pubkey, Pubkey, u64),
StakeAuthorize {
stake_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
stake_authorize: StakeAuthorize,
authority: Option<KeypairEq>,
},
WithdrawStake {
stake_account_pubkey: Pubkey,
destination_account_pubkey: Pubkey,
lamports: u64,
withdraw_authority: Option<KeypairEq>,
},
// Storage Commands
CreateStorageAccount {
account_owner: Pubkey,
@ -233,17 +264,7 @@ pub enum CliCommand {
},
Cancel(Pubkey),
Confirm(Signature),
Pay {
lamports: u64,
to: Pubkey,
timestamp: Option<DateTime<Utc>>,
timestamp_pubkey: Option<Pubkey>,
witnesses: Option<Vec<Pubkey>>,
cancelable: bool,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
},
Pay(PayCommand),
ShowAccount {
pubkey: Pubkey,
output_file: Option<String>,
@ -259,11 +280,12 @@ pub struct CliCommandInfo {
pub require_keypair: bool,
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
pub enum CliError {
BadParameter(String),
CommandNotRecognized(String),
InsufficientFundsForFee,
InvalidNonce(CliNonceError),
DynamicProgramError(String),
RpcRequestError(String),
KeypairFileNotFound(String),
@ -497,9 +519,19 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(&matches, "blockhash");
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
let authority =
keypair_of(&matches, NONCE_AUTHORITY_ARG.name).ok_or_else(|| {
CliError::BadParameter("Invalid keypair for nonce-authority".into())
})?;
Some(authority.into())
} else {
None
};

Ok(CliCommandInfo {
command: CliCommand::Pay {
command: CliCommand::Pay(PayCommand {
lamports,
to,
timestamp,
@ -509,7 +541,9 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
sign_only,
signers,
blockhash,
},
nonce_account,
nonce_authority,
}),
require_keypair: true,
})
}
@ -663,7 +697,6 @@ pub fn parse_create_address_with_seed(
"STAKE" => solana_stake_program::id(),
"VOTE" => solana_vote_program::id(),
"STORAGE" => solana_storage_program::id(),
"NONCE" => solana_sdk::nonce_program::id(),
_ => pubkey_of(matches, "program_id").unwrap(),
};

@ -887,6 +920,8 @@ fn process_pay(
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<&Keypair>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@ -903,7 +938,20 @@ fn process_pay(
};

if timestamp == None && *witnesses == None {
let mut tx = system_transaction::transfer(&config.keypair, to, lamports, blockhash);
let mut tx = if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
system_transaction::nonced_transfer(
&config.keypair,
to,
lamports,
nonce_account,
nonce_authority,
blockhash,
)
} else {
system_transaction::transfer(&config.keypair, to, lamports, blockhash)
};

if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}
@ -911,6 +959,11 @@ fn process_pay(
if sign_only {
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &blockhash)?;
}
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
@ -1148,13 +1201,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Assign authority to nonce account
CliCommand::AuthorizeNonceAccount {
nonce_account,
nonce_authority,
ref nonce_authority,
new_authority,
} => process_authorize_nonce_account(
&rpc_client,
config,
nonce_account,
nonce_authority,
nonce_authority.as_deref(),
new_authority,
),
// Create nonce account
@ -1166,7 +1219,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client,
config,
nonce_account,
nonce_authority,
*nonce_authority,
*lamports,
),
// Get the current nonce
@ -1176,8 +1229,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Get a new nonce
CliCommand::NewNonce {
nonce_account,
nonce_authority,
} => process_new_nonce(&rpc_client, config, nonce_account, nonce_authority),
ref nonce_authority,
} => process_new_nonce(
&rpc_client,
config,
nonce_account,
nonce_authority.as_deref(),
),
// Show the contents of a nonce account
CliCommand::ShowNonceAccount {
nonce_account_pubkey,
@ -1186,14 +1244,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Withdraw lamports from a nonce account
CliCommand::WithdrawFromNonceAccount {
nonce_account,
nonce_authority,
ref nonce_authority,
destination_account_pubkey,
lamports,
} => process_withdraw_from_nonce_account(
&rpc_client,
config,
&nonce_account,
nonce_authority,
nonce_authority.as_deref(),
&destination_account_pubkey,
*lamports,
),
@@ -1226,33 +1284,45 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         // Deactivate stake account
         CliCommand::DeactivateStake {
             stake_account_pubkey,
+            ref stake_authority,
             sign_only,
             ref signers,
             blockhash,
+            nonce_account,
+            ref nonce_authority,
         } => process_deactivate_stake_account(
             &rpc_client,
             config,
             &stake_account_pubkey,
+            stake_authority.as_deref(),
             *sign_only,
             signers,
             *blockhash,
+            *nonce_account,
+            nonce_authority.as_deref(),
         ),
         CliCommand::DelegateStake {
             stake_account_pubkey,
             vote_account_pubkey,
+            ref stake_authority,
            force,
             sign_only,
             ref signers,
             blockhash,
+            nonce_account,
+            ref nonce_authority,
         } => process_delegate_stake(
             &rpc_client,
             config,
             &stake_account_pubkey,
             &vote_account_pubkey,
+            stake_authority.as_deref(),
             *force,
             *sign_only,
             signers,
             *blockhash,
+            *nonce_account,
+            nonce_authority.as_deref(),
         ),
         CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => {
             process_redeem_vote_credits(
@@ -1274,27 +1344,33 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         CliCommand::ShowStakeHistory { use_lamports_unit } => {
             process_show_stake_history(&rpc_client, config, *use_lamports_unit)
         }
-        CliCommand::StakeAuthorize(
+        CliCommand::StakeAuthorize {
             stake_account_pubkey,
             new_authorized_pubkey,
             stake_authorize,
-        ) => process_stake_authorize(
+            ref authority,
+        } => process_stake_authorize(
             &rpc_client,
             config,
             &stake_account_pubkey,
             &new_authorized_pubkey,
             *stake_authorize,
+            authority.as_deref(),
         ),

-        CliCommand::WithdrawStake(stake_account_pubkey, destination_account_pubkey, lamports) => {
-            process_withdraw_stake(
-                &rpc_client,
-                config,
-                &stake_account_pubkey,
-                &destination_account_pubkey,
-                *lamports,
-            )
-        }
+        CliCommand::WithdrawStake {
+            stake_account_pubkey,
+            destination_account_pubkey,
+            lamports,
+            ref withdraw_authority,
+        } => process_withdraw_stake(
+            &rpc_client,
+            config,
+            &stake_account_pubkey,
+            &destination_account_pubkey,
+            *lamports,
+            withdraw_authority.as_deref(),
+        ),

         // Storage Commands

@@ -1438,7 +1514,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         // Confirm the last client transaction by signature
         CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature),
         // If client has positive balance, pay lamports to another address
-        CliCommand::Pay {
+        CliCommand::Pay(PayCommand {
             lamports,
             to,
             timestamp,
@@ -1448,7 +1524,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             sign_only,
             ref signers,
             blockhash,
-        } => process_pay(
+            nonce_account,
+            ref nonce_authority,
+        }) => process_pay(
             &rpc_client,
             config,
             *lamports,
@@ -1460,6 +1538,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             *sign_only,
             signers,
             *blockhash,
+            *nonce_account,
+            nonce_authority.as_deref(),
         ),
         CliCommand::ShowAccount {
             pubkey,
@@ -1710,7 +1790,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> {
                         .required(true)
                         .help(
                             "The program_id that the address will ultimately be used for, \n\
-                             or one of STAKE, VOTE, NONCE, and STORAGE keywords",
+                             or one of STAKE, VOTE, and STORAGE keywords",
                         ),
                 )
                 .arg(
@@ -1801,6 +1881,23 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> {
                        .takes_value(false)
                        .help("Sign the transaction offline"),
                )
+                .arg(
+                    Arg::with_name(NONCE_ARG.name)
+                        .long(NONCE_ARG.long)
+                        .takes_value(true)
+                        .value_name("PUBKEY")
+                        .requires("blockhash")
+                        .validator(is_pubkey_or_keypair)
+                        .help(NONCE_ARG.help),
+                )
+                .arg(
+                    Arg::with_name(NONCE_AUTHORITY_ARG.name)
+                        .long(NONCE_AUTHORITY_ARG.long)
+                        .takes_value(true)
+                        .requires(NONCE_ARG.name)
+                        .validator(is_keypair_or_ask_keyword)
+                        .help(NONCE_AUTHORITY_ARG.help),
+                )
                .arg(
                    Arg::with_name("signer")
                        .long("signer")
@@ -1903,12 +2000,19 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> {
 mod tests {
     use super::*;
     use serde_json::Value;
-    use solana_client::mock_rpc_client_request::SIGNATURE;
+    use solana_client::{
+        mock_rpc_client_request::SIGNATURE,
+        rpc_request::RpcRequest,
+        rpc_response::{Response, RpcAccount, RpcResponseContext},
+    };
     use solana_sdk::{
+        account::Account,
+        nonce_state::{Meta as NonceMeta, NonceState},
         signature::{read_keypair_file, write_keypair_file},
+        system_program,
         transaction::TransactionError,
     };
-    use std::path::PathBuf;
+    use std::{collections::HashMap, path::PathBuf};

     fn make_tmp_path(name: &str) -> String {
         let out_dir = std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
@@ -2041,7 +2145,6 @@ mod tests {
        for (name, program_id) in &[
            ("STAKE", solana_stake_program::id()),
            ("VOTE", solana_vote_program::id()),
-            ("NONCE", solana_sdk::nonce_program::id()),
            ("STORAGE", solana_storage_program::id()),
        ] {
            let test_create_address_with_seed = test_commands.clone().get_matches_from(vec![
@@ -2106,17 +2209,11 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
-                    witnesses: None,
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2136,17 +2233,12 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay_multiple_witnesses).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
                    witnesses: Some(vec![witness0, witness1]),
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2162,17 +2254,12 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay_single_witness).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
                    witnesses: Some(vec![witness0]),
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2192,17 +2279,13 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay_timestamp).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
                    timestamp: Some(dt),
                    timestamp_pubkey: Some(witness0),
-                    witnesses: None,
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2219,17 +2302,12 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
-                    witnesses: None,
-                    cancelable: false,
                    sign_only: true,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true,
            }
        );
@@ -2250,17 +2328,12 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
-                    witnesses: None,
-                    cancelable: false,
-                    sign_only: false,
                    signers: Some(vec![(key1, sig1)]),
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2283,17 +2356,12 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
-                    witnesses: None,
-                    cancelable: false,
-                    sign_only: false,
                    signers: Some(vec![(key1, sig1), (key2, sig2)]),
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2313,17 +2381,72 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
-                    timestamp: None,
-                    timestamp_pubkey: None,
-                    witnesses: None,
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
                    blockhash: Some(blockhash),
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
+
+        // Test Pay Subcommand w/ Nonce
+        let blockhash = Hash::default();
+        let blockhash_string = format!("{}", blockhash);
+        let test_pay = test_commands.clone().get_matches_from(vec![
+            "test",
+            "pay",
+            &pubkey_string,
+            "50",
+            "lamports",
+            "--blockhash",
+            &blockhash_string,
+            "--nonce",
+            &pubkey_string,
+        ]);
+        assert_eq!(
+            parse_command(&test_pay).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::Pay(PayCommand {
+                    lamports: 50,
+                    to: pubkey,
+                    blockhash: Some(blockhash),
+                    nonce_account: Some(pubkey),
+                    ..PayCommand::default()
+                }),
+                require_keypair: true
+            }
+        );
+
+        // Test Pay Subcommand w/ Nonce and Nonce Authority
+        let blockhash = Hash::default();
+        let blockhash_string = format!("{}", blockhash);
+        let keypair = read_keypair_file(&keypair_file).unwrap();
+        let test_pay = test_commands.clone().get_matches_from(vec![
+            "test",
+            "pay",
+            &pubkey_string,
+            "50",
+            "lamports",
+            "--blockhash",
+            &blockhash_string,
+            "--nonce",
+            &pubkey_string,
+            "--nonce-authority",
+            &keypair_file,
+        ]);
+        assert_eq!(
+            parse_command(&test_pay).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::Pay(PayCommand {
+                    lamports: 50,
+                    to: pubkey,
+                    blockhash: Some(blockhash),
+                    nonce_account: Some(pubkey),
+                    nonce_authority: Some(keypair.into()),
+                    ..PayCommand::default()
+                }),
+                require_keypair: true
+            }
+        );
@@ -2360,17 +2483,14 @@ mod tests {
        assert_eq!(
            parse_command(&test_pay_multiple_witnesses).unwrap(),
            CliCommandInfo {
-                command: CliCommand::Pay {
+                command: CliCommand::Pay(PayCommand {
                    lamports: 50,
                    to: pubkey,
                    timestamp: Some(dt),
                    timestamp_pubkey: Some(witness0),
                    witnesses: Some(vec![witness0, witness1]),
-                    cancelable: false,
-                    sign_only: false,
-                    signers: None,
-                    blockhash: None,
-                },
+                    ..PayCommand::default()
+                }),
                require_keypair: true
            }
        );
@@ -2484,16 +2604,24 @@ mod tests {

        let stake_pubkey = Pubkey::new_rand();
        let to_pubkey = Pubkey::new_rand();
-        config.command = CliCommand::WithdrawStake(stake_pubkey, to_pubkey, 100);
+        config.command = CliCommand::WithdrawStake {
+            stake_account_pubkey: stake_pubkey,
+            destination_account_pubkey: to_pubkey,
+            lamports: 100,
+            withdraw_authority: None,
+        };
        let signature = process_command(&config);
        assert_eq!(signature.unwrap(), SIGNATURE.to_string());

        let stake_pubkey = Pubkey::new_rand();
        config.command = CliCommand::DeactivateStake {
            stake_account_pubkey: stake_pubkey,
+            stake_authority: None,
            sign_only: false,
            signers: None,
            blockhash: None,
+            nonce_account: None,
+            nonce_authority: None,
        };
        let signature = process_command(&config);
        assert_eq!(signature.unwrap(), SIGNATURE.to_string());
@@ -2508,33 +2636,23 @@ mod tests {
        };
        assert_eq!(process_command(&config).unwrap(), "1234");

-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
-            timestamp: None,
-            timestamp_pubkey: None,
-            witnesses: None,
-            cancelable: false,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        let signature = process_command(&config);
        assert_eq!(signature.unwrap(), SIGNATURE.to_string());

        let date_string = "\"2018-09-19T17:30:59Z\"";
        let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
            timestamp: Some(dt),
            timestamp_pubkey: Some(config.keypair.pubkey()),
-            witnesses: None,
-            cancelable: false,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        let result = process_command(&config);
        let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
        assert_eq!(
@@ -2548,17 +2666,13 @@ mod tests {
        );

        let witness = Pubkey::new_rand();
-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
-            timestamp: None,
-            timestamp_pubkey: None,
            witnesses: Some(vec![witness]),
            cancelable: true,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        let result = process_command(&config);
        let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
        assert_eq!(
@@ -2571,6 +2685,61 @@ mod tests {
            SIGNATURE.to_string()
        );

+        // Nonced pay
+        let blockhash = Hash::default();
+        let nonce_response = json!(Response {
+            context: RpcResponseContext { slot: 1 },
+            value: json!(RpcAccount::encode(
+                Account::new_data(
+                    1,
+                    &NonceState::Initialized(NonceMeta::new(&config.keypair.pubkey()), blockhash),
+                    &system_program::ID,
+                )
+                .unwrap()
+            )),
+        });
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetAccountInfo, nonce_response);
+        config.rpc_client = Some(RpcClient::new_mock_with_mocks("".to_string(), mocks));
+        config.command = CliCommand::Pay(PayCommand {
+            lamports: 10,
+            to: bob_pubkey,
+            nonce_account: Some(bob_pubkey),
+            blockhash: Some(blockhash),
+            ..PayCommand::default()
+        });
+        let signature = process_command(&config);
+        assert_eq!(signature.unwrap(), SIGNATURE.to_string());
+
+        // Nonced pay w/ non-payer authority
+        let bob_keypair = Keypair::new();
+        let bob_pubkey = bob_keypair.pubkey();
+        let blockhash = Hash::default();
+        let nonce_authority_response = json!(Response {
+            context: RpcResponseContext { slot: 1 },
+            value: json!(RpcAccount::encode(
+                Account::new_data(
+                    1,
+                    &NonceState::Initialized(NonceMeta::new(&bob_pubkey), blockhash),
+                    &system_program::ID,
+                )
+                .unwrap()
+            )),
+        });
+        let mut mocks = HashMap::new();
+        mocks.insert(RpcRequest::GetAccountInfo, nonce_authority_response);
+        config.rpc_client = Some(RpcClient::new_mock_with_mocks("".to_string(), mocks));
+        config.command = CliCommand::Pay(PayCommand {
+            lamports: 10,
+            to: bob_pubkey,
+            blockhash: Some(blockhash),
+            nonce_account: Some(bob_pubkey),
+            nonce_authority: Some(bob_keypair.into()),
+            ..PayCommand::default()
+        });
+        let signature = process_command(&config);
+        assert_eq!(signature.unwrap(), SIGNATURE.to_string());
+
        let process_id = Pubkey::new_rand();
        config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
        let signature = process_command(&config);
@@ -2669,43 +2838,29 @@ mod tests {
        };
        assert!(process_command(&config).is_err());

-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
-            timestamp: None,
-            timestamp_pubkey: None,
-            witnesses: None,
-            cancelable: false,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        assert!(process_command(&config).is_err());

-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
            timestamp: Some(dt),
            timestamp_pubkey: Some(config.keypair.pubkey()),
-            witnesses: None,
-            cancelable: false,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        assert!(process_command(&config).is_err());

-        config.command = CliCommand::Pay {
+        config.command = CliCommand::Pay(PayCommand {
            lamports: 10,
            to: bob_pubkey,
-            timestamp: None,
-            timestamp_pubkey: None,
            witnesses: Some(vec![witness]),
            cancelable: true,
-            sign_only: false,
-            signers: None,
-            blockhash: None,
-        };
+            ..PayCommand::default()
+        });
        assert!(process_command(&config).is_err());

        config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
@@ -9,7 +9,7 @@ use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
 use console::{style, Emoji};
 use indicatif::{ProgressBar, ProgressStyle};
 use solana_clap_utils::{input_parsers::*, input_validators::*};
-use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
+use solana_client::{rpc_client::RpcClient, rpc_response::RpcVoteAccountInfo};
 use solana_sdk::{
     clock::{self, Slot},
     commitment_config::CommitmentConfig,
@@ -446,7 +446,7 @@ pub fn process_show_block_production(
        first_slot_in_epoch
    };
    let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
-    let end_slot_index = (end_slot - start_slot) as usize;
+    let end_slot_index = (end_slot - first_slot_in_epoch) as usize;

    let progress_bar = new_spinner_progress_bar();
    progress_bar.set_message(&format!(
@@ -458,9 +458,9 @@ pub fn process_show_block_production(
    let total_slots = end_slot_index - start_slot_index + 1;
    let total_blocks = confirmed_blocks.len();
    assert!(total_blocks <= total_slots);
-    let total_slots_missed = total_slots - total_blocks;
+    let total_slots_skipped = total_slots - total_blocks;
    let mut leader_slot_count = HashMap::new();
-    let mut leader_missed_slots = HashMap::new();
+    let mut leader_skipped_slots = HashMap::new();

    progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
    let leader_schedule = rpc_client
@@ -482,7 +482,7 @@ pub fn process_show_block_production(

    progress_bar.set_message(&format!(
        "Processing {} slots containing {} blocks and {} empty slots...",
-        total_slots, total_blocks, total_slots_missed
+        total_slots, total_blocks, total_slots_skipped
    ));

    let mut confirmed_blocks_index = 0;
@@ -491,7 +491,7 @@ pub fn process_show_block_production(
        let slot = start_slot + slot_index as u64;
        let slot_count = leader_slot_count.entry(leader).or_insert(0);
        *slot_count += 1;
-        let missed_slots = leader_missed_slots.entry(leader).or_insert(0);
+        let skipped_slots = leader_skipped_slots.entry(leader).or_insert(0);

        loop {
            if !confirmed_blocks.is_empty() {
@@ -506,9 +506,9 @@ pub fn process_show_block_production(
                    break;
                }
            }
-            *missed_slots += 1;
+            *skipped_slots += 1;
            individual_slot_status.push(
-                style(format!(" {:<15} {:<44} MISSED", slot, leader))
+                style(format!(" {:<15} {:<44} SKIPPED", slot, leader))
                    .red()
                    .to_string(),
            );
@@ -524,23 +524,23 @@ pub fn process_show_block_production(
            "Identity Pubkey",
            "Leader Slots",
            "Blocks Produced",
-            "Missed Slots",
-            "Missed Block Percentage",
+            "Skipped Slots",
+            "Skipped Slot Percentage",
        ))
        .bold()
    );

    let mut table = vec![];
    for (leader, leader_slots) in leader_slot_count.iter() {
-        let missed_slots = leader_missed_slots.get(leader).unwrap();
-        let blocks_produced = leader_slots - missed_slots;
+        let skipped_slots = leader_skipped_slots.get(leader).unwrap();
+        let blocks_produced = leader_slots - skipped_slots;
        table.push(format!(
            " {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
            leader,
            leader_slots,
            blocks_produced,
-            missed_slots,
-            *missed_slots as f64 / *leader_slots as f64 * 100.
+            skipped_slots,
+            *skipped_slots as f64 / *leader_slots as f64 * 100.
        ));
    }
    table.sort();
@@ -551,8 +551,8 @@ pub fn process_show_block_production(
        format!("Epoch {} total:", epoch),
        total_slots,
        total_blocks,
-        total_slots_missed,
-        total_slots_missed as f64 / total_slots as f64 * 100.
+        total_slots_skipped,
+        total_slots_skipped as f64 / total_slots as f64 * 100.
    );
    println!(
        " (using data from {} slots: {} to {})",
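The `end_slot_index` fix above matters because both indices must be measured from `first_slot_in_epoch`; mixing bases silently shrank the window whenever `start_slot != first_slot_in_epoch`. A standalone sketch of the corrected arithmetic, using illustrative numbers that are not taken from the change:

```rust
fn main() {
    let first_slot_in_epoch: u64 = 1000;
    let start_slot: u64 = 1004;
    let end_slot: u64 = 1013;

    let start_slot_index = (start_slot - first_slot_in_epoch) as usize; // 4
    // Fixed form: relative to the epoch start, not to start_slot.
    let end_slot_index = (end_slot - first_slot_in_epoch) as usize; // 13

    let total_slots = end_slot_index - start_slot_index + 1; // 10
    let total_blocks = 7; // pretend 7 of those slots produced blocks
    let total_slots_skipped = total_slots - total_blocks; // 3

    assert_eq!(total_slots, 10);
    println!(
        "skipped slot percentage: {:.2}%",
        total_slots_skipped as f64 / total_slots as f64 * 100.
    ); // prints 30.00%
}
```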
@@ -110,9 +110,10 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::Error>> {
    } else {
        let default_keypair_path = CliConfig::default_keypair_path();
        if !std::path::Path::new(&default_keypair_path).exists() {
-            return Err(CliError::KeypairFileNotFound(
-                "Generate a new keypair with `solana-keygen new`".to_string(),
-            )
+            return Err(CliError::KeypairFileNotFound(format!(
+                "Generate a new keypair at {} with `solana-keygen new`",
+                default_keypair_path
+            ))
            .into());
        }
        default_keypair_path
221  cli/src/nonce.rs
@@ -4,20 +4,47 @@ use crate::cli::{
     CliError, ProcessResult,
 };
 use clap::{App, Arg, ArgMatches, SubCommand};
-use solana_clap_utils::{input_parsers::*, input_validators::*};
+use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
 use solana_client::rpc_client::RpcClient;
 use solana_sdk::{
+    account::Account,
+    account_utils::State,
+    hash::Hash,
-    nonce_instruction::{authorize, create_nonce_account, nonce, withdraw, NonceError},
-    nonce_program,
     nonce_state::NonceState,
     pubkey::Pubkey,
     signature::{Keypair, KeypairUtil},
-    system_instruction::SystemError,
+    system_instruction::{
+        create_nonce_account, nonce_advance, nonce_authorize, nonce_withdraw, NonceError,
+        SystemError,
+    },
+    system_program,
     transaction::Transaction,
 };

+#[derive(Debug, Clone, PartialEq)]
+pub enum CliNonceError {
+    InvalidAccountOwner,
+    InvalidAccountData,
+    InvalidHash,
+    InvalidAuthority,
+    InvalidState,
+}
+
+pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
+    name: "nonce",
+    long: "nonce",
+    help: "Provide the nonce account to use when creating a nonced \n\
+           transaction. Nonced transactions are useful when a transaction \n\
+           requires a lengthy signing process. Learn more about nonced \n\
+           transactions at https://docs.solana.com/offline-signing/durable-nonce",
+};
+
+pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
+    name: "nonce_authority",
+    long: "nonce-authority",
+    help: "Provide the nonce authority keypair to use when signing a nonced transaction",
+};
+
 pub trait NonceSubCommands {
     fn nonce_subcommands(self) -> Self;
 }
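`NONCE_ARG` and `NONCE_AUTHORITY_ARG` only carry the flag metadata; each subcommand still builds its own `clap::Arg` from them, as the pay, delegate-stake, and deactivate-stake hunks in this diff do inline. A minimal sketch of that wiring, assuming the two constants above are in scope (the helper name `nonced_args` is hypothetical and not part of the change, and the per-subcommand validators are omitted since they differ between call sites):

```rust
use clap::{App, Arg};

// Hypothetical helper showing the .arg() chain this diff repeats inline.
fn nonced_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
    app.arg(
        Arg::with_name(NONCE_ARG.name)
            .long(NONCE_ARG.long)
            .takes_value(true)
            .value_name("PUBKEY")
            .requires("blockhash") // a nonced transaction also needs --blockhash
            .help(NONCE_ARG.help),
    )
    .arg(
        Arg::with_name(NONCE_AUTHORITY_ARG.name)
            .long(NONCE_AUTHORITY_ARG.long)
            .takes_value(true)
            .requires(NONCE_ARG.name) // an authority is meaningless without --nonce
            .help(NONCE_AUTHORITY_ARG.help),
    )
}
```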
@@ -183,20 +210,15 @@ impl NonceSubCommands for App<'_, '_> {
    }
 }

-fn resolve_nonce_authority(matches: &ArgMatches<'_>) -> Keypair {
-    keypair_of(matches, "nonce_authority")
-        .unwrap_or_else(|| keypair_of(matches, "nonce_account_keypair").unwrap())
-}
-
 pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
     let new_authority = pubkey_of(matches, "new_authority").unwrap();
-    let nonce_authority = resolve_nonce_authority(matches);
+    let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

     Ok(CliCommandInfo {
         command: CliCommand::AuthorizeNonceAccount {
             nonce_account,
-            nonce_authority: nonce_authority.into(),
+            nonce_authority,
             new_authority,
         },
         require_keypair: true,
@@ -206,8 +228,7 @@ pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
 pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap();
     let lamports = required_lamports_from(matches, "amount", "unit")?;
-    let nonce_authority =
-        pubkey_of(matches, "nonce_authority").unwrap_or_else(|| nonce_account.pubkey());
+    let nonce_authority = pubkey_of(matches, "nonce_authority");

     Ok(CliCommandInfo {
         command: CliCommand::CreateNonceAccount {
@@ -230,12 +251,12 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {

 pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
-    let nonce_authority = resolve_nonce_authority(matches);
+    let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

     Ok(CliCommandInfo {
         command: CliCommand::NewNonce {
             nonce_account,
-            nonce_authority: nonce_authority.into(),
+            nonce_authority,
         },
         require_keypair: true,
     })
@@ -260,12 +281,12 @@ pub fn parse_withdraw_from_nonce_account(
     let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
     let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
     let lamports = required_lamports_from(matches, "amount", "unit")?;
-    let nonce_authority = resolve_nonce_authority(matches);
+    let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());

     Ok(CliCommandInfo {
         command: CliCommand::WithdrawFromNonceAccount {
             nonce_account,
-            nonce_authority: nonce_authority.into(),
+            nonce_authority,
             destination_account_pubkey,
             lamports,
         },
@@ -273,16 +294,45 @@
     })
 }

+/// Check if a nonce account is initialized with the given authority and hash
+pub fn check_nonce_account(
+    nonce_account: &Account,
+    nonce_authority: &Pubkey,
+    nonce_hash: &Hash,
+) -> Result<(), Box<CliError>> {
+    if nonce_account.owner != system_program::ID {
+        return Err(CliError::InvalidNonce(CliNonceError::InvalidAccountOwner).into());
+    }
+    let nonce_state: NonceState = nonce_account
+        .state()
+        .map_err(|_| Box::new(CliError::InvalidNonce(CliNonceError::InvalidAccountData)))?;
+    match nonce_state {
+        NonceState::Initialized(meta, hash) => {
+            if &hash != nonce_hash {
+                Err(CliError::InvalidNonce(CliNonceError::InvalidHash).into())
+            } else if nonce_authority != &meta.nonce_authority {
+                Err(CliError::InvalidNonce(CliNonceError::InvalidAuthority).into())
+            } else {
+                Ok(())
+            }
+        }
+        NonceState::Uninitialized => {
+            Err(CliError::InvalidNonce(CliNonceError::InvalidState).into())
+        }
+    }
+}
+
 pub fn process_authorize_nonce_account(
     rpc_client: &RpcClient,
     config: &CliConfig,
     nonce_account: &Pubkey,
-    nonce_authority: &Keypair,
+    nonce_authority: Option<&Keypair>,
     new_authority: &Pubkey,
 ) -> ProcessResult {
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;

-    let ix = authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
+    let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
+    let ix = nonce_authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
     let mut tx = Transaction::new_signed_with_payer(
         vec![ix],
         Some(&config.keypair.pubkey()),
@@ -304,7 +354,7 @@ pub fn process_create_nonce_account(
     rpc_client: &RpcClient,
     config: &CliConfig,
     nonce_account: &Keypair,
-    nonce_authority: &Pubkey,
+    nonce_authority: Option<Pubkey>,
     lamports: u64,
 ) -> ProcessResult {
     let nonce_account_pubkey = nonce_account.pubkey();
@@ -313,12 +363,18 @@ pub fn process_create_nonce_account(
         (&nonce_account_pubkey, "nonce_account_pubkey".to_string()),
     )?;

-    if rpc_client.get_account(&nonce_account_pubkey).is_ok() {
-        return Err(CliError::BadParameter(format!(
-            "Unable to create nonce account. Nonce account already exists: {}",
-            nonce_account_pubkey,
-        ))
-        .into());
+    if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_pubkey) {
+        let err_msg = if nonce_account.owner == system_program::id()
+            && State::<NonceState>::state(&nonce_account).is_ok()
+        {
+            format!("Nonce account {} already exists", nonce_account_pubkey)
+        } else {
+            format!(
+                "Account {} already exists and is not a nonce account",
+                nonce_account_pubkey
+            )
+        };
+        return Err(CliError::BadParameter(err_msg).into());
     }

     let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(NonceState::size())?;
@@ -330,10 +386,11 @@ pub fn process_create_nonce_account(
         .into());
     }

+    let nonce_authority = nonce_authority.unwrap_or_else(|| config.keypair.pubkey());
     let ixs = create_nonce_account(
         &config.keypair.pubkey(),
         &nonce_account_pubkey,
-        nonce_authority,
+        &nonce_authority,
         lamports,
     );
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
@@ -356,7 +413,7 @@

 pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
     let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
-    if nonce_account.owner != nonce_program::id() {
+    if nonce_account.owner != system_program::id() {
         return Err(CliError::RpcRequestError(format!(
             "{:?} is not a nonce account",
             nonce_account_pubkey
@@ -378,7 +435,7 @@ pub fn process_new_nonce(
     rpc_client: &RpcClient,
     config: &CliConfig,
     nonce_account: &Pubkey,
-    nonce_authority: &Keypair,
+    nonce_authority: Option<&Keypair>,
 ) -> ProcessResult {
     check_unique_pubkeys(
         (&config.keypair.pubkey(), "cli keypair".to_string()),
@@ -392,7 +449,8 @@ pub fn process_new_nonce(
         .into());
     }

-    let ix = nonce(&nonce_account, &nonce_authority.pubkey());
+    let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
+    let ix = nonce_advance(&nonce_account, &nonce_authority.pubkey());
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
     let mut tx = Transaction::new_signed_with_payer(
         vec![ix],
@@ -417,7 +475,7 @@ pub fn process_show_nonce_account(
     use_lamports_unit: bool,
 ) -> ProcessResult {
     let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
-    if nonce_account.owner != nonce_program::id() {
+    if nonce_account.owner != system_program::id() {
         return Err(CliError::RpcRequestError(format!(
             "{:?} is not a nonce account",
             nonce_account_pubkey
@@ -458,13 +516,14 @@ pub fn process_withdraw_from_nonce_account(
     rpc_client: &RpcClient,
     config: &CliConfig,
     nonce_account: &Pubkey,
-    nonce_authority: &Keypair,
+    nonce_authority: Option<&Keypair>,
     destination_account_pubkey: &Pubkey,
     lamports: u64,
 ) -> ProcessResult {
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;

-    let ix = withdraw(
+    let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
+    let ix = nonce_withdraw(
         nonce_account,
         &nonce_authority.pubkey(),
         destination_account_pubkey,
@@ -491,7 +550,13 @@
 mod tests {
     use super::*;
     use crate::cli::{app, parse_command};
-    use solana_sdk::signature::{read_keypair_file, write_keypair};
+    use solana_sdk::{
+        account::Account,
+        hash::hash,
+        nonce_state::{Meta as NonceMeta, NonceState},
+        signature::{read_keypair_file, write_keypair},
+        system_program,
+    };
     use tempfile::NamedTempFile;

     fn make_tmp_file() -> (String, NamedTempFile) {
@@ -524,7 +589,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::AuthorizeNonceAccount {
                    nonce_account: nonce_account_pubkey,
-                    nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
+                    nonce_authority: None,
                    new_authority: Pubkey::default(),
                },
                require_keypair: true,
@@ -545,7 +610,9 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::AuthorizeNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
-                    nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+                    nonce_authority: Some(
+                        read_keypair_file(&authority_keypair_file).unwrap().into()
+                    ),
                    new_authority: Pubkey::default(),
                },
                require_keypair: true,
@@ -565,7 +632,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::CreateNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
-                    nonce_authority: nonce_account_pubkey,
+                    nonce_authority: None,
                    lamports: 50,
                },
                require_keypair: true
@@ -587,7 +654,9 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::CreateNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
-                    nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().pubkey(),
+                    nonce_authority: Some(
+                        read_keypair_file(&authority_keypair_file).unwrap().pubkey()
+                    ),
                    lamports: 50,
                },
                require_keypair: true
@@ -619,7 +688,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::NewNonce {
                    nonce_account: nonce_account.pubkey(),
-                    nonce_authority: nonce_account.into(),
+                    nonce_authority: None,
                },
                require_keypair: true
            }
@@ -639,7 +708,9 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::NewNonce {
                    nonce_account: nonce_account.pubkey(),
-                    nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+                    nonce_authority: Some(
+                        read_keypair_file(&authority_keypair_file).unwrap().into()
+                    ),
                },
                require_keypair: true
            }
@@ -676,7 +747,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::WithdrawFromNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
-                    nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
+                    nonce_authority: None,
                    destination_account_pubkey: nonce_account_pubkey,
                    lamports: 42
                },
@@ -697,7 +768,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::WithdrawFromNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
-                    nonce_authority: read_keypair_file(&keypair_file).unwrap().into(),
+                    nonce_authority: None,
                    destination_account_pubkey: nonce_account_pubkey,
                    lamports: 42000000000
                },
@@ -721,7 +792,9 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::WithdrawFromNonceAccount {
                    nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
-                    nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+                    nonce_authority: Some(
+                        read_keypair_file(&authority_keypair_file).unwrap().into()
+                    ),
                    destination_account_pubkey: nonce_account_pubkey,
                    lamports: 42
                },
@@ -729,4 +802,66 @@ mod tests {
            }
        );
    }
+
+    #[test]
+    fn test_check_nonce_account() {
+        let blockhash = Hash::default();
+        let nonce_pubkey = Pubkey::new_rand();
+        let valid = Account::new_data(
+            1,
+            &NonceState::Initialized(NonceMeta::new(&nonce_pubkey), blockhash),
+            &system_program::ID,
+        );
+        assert!(check_nonce_account(&valid.unwrap(), &nonce_pubkey, &blockhash).is_ok());
+
+        let invalid_owner = Account::new_data(
+            1,
+            &NonceState::Initialized(NonceMeta::new(&nonce_pubkey), blockhash),
+            &Pubkey::new(&[1u8; 32]),
+        );
+        assert_eq!(
+            check_nonce_account(&invalid_owner.unwrap(), &nonce_pubkey, &blockhash),
+            Err(Box::new(CliError::InvalidNonce(
+                CliNonceError::InvalidAccountOwner
+            ))),
+        );
+
+        let invalid_data = Account::new_data(1, &"invalid", &system_program::ID);
+        assert_eq!(
+            check_nonce_account(&invalid_data.unwrap(), &nonce_pubkey, &blockhash),
+            Err(Box::new(CliError::InvalidNonce(
+                CliNonceError::InvalidAccountData
+            ))),
+        );
+
+        let invalid_hash = Account::new_data(
+            1,
+            &NonceState::Initialized(NonceMeta::new(&nonce_pubkey), hash(b"invalid")),
+            &system_program::ID,
+        );
+        assert_eq!(
+            check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash),
+            Err(Box::new(CliError::InvalidNonce(CliNonceError::InvalidHash))),
+        );
+
+        let invalid_authority = Account::new_data(
+            1,
+            &NonceState::Initialized(NonceMeta::new(&Pubkey::new_rand()), blockhash),
+            &system_program::ID,
+        );
+        assert_eq!(
+            check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash),
+            Err(Box::new(CliError::InvalidNonce(
+                CliNonceError::InvalidAuthority
+            ))),
+        );
+
+        let invalid_state = Account::new_data(1, &NonceState::Uninitialized, &system_program::ID);
+        assert_eq!(
+            check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash),
+            Err(Box::new(CliError::InvalidNonce(
+                CliNonceError::InvalidState
+            ))),
+        );
+    }
 }
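Together with `check_nonce_account`, these tests pin down the pre-flight contract: fetch the account, then validate its owner, state, stored hash, and authority before submitting a nonced transaction. A minimal sketch of how a caller might use the helper, assuming it is imported from this module; the wrapper function is hypothetical, and panics stand in for the CLI's real error plumbing:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{hash::Hash, pubkey::Pubkey};

// Mirrors the check that process_pay and process_deactivate_stake_account
// perform before submitting a nonced transaction.
fn assert_nonce_ready(rpc: &RpcClient, nonce_account: &Pubkey, authority: &Pubkey, hash: &Hash) {
    let account = rpc
        .get_account(nonce_account)
        .expect("failed to fetch nonce account");
    // Rejects a wrong owner, uninitialized state, stale hash, or wrong authority.
    check_nonce_account(&account, authority, hash).expect("nonce account not usable");
}
```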
540  cli/src/stake.rs
@@ -1,12 +1,15 @@
-use crate::cli::{
-    build_balance_message, check_account_for_fee, check_unique_pubkeys,
-    get_blockhash_fee_calculator, log_instruction_custom_error, replace_signatures,
-    required_lamports_from, return_signers, CliCommand, CliCommandInfo, CliConfig, CliError,
-    ProcessResult,
+use crate::{
+    cli::{
+        build_balance_message, check_account_for_fee, check_unique_pubkeys,
+        get_blockhash_fee_calculator, log_instruction_custom_error, replace_signatures,
+        required_lamports_from, return_signers, CliCommand, CliCommandInfo, CliConfig, CliError,
+        ProcessResult,
+    },
+    nonce::{check_nonce_account, NONCE_ARG, NONCE_AUTHORITY_ARG},
 };
 use clap::{App, Arg, ArgMatches, SubCommand};
 use console::style;
-use solana_clap_utils::{input_parsers::*, input_validators::*};
+use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
 use solana_client::rpc_client::RpcClient;
 use solana_sdk::signature::{Keypair, Signature};
 use solana_sdk::{
@@ -29,6 +32,36 @@ use solana_stake_program::{
 use solana_vote_program::vote_state::VoteState;
 use std::ops::Deref;

+pub const STAKE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
+    name: "stake_authority",
+    long: "stake-authority",
+    help: "Public key of authorized staker (defaults to cli config pubkey)",
+};
+
+pub const WITHDRAW_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
+    name: "withdraw_authority",
+    long: "withdraw-authority",
+    help: "Public key of authorized withdrawer (defaults to cli config pubkey)",
+};
+
+fn stake_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
+    Arg::with_name(STAKE_AUTHORITY_ARG.name)
+        .long(STAKE_AUTHORITY_ARG.long)
+        .takes_value(true)
+        .value_name("KEYPAIR")
+        .validator(is_keypair_or_ask_keyword)
+        .help(STAKE_AUTHORITY_ARG.help)
+}
+
+fn withdraw_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
+    Arg::with_name(WITHDRAW_AUTHORITY_ARG.name)
+        .long(WITHDRAW_AUTHORITY_ARG.long)
+        .takes_value(true)
+        .value_name("KEYPAIR")
+        .validator(is_keypair_or_ask_keyword)
+        .help(WITHDRAW_AUTHORITY_ARG.help)
+}
+
 pub trait StakeSubCommands {
     fn stake_subcommands(self) -> Self;
 }
@@ -83,24 +116,25 @@ impl StakeSubCommands for App<'_, '_> {
                    Arg::with_name("lockup_date")
                        .long("lockup-date")
                        .value_name("RFC3339 DATE TIME")
+                        .validator(is_rfc3339_datetime)
                        .takes_value(true)
                        .help("The date and time at which this account will be available for withdrawal")
                )
                .arg(
-                    Arg::with_name("authorized_staker")
-                        .long("authorized-staker")
+                    Arg::with_name(STAKE_AUTHORITY_ARG.name)
+                        .long(STAKE_AUTHORITY_ARG.long)
                        .value_name("PUBKEY")
                        .takes_value(true)
                        .validator(is_pubkey_or_keypair)
-                        .help("Public key of authorized staker (defaults to cli config pubkey)")
+                        .help(STAKE_AUTHORITY_ARG.help)
                )
                .arg(
-                    Arg::with_name("authorized_withdrawer")
-                        .long("authorized-withdrawer")
+                    Arg::with_name(WITHDRAW_AUTHORITY_ARG.name)
+                        .long(WITHDRAW_AUTHORITY_ARG.long)
                        .value_name("PUBKEY")
                        .takes_value(true)
                        .validator(is_pubkey_or_keypair)
-                        .help("Public key of the authorized withdrawer (defaults to cli config pubkey)")
+                        .help(WITHDRAW_AUTHORITY_ARG.help)
                )
        )
        .subcommand(
@@ -131,6 +165,7 @@ impl StakeSubCommands for App<'_, '_> {
                        .validator(is_pubkey_or_keypair)
                        .help("The vote account to which the stake will be delegated")
                )
+                .arg(stake_authority_arg())
                .arg(
                    Arg::with_name("sign_only")
                        .long("sign-only")
@@ -153,6 +188,23 @@ impl StakeSubCommands for App<'_, '_> {
                        .takes_value(true)
                        .validator(is_hash)
                        .help("Use the supplied blockhash"),
                )
+                .arg(
+                    Arg::with_name(NONCE_ARG.name)
+                        .long(NONCE_ARG.long)
+                        .takes_value(true)
+                        .value_name("PUBKEY")
+                        .requires("blockhash")
+                        .validator(is_pubkey)
+                        .help(NONCE_ARG.help)
+                )
+                .arg(
+                    Arg::with_name(NONCE_AUTHORITY_ARG.name)
+                        .long(NONCE_AUTHORITY_ARG.long)
+                        .takes_value(true)
+                        .requires(NONCE_ARG.name)
+                        .validator(is_keypair_or_ask_keyword)
+                        .help(NONCE_AUTHORITY_ARG.help)
+                ),
        )
        .subcommand(
@@ -176,6 +228,7 @@ impl StakeSubCommands for App<'_, '_> {
                        .validator(is_pubkey_or_keypair)
                        .help("New authorized staker")
                )
+                .arg(stake_authority_arg())
        )
        .subcommand(
            SubCommand::with_name("stake-authorize-withdrawer")
@@ -198,6 +251,7 @@ impl StakeSubCommands for App<'_, '_> {
                        .validator(is_pubkey_or_keypair)
                        .help("New authorized withdrawer")
                )
+                .arg(withdraw_authority_arg())
        )
        .subcommand(
            SubCommand::with_name("deactivate-stake")
@@ -210,6 +264,7 @@ impl StakeSubCommands for App<'_, '_> {
                        .required(true)
                        .help("Stake account to be deactivated.")
                )
+                .arg(stake_authority_arg())
                .arg(
                    Arg::with_name("sign_only")
                        .long("sign-only")
@@ -232,6 +287,23 @@ impl StakeSubCommands for App<'_, '_> {
                        .takes_value(true)
                        .validator(is_hash)
                        .help("Use the supplied blockhash"),
                )
+                .arg(
+                    Arg::with_name(NONCE_ARG.name)
+                        .long(NONCE_ARG.long)
+                        .takes_value(true)
+                        .value_name("PUBKEY")
+                        .requires("blockhash")
+                        .validator(is_pubkey)
+                        .help(NONCE_ARG.help)
+                )
+                .arg(
+                    Arg::with_name(NONCE_AUTHORITY_ARG.name)
+                        .long(NONCE_AUTHORITY_ARG.long)
+                        .takes_value(true)
+                        .requires(NONCE_ARG.name)
+                        .validator(is_keypair_or_ask_keyword)
+                        .help(NONCE_AUTHORITY_ARG.help)
+                ),
        )
        .subcommand(
@@ -272,6 +344,7 @@ impl StakeSubCommands for App<'_, '_> {
                        .possible_values(&["SOL", "lamports"])
                        .help("Specify unit to use for request")
                )
+                .arg(withdraw_authority_arg())
        )
        .subcommand(
            SubCommand::with_name("redeem-vote-credits")
@@ -330,10 +403,10 @@
 pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let stake_account = keypair_of(matches, "stake_account").unwrap();
     let epoch = value_of(&matches, "lockup_epoch").unwrap_or(0);
-    let unix_timestamp = unix_timestamp_of(&matches, "lockup_date").unwrap_or(0);
+    let unix_timestamp = unix_timestamp_from_rfc3339_datetime(&matches, "lockup_date").unwrap_or(0);
     let custodian = pubkey_of(matches, "custodian").unwrap_or_default();
-    let staker = pubkey_of(matches, "authorized_staker");
-    let withdrawer = pubkey_of(matches, "authorized_withdrawer");
+    let staker = pubkey_of(matches, STAKE_AUTHORITY_ARG.name);
+    let withdrawer = pubkey_of(matches, WITHDRAW_AUTHORITY_ARG.name);
     let lamports = required_lamports_from(matches, "amount", "unit")?;

     Ok(CliCommandInfo {
@@ -355,20 +428,38 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
 pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
     let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
+    let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
+        let authority = keypair_of(&matches, STAKE_AUTHORITY_ARG.name)
+            .ok_or_else(|| CliError::BadParameter("Invalid keypair for stake-authority".into()))?;
+        Some(authority.into())
+    } else {
+        None
+    };
     let force = matches.is_present("force");
     let sign_only = matches.is_present("sign_only");
     let signers = pubkeys_sigs_of(&matches, "signer");
     let blockhash = value_of(matches, "blockhash");
     let require_keypair = signers.is_none();
+    let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
+    let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
+        let authority = keypair_of(&matches, NONCE_AUTHORITY_ARG.name)
+            .ok_or_else(|| CliError::BadParameter("Invalid keypair for nonce-authority".into()))?;
+        Some(authority.into())
+    } else {
+        None
+    };

     Ok(CliCommandInfo {
         command: CliCommand::DelegateStake {
             stake_account_pubkey,
             vote_account_pubkey,
+            stake_authority,
             force,
             sign_only,
             signers,
             blockhash,
+            nonce_account,
+            nonce_authority,
         },
         require_keypair,
     })
@@ -379,14 +470,27 @@ pub fn parse_stake_authorize(
     stake_authorize: StakeAuthorize,
 ) -> Result<CliCommandInfo, CliError> {
     let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
-    let authorized_pubkey = pubkey_of(matches, "authorized_pubkey").unwrap();
+    let new_authorized_pubkey = pubkey_of(matches, "authorized_pubkey").unwrap();
+    let authority_flag = match stake_authorize {
+        StakeAuthorize::Staker => STAKE_AUTHORITY_ARG.name,
+        StakeAuthorize::Withdrawer => WITHDRAW_AUTHORITY_ARG.name,
+    };
+    let authority = if matches.is_present(authority_flag) {
+        let authority = keypair_of(&matches, authority_flag).ok_or_else(|| {
+            CliError::BadParameter(format!("Invalid keypair for {}", authority_flag))
+        })?;
+        Some(authority.into())
+    } else {
+        None
+    };

     Ok(CliCommandInfo {
-        command: CliCommand::StakeAuthorize(
+        command: CliCommand::StakeAuthorize {
             stake_account_pubkey,
-            authorized_pubkey,
+            new_authorized_pubkey,
             stake_authorize,
-        ),
+            authority,
+        },
         require_keypair: true,
     })
 }
@@ -403,17 +507,35 @@ pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {

 pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
+    let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
+        let authority = keypair_of(&matches, STAKE_AUTHORITY_ARG.name)
+            .ok_or_else(|| CliError::BadParameter("Invalid keypair for stake-authority".into()))?;
+        Some(authority.into())
+    } else {
+        None
+    };
     let sign_only = matches.is_present("sign_only");
     let signers = pubkeys_sigs_of(&matches, "signer");
     let blockhash = value_of(matches, "blockhash");
     let require_keypair = signers.is_none();
+    let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
+    let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
+        let authority = keypair_of(&matches, NONCE_AUTHORITY_ARG.name)
+            .ok_or_else(|| CliError::BadParameter("Invalid keypair for nonce-authority".into()))?;
+        Some(authority.into())
+    } else {
+        None
+    };

     Ok(CliCommandInfo {
         command: CliCommand::DeactivateStake {
             stake_account_pubkey,
+            stake_authority,
             sign_only,
             signers,
             blockhash,
+            nonce_account,
+            nonce_authority,
         },
         require_keypair,
     })
@@ -423,13 +545,22 @@ pub fn parse_stake_withdraw_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
     let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
     let lamports = required_lamports_from(matches, "amount", "unit")?;
+    let withdraw_authority = if matches.is_present(WITHDRAW_AUTHORITY_ARG.name) {
+        let authority = keypair_of(&matches, WITHDRAW_AUTHORITY_ARG.name).ok_or_else(|| {
+            CliError::BadParameter("Invalid keypair for withdraw-authority".into())
+        })?;
+        Some(authority.into())
+    } else {
+        None
+    };

     Ok(CliCommandInfo {
-        command: CliCommand::WithdrawStake(
+        command: CliCommand::WithdrawStake {
             stake_account_pubkey,
             destination_account_pubkey,
             lamports,
-        ),
+            withdraw_authority,
+        },
         require_keypair: true,
     })
 }
@@ -469,12 +600,16 @@ pub fn process_create_stake_account(
         (&stake_account_pubkey, "stake_account_pubkey".to_string()),
     )?;

-    if rpc_client.get_account(&stake_account_pubkey).is_ok() {
-        return Err(CliError::BadParameter(format!(
-            "Unable to create stake account. Stake account already exists: {}",
-            stake_account_pubkey
-        ))
-        .into());
+    if let Ok(stake_account) = rpc_client.get_account(&stake_account_pubkey) {
+        let err_msg = if stake_account.owner == solana_stake_program::id() {
+            format!("Stake account {} already exists", stake_account_pubkey)
+        } else {
+            format!(
+                "Account {} already exists and is not a stake account",
+                stake_account_pubkey
+            )
+        };
+        return Err(CliError::BadParameter(err_msg).into());
     }

     let minimum_balance =
@@ -492,7 +627,6 @@ pub fn process_create_stake_account(
         staker: staker.unwrap_or(config.keypair.pubkey()),
         withdrawer: withdrawer.unwrap_or(config.keypair.pubkey()),
     };
-    println!("{:?}", authorized);

     let ixs = stake_instruction::create_account(
         &config.keypair.pubkey(),
@@ -525,23 +659,25 @@ pub fn process_stake_authorize(
     stake_account_pubkey: &Pubkey,
     authorized_pubkey: &Pubkey,
     stake_authorize: StakeAuthorize,
+    authority: Option<&Keypair>,
 ) -> ProcessResult {
     check_unique_pubkeys(
         (stake_account_pubkey, "stake_account_pubkey".to_string()),
         (authorized_pubkey, "new_authorized_pubkey".to_string()),
     )?;
+    let authority = authority.unwrap_or(&config.keypair);
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
     let ixs = vec![stake_instruction::authorize(
-        stake_account_pubkey, // stake account to update
-        &config.keypair.pubkey(), // currently authorized
-        authorized_pubkey, // new stake signer
-        stake_authorize, // stake or withdraw
+        stake_account_pubkey,    // stake account to update
+        &authority.pubkey(),     // currently authorized
+        authorized_pubkey,       // new stake signer
+        stake_authorize,         // stake or withdraw
     )];

     let mut tx = Transaction::new_signed_with_payer(
         ixs,
         Some(&config.keypair.pubkey()),
-        &[&config.keypair],
+        &[&config.keypair, authority],
         recent_blockhash,
     );
     check_account_for_fee(
@@ -558,28 +694,49 @@ pub fn process_deactivate_stake_account(
     rpc_client: &RpcClient,
     config: &CliConfig,
     stake_account_pubkey: &Pubkey,
+    stake_authority: Option<&Keypair>,
     sign_only: bool,
     signers: &Option<Vec<(Pubkey, Signature)>>,
     blockhash: Option<Hash>,
+    nonce_account: Option<Pubkey>,
+    nonce_authority: Option<&Keypair>,
 ) -> ProcessResult {
     let (recent_blockhash, fee_calculator) =
         get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
+    let stake_authority = stake_authority.unwrap_or(&config.keypair);
     let ixs = vec![stake_instruction::deactivate_stake(
         stake_account_pubkey,
-        &config.keypair.pubkey(),
+        &stake_authority.pubkey(),
     )];
-    let mut tx = Transaction::new_signed_with_payer(
-        ixs,
-        Some(&config.keypair.pubkey()),
-        &[&config.keypair],
-        recent_blockhash,
-    );
+    let mut tx = if let Some(nonce_account) = &nonce_account {
+        let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
+        Transaction::new_signed_with_nonce(
+            ixs,
+            Some(&config.keypair.pubkey()),
+            &[&config.keypair, nonce_authority, stake_authority],
+            nonce_account,
+            &nonce_authority.pubkey(),
+            recent_blockhash,
+        )
+    } else {
+        Transaction::new_signed_with_payer(
+            ixs,
+            Some(&config.keypair.pubkey()),
+            &[&config.keypair, stake_authority],
+            recent_blockhash,
+        )
+    };
     if let Some(signers) = signers {
         replace_signatures(&mut tx, &signers)?;
     }
     if sign_only {
         return_signers(&tx)
     } else {
+        if let Some(nonce_account) = &nonce_account {
+            let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
+            let nonce_account = rpc_client.get_account(nonce_account)?;
+            check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
+        }
         check_account_for_fee(
             rpc_client,
             &tx.message.account_keys[0],
@ -597,12 +754,14 @@ pub fn process_withdraw_stake(
|
||||
stake_account_pubkey: &Pubkey,
|
||||
destination_account_pubkey: &Pubkey,
|
||||
lamports: u64,
|
||||
withdraw_authority: Option<&Keypair>,
|
||||
) -> ProcessResult {
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let withdraw_authority = withdraw_authority.unwrap_or(&config.keypair);
|
||||
|
||||
let ixs = vec![stake_instruction::withdraw(
|
||||
stake_account_pubkey,
|
||||
&config.keypair.pubkey(),
|
||||
&withdraw_authority.pubkey(),
|
||||
destination_account_pubkey,
|
||||
lamports,
|
||||
)];
|
||||
@ -610,7 +769,7 @@ pub fn process_withdraw_stake(
|
||||
let mut tx = Transaction::new_signed_with_payer(
|
||||
ixs,
|
||||
Some(&config.keypair.pubkey()),
|
||||
&[&config.keypair],
|
||||
&[&config.keypair, withdraw_authority],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(
|
||||
@ -760,20 +919,25 @@ pub fn process_show_stake_history(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn process_delegate_stake(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
stake_account_pubkey: &Pubkey,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
stake_authority: Option<&Keypair>,
|
||||
force: bool,
|
||||
sign_only: bool,
|
||||
signers: &Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<&Keypair>,
|
||||
) -> ProcessResult {
|
||||
check_unique_pubkeys(
|
||||
(&config.keypair.pubkey(), "cli keypair".to_string()),
|
||||
(stake_account_pubkey, "stake_account_pubkey".to_string()),
|
||||
)?;
|
||||
let stake_authority = stake_authority.unwrap_or(&config.keypair);
|
||||
|
||||
// Sanity check the vote account to ensure it is attached to a validator that has recently
|
||||
// voted at the tip of the ledger
|
||||
@ -820,22 +984,38 @@ pub fn process_delegate_stake(
|
||||
|
||||
let ixs = vec![stake_instruction::delegate_stake(
|
||||
stake_account_pubkey,
|
||||
&config.keypair.pubkey(),
|
||||
&stake_authority.pubkey(),
|
||||
vote_account_pubkey,
|
||||
)];
|
||||
|
||||
let mut tx = Transaction::new_signed_with_payer(
|
||||
ixs,
|
||||
Some(&config.keypair.pubkey()),
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
let mut tx = if let Some(nonce_account) = &nonce_account {
|
||||
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
|
||||
Transaction::new_signed_with_nonce(
|
||||
ixs,
|
||||
Some(&config.keypair.pubkey()),
|
||||
&[&config.keypair, nonce_authority, stake_authority],
|
||||
nonce_account,
|
||||
&nonce_authority.pubkey(),
|
||||
recent_blockhash,
|
||||
)
|
||||
} else {
|
||||
Transaction::new_signed_with_payer(
|
||||
ixs,
|
||||
Some(&config.keypair.pubkey()),
|
||||
&[&config.keypair, stake_authority],
|
||||
recent_blockhash,
|
||||
)
|
||||
};
|
||||
if let Some(signers) = signers {
|
||||
replace_signatures(&mut tx, &signers)?;
|
||||
}
|
||||
if sign_only {
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
if let Some(nonce_account) = &nonce_account {
|
||||
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
|
||||
let nonce_account = rpc_client.get_account(nonce_account)?;
|
||||
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
|
||||
}
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&tx.message.account_keys[0],
|
||||
@ -851,7 +1031,7 @@ pub fn process_delegate_stake(
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::cli::{app, parse_command};
|
||||
use solana_sdk::signature::write_keypair;
|
||||
use solana_sdk::signature::{read_keypair_file, write_keypair};
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
fn make_tmp_file() -> (String, NamedTempFile) {
|
||||
@ -859,6 +1039,61 @@ mod tests {
|
||||
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
|
||||
}
|
||||
|
||||
fn parse_authorize_tests(
|
||||
test_commands: &App,
|
||||
stake_account_pubkey: Pubkey,
|
||||
authority_keypair_file: &str,
|
||||
stake_authorize: StakeAuthorize,
|
||||
) {
|
||||
let stake_account_string = stake_account_pubkey.to_string();
|
||||
|
||||
let (subcommand, authority_flag) = match stake_authorize {
|
||||
StakeAuthorize::Staker => ("stake-authorize-staker", "--stake-authority"),
|
||||
StakeAuthorize::Withdrawer => ("stake-authorize-withdrawer", "--withdraw-authority"),
|
||||
};
|
||||
|
||||
// Test Staker Subcommand
|
||||
let test_authorize = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
&subcommand,
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: stake_account_pubkey,
|
||||
stake_authorize,
|
||||
authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
// Test Staker Subcommand w/ authority
|
||||
let test_authorize = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
&subcommand,
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
&authority_flag,
|
||||
&authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: stake_account_pubkey,
|
||||
stake_authorize,
|
||||
authority: Some(read_keypair_file(&authority_keypair_file).unwrap().into()),
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_command() {
|
||||
let test_commands = app("test", "desc", "version");
|
||||
@ -866,41 +1101,21 @@ mod tests {
|
||||
let stake_account_keypair = Keypair::new();
|
||||
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
let stake_account_pubkey = stake_account_keypair.pubkey();
|
||||
let stake_account_string = stake_account_pubkey.to_string();
|
||||
let (stake_authority_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let stake_authority_keypair = Keypair::new();
|
||||
write_keypair(&stake_authority_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let test_authorize_staker = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"stake-authorize-staker",
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_staker).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize(
|
||||
stake_account_pubkey,
|
||||
stake_account_pubkey,
|
||||
StakeAuthorize::Staker
|
||||
),
|
||||
require_keypair: true
|
||||
}
|
||||
parse_authorize_tests(
|
||||
&test_commands,
|
||||
stake_account_pubkey,
|
||||
&stake_authority_keypair_file,
|
||||
StakeAuthorize::Staker,
|
||||
);
|
||||
let test_authorize_withdrawer = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"stake-authorize-withdrawer",
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_withdrawer).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize(
|
||||
stake_account_pubkey,
|
||||
stake_account_pubkey,
|
||||
StakeAuthorize::Withdrawer
|
||||
),
|
||||
require_keypair: true
|
||||
}
|
||||
parse_authorize_tests(
|
||||
&test_commands,
|
||||
stake_account_pubkey,
|
||||
&stake_authority_keypair_file,
|
||||
StakeAuthorize::Withdrawer,
|
||||
);
|
||||
|
||||
// Test CreateStakeAccount SubCommand
|
||||
@ -913,9 +1128,9 @@ mod tests {
|
||||
"create-stake-account",
|
||||
&keypair_file,
|
||||
"50",
|
||||
"--authorized-staker",
|
||||
"--stake-authority",
|
||||
&authorized_string,
|
||||
"--authorized-withdrawer",
|
||||
"--withdraw-authority",
|
||||
&authorized_string,
|
||||
"--custodian",
|
||||
&custodian_string,
|
||||
@ -984,15 +1199,52 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test DelegateStake Subcommand w/ authority
|
||||
let vote_account_pubkey = Pubkey::new_rand();
|
||||
let vote_account_string = vote_account_pubkey.to_string();
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
"--stake-authority",
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: Some(
|
||||
read_keypair_file(&stake_authority_keypair_file)
|
||||
.unwrap()
|
||||
.into()
|
||||
),
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test DelegateStake Subcommand w/ force
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
@ -1006,10 +1258,13 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1032,10 +1287,13 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: Some(blockhash)
|
||||
blockhash: Some(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1054,10 +1312,13 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: false,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1081,10 +1342,13 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
@ -1110,10 +1374,13 @@ mod tests {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
stake_authority: None,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
@ -1132,7 +1399,41 @@ mod tests {
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake(stake_account_pubkey, stake_account_pubkey, 42),
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42,
|
||||
withdraw_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawStake Subcommand w/ authority
|
||||
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-stake",
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
"42",
|
||||
"lamports",
|
||||
"--withdraw-authority",
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42,
|
||||
withdraw_authority: Some(
|
||||
read_keypair_file(&stake_authority_keypair_file)
|
||||
.unwrap()
|
||||
.into()
|
||||
),
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
@ -1148,9 +1449,40 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test DeactivateStake Subcommand w/ authority
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_account_string,
|
||||
"--stake-authority",
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: Some(
|
||||
read_keypair_file(&stake_authority_keypair_file)
|
||||
.unwrap()
|
||||
.into()
|
||||
),
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1171,9 +1503,12 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: Some(blockhash)
|
||||
blockhash: Some(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1190,9 +1525,12 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1214,9 +1552,12 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
@ -1240,9 +1581,12 @@ mod tests {
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
blockhash: None
|
||||
blockhash: None,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
|
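The pattern above repeats across authorize, deactivate, withdraw, and delegate: each handler gains an optional authority keypair that defaults to the CLI identity and is appended to the signer list. A minimal sketch of that signing flow, assuming the 0.22-era solana-sdk and solana-stake-program APIs shown in the diff (the function and its parameter names are illustrative, not part of the change):

use solana_sdk::{
    hash::Hash,
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil},
    transaction::Transaction,
};
use solana_stake_program::{stake_instruction, stake_state::StakeAuthorize};

fn authorize_new_staker(
    payer: &Keypair,
    authority: &Keypair, // current stake authority; may differ from the payer
    stake_account: &Pubkey,
    new_staker: &Pubkey,
    recent_blockhash: Hash,
) -> Transaction {
    let ixs = vec![stake_instruction::authorize(
        stake_account,       // stake account to update
        &authority.pubkey(), // currently authorized
        new_staker,          // new stake signer
        StakeAuthorize::Staker,
    )];
    // Both the fee payer and the current authority must sign.
    Transaction::new_signed_with_payer(
        ixs,
        Some(&payer.pubkey()),
        &[payer, authority],
        recent_blockhash,
    )
}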
@@ -163,6 +163,19 @@ pub fn process_create_storage_account(
             "storage_account_pubkey".to_string(),
         ),
     )?;
+
+    if let Ok(storage_account) = rpc_client.get_account(&storage_account_pubkey) {
+        let err_msg = if storage_account.owner == solana_storage_program::id() {
+            format!("Storage account {} already exists", storage_account_pubkey)
+        } else {
+            format!(
+                "Account {} already exists and is not a storage account",
+                storage_account_pubkey
+            )
+        };
+        return Err(CliError::BadParameter(err_msg).into());
+    }
+
     use solana_storage_program::storage_contract::STORAGE_ACCOUNT_SPACE;
     let required_balance = rpc_client
         .get_minimum_balance_for_rent_exemption(STORAGE_ACCOUNT_SPACE as usize)?
@@ -312,10 +312,12 @@ pub fn process_set_validator_info(
         "Publishing info for Validator {:?}",
         config.keypair.pubkey()
     );
+    let lamports = rpc_client
+        .get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
     let mut instructions = config_instruction::create_account::<ValidatorInfo>(
         &config.keypair.pubkey(),
         &info_keypair.pubkey(),
-        1,
+        lamports,
         keys.clone(),
     );
     instructions.extend_from_slice(&[config_instruction::store(
@@ -9,10 +9,9 @@ use crate::{
 use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
 use solana_clap_utils::{input_parsers::*, input_validators::*};
 use solana_client::rpc_client::RpcClient;
-use solana_sdk::signature::Keypair;
 use solana_sdk::{
-    account::Account, pubkey::Pubkey, signature::KeypairUtil, system_instruction::SystemError,
-    transaction::Transaction,
+    account::Account, pubkey::Pubkey, signature::Keypair, signature::KeypairUtil,
+    system_instruction::SystemError, transaction::Transaction,
 };
 use solana_vote_program::{
     vote_instruction::{self, VoteError},
@@ -51,7 +50,8 @@ impl VoteSubCommands for App<'_, '_> {
                 .long("commission")
                 .value_name("NUM")
                 .takes_value(true)
-                .help("The commission taken on reward redemption (0-100), default: 0"),
+                .default_value("100")
+                .help("The commission taken on reward redemption (0-100)"),
         )
         .arg(
             Arg::with_name("authorized_voter")
@@ -195,7 +195,7 @@ impl VoteSubCommands for App<'_, '_> {
 pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
     let vote_account = keypair_of(matches, "vote_account").unwrap();
     let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
-    let commission = value_of(&matches, "commission").unwrap_or(0);
+    let commission = value_t_or_exit!(matches, "commission", u8);
     let authorized_voter = pubkey_of(matches, "authorized_voter");
     let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");

@@ -294,6 +294,18 @@ pub fn process_create_vote_account(
         (&vote_account_pubkey, "vote_account_pubkey".to_string()),
     )?;

+    if let Ok(vote_account) = rpc_client.get_account(&vote_account_pubkey) {
+        let err_msg = if vote_account.owner == solana_vote_program::id() {
+            format!("Vote account {} already exists", vote_account_pubkey)
+        } else {
+            format!(
+                "Account {} already exists and is not a vote account",
+                vote_account_pubkey
+            )
+        };
+        return Err(CliError::BadParameter(err_msg).into());
+    }
+
     let required_balance = rpc_client
         .get_minimum_balance_for_rent_exemption(VoteState::size_of())?
         .max(1);
@@ -609,7 +621,7 @@ mod tests {
                     node_pubkey,
                     authorized_voter: None,
                     authorized_withdrawer: None,
-                    commission: 0,
+                    commission: 100,
                 },
                 require_keypair: true
             }
@@ -637,7 +649,7 @@ mod tests {
                     node_pubkey,
                     authorized_voter: Some(authed),
                     authorized_withdrawer: None,
-                    commission: 0
+                    commission: 100
                 },
                 require_keypair: true
             }
@@ -663,7 +675,7 @@ mod tests {
                     node_pubkey,
                     authorized_voter: None,
                     authorized_withdrawer: Some(authed),
-                    commission: 0
+                    commission: 100
                 },
                 require_keypair: true
             }
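The commission change replaces a silent unwrap_or(0) with a clap default, so the flag's documented behavior and the parsed value can no longer drift apart. A sketch of the resulting parsing behavior under that assumption (the demo App is illustrative, not the full vote subcommand):

use clap::{value_t_or_exit, App, Arg};

fn commission_demo() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("commission")
                .long("commission")
                .value_name("NUM")
                .takes_value(true)
                .default_value("100")
                .help("The commission taken on reward redemption (0-100)"),
        )
        .get_matches_from(vec!["demo"]);
    // With no --commission flag, default_value guarantees a value is present,
    // so value_t_or_exit! yields 100 instead of the old unwrap_or(0).
    let commission = value_t_or_exit!(matches, "commission", u8);
    assert_eq!(commission, 100);
}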
@@ -1,4 +1,6 @@
-use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
+use solana_cli::cli::{
+    process_command, request_and_confirm_airdrop, CliCommand, CliConfig, KeypairEq,
+};
 use solana_client::rpc_client::RpcClient;
 use solana_faucet::faucet::run_local_faucet;
 use solana_sdk::{
@@ -59,7 +61,7 @@ fn test_nonce() {
         &mut config_payer,
         &mut config_nonce,
         &keypair_file,
-        &keypair_file,
+        None,
     );

     server.close().unwrap();
@@ -95,20 +97,24 @@ fn test_nonce_with_authority() {
         &mut config_payer,
         &mut config_nonce,
         &nonce_keypair_file,
-        &authority_keypair_file,
+        Some(&authority_keypair_file),
     );

     server.close().unwrap();
     remove_dir_all(ledger_path).unwrap();
 }

+fn read_keypair_from_option(keypair_file: &Option<&str>) -> Option<KeypairEq> {
+    keypair_file.map(|akf| read_keypair_file(&akf).unwrap().into())
+}
+
 fn full_battery_tests(
     rpc_client: &RpcClient,
     faucet_addr: &std::net::SocketAddr,
     config_payer: &mut CliConfig,
     config_nonce: &mut CliConfig,
     nonce_keypair_file: &str,
-    authority_keypair_file: &str,
+    authority_keypair_file: Option<&str>,
 ) {
     request_and_confirm_airdrop(
         &rpc_client,
@@ -122,7 +128,8 @@ fn full_battery_tests(
     // Create nonce account
     config_payer.command = CliCommand::CreateNonceAccount {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
-        nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().pubkey(),
+        nonce_authority: read_keypair_from_option(&authority_keypair_file)
+            .map(|na: KeypairEq| na.pubkey()),
         lamports: 1000,
     };
     process_command(&config_payer).unwrap();
@@ -144,7 +151,7 @@ fn full_battery_tests(
     // New nonce
     config_payer.command = CliCommand::NewNonce {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+        nonce_authority: read_keypair_from_option(&authority_keypair_file),
     };
     process_command(&config_payer).unwrap();

@@ -159,7 +166,7 @@ fn full_battery_tests(
     let payee_pubkey = Pubkey::new_rand();
     config_payer.command = CliCommand::WithdrawFromNonceAccount {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+        nonce_authority: read_keypair_from_option(&authority_keypair_file),
         destination_account_pubkey: payee_pubkey,
         lamports: 100,
     };
@@ -181,7 +188,7 @@ fn full_battery_tests(
     write_keypair(&new_authority, tmp_file.as_file_mut()).unwrap();
     config_payer.command = CliCommand::AuthorizeNonceAccount {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+        nonce_authority: read_keypair_from_option(&authority_keypair_file),
         new_authority: read_keypair_file(&new_authority_keypair_file)
             .unwrap()
             .pubkey(),
@@ -191,25 +198,29 @@ fn full_battery_tests(
     // Old authority fails now
     config_payer.command = CliCommand::NewNonce {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&authority_keypair_file).unwrap().into(),
+        nonce_authority: read_keypair_from_option(&authority_keypair_file),
     };
     process_command(&config_payer).unwrap_err();

     // New authority can advance nonce
     config_payer.command = CliCommand::NewNonce {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&new_authority_keypair_file)
-            .unwrap()
-            .into(),
+        nonce_authority: Some(
+            read_keypair_file(&new_authority_keypair_file)
+                .unwrap()
+                .into(),
+        ),
     };
     process_command(&config_payer).unwrap();

     // New authority can withdraw from nonce account
     config_payer.command = CliCommand::WithdrawFromNonceAccount {
         nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().pubkey(),
-        nonce_authority: read_keypair_file(&new_authority_keypair_file)
-            .unwrap()
-            .into(),
+        nonce_authority: Some(
+            read_keypair_file(&new_authority_keypair_file)
+                .unwrap()
+                .into(),
+        ),
         destination_account_pubkey: payee_pubkey,
         lamports: 100,
     };
cli/tests/pay.rs (148 changed lines)
@@ -1,9 +1,17 @@
 use chrono::prelude::*;
 use serde_json::Value;
-use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
+use solana_cli::cli::{
+    process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand,
+};
 use solana_client::rpc_client::RpcClient;
 use solana_faucet::faucet::run_local_faucet;
-use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::KeypairUtil, signature::Signature};
+use solana_sdk::{
+    account_utils::State,
+    hash::Hash,
+    nonce_state::NonceState,
+    pubkey::Pubkey,
+    signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
+};
 use std::fs::remove_dir_all;
 use std::str::FromStr;
 use std::sync::mpsc::channel;
@@ -12,6 +20,12 @@ use std::sync::mpsc::channel;
 use solana_core::validator::new_validator_for_tests;
 use std::thread::sleep;
 use std::time::Duration;
+use tempfile::NamedTempFile;
+
+fn make_tmp_file() -> (String, NamedTempFile) {
+    let tmp_file = NamedTempFile::new().unwrap();
+    (String::from(tmp_file.path().to_str().unwrap()), tmp_file)
+}

 fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
     (0..5).for_each(|tries| {
@@ -69,17 +83,13 @@ fn test_cli_timestamp_tx() {
     // Make transaction (from config_payer to bob_pubkey) requiring timestamp from config_witness
     let date_string = "\"2018-09-19T17:30:59Z\"";
     let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
-    config_payer.command = CliCommand::Pay {
+    config_payer.command = CliCommand::Pay(PayCommand {
         lamports: 10,
         to: bob_pubkey,
         timestamp: Some(dt),
         timestamp_pubkey: Some(config_witness.keypair.pubkey()),
-        witnesses: None,
-        cancelable: false,
-        sign_only: false,
-        signers: None,
-        blockhash: None,
-    };
+        ..PayCommand::default()
+    });
     let sig_response = process_command(&config_payer);

     let object: Value = serde_json::from_str(&sig_response.unwrap()).unwrap();
@@ -144,17 +154,12 @@ fn test_cli_witness_tx() {
         .unwrap();

     // Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness
-    config_payer.command = CliCommand::Pay {
+    config_payer.command = CliCommand::Pay(PayCommand {
         lamports: 10,
         to: bob_pubkey,
-        timestamp: None,
-        timestamp_pubkey: None,
         witnesses: Some(vec![config_witness.keypair.pubkey()]),
-        cancelable: false,
-        sign_only: false,
-        signers: None,
-        blockhash: None,
-    };
+        ..PayCommand::default()
+    });
     let sig_response = process_command(&config_payer);

     let object: Value = serde_json::from_str(&sig_response.unwrap()).unwrap();
@@ -212,17 +217,13 @@ fn test_cli_cancel_tx() {
         .unwrap();

     // Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness
-    config_payer.command = CliCommand::Pay {
+    config_payer.command = CliCommand::Pay(PayCommand {
         lamports: 10,
         to: bob_pubkey,
-        timestamp: None,
-        timestamp_pubkey: None,
         witnesses: Some(vec![config_witness.keypair.pubkey()]),
         cancelable: true,
-        sign_only: false,
-        signers: None,
-        blockhash: None,
-    };
+        ..PayCommand::default()
+    });
     let sig_response = process_command(&config_payer).unwrap();

     let object: Value = serde_json::from_str(&sig_response).unwrap();
@@ -288,17 +289,12 @@ fn test_offline_pay_tx() {
     check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
     check_balance(50, &rpc_client, &config_online.keypair.pubkey());

-    config_offline.command = CliCommand::Pay {
+    config_offline.command = CliCommand::Pay(PayCommand {
         lamports: 10,
         to: bob_pubkey,
-        timestamp: None,
-        timestamp_pubkey: None,
-        witnesses: None,
-        cancelable: false,
         sign_only: true,
-        signers: None,
-        blockhash: None,
-    };
+        ..PayCommand::default()
+    });
     let sig_response = process_command(&config_offline).unwrap();

     check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
@@ -318,17 +314,13 @@ fn test_offline_pay_tx() {
         })
         .collect();

-    config_online.command = CliCommand::Pay {
+    config_online.command = CliCommand::Pay(PayCommand {
         lamports: 10,
         to: bob_pubkey,
-        timestamp: None,
-        timestamp_pubkey: None,
-        witnesses: None,
-        cancelable: false,
-        sign_only: false,
         signers: Some(signers),
         blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
-    };
+        ..PayCommand::default()
+    });
     process_command(&config_online).unwrap();

     check_balance(40, &rpc_client, &config_offline.keypair.pubkey());
@@ -338,3 +330,81 @@ fn test_offline_pay_tx() {
     server.close().unwrap();
     remove_dir_all(ledger_path).unwrap();
 }
+
+#[test]
+fn test_nonced_pay_tx() {
+    solana_logger::setup();
+
+    let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
+    let (sender, receiver) = channel();
+    run_local_faucet(alice, sender, None);
+    let faucet_addr = receiver.recv().unwrap();
+
+    let rpc_client = RpcClient::new_socket(leader_data.rpc);
+
+    let mut config = CliConfig::default();
+    config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
+
+    let minimum_nonce_balance = rpc_client
+        .get_minimum_balance_for_rent_exemption(NonceState::size())
+        .unwrap();
+
+    request_and_confirm_airdrop(
+        &rpc_client,
+        &faucet_addr,
+        &config.keypair.pubkey(),
+        50 + minimum_nonce_balance,
+    )
+    .unwrap();
+    check_balance(
+        50 + minimum_nonce_balance,
+        &rpc_client,
+        &config.keypair.pubkey(),
+    );
+
+    // Create nonce account
+    let nonce_account = Keypair::new();
+    let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::CreateNonceAccount {
+        nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
+        nonce_authority: Some(config.keypair.pubkey()),
+        lamports: minimum_nonce_balance,
+    };
+    process_command(&config).unwrap();
+
+    check_balance(50, &rpc_client, &config.keypair.pubkey());
+    check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
+
+    // Fetch nonce hash
+    let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
+    let nonce_state: NonceState = account.state().unwrap();
+    let nonce_hash = match nonce_state {
+        NonceState::Initialized(_meta, hash) => hash,
+        _ => panic!("Nonce is not initialized"),
+    };
+
+    let bob_pubkey = Pubkey::new_rand();
+    config.command = CliCommand::Pay(PayCommand {
+        lamports: 10,
+        to: bob_pubkey,
+        blockhash: Some(nonce_hash),
+        nonce_account: Some(nonce_account.pubkey()),
+        ..PayCommand::default()
+    });
+    process_command(&config).expect("failed to process pay command");
+
+    check_balance(40, &rpc_client, &config.keypair.pubkey());
+    check_balance(10, &rpc_client, &bob_pubkey);
+
+    // Verify that nonce has been used
+    let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
+    let nonce_state: NonceState = account.state().unwrap();
+    match nonce_state {
+        NonceState::Initialized(_meta, hash) => assert_ne!(hash, nonce_hash),
+        _ => assert!(false, "Nonce is not initialized"),
+    }
+
+    server.close().unwrap();
+    remove_dir_all(ledger_path).unwrap();
+}
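The tests above lean on Rust's struct-update syntax: PayCommand derives Default, so each test spells out only the fields it exercises. PayCommand's full field list is not shown in this diff; the stand-in below uses only fields visible in the tests:

// Illustrative miniature of the PayCommand pattern (hypothetical type).
#[derive(Default)]
struct PayCommandDemo {
    lamports: u64,
    sign_only: bool,
    cancelable: bool,
}

fn build_demo() -> PayCommandDemo {
    PayCommandDemo {
        lamports: 10,
        // Remaining fields come from Default, mirroring `..PayCommand::default()`.
        ..PayCommandDemo::default()
    }
}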
@@ -3,11 +3,13 @@ use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand,
 use solana_client::rpc_client::RpcClient;
 use solana_faucet::faucet::run_local_faucet;
 use solana_sdk::{
+    account_utils::State,
     hash::Hash,
+    nonce_state::NonceState,
     pubkey::Pubkey,
-    signature::{read_keypair_file, write_keypair, KeypairUtil, Signature},
+    signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
 };
-use solana_stake_program::stake_state::Lockup;
+use solana_stake_program::stake_state::{Lockup, StakeAuthorize, StakeState};
 use std::fs::remove_dir_all;
 use std::str::FromStr;
 use std::sync::mpsc::channel;
@@ -97,19 +99,25 @@ fn test_stake_delegation_and_deactivation() {
     config_validator.command = CliCommand::DelegateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
         vote_account_pubkey: config_vote.keypair.pubkey(),
+        stake_authority: None,
         force: true,
         sign_only: false,
         signers: None,
         blockhash: None,
+        nonce_account: None,
+        nonce_authority: None,
     };
     process_command(&config_validator).unwrap();

     // Deactivate stake
     config_validator.command = CliCommand::DeactivateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
+        stake_authority: None,
         sign_only: false,
         signers: None,
         blockhash: None,
+        nonce_account: None,
+        nonce_authority: None,
     };
     process_command(&config_validator).unwrap();

@@ -181,10 +189,13 @@ fn test_stake_delegation_and_deactivation_offline() {
     config_validator.command = CliCommand::DelegateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
         vote_account_pubkey: config_vote.keypair.pubkey(),
+        stake_authority: None,
         force: true,
         sign_only: true,
         signers: None,
         blockhash: None,
+        nonce_account: None,
+        nonce_authority: None,
     };
     let sig_response = process_command(&config_validator).unwrap();
     let object: Value = serde_json::from_str(&sig_response).unwrap();
@@ -204,19 +215,25 @@ fn test_stake_delegation_and_deactivation_offline() {
     config_payer.command = CliCommand::DelegateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
         vote_account_pubkey: config_vote.keypair.pubkey(),
+        stake_authority: None,
         force: true,
         sign_only: false,
         signers: Some(signers),
         blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
+        nonce_account: None,
+        nonce_authority: None,
     };
     process_command(&config_payer).unwrap();

     // Deactivate stake offline
     config_validator.command = CliCommand::DeactivateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
+        stake_authority: None,
         sign_only: true,
         signers: None,
         blockhash: None,
+        nonce_account: None,
+        nonce_authority: None,
     };
     let sig_response = process_command(&config_validator).unwrap();
     let object: Value = serde_json::from_str(&sig_response).unwrap();
@@ -235,12 +252,195 @@ fn test_stake_delegation_and_deactivation_offline() {
     // Deactivate stake online
     config_payer.command = CliCommand::DeactivateStake {
         stake_account_pubkey: config_stake.keypair.pubkey(),
+        stake_authority: None,
         sign_only: false,
         signers: Some(signers),
         blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
+        nonce_account: None,
+        nonce_authority: None,
     };
     process_command(&config_payer).unwrap();

     server.close().unwrap();
     remove_dir_all(ledger_path).unwrap();
 }
+
+#[test]
+fn test_nonced_stake_delegation_and_deactivation() {
+    solana_logger::setup();
+
+    let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
+    let (sender, receiver) = channel();
+    run_local_faucet(alice, sender, None);
+    let faucet_addr = receiver.recv().unwrap();
+
+    let rpc_client = RpcClient::new_socket(leader_data.rpc);
+
+    let mut config = CliConfig::default();
+    config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
+
+    let minimum_nonce_balance = rpc_client
+        .get_minimum_balance_for_rent_exemption(NonceState::size())
+        .unwrap();
+
+    request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
+        .unwrap();
+
+    // Create vote account
+    let vote_keypair = Keypair::new();
+    let (vote_keypair_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::CreateVoteAccount {
+        vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
+        node_pubkey: config.keypair.pubkey(),
+        authorized_voter: None,
+        authorized_withdrawer: None,
+        commission: 0,
+    };
+    process_command(&config).unwrap();
+
+    // Create stake account
+    let stake_keypair = Keypair::new();
+    let (stake_keypair_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::CreateStakeAccount {
+        stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
+        staker: None,
+        withdrawer: None,
+        lockup: Lockup::default(),
+        lamports: 50_000,
+    };
+    process_command(&config).unwrap();
+
+    // Create nonce account
+    let nonce_account = Keypair::new();
+    let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::CreateNonceAccount {
+        nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
+        nonce_authority: Some(config.keypair.pubkey()),
+        lamports: minimum_nonce_balance,
+    };
+    process_command(&config).unwrap();
+
+    // Fetch nonce hash
+    let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
+    let nonce_state: NonceState = account.state().unwrap();
+    let nonce_hash = match nonce_state {
+        NonceState::Initialized(_meta, hash) => hash,
+        _ => panic!("Nonce is not initialized"),
+    };
+
+    // Delegate stake
+    config.command = CliCommand::DelegateStake {
+        stake_account_pubkey: stake_keypair.pubkey(),
+        vote_account_pubkey: vote_keypair.pubkey(),
+        stake_authority: None,
+        force: true,
+        sign_only: false,
+        signers: None,
+        blockhash: Some(nonce_hash),
+        nonce_account: Some(nonce_account.pubkey()),
+        nonce_authority: None,
+    };
+    process_command(&config).unwrap();
+
+    // Fetch nonce hash
+    let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
+    let nonce_state: NonceState = account.state().unwrap();
+    let nonce_hash = match nonce_state {
+        NonceState::Initialized(_meta, hash) => hash,
+        _ => panic!("Nonce is not initialized"),
+    };
+
+    // Deactivate stake
+    let config_keypair = Keypair::from_bytes(&config.keypair.to_bytes()).unwrap();
+    config.command = CliCommand::DeactivateStake {
+        stake_account_pubkey: stake_keypair.pubkey(),
+        stake_authority: None,
+        sign_only: false,
+        signers: None,
+        blockhash: Some(nonce_hash),
+        nonce_account: Some(nonce_account.pubkey()),
+        nonce_authority: Some(config_keypair.into()),
+    };
+    process_command(&config).unwrap();
+
+    server.close().unwrap();
+    remove_dir_all(ledger_path).unwrap();
+}
+
+#[test]
+fn test_stake_authorize() {
+    solana_logger::setup();
+
+    let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
+    let (sender, receiver) = channel();
+    run_local_faucet(alice, sender, None);
+    let faucet_addr = receiver.recv().unwrap();
+
+    let rpc_client = RpcClient::new_socket(leader_data.rpc);
+
+    let mut config = CliConfig::default();
+    config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
+
+    request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
+        .unwrap();
+
+    // Create stake account, identity is authority
+    let stake_keypair = Keypair::new();
+    let stake_account_pubkey = stake_keypair.pubkey();
+    let (stake_keypair_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::CreateStakeAccount {
+        stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
+        staker: None,
+        withdrawer: None,
+        lockup: Lockup::default(),
+        lamports: 50_000,
+    };
+    process_command(&config).unwrap();
+
+    // Assign new online stake authority
+    let online_authority = Keypair::new();
+    let online_authority_pubkey = online_authority.pubkey();
+    let (online_authority_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::StakeAuthorize {
+        stake_account_pubkey,
+        new_authorized_pubkey: online_authority_pubkey,
+        stake_authorize: StakeAuthorize::Staker,
+        authority: None,
+    };
+    process_command(&config).unwrap();
+    let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
+    let stake_state: StakeState = stake_account.state().unwrap();
+    let current_authority = match stake_state {
+        StakeState::Initialized(meta) => meta.authorized.staker,
+        _ => panic!("Unexpected stake state!"),
+    };
+    assert_eq!(current_authority, online_authority_pubkey);
+
+    // Assign new offline stake authority
+    let offline_authority = Keypair::new();
+    let offline_authority_pubkey = offline_authority.pubkey();
+    let (_offline_authority_file, mut tmp_file) = make_tmp_file();
+    write_keypair(&offline_authority, tmp_file.as_file_mut()).unwrap();
+    config.command = CliCommand::StakeAuthorize {
+        stake_account_pubkey,
+        new_authorized_pubkey: offline_authority_pubkey,
+        stake_authorize: StakeAuthorize::Staker,
+        authority: Some(read_keypair_file(&online_authority_file).unwrap().into()),
+    };
+    process_command(&config).unwrap();
+    let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
+    let stake_state: StakeState = stake_account.state().unwrap();
+    let current_authority = match stake_state {
+        StakeState::Initialized(meta) => meta.authorized.staker,
+        _ => panic!("Unexpected stake state!"),
+    };
+    assert_eq!(current_authority, offline_authority_pubkey);
+
+    server.close().unwrap();
+    remove_dir_all(ledger_path).unwrap();
+}
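Both new tests follow the same durable-nonce recipe: read the hash stored in the nonce account and pass it where a recent blockhash would normally go. Condensed from the test bodies above, using the same 0.22-era NonceState and State APIs they import (the helper name is illustrative):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{account_utils::State, hash::Hash, nonce_state::NonceState, pubkey::Pubkey};

fn fetch_nonce_hash(rpc_client: &RpcClient, nonce_pubkey: &Pubkey) -> Hash {
    let account = rpc_client.get_account(nonce_pubkey).unwrap();
    let nonce_state: NonceState = account.state().unwrap();
    match nonce_state {
        // The stored hash stands in for a recent blockhash and stays valid
        // until the nonce is advanced.
        NonceState::Initialized(_meta, hash) => hash,
        _ => panic!("Nonce is not initialized"),
    }
}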
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.22.0"
+version = "0.22.3"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,11 +19,11 @@ reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tls"] }
 serde = "1.0.104"
 serde_derive = "1.0.103"
 serde_json = "1.0.44"
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }

 [dev-dependencies]
 assert_matches = "1.3.0"
 jsonrpc-core = "14.0.5"
 jsonrpc-http-server = "14.0.5"
-solana-logger = { path = "../logger", version = "0.22.0" }
+solana-logger = { path = "../logger", version = "0.22.3" }
@@ -8,4 +8,5 @@ pub mod perf_utils;
 pub mod rpc_client;
 pub mod rpc_client_request;
 pub mod rpc_request;
+pub mod rpc_response;
 pub mod thin_client;
@@ -1,7 +1,8 @@
-use crate::rpc_request::{Response, RpcResponseContext};
 use crate::{
-    client_error::ClientError, generic_rpc_client_request::GenericRpcClientRequest,
+    client_error::ClientError,
+    generic_rpc_client_request::GenericRpcClientRequest,
     rpc_request::RpcRequest,
+    rpc_response::{Response, RpcResponseContext},
 };
 use serde_json::{Number, Value};
 use solana_sdk::{
@@ -9,18 +10,28 @@ use solana_sdk::{
     instruction::InstructionError,
     transaction::{self, TransactionError},
 };
+use std::{collections::HashMap, sync::RwLock};

 pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
 pub const SIGNATURE: &str =
     "43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";

+pub type Mocks = HashMap<RpcRequest, Value>;
 pub struct MockRpcClientRequest {
+    mocks: RwLock<Mocks>,
     url: String,
 }

 impl MockRpcClientRequest {
     pub fn new(url: String) -> Self {
-        Self { url }
+        Self::new_with_mocks(url, Mocks::default())
     }
+
+    pub fn new_with_mocks(url: String, mocks: Mocks) -> Self {
+        Self {
+            url,
+            mocks: RwLock::new(mocks),
+        }
+    }
 }
@@ -31,6 +42,9 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
         params: serde_json::Value,
         _retries: usize,
     ) -> Result<serde_json::Value, ClientError> {
+        if let Some(value) = self.mocks.write().unwrap().remove(request) {
+            return Ok(value);
+        }
         if self.url == "fails" {
             return Ok(Value::Null);
         }
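A hypothetical test using the new mock plumbing; the mocked JSON value and the choice of GetBalance are illustrative, and the mock URL conventions ("fails" versus anything else) follow the code above:

use serde_json::json;
use solana_client::{
    mock_rpc_client_request::Mocks, rpc_client::RpcClient, rpc_request::RpcRequest,
};

fn mocked_client_example() {
    let mut mocks = Mocks::default();
    // Pre-load one canned response; the exact value shape is up to the test.
    mocks.insert(RpcRequest::GetBalance, json!(50));
    let rpc_client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
    // The first matching request consumes the entry (send() uses remove()),
    // so a test can override exactly one call and fall back to the defaults.
    drop(rpc_client);
}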
@@ -1,12 +1,13 @@
-use crate::rpc_request::{Response, RpcResponse};
 use crate::{
     client_error::ClientError,
     generic_rpc_client_request::GenericRpcClientRequest,
-    mock_rpc_client_request::MockRpcClientRequest,
+    mock_rpc_client_request::{MockRpcClientRequest, Mocks},
     rpc_client_request::RpcClientRequest,
-    rpc_request::{
-        RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule, RpcRequest,
-        RpcVersionInfo, RpcVoteAccountStatus,
+    rpc_request::RpcRequest,
+    rpc_response::{
+        Response, RpcAccount, RpcBlockhashFeeCalculator, RpcConfirmedBlock, RpcContactInfo,
+        RpcEpochInfo, RpcKeyedAccount, RpcLeaderSchedule, RpcResponse, RpcVersionInfo,
+        RpcVoteAccountStatus,
     },
 };
 use bincode::serialize;
@@ -48,6 +49,12 @@ impl RpcClient {
         }
     }

+    pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
+        Self {
+            client: Box::new(MockRpcClientRequest::new_with_mocks(url, mocks)),
+        }
+    }
+
     pub fn new_socket(addr: SocketAddr) -> Self {
         Self::new(get_rpc_request_str(addr, false))
     }
@@ -572,9 +579,16 @@ impl RpcClient {
                         format!("AccountNotFound: pubkey={}", pubkey),
                     ));
                 }
-                let result = serde_json::from_value::<Response<Option<Account>>>(result_json)?;
-                trace!("Response account {:?} {:?}", pubkey, result);
-                Ok(result)
+                let Response {
+                    context,
+                    value: rpc_account,
+                } = serde_json::from_value::<Response<Option<RpcAccount>>>(result_json)?;
+                trace!("Response account {:?} {:?}", pubkey, rpc_account);
+                let account = rpc_account.and_then(|rpc_account| rpc_account.decode().ok());
+                Ok(Response {
+                    context,
+                    value: account,
+                })
             })
             .map_err(|err| {
                 io::Error::new(
@@ -669,8 +683,8 @@ impl RpcClient {
                 )
             })?;

-        let accounts: Vec<(String, Account)> =
-            serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
+        let accounts: Vec<RpcKeyedAccount> =
+            serde_json::from_value::<Vec<RpcKeyedAccount>>(response).map_err(|err| {
                 io::Error::new(
                     io::ErrorKind::Other,
                     format!("GetProgramAccounts parse failure: {:?}", err),
@@ -678,14 +692,14 @@ impl RpcClient {
             })?;

         let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
-        for (string, account) in accounts.into_iter() {
-            let pubkey = string.parse().map_err(|err| {
+        for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
+            let pubkey = pubkey.parse().map_err(|err| {
                 io::Error::new(
                     io::ErrorKind::Other,
                     format!("GetProgramAccounts parse failure: {:?}", err),
                 )
             })?;
-            pubkey_accounts.push((pubkey, account));
+            pubkey_accounts.push((pubkey, account.decode().unwrap()));
         }
         Ok(pubkey_accounts)
     }
@@ -747,8 +761,12 @@ impl RpcClient {

         let Response {
             context,
-            value: (blockhash_str, fee_calculator),
-        } = serde_json::from_value::<Response<(String, FeeCalculator)>>(response).map_err(
+            value:
+                RpcBlockhashFeeCalculator {
+                    blockhash,
+                    fee_calculator,
+                },
+        } = serde_json::from_value::<Response<RpcBlockhashFeeCalculator>>(response).map_err(
             |err| {
                 io::Error::new(
                     io::ErrorKind::Other,
@@ -756,7 +774,7 @@ impl RpcClient {
                 )
             },
         )?;
-        let blockhash = blockhash_str.parse().map_err(|err| {
+        let blockhash = blockhash.parse().map_err(|err| {
             io::Error::new(
                 io::ErrorKind::Other,
                 format!("GetRecentBlockhash hash parse failure: {:?}", err),
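The recurring step in these hunks: accounts now cross the wire as the JSON-friendly RpcAccount (defined in the new rpc_response module) and are converted back into solana_sdk Accounts on the client. A small sketch of that conversion; decode() returning a Result is inferred from the .ok()/.unwrap() calls above:

use solana_client::rpc_response::RpcAccount;
use solana_sdk::account::Account;

fn decode_optional_account(rpc_account: Option<RpcAccount>) -> Option<Account> {
    // Mirrors the get_account path above: a missing or undecodable account
    // becomes None rather than an error.
    rpc_account.and_then(|acct| acct.decode().ok())
}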
@@ -1,119 +1,7 @@
-use jsonrpc_core::Result as JsonResult;
 use serde_json::{json, Value};
-use solana_sdk::{
-    clock::{Epoch, Slot},
-    hash::Hash,
-    transaction::{Result, Transaction},
-};
-use std::{collections::HashMap, error, fmt, io, net::SocketAddr};
+use std::{error, fmt};

-pub type RpcResponseIn<T> = JsonResult<Response<T>>;
-pub type RpcResponse<T> = io::Result<Response<T>>;
-
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct RpcResponseContext {
-    pub slot: u64,
-}
-
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct Response<T> {
-    pub context: RpcResponseContext,
-    pub value: T,
-}
-
-#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcConfirmedBlock {
-    pub previous_blockhash: Hash,
-    pub blockhash: Hash,
-    pub parent_slot: Slot,
-    pub transactions: Vec<(Transaction, Option<RpcTransactionStatus>)>,
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcTransactionStatus {
-    pub status: Result<()>,
-    pub fee: u64,
-    pub pre_balances: Vec<u64>,
-    pub post_balances: Vec<u64>,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-pub struct RpcContactInfo {
-    /// Pubkey of the node as a base-58 string
-    pub pubkey: String,
-    /// Gossip port
-    pub gossip: Option<SocketAddr>,
-    /// Tpu port
-    pub tpu: Option<SocketAddr>,
-    /// JSON RPC port
-    pub rpc: Option<SocketAddr>,
-}
-
-/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
-pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcEpochInfo {
-    /// The current epoch
-    pub epoch: Epoch,
-
-    /// The current slot, relative to the start of the current epoch
-    pub slot_index: u64,
-
-    /// The number of slots in this epoch
-    pub slots_in_epoch: u64,
-
-    /// The absolute current slot
-    pub absolute_slot: Slot,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "kebab-case")]
-pub struct RpcVersionInfo {
-    /// The current version of solana-core
-    pub solana_core: String,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcVoteAccountStatus {
-    pub current: Vec<RpcVoteAccountInfo>,
-    pub delinquent: Vec<RpcVoteAccountInfo>,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcVoteAccountInfo {
-    /// Vote account pubkey as base-58 encoded string
-    pub vote_pubkey: String,
-
-    /// The pubkey of the node that votes using this account
-    pub node_pubkey: String,
-
-    /// The current stake, in lamports, delegated to this vote account
-    pub activated_stake: u64,
-
-    /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
-    pub commission: u8,
-
-    /// Whether this account is staked for the current epoch
-    pub epoch_vote_account: bool,
-
-    /// History of how many credits earned by the end of each epoch
-    /// each tuple is (Epoch, credits, prev_credits)
-    pub epoch_credits: Vec<(Epoch, u64, u64)>,
-
-    /// Most recent slot voted on by this vote account (0 if no votes exist)
-    pub last_vote: u64,
-
-    /// Current root slot for this vote account (0 if not root slot exists)
-    pub root_slot: Slot,
-}
-
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq, Hash)]
 pub enum RpcRequest {
     ConfirmTransaction,
     DeregisterNode,
277
client/src/rpc_response.rs
Normal file
277
client/src/rpc_response.rs
Normal file
@ -0,0 +1,277 @@
use crate::rpc_request::RpcError;
use bincode::serialize;
use jsonrpc_core::Result as JsonResult;
use solana_sdk::{
    account::Account,
    clock::{Epoch, Slot},
    fee_calculator::FeeCalculator,
    message::MessageHeader,
    pubkey::Pubkey,
    transaction::{Result, Transaction},
};
use std::{collections::HashMap, io, net::SocketAddr, str::FromStr};

pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponseContext {
    pub slot: u64,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Response<T> {
    pub context: RpcResponseContext,
    pub value: T,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockCommitment<T> {
    pub commitment: Option<T>,
    pub total_stake: u64,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlock {
    pub previous_blockhash: String,
    pub blockhash: String,
    pub parent_slot: Slot,
    pub transactions: Vec<RpcTransactionWithStatusMeta>,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionWithStatusMeta {
    pub transaction: RpcEncodedTransaction,
    pub meta: Option<RpcTransactionStatus>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum RpcTransactionEncoding {
    Binary,
    Json,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum RpcEncodedTransaction {
    Binary(String),
    Json(RpcTransaction),
}

impl RpcEncodedTransaction {
    pub fn encode(transaction: Transaction, encoding: RpcTransactionEncoding) -> Self {
        if encoding == RpcTransactionEncoding::Json {
            RpcEncodedTransaction::Json(RpcTransaction {
                signatures: transaction
                    .signatures
                    .iter()
                    .map(|sig| sig.to_string())
                    .collect(),
                message: RpcMessage {
                    header: transaction.message.header,
                    account_keys: transaction
                        .message
                        .account_keys
                        .iter()
                        .map(|pubkey| pubkey.to_string())
                        .collect(),
                    recent_blockhash: transaction.message.recent_blockhash.to_string(),
                    instructions: transaction
                        .message
                        .instructions
                        .iter()
                        .map(|instruction| RpcCompiledInstruction {
                            program_id_index: instruction.program_id_index,
                            accounts: instruction.accounts.clone(),
                            data: bs58::encode(instruction.data.clone()).into_string(),
                        })
                        .collect(),
                },
            })
        } else {
            RpcEncodedTransaction::Binary(
                bs58::encode(serialize(&transaction).unwrap()).into_string(),
            )
        }
    }
}
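The `Binary` arm above bincode-serializes the transaction and then base-58 encodes the bytes. A standalone sketch of that round-trip, assuming the `bs58` and `bincode` crates exactly as this file uses them:

```rust
// Sketch of the Binary encoding: bincode-serialize, base-58 encode; decoding reverses both steps.
fn main() {
    let payload = vec![1u8, 2, 3];
    let bytes = bincode::serialize(&payload).unwrap();
    let encoded = bs58::encode(&bytes).into_string();

    let decoded = bs58::decode(&encoded).into_vec().unwrap();
    assert_eq!(decoded, bytes);
    let roundtrip: Vec<u8> = bincode::deserialize(&decoded).unwrap();
    assert_eq!(roundtrip, payload);
}
```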
/// A duplicate representation of a Transaction for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransaction {
    pub signatures: Vec<String>,
    pub message: RpcMessage,
}

/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcMessage {
    pub header: MessageHeader,
    pub account_keys: Vec<String>,
    pub recent_blockhash: String,
    pub instructions: Vec<RpcCompiledInstruction>,
}

/// A duplicate representation of a CompiledInstruction for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcCompiledInstruction {
    pub program_id_index: u8,
    pub accounts: Vec<u8>,
    pub data: String,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionStatus {
    pub status: Result<()>,
    pub fee: u64,
    pub pre_balances: Vec<u64>,
    pub post_balances: Vec<u64>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockhashFeeCalculator {
    pub blockhash: String,
    pub fee_calculator: FeeCalculator,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcKeyedAccount {
    pub pubkey: String,
    pub account: RpcAccount,
}

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccount {
    pub lamports: u64,
    pub data: String,
    pub owner: String,
    pub executable: bool,
    pub rent_epoch: Epoch,
}

impl RpcAccount {
    pub fn encode(account: Account) -> Self {
        RpcAccount {
            lamports: account.lamports,
            data: bs58::encode(account.data.clone()).into_string(),
            owner: account.owner.to_string(),
            executable: account.executable,
            rent_epoch: account.rent_epoch,
        }
    }

    pub fn decode(&self) -> std::result::Result<Account, RpcError> {
        Ok(Account {
            lamports: self.lamports,
            data: bs58::decode(self.data.clone()).into_vec().map_err(|_| {
                RpcError::RpcRequestError("Could not parse encoded account data".to_string())
            })?,
            owner: Pubkey::from_str(&self.owner).map_err(|_| {
                RpcError::RpcRequestError("Could not parse encoded account owner".to_string())
            })?,
            executable: self.executable,
            rent_epoch: self.rent_epoch,
            ..Account::default()
        })
    }
}
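`decode` above reverses `encode` field by field, mapping each parse failure to an `RpcError`. A reduced sketch of the same round-trip with a stand-in account type, so it runs without the solana crates (`MiniAccount` is illustrative only; the real code uses `solana_sdk::account::Account`):

```rust
// Reduced stand-in for the encode/decode pair above.
#[derive(Debug, PartialEq)]
struct MiniAccount {
    lamports: u64,
    data: Vec<u8>,
}

fn encode(account: &MiniAccount) -> (u64, String) {
    // Binary account data travels as a base-58 string.
    (account.lamports, bs58::encode(&account.data).into_string())
}

fn decode(lamports: u64, data: &str) -> Result<MiniAccount, String> {
    let data = bs58::decode(data)
        .into_vec()
        .map_err(|_| "Could not parse encoded account data".to_string())?;
    Ok(MiniAccount { lamports, data })
}

fn main() {
    let account = MiniAccount { lamports: 42, data: vec![7, 8, 9] };
    let (lamports, data) = encode(&account);
    assert_eq!(decode(lamports, &data).unwrap(), account);
}
```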
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
    /// Pubkey of the node as a base-58 string
    pub pubkey: String,
    /// Gossip port
    pub gossip: Option<SocketAddr>,
    /// Tpu port
    pub tpu: Option<SocketAddr>,
    /// JSON RPC port
    pub rpc: Option<SocketAddr>,
}

/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
    /// The current epoch
    pub epoch: Epoch,

    /// The current slot, relative to the start of the current epoch
    pub slot_index: u64,

    /// The number of slots in this epoch
    pub slots_in_epoch: u64,

    /// The absolute current slot
    pub absolute_slot: Slot,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct RpcVersionInfo {
    /// The current version of solana-core
    pub solana_core: String,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountStatus {
    pub current: Vec<RpcVoteAccountInfo>,
    pub delinquent: Vec<RpcVoteAccountInfo>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVoteAccountInfo {
    /// Vote account pubkey as base-58 encoded string
    pub vote_pubkey: String,

    /// The pubkey of the node that votes using this account
    pub node_pubkey: String,

    /// The current stake, in lamports, delegated to this vote account
    pub activated_stake: u64,

    /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
    pub commission: u8,

    /// Whether this account is staked for the current epoch
    pub epoch_vote_account: bool,

    /// History of credits earned by the end of each epoch;
    /// each tuple is (Epoch, credits, prev_credits)
    pub epoch_credits: Vec<(Epoch, u64, u64)>,

    /// Most recent slot voted on by this vote account (0 if no votes exist)
    pub last_vote: u64,

    /// Current root slot for this vote account (0 if no root slot exists)
    pub root_slot: Slot,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureConfirmation {
    pub confirmations: usize,
    pub status: Result<()>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcStorageTurn {
    pub blockhash: String,
    pub slot: Slot,
}
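The `rename_all` attributes throughout this file control the field casing that JSON-RPC clients see: camelCase for most structs, kebab-case for `RpcVersionInfo`. A self-contained sketch of the effect, assuming serde's derive feature and serde_json (the struct names here are stand-ins):

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct EpochInfoLike {
    slot_index: u64,
    slots_in_epoch: u64,
}

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct VersionInfoLike {
    solana_core: String,
}

fn main() {
    // Field names are rewritten at serialization time, not in the Rust source.
    let epoch = EpochInfoLike { slot_index: 8, slots_in_epoch: 32 };
    assert_eq!(
        serde_json::to_string(&epoch).unwrap(),
        r#"{"slotIndex":8,"slotsInEpoch":32}"#
    );
    let version = VersionInfoLike { solana_core: "0.22.3".to_string() };
    assert_eq!(
        serde_json::to_string(&version).unwrap(),
        r#"{"solana-core":"0.22.3"}"#
    );
}
```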
@@ -3,8 +3,7 @@
 //! messages to the network directly. The binary encoding of its messages is
 //! unstable and may change in future releases.
 
-use crate::rpc_client::RpcClient;
-use crate::rpc_request::Response;
+use crate::{rpc_client::RpcClient, rpc_response::Response};
 use bincode::{serialize_into, serialized_size};
 use log::*;
 use solana_sdk::{
@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.22.0"
+version = "0.22.3"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -41,26 +41,26 @@ rayon = "1.2.0"
 serde = "1.0.104"
 serde_derive = "1.0.103"
 serde_json = "1.0.44"
-solana-budget-program = { path = "../programs/budget", version = "0.22.0" }
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-chacha-sys = { path = "../chacha-sys", version = "0.22.0" }
-solana-client = { path = "../client", version = "0.22.0" }
-solana-faucet = { path = "../faucet", version = "0.22.0" }
+solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-chacha-sys = { path = "../chacha-sys", version = "0.22.3" }
+solana-client = { path = "../client", version = "0.22.3" }
+solana-faucet = { path = "../faucet", version = "0.22.3" }
 ed25519-dalek = "=1.0.0-pre.1"
-solana-ledger = { path = "../ledger", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-measure = { path = "../measure", version = "0.22.0" }
-solana-net-utils = { path = "../net-utils", version = "0.22.0" }
-solana-perf = { path = "../perf", version = "0.22.0" }
-solana-runtime = { path = "../runtime", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
-solana-stake-program = { path = "../programs/stake", version = "0.22.0" }
-solana-storage-program = { path = "../programs/storage", version = "0.22.0" }
-solana-vote-program = { path = "../programs/vote", version = "0.22.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.22.0" }
-solana-sys-tuner = { path = "../sys-tuner", version = "0.22.0" }
+solana-ledger = { path = "../ledger", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-merkle-tree = { path = "../merkle-tree", version = "0.22.3" }
+solana-metrics = { path = "../metrics", version = "0.22.3" }
+solana-measure = { path = "../measure", version = "0.22.3" }
+solana-net-utils = { path = "../net-utils", version = "0.22.3" }
+solana-perf = { path = "../perf", version = "0.22.3" }
+solana-runtime = { path = "../runtime", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
+solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
+solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
+solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
+solana-vote-signer = { path = "../vote-signer", version = "0.22.3" }
+solana-sys-tuner = { path = "../sys-tuner", version = "0.22.3" }
 symlink = "0.1.0"
 sys-info = "0.5.8"
 tempfile = "3.1.0"
@@ -69,13 +69,9 @@ tokio-codec = "0.1"
 tokio-fs = "0.1"
 tokio-io = "0.1"
 untrusted = "0.7.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.3" }
 reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
 
 [target."cfg(unix)".dependencies]
 jemallocator = "0.3.2"
 jemalloc-ctl = "0.3.2"
 
 [dev-dependencies]
 hex-literal = "0.2.1"
 matches = "0.1.6"
@@ -88,7 +84,7 @@ systemstat = "0.1.5"
 name = "banking_stage"
 
 [[bench]]
-name = "blocktree"
+name = "blockstore"
 
 [[bench]]
 name = "gen_keys"
@@ -12,9 +12,9 @@ use solana_core::cluster_info::Node;
 use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
 use solana_core::packet::to_packets_chunked;
 use solana_core::poh_recorder::WorkingBankEntry;
-use solana_ledger::blocktree_processor::process_entries;
+use solana_ledger::blockstore_processor::process_entries;
 use solana_ledger::entry::{next_hash, Entry};
-use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
 use solana_perf::test_tx::test_tx;
 use solana_runtime::bank::Bank;
 use solana_sdk::genesis_config::GenesisConfig;
@@ -57,11 +57,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
     let my_pubkey = Pubkey::new_rand();
     {
-        let blocktree = Arc::new(
-            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+        let blockstore = Arc::new(
+            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
         );
         let (exit, poh_recorder, poh_service, _signal_receiver) =
-            create_test_recorder(&bank, &blocktree, None);
+            create_test_recorder(&bank, &blockstore, None);
 
         let tx = test_tx();
         let len = 4096;
@@ -87,7 +87,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
         exit.store(true, Ordering::Relaxed);
         poh_service.join().unwrap();
     }
-    let _unused = Blocktree::destroy(&ledger_path);
+    let _unused = Blockstore::destroy(&ledger_path);
 }
 
 fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
@@ -184,11 +184,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
     let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
     let ledger_path = get_tmp_ledger_path!();
     {
-        let blocktree = Arc::new(
-            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+        let blockstore = Arc::new(
+            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
         );
         let (exit, poh_recorder, poh_service, signal_receiver) =
-            create_test_recorder(&bank, &blocktree, None);
+            create_test_recorder(&bank, &blockstore, None);
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
         let cluster_info = Arc::new(RwLock::new(cluster_info));
         let _banking_stage = BankingStage::new(
@@ -244,7 +244,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
         exit.store(true, Ordering::Relaxed);
         poh_service.join().unwrap();
     }
-    let _unused = Blocktree::destroy(&ledger_path);
+    let _unused = Blockstore::destroy(&ledger_path);
 }
 
 #[bench]
@@ -6,7 +6,7 @@ extern crate test;
 
 use rand::Rng;
 use solana_ledger::{
-    blocktree::{entries_to_test_shreds, Blocktree},
+    blockstore::{entries_to_test_shreds, Blockstore},
     entry::{create_ticks, Entry},
     get_tmp_ledger_path,
 };
@@ -16,19 +16,19 @@ use test::Bencher;
 
 // Given some shreds and a ledger at ledger_path, benchmark writing the shreds to the ledger
 fn bench_write_shreds(bench: &mut Bencher, entries: Vec<Entry>, ledger_path: &Path) {
-    let blocktree =
-        Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
+    let blockstore =
+        Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
     bench.iter(move || {
         let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-        blocktree.insert_shreds(shreds, None, false).unwrap();
+        blockstore.insert_shreds(shreds, None, false).unwrap();
     });
 
-    Blocktree::destroy(ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(ledger_path).expect("Expected successful database destruction");
 }
 
 // Insert some shreds into the ledger in preparation for read benchmarks
 fn setup_read_bench(
-    blocktree: &mut Blocktree,
+    blockstore: &mut Blockstore,
     num_small_shreds: u64,
     num_large_shreds: u64,
     slot: Slot,
@@ -42,7 +42,7 @@ fn setup_read_bench(
 
     // Convert the entries to shreds, write the shreds to the ledger
     let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0);
-    blocktree
+    blockstore
         .insert_shreds(shreds, None, false)
         .expect("Expected successful insertion of shreds into ledger");
 }
@@ -71,15 +71,15 @@ fn bench_write_big(bench: &mut Bencher) {
 #[ignore]
 fn bench_read_sequential(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
-    let mut blocktree =
-        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let mut blockstore =
+        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 
     // Insert some big and small shreds into the ledger
     let num_small_shreds = 32 * 1024;
     let num_large_shreds = 32 * 1024;
     let total_shreds = num_small_shreds + num_large_shreds;
     let slot = 0;
-    setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
+    setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
 
     let num_reads = total_shreds / 15;
     let mut rng = rand::thread_rng();
@@ -87,26 +87,26 @@ fn bench_read_sequential(bench: &mut Bencher) {
         // Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially
         let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds);
         for i in start_index..start_index + num_reads {
-            let _ = blocktree.get_data_shred(slot, i as u64 % total_shreds);
+            let _ = blockstore.get_data_shred(slot, i as u64 % total_shreds);
         }
     });
 
-    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 
 #[bench]
 #[ignore]
 fn bench_read_random(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
-    let mut blocktree =
-        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let mut blockstore =
+        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 
     // Insert some big and small shreds into the ledger
     let num_small_shreds = 32 * 1024;
     let num_large_shreds = 32 * 1024;
     let total_shreds = num_small_shreds + num_large_shreds;
     let slot = 0;
-    setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
+    setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
 
     let num_reads = total_shreds / 15;
 
@@ -118,39 +118,39 @@ fn bench_read_random(bench: &mut Bencher) {
         .collect();
     bench.iter(move || {
         for i in indexes.iter() {
-            let _ = blocktree.get_data_shred(slot, *i as u64);
+            let _ = blockstore.get_data_shred(slot, *i as u64);
         }
     });
 
-    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 
 #[bench]
 #[ignore]
 fn bench_insert_data_shred_small(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
-    let blocktree =
-        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let blockstore =
+        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
     let num_entries = 32 * 1024;
     let entries = create_ticks(num_entries, 0, Hash::default());
    bench.iter(move || {
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-        blocktree.insert_shreds(shreds, None, false).unwrap();
+        blockstore.insert_shreds(shreds, None, false).unwrap();
    });
-    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 
 #[bench]
 #[ignore]
 fn bench_insert_data_shred_big(bench: &mut Bencher) {
     let ledger_path = get_tmp_ledger_path!();
-    let blocktree =
-        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let blockstore =
+        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
     let num_entries = 32 * 1024;
     let entries = create_ticks(num_entries, 0, Hash::default());
     bench.iter(move || {
         let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-        blocktree.insert_shreds(shreds, None, false).unwrap();
+        blockstore.insert_shreds(shreds, None, false).unwrap();
     });
-    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
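The sequential-read benchmark above picks a random starting shred and wraps each read with `% total_shreds`, so indices that run past the end fold back to the beginning of the slot. The indexing scheme in isolation:

```rust
fn main() {
    let total_shreds: u64 = 64 * 1024;
    let num_reads = total_shreds / 15;
    let start_index = 60_000u64; // e.g. a random start near the end

    // Wrapping with `%` keeps every read inside [0, total_shreds).
    let indices: Vec<u64> = (start_index..start_index + num_reads)
        .map(|i| i % total_shreds)
        .collect();
    assert!(indices.iter().all(|&i| i < total_shreds));
}
```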
@@ -17,9 +17,12 @@ use crossbeam_channel::unbounded;
 use ed25519_dalek;
 use rand::{thread_rng, Rng, SeedableRng};
 use rand_chacha::ChaChaRng;
-use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient};
+use solana_client::{
+    rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
+    thin_client::ThinClient,
+};
 use solana_ledger::{
-    blocktree::Blocktree, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
+    blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
 };
 use solana_net_utils::bind_in_range;
 use solana_perf::packet::Packets;
@@ -222,13 +225,13 @@ impl Archiver {
         // Note for now, this ledger will not contain any of the existing entries
         // in the ledger located at ledger_path, and will only append on newly received
         // entries after being passed to window_service
-        let blocktree = Arc::new(
-            Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"),
+        let blockstore = Arc::new(
+            Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
         );
 
         let gossip_service = GossipService::new(
             &cluster_info,
-            Some(blocktree.clone()),
+            Some(blockstore.clone()),
             None,
             node.sockets.gossip,
             &exit,
@@ -294,7 +297,7 @@ impl Archiver {
         let window_service = match Self::setup(
             &mut meta,
             cluster_info.clone(),
-            &blocktree,
+            &blockstore,
             &exit,
             &node_info,
             &storage_keypair,
@@ -320,7 +323,7 @@ impl Archiver {
         // run archiver
         Self::run(
             &mut meta,
-            &blocktree,
+            &blockstore,
             cluster_info,
             &keypair,
             &storage_keypair,
@@ -344,14 +347,14 @@ impl Archiver {
 
     fn run(
         meta: &mut ArchiverMeta,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         archiver_keypair: &Arc<Keypair>,
         storage_keypair: &Arc<Keypair>,
         exit: &Arc<AtomicBool>,
     ) {
         // encrypt segment
-        Self::encrypt_ledger(meta, blocktree).expect("ledger encrypt not successful");
+        Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
         let enc_file_path = meta.ledger_data_file_encrypted.clone();
         // do replicate
         loop {
@@ -443,7 +446,7 @@ impl Archiver {
     fn setup(
         meta: &mut ArchiverMeta,
         cluster_info: Arc<RwLock<ClusterInfo>>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         exit: &Arc<AtomicBool>,
         node_info: &ContactInfo,
         storage_keypair: &Arc<Keypair>,
@@ -498,7 +501,7 @@ impl Archiver {
         );
 
         let window_service = WindowService::new(
-            blocktree.clone(),
+            blockstore.clone(),
             cluster_info.clone(),
             verified_receiver,
             retransmit_sender,
@@ -512,7 +515,7 @@ impl Archiver {
         Self::wait_for_segment_download(
             slot,
             slots_per_segment,
-            &blocktree,
+            &blockstore,
             &exit,
             &node_info,
             cluster_info,
@@ -523,7 +526,7 @@ impl Archiver {
     fn wait_for_segment_download(
         start_slot: Slot,
         slots_per_segment: u64,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         exit: &Arc<AtomicBool>,
         node_info: &ContactInfo,
         cluster_info: Arc<RwLock<ClusterInfo>>,
@@ -534,7 +537,7 @@ impl Archiver {
         );
         let mut current_slot = start_slot;
         'outer: loop {
-            while blocktree.is_full(current_slot) {
+            while blockstore.is_full(current_slot) {
                 current_slot += 1;
                 info!("current slot: {}", current_slot);
                 if current_slot >= start_slot + slots_per_segment {
@@ -559,7 +562,7 @@ impl Archiver {
         }
     }
 
-    fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc<Blocktree>) -> Result<()> {
+    fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
         meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);
 
         {
@@ -567,7 +570,7 @@ impl Archiver {
             ivec.copy_from_slice(&meta.signature.as_ref());
 
             let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
-                blocktree,
+                blockstore,
                 meta.slot,
                 meta.slots_per_segment,
                 &meta.ledger_data_file_encrypted,
@@ -811,13 +814,15 @@ impl Archiver {
                 warn!("Error while making rpc request {:?}", err);
                 Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
             })?;
-        let (storage_blockhash, turn_slot) =
-            serde_json::from_value::<(String, u64)>(response).map_err(|err| {
-                io::Error::new(
-                    io::ErrorKind::Other,
-                    format!("Couldn't parse response: {:?}", err),
-                )
-            })?;
+        let RpcStorageTurn {
+            blockhash: storage_blockhash,
+            slot: turn_slot,
+        } = serde_json::from_value::<RpcStorageTurn>(response).map_err(|err| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                format!("Couldn't parse response: {:?}", err),
+            )
+        })?;
         let turn_blockhash = storage_blockhash.parse().map_err(|err| {
             io::Error::new(
                 io::ErrorKind::Other,
@@ -844,15 +849,15 @@ impl Archiver {
         }
     }
 
-    /// Ask an archiver to populate a given blocktree with its segment.
+    /// Ask an archiver to populate a given blockstore with its segment.
     /// Return the slot at the start of the archiver's segment
     ///
-    /// It is recommended to use a temporary blocktree for this since the download will not verify
+    /// It is recommended to use a temporary blockstore for this since the download will not verify
     /// shreds received and might impact the chaining of shreds across slots
     pub fn download_from_archiver(
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         archiver_info: &ContactInfo,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         slots_per_segment: u64,
     ) -> Result<u64> {
         // Create a client which downloads from the archiver and see that it
@@ -884,7 +889,7 @@ impl Archiver {
         for _ in 0..120 {
             // Strategy used by archivers
             let repairs = RepairService::generate_repairs_in_range(
-                blocktree,
+                blockstore,
                 repair_service::MAX_REPAIR_LENGTH,
                 &repair_slot_range,
             );
@@ -930,10 +935,10 @@ impl Archiver {
                 .into_iter()
                 .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
                 .collect();
-            blocktree.insert_shreds(shreds, None, false)?;
+            blockstore.insert_shreds(shreds, None, false)?;
         }
         // check if all the slots in the segment are complete
-        if Self::segment_complete(start_slot, slots_per_segment, blocktree) {
+        if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
             break;
         }
         sleep(Duration::from_millis(500));
@@ -942,7 +947,7 @@ impl Archiver {
         t_receiver.join().unwrap();
 
         // check if all the slots in the segment are complete
-        if !Self::segment_complete(start_slot, slots_per_segment, blocktree) {
+        if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
             return Err(
                 io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(),
             );
@@ -953,10 +958,10 @@ impl Archiver {
     fn segment_complete(
         start_slot: Slot,
         slots_per_segment: u64,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
     ) -> bool {
         for slot in start_slot..(start_slot + slots_per_segment) {
-            if !blocktree.is_full(slot) {
+            if !blockstore.is_full(slot) {
                 return false;
             }
         }
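Earlier in this archiver hunk, parsing of the storage-turn RPC response moves from an untyped `(String, u64)` tuple to the named `RpcStorageTurn` struct. A reduced sketch of that parse with a local stand-in struct, so it runs without the solana_client crate (the blockhash value here is a made-up placeholder):

```rust
use serde::Deserialize;
use serde_json::json;

// Local stand-in mirroring rpc_response::RpcStorageTurn.
#[derive(Deserialize)]
struct StorageTurn {
    blockhash: String,
    slot: u64,
}

fn main() {
    // A named struct makes the field meanings explicit where (String, u64) did not.
    let response = json!({ "blockhash": "example-blockhash", "slot": 1764 });
    let StorageTurn { blockhash, slot } = serde_json::from_value(response).unwrap();
    assert_eq!(slot, 1764);
    assert_eq!(blockhash, "example-blockhash");
}
```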
@@ -7,17 +7,16 @@ use crate::{
     poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
     poh_service::PohService,
     result::{Error, Result},
-    thread_mem_usage,
 };
 use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
 use itertools::Itertools;
 use solana_ledger::{
-    blocktree::Blocktree,
-    blocktree_processor::{send_transaction_status_batch, TransactionStatusSender},
+    blockstore::Blockstore,
+    blockstore_processor::{send_transaction_status_batch, TransactionStatusSender},
     entry::hash_transactions,
     leader_schedule_cache::LeaderScheduleCache,
 };
-use solana_measure::measure::Measure;
+use solana_measure::{measure::Measure, thread_mem_usage};
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
 use solana_perf::{cuda_runtime::PinnedVec, perf_libs};
 use solana_runtime::{
@@ -988,7 +987,7 @@ impl BankingStage {
 
 pub fn create_test_recorder(
     bank: &Arc<Bank>,
-    blocktree: &Arc<Blocktree>,
+    blockstore: &Arc<Blockstore>,
     poh_config: Option<PohConfig>,
 ) -> (
     Arc<AtomicBool>,
@@ -1005,7 +1004,7 @@ pub fn create_test_recorder(
         Some((4, 4)),
         bank.ticks_per_slot(),
         &Pubkey::default(),
-        blocktree,
+        blockstore,
         &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
         &poh_config,
     );
@@ -1029,8 +1028,9 @@ mod tests {
     };
     use crossbeam_channel::unbounded;
     use itertools::Itertools;
+    use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
     use solana_ledger::{
-        blocktree::entries_to_test_shreds,
+        blockstore::entries_to_test_shreds,
         entry::{next_entry, Entry, EntrySlice},
         get_tmp_ledger_path,
     };
@@ -1051,11 +1051,12 @@ mod tests {
         let (vote_sender, vote_receiver) = unbounded();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+            let blockstore = Arc::new(
+                Blockstore::open(&ledger_path)
+                    .expect("Expected to be able to open database ledger"),
             );
             let (exit, poh_recorder, poh_service, _entry_receiever) =
-                create_test_recorder(&bank, &blocktree, None);
+                create_test_recorder(&bank, &blockstore, None);
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
             let banking_stage = BankingStage::new(
@@ -1071,7 +1072,7 @@ mod tests {
             banking_stage.join().unwrap();
             poh_service.join().unwrap();
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1088,13 +1089,14 @@ mod tests {
         let (vote_sender, vote_receiver) = unbounded();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+            let blockstore = Arc::new(
+                Blockstore::open(&ledger_path)
+                    .expect("Expected to be able to open database ledger"),
             );
             let mut poh_config = PohConfig::default();
             poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
             let (exit, poh_recorder, poh_service, entry_receiver) =
-                create_test_recorder(&bank, &blocktree, Some(poh_config));
+                create_test_recorder(&bank, &blockstore, Some(poh_config));
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
             let banking_stage = BankingStage::new(
@@ -1122,7 +1124,7 @@ mod tests {
             assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
             banking_stage.join().unwrap();
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
@@ -1149,14 +1151,15 @@ mod tests {
         let (vote_sender, vote_receiver) = unbounded();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+            let blockstore = Arc::new(
+                Blockstore::open(&ledger_path)
+                    .expect("Expected to be able to open database ledger"),
             );
             let mut poh_config = PohConfig::default();
             // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
             poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
             let (exit, poh_recorder, poh_service, entry_receiver) =
-                create_test_recorder(&bank, &blocktree, Some(poh_config));
+                create_test_recorder(&bank, &blockstore, Some(poh_config));
             let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
             let banking_stage = BankingStage::new(
@@ -1242,7 +1245,7 @@ mod tests {
 
             drop(entry_receiver);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1288,15 +1291,15 @@ mod tests {
         let entry_receiver = {
             // start a banking_stage to eat verified receiver
             let bank = Arc::new(Bank::new(&genesis_config));
-            let blocktree = Arc::new(
-                Blocktree::open(&ledger_path)
+            let blockstore = Arc::new(
+                Blockstore::open(&ledger_path)
                     .expect("Expected to be able to open database ledger"),
             );
             let mut poh_config = PohConfig::default();
             // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
             poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
             let (exit, poh_recorder, poh_service, entry_receiver) =
-                create_test_recorder(&bank, &blocktree, Some(poh_config));
+                create_test_recorder(&bank, &blockstore, Some(poh_config));
             let cluster_info =
                 ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
             let cluster_info = Arc::new(RwLock::new(cluster_info));
@@ -1339,7 +1342,7 @@ mod tests {
             // the account balance below zero before the credit is added.
             assert_eq!(bank.get_balance(&alice.pubkey()), 2);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1357,8 +1360,8 @@ mod tests {
         };
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (poh_recorder, entry_receiver) = PohRecorder::new(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -1366,7 +1369,7 @@ mod tests {
                 None,
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1446,7 +1449,7 @@ mod tests {
             // Should receive nothing from PohRecorder b/c record failed
             assert!(entry_receiver.try_recv().is_err());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1696,8 +1699,8 @@ mod tests {
         };
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (poh_recorder, entry_receiver) = PohRecorder::new(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -1705,7 +1708,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &pubkey,
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1762,7 +1765,7 @@ mod tests {
 
             assert_eq!(bank.get_balance(&pubkey), 1);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1789,8 +1792,8 @@ mod tests {
         };
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (poh_recorder, _entry_receiver) = PohRecorder::new(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -1798,7 +1801,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &pubkey,
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1817,7 +1820,7 @@ mod tests {
             assert!(result.is_ok());
             assert_eq!(unprocessed.len(), 1);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1877,8 +1880,8 @@ mod tests {
 
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (poh_recorder, _entry_receiver) = PohRecorder::new(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -1886,7 +1889,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::new_rand(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1905,7 +1908,7 @@ mod tests {
             assert_eq!(retryable_txs, expected);
         }
 
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 
     #[test]
@@ -1944,9 +1947,9 @@ mod tests {
         };
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
-            let blocktree = Arc::new(blocktree);
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
+            let blockstore = Arc::new(blockstore);
             let (poh_recorder, _entry_receiver) = PohRecorder::new(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -1954,7 +1957,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &pubkey,
-                &blocktree,
+                &blockstore,
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1963,13 +1966,13 @@ mod tests {
             poh_recorder.lock().unwrap().set_working_bank(working_bank);
 
             let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0);
-            blocktree.insert_shreds(shreds, None, false).unwrap();
-            blocktree.set_roots(&[bank.slot()]).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();
+            blockstore.set_roots(&[bank.slot()]).unwrap();
 
             let (transaction_status_sender, transaction_status_receiver) = unbounded();
             let transaction_status_service = TransactionStatusService::new(
                 transaction_status_receiver,
-                blocktree.clone(),
+                blockstore.clone(),
                 &Arc::new(AtomicBool::new(false)),
             );
 
@@ -1983,25 +1986,29 @@ mod tests {
 
             transaction_status_service.join().unwrap();
 
-            let confirmed_block = blocktree.get_confirmed_block(bank.slot()).unwrap();
+            let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
             assert_eq!(confirmed_block.transactions.len(), 3);
 
-            for (transaction, result) in confirmed_block.transactions.into_iter() {
-                if transaction.signatures[0] == success_signature {
-                    assert_eq!(result.unwrap().status, Ok(()));
-                } else if transaction.signatures[0] == ix_error_signature {
-                    assert_eq!(
-                        result.unwrap().status,
-                        Err(TransactionError::InstructionError(
-                            0,
-                            InstructionError::CustomError(1)
-                        ))
-                    );
-                } else {
-                    assert_eq!(result, None);
+            for RpcTransactionWithStatusMeta { transaction, meta } in
+                confirmed_block.transactions.into_iter()
+            {
+                if let RpcEncodedTransaction::Json(transaction) = transaction {
+                    if transaction.signatures[0] == success_signature.to_string() {
+                        assert_eq!(meta.unwrap().status, Ok(()));
+                    } else if transaction.signatures[0] == ix_error_signature.to_string() {
+                        assert_eq!(
+                            meta.unwrap().status,
+                            Err(TransactionError::InstructionError(
+                                0,
+                                InstructionError::CustomError(1)
+                            ))
+                        );
+                    } else {
+                        assert_eq!(meta, None);
+                    }
                 }
             }
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 }
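The reworked assertions above lean on two pieces of Rust pattern syntax: destructuring a struct directly in the `for` binding, and `if let` to reach only the `Json` variant. A minimal sketch with stand-in types (these mirror, but are not, the `RpcTransactionWithStatusMeta`/`RpcEncodedTransaction` types):

```rust
#[allow(dead_code)]
enum Encoded {
    Binary(String),
    Json(Vec<String>),
}

struct WithMeta {
    transaction: Encoded,
    meta: Option<&'static str>,
}

fn main() {
    let items = vec![WithMeta {
        transaction: Encoded::Json(vec!["sig".to_string()]),
        meta: Some("ok"),
    }];
    // Destructure each element in the loop header, then match only the Json variant.
    for WithMeta { transaction, meta } in items.into_iter() {
        if let Encoded::Json(signatures) = transaction {
            assert_eq!(signatures[0], "sig");
            assert_eq!(meta, Some("ok"));
        }
    }
}
```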
@@ -8,7 +8,7 @@ use crate::blockstream::MockBlockstream as Blockstream;
 #[cfg(not(test))]
 use crate::blockstream::SocketBlockstream as Blockstream;
 use crate::result::{Error, Result};
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
 use solana_sdk::pubkey::Pubkey;
 use std::path::Path;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -25,7 +25,7 @@ impl BlockstreamService {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(
         slot_full_receiver: Receiver<(u64, Pubkey)>,
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         unix_socket: &Path,
         exit: &Arc<AtomicBool>,
     ) -> Self {
@@ -38,7 +38,7 @@ impl BlockstreamService {
                 break;
             }
             if let Err(e) =
-                Self::process_entries(&slot_full_receiver, &blocktree, &mut blockstream)
+                Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
             {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@@ -52,18 +52,18 @@ impl BlockstreamService {
     }
     fn process_entries(
         slot_full_receiver: &Receiver<(u64, Pubkey)>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         blockstream: &mut Blockstream,
     ) -> Result<()> {
         let timeout = Duration::new(1, 0);
         let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
 
-        let entries = blocktree.get_slot_entries(slot, 0, None).unwrap();
-        let blocktree_meta = blocktree.meta(slot).unwrap().unwrap();
+        let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
+        let blockstore_meta = blockstore.meta(slot).unwrap().unwrap();
         let _parent_slot = if slot == 0 {
             None
         } else {
-            Some(blocktree_meta.parent_slot)
+            Some(blockstore_meta.parent_slot)
         };
         let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
         let mut tick_height = ticks_per_slot * slot;
@@ -113,14 +113,14 @@ mod test {
         let ticks_per_slot = 5;
         let leader_pubkey = Pubkey::new_rand();
 
-        // Set up genesis config and blocktree
+        // Set up genesis config and blockstore
         let GenesisConfigInfo {
             mut genesis_config, ..
         } = create_genesis_config(1000);
         genesis_config.ticks_per_slot = ticks_per_slot;
 
         let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
-        let blocktree = Blocktree::open(&ledger_path).unwrap();
+        let blockstore = Blockstore::open(&ledger_path).unwrap();
 
         // Set up blockstream
         let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
@@ -143,7 +143,7 @@ mod test {
         let expected_entries = entries.clone();
         let expected_tick_heights = [6, 7, 8, 9, 9, 10];
 
-        blocktree
+        blockstore
             .write_entries(
                 1,
                 0,
@@ -160,7 +160,7 @@ mod test {
         slot_full_sender.send((1, leader_pubkey)).unwrap();
         BlockstreamService::process_entries(
             &slot_full_receiver,
-            &Arc::new(blocktree),
+            &Arc::new(blockstore),
             &mut blockstream,
         )
         .unwrap();
@@ -5,7 +5,7 @@ use self::standard_broadcast_run::StandardBroadcastRun;
 use crate::cluster_info::{ClusterInfo, ClusterInfoError};
 use crate::poh_recorder::WorkingBankEntry;
 use crate::result::{Error, Result};
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
 use solana_ledger::shred::Shred;
 use solana_ledger::staking_utils;
 use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
@@ -44,7 +44,7 @@ impl BroadcastStageType {
         cluster_info: Arc<RwLock<ClusterInfo>>,
         receiver: Receiver<WorkingBankEntry>,
         exit_sender: &Arc<AtomicBool>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         shred_version: u16,
     ) -> BroadcastStage {
         let keypair = cluster_info.read().unwrap().keypair.clone();
@@ -54,7 +54,7 @@ impl BroadcastStageType {
                 cluster_info,
                 receiver,
                 exit_sender,
-                blocktree,
+                blockstore,
                 StandardBroadcastRun::new(keypair, shred_version),
             ),
 
@@ -63,7 +63,7 @@ impl BroadcastStageType {
                 cluster_info,
                 receiver,
                 exit_sender,
-                blocktree,
+                blockstore,
                 FailEntryVerificationBroadcastRun::new(keypair, shred_version),
             ),
 
@@ -72,7 +72,7 @@ impl BroadcastStageType {
                 cluster_info,
                 receiver,
                 exit_sender,
-                blocktree,
+                blockstore,
                 BroadcastFakeShredsRun::new(keypair, 0, shred_version),
             ),
         }
@@ -83,10 +83,10 @@ type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
 trait BroadcastRun {
     fn run(
         &mut self,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         receiver: &Receiver<WorkingBankEntry>,
         socket_sender: &Sender<TransmitShreds>,
-        blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+        blockstore_sender: &Sender<Arc<Vec<Shred>>>,
     ) -> Result<()>;
     fn transmit(
         &self,
@@ -97,7 +97,7 @@ trait BroadcastRun {
     fn record(
         &self,
         receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
     ) -> Result<()>;
 }
 
@@ -126,14 +126,15 @@ pub struct BroadcastStage {
 impl BroadcastStage {
     #[allow(clippy::too_many_arguments)]
     fn run(
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         receiver: &Receiver<WorkingBankEntry>,
         socket_sender: &Sender<TransmitShreds>,
-        blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+        blockstore_sender: &Sender<Arc<Vec<Shred>>>,
         mut broadcast_stage_run: impl BroadcastRun,
     ) -> BroadcastStageReturnType {
         loop {
-            let res = broadcast_stage_run.run(blocktree, receiver, socket_sender, blocktree_sender);
+            let res =
+                broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
             let res = Self::handle_error(res);
             if let Some(res) = res {
                 return res;
@@ -180,19 +181,25 @@ impl BroadcastStage {
         cluster_info: Arc<RwLock<ClusterInfo>>,
         receiver: Receiver<WorkingBankEntry>,
         exit_sender: &Arc<AtomicBool>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
     ) -> Self {
-        let btree = blocktree.clone();
+        let btree = blockstore.clone();
         let exit = exit_sender.clone();
         let (socket_sender, socket_receiver) = channel();
-        let (blocktree_sender, blocktree_receiver) = channel();
+        let (blockstore_sender, blockstore_receiver) = channel();
         let bs_run = broadcast_stage_run.clone();
         let thread_hdl = Builder::new()
             .name("solana-broadcaster".to_string())
             .spawn(move || {
                 let _finalizer = Finalizer::new(exit);
-                Self::run(&btree, &receiver, &socket_sender, &blocktree_sender, bs_run)
+                Self::run(
+                    &btree,
+                    &receiver,
+                    &socket_sender,
+                    &blockstore_sender,
+                    bs_run,
+                )
             })
             .unwrap();
         let mut thread_hdls = vec![thread_hdl];
@@ -213,15 +220,15 @@ impl BroadcastStage {
             .unwrap();
             thread_hdls.push(t);
         }
-        let blocktree_receiver = Arc::new(Mutex::new(blocktree_receiver));
+        let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
         for _ in 0..NUM_INSERT_THREADS {
-            let blocktree_receiver = blocktree_receiver.clone();
+            let blockstore_receiver = blockstore_receiver.clone();
             let bs_record = broadcast_stage_run.clone();
-            let btree = blocktree.clone();
+            let btree = blockstore.clone();
             let t = Builder::new()
                 .name("solana-broadcaster-record".to_string())
                 .spawn(move || loop {
-                    let res = bs_record.record(&blocktree_receiver, &btree);
+                    let res = bs_record.record(&blockstore_receiver, &btree);
                     let res = Self::handle_error(res);
                     if let Some(res) = res {
                         return res;
@@ -248,7 +255,7 @@ mod test {
     use crate::cluster_info::{ClusterInfo, Node};
     use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
     use solana_ledger::entry::create_ticks;
-    use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
     use solana_runtime::bank::Bank;
     use solana_sdk::hash::Hash;
     use solana_sdk::pubkey::Pubkey;
@@ -261,7 +268,7 @@ mod test {
     use std::time::Duration;
 
     struct MockBroadcastStage {
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         broadcast_service: BroadcastStage,
         bank: Arc<Bank>,
     }
@@ -272,7 +279,7 @@ mod test {
         entry_receiver: Receiver<WorkingBankEntry>,
     ) -> MockBroadcastStage {
         // Make the database ledger
-        let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap());
+        let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
 
         // Make the leader node and scheduler
         let leader_info = Node::new_localhost_with_pubkey(leader_pubkey);
@@ -298,12 +305,12 @@ mod test {
             cluster_info,
             entry_receiver,
             &exit_sender,
-            &blocktree,
+            &blockstore,
             StandardBroadcastRun::new(leader_keypair, 0),
         );
 
         MockBroadcastStage {
-            blocktree,
+            blockstore,
             broadcast_service,
             bank,
         }
@@ -350,8 +357,8 @@ mod test {
             ticks_per_slot,
         );
 
-        let blocktree = broadcast_service.blocktree;
-        let (entries, _, _) = blocktree
+        let blockstore = broadcast_service.blockstore;
+        let (entries, _, _) = blockstore
             .get_slot_entries_with_shred_info(slot, 0)
             .expect("Expect entries to be present");
         assert_eq!(entries.len(), max_tick_height as usize);
@@ -363,6 +370,6 @@ mod test {
             .expect("Expect successful join of broadcast service");
     }
 
-    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+    Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
     }
 }
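`BroadcastStage::new` above fans one `blockstore_receiver` out to `NUM_INSERT_THREADS` record workers by wrapping it in `Arc<Mutex<..>>`, since an mpsc receiver cannot itself be cloned. A minimal sketch of that sharing pattern using only the standard library (thread count and payload type are arbitrary here):

```rust
use std::sync::{mpsc::channel, Arc, Mutex};
use std::thread;

fn main() {
    let (sender, receiver) = channel::<u32>();
    // An mpsc receiver is single-consumer; sharing one across workers
    // requires Arc<Mutex<..>>, as BroadcastStage does for its insert threads.
    let receiver = Arc::new(Mutex::new(receiver));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let receiver = receiver.clone();
            thread::spawn(move || loop {
                // Lock just long enough to pull one item off the channel.
                let item = receiver.lock().unwrap().recv();
                match item {
                    Ok(_shreds) => { /* insert_shreds() would run here */ }
                    Err(_) => break, // channel closed: all senders dropped
                }
            })
        })
        .collect();

    for i in 0..100 {
        sender.send(i).unwrap();
    }
    drop(sender); // lets recv() fail so every worker exits

    for handle in handles {
        handle.join().unwrap();
    }
}
```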
@@ -26,17 +26,17 @@ impl BroadcastFakeShredsRun {
 impl BroadcastRun for BroadcastFakeShredsRun {
     fn run(
         &mut self,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         receiver: &Receiver<WorkingBankEntry>,
         socket_sender: &Sender<TransmitShreds>,
-        blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+        blockstore_sender: &Sender<Arc<Vec<Shred>>>,
     ) -> Result<()> {
         // 1) Pull entries from banking stage
         let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
         let bank = receive_results.bank.clone();
         let last_tick_height = receive_results.last_tick_height;
 
-        let next_shred_index = blocktree
+        let next_shred_index = blockstore
             .meta(bank.slot())
             .expect("Database error")
             .map(|meta| meta.consumed)
@@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
         }
 
         let data_shreds = Arc::new(data_shreds);
-        blocktree_sender.send(data_shreds.clone())?;
+        blockstore_sender.send(data_shreds.clone())?;
 
         // 3) Start broadcast step
         // Some(_) indicates fake shreds
@@ -121,10 +121,10 @@ impl BroadcastRun for BroadcastFakeShredsRun {
     fn record(
         &self,
         receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
     ) -> Result<()> {
         for data_shreds in receiver.lock().unwrap().iter() {
-            blocktree.insert_shreds(data_shreds.to_vec(), None, true)?;
+            blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
         }
         Ok(())
     }
@@ -21,10 +21,10 @@ impl FailEntryVerificationBroadcastRun {
 impl BroadcastRun for FailEntryVerificationBroadcastRun {
     fn run(
         &mut self,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         receiver: &Receiver<WorkingBankEntry>,
         socket_sender: &Sender<TransmitShreds>,
-        blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+        blockstore_sender: &Sender<Arc<Vec<Shred>>>,
     ) -> Result<()> {
         // 1) Pull entries from banking stage
         let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@@ -38,7 +38,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
             last_entry.hash = Hash::default();
         }
 
-        let next_shred_index = blocktree
+        let next_shred_index = blockstore
             .meta(bank.slot())
             .expect("Database error")
             .map(|meta| meta.consumed)
@@ -61,7 +61,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         );
 
         let data_shreds = Arc::new(data_shreds);
-        blocktree_sender.send(data_shreds.clone())?;
+        blockstore_sender.send(data_shreds.clone())?;
         // 3) Start broadcast step
         let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
         let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
@@ -90,12 +90,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
     fn record(
         &self,
         receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
     ) -> Result<()> {
         let all_shreds = receiver.lock().unwrap().recv()?;
-        blocktree
+        blockstore
             .insert_shreds(all_shreds.to_vec(), None, true)
-            .expect("Failed to insert shreds in blocktree");
+            .expect("Failed to insert shreds in blockstore");
         Ok(())
     }
 }
@@ -83,13 +83,13 @@ impl StandardBroadcastRun {

        last_unfinished_slot_shred
    }
-   fn init_shredder(&self, blocktree: &Blocktree, reference_tick: u8) -> (Shredder, u32) {
+   fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) {
        let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
        let next_shred_index = self
            .unfinished_slot
            .map(|s| s.next_shred_index)
            .unwrap_or_else(|| {
-               blocktree
+               blockstore
                    .meta(slot)
                    .expect("Database error")
                    .map(|meta| meta.consumed)
@@ -132,27 +132,27 @@ impl StandardBroadcastRun {
        &mut self,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        sock: &UdpSocket,
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        receive_results: ReceiveResults,
    ) -> Result<()> {
        let (bsend, brecv) = channel();
        let (ssend, srecv) = channel();
-       self.process_receive_results(&blocktree, &ssend, &bsend, receive_results)?;
+       self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
        let srecv = Arc::new(Mutex::new(srecv));
        let brecv = Arc::new(Mutex::new(brecv));
        //data
        let _ = self.transmit(&srecv, cluster_info, sock);
        //coding
        let _ = self.transmit(&srecv, cluster_info, sock);
-       let _ = self.record(&brecv, blocktree);
+       let _ = self.record(&brecv, blockstore);
        Ok(())
    }

    fn process_receive_results(
        &mut self,
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        socket_sender: &Sender<TransmitShreds>,
-       blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+       blockstore_sender: &Sender<Arc<Vec<Shred>>>,
        receive_results: ReceiveResults,
    ) -> Result<()> {
        let mut receive_elapsed = receive_results.time_elapsed;
@@ -181,7 +181,7 @@ impl StandardBroadcastRun {
        // 2) Convert entries to shreds and coding shreds

        let (shredder, next_shred_index) = self.init_shredder(
-           blocktree,
+           blockstore,
            (bank.tick_height() % bank.ticks_per_slot()) as u8,
        );
        let mut data_shreds = self.entries_to_data_shreds(
@@ -190,13 +190,13 @@ impl StandardBroadcastRun {
            &receive_results.entries,
            last_tick_height == bank.max_tick_height(),
        );
-       //Insert the first shred so blocktree stores that the leader started this block
+       //Insert the first shred so blockstore stores that the leader started this block
        //This must be done before the blocks are sent out over the wire.
        if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
            let first = vec![data_shreds[0].clone()];
-           blocktree
+           blockstore
                .insert_shreds(first, None, true)
-               .expect("Failed to insert shreds in blocktree");
+               .expect("Failed to insert shreds in blockstore");
        }
        let last_data_shred = data_shreds.len();
        if let Some(last_shred) = last_unfinished_slot_shred {
@@ -209,7 +209,7 @@ impl StandardBroadcastRun {
        let stakes = stakes.map(Arc::new);
        let data_shreds = Arc::new(data_shreds);
        socket_sender.send((stakes.clone(), data_shreds.clone()))?;
-       blocktree_sender.send(data_shreds.clone())?;
+       blockstore_sender.send(data_shreds.clone())?;
        let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
        let coding_shreds = Arc::new(coding_shreds);
        socket_sender.send((stakes, coding_shreds))?;
@@ -227,8 +227,8 @@ impl StandardBroadcastRun {
        Ok(())
    }

-   fn insert(&self, blocktree: &Arc<Blocktree>, shreds: Arc<Vec<Shred>>) -> Result<()> {
-       // Insert shreds into blocktree
+   fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
+       // Insert shreds into blockstore
        let insert_shreds_start = Instant::now();
        //The first shred is inserted synchronously
        let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
@@ -236,9 +236,9 @@ impl StandardBroadcastRun {
        } else {
            shreds.to_vec()
        };
-       blocktree
+       blockstore
            .insert_shreds(data_shreds, None, true)
-           .expect("Failed to insert shreds in blocktree");
+           .expect("Failed to insert shreds in blockstore");
        let insert_shreds_elapsed = insert_shreds_start.elapsed();
        self.update_broadcast_stats(BroadcastStats {
            insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
@@ -317,13 +317,18 @@ impl StandardBroadcastRun {
impl BroadcastRun for StandardBroadcastRun {
    fn run(
        &mut self,
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        receiver: &Receiver<WorkingBankEntry>,
        socket_sender: &Sender<TransmitShreds>,
-       blocktree_sender: &Sender<Arc<Vec<Shred>>>,
+       blockstore_sender: &Sender<Arc<Vec<Shred>>>,
    ) -> Result<()> {
        let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
-       self.process_receive_results(blocktree, socket_sender, blocktree_sender, receive_results)
+       self.process_receive_results(
+           blockstore,
+           socket_sender,
+           blockstore_sender,
+           receive_results,
+       )
    }
    fn transmit(
        &self,
@@ -337,10 +342,10 @@ impl BroadcastRun for StandardBroadcastRun {
    fn record(
        &self,
        receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
    ) -> Result<()> {
        let shreds = receiver.lock().unwrap().recv()?;
-       self.insert(blocktree, shreds)
+       self.insert(blockstore, shreds)
    }
}

@@ -350,7 +355,7 @@ mod test {
    use crate::cluster_info::{ClusterInfo, Node};
    use crate::genesis_utils::create_genesis_config;
    use solana_ledger::{
-       blocktree::Blocktree, entry::create_ticks, get_tmp_ledger_path,
+       blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
        shred::max_ticks_per_n_shreds,
    };
    use solana_runtime::bank::Bank;
@@ -365,7 +370,7 @@ mod test {
    fn setup(
        num_shreds_per_slot: Slot,
    ) -> (
-       Arc<Blocktree>,
+       Arc<Blockstore>,
        GenesisConfig,
        Arc<RwLock<ClusterInfo>>,
        Arc<Bank>,
@@ -374,8 +379,8 @@ mod test {
    ) {
        // Setup
        let ledger_path = get_tmp_ledger_path!();
-       let blocktree = Arc::new(
-           Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+       let blockstore = Arc::new(
+           Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let leader_keypair = Arc::new(Keypair::new());
        let leader_pubkey = leader_keypair.pubkey();
@@ -388,7 +393,7 @@ mod test {
        genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
        let bank0 = Arc::new(Bank::new(&genesis_config));
        (
-           blocktree,
+           blockstore,
            genesis_config,
            cluster_info,
            bank0,
@@ -433,7 +438,7 @@ mod test {
    fn test_slot_interrupt() {
        // Setup
        let num_shreds_per_slot = 2;
-       let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
+       let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
            setup(num_shreds_per_slot);

        // Insert 1 less than the number of ticks needed to finish the slot
@@ -448,14 +453,14 @@ mod test {
        // Step 1: Make an incomplete transmission for slot 0
        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0);
        standard_broadcast_run
-           .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
+           .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
            .unwrap();
        let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
        assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot);
        assert_eq!(unfinished_slot.slot, 0);
        assert_eq!(unfinished_slot.parent, 0);
        // Make sure the slot is not complete
-       assert!(!blocktree.is_full(0));
+       assert!(!blockstore.is_full(0));
        // Modify the stats, should reset later
        standard_broadcast_run
            .stats
@@ -463,10 +468,10 @@ mod test {
            .unwrap()
            .receive_elapsed = 10;

-       // Try to fetch ticks from blocktree, nothing should break
-       assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
+       // Try to fetch ticks from blockstore, nothing should break
+       assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
        assert_eq!(
-           blocktree
+           blockstore
                .get_slot_entries(0, num_shreds_per_slot, None)
                .unwrap(),
            vec![],
@@ -487,7 +492,7 @@ mod test {
            last_tick_height: (ticks1.len() - 1) as u64,
        };
        standard_broadcast_run
-           .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
+           .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
            .unwrap();
        let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();

@@ -503,10 +508,10 @@ mod test {
            0
        );

-       // Try to fetch the incomplete ticks from blocktree, should succeed
-       assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
+       // Try to fetch the incomplete ticks from blockstore, should succeed
+       assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
        assert_eq!(
-           blocktree
+           blockstore
                .get_slot_entries(0, num_shreds_per_slot, None)
                .unwrap(),
            vec![],
@@ -517,7 +522,7 @@ mod test {
    fn test_slot_finish() {
        // Setup
        let num_shreds_per_slot = 2;
-       let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
+       let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
            setup(num_shreds_per_slot);

        // Insert complete slot of ticks needed to finish the slot
@@ -531,7 +536,7 @@ mod test {

        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
        standard_broadcast_run
-           .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
+           .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
            .unwrap();
        assert!(standard_broadcast_run.unfinished_slot.is_none())
    }
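The StandardBroadcastRun hunks above preserve an ordering invariant the in-code comment calls out: the slot's first shred is written to the local store before any shred leaves over the wire, so the node has durable evidence that it began leading this slot. A runnable sketch of that invariant with stand-in types (not the real solana_ledger API):

    // Minimal model of "persist the first shred before transmitting".
    #[derive(Clone, Debug)]
    struct Shred {
        index: u32,
    }

    struct Store {
        persisted: Vec<Shred>,
    }

    impl Store {
        fn insert(&mut self, shreds: Vec<Shred>) {
            self.persisted.extend(shreds);
        }
    }

    fn broadcast(store: &mut Store, data_shreds: &[Shred], wire: &mut Vec<Shred>) {
        // 1) Record the start of the block locally first...
        if let Some(first) = data_shreds.first() {
            if first.index == 0 {
                store.insert(vec![first.clone()]);
            }
        }
        // 2) ...only then transmit, so a crash mid-broadcast still leaves
        // proof that this leader started the slot.
        wire.extend_from_slice(data_shreds);
    }

    fn main() {
        let mut store = Store { persisted: vec![] };
        let mut wire = vec![];
        broadcast(&mut store, &[Shred { index: 0 }, Shred { index: 1 }], &mut wire);
        assert_eq!(store.persisted.len(), 1);
        assert_eq!(wire.len(), 2);
    }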
@@ -1,4 +1,4 @@
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;
use std::fs::File;
use std::io;
@@ -12,7 +12,7 @@ pub const CHACHA_BLOCK_SIZE: usize = 64;
pub const CHACHA_KEY_SIZE: usize = 32;

pub fn chacha_cbc_encrypt_ledger(
-   blocktree: &Arc<Blocktree>,
+   blockstore: &Arc<Blockstore>,
    start_slot: Slot,
    slots_per_segment: u64,
    out_path: &Path,
@@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
    let mut current_slot = start_slot;
    let mut start_index = 0;
    loop {
-       match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
+       match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
            Ok((last_index, mut size)) => {
                debug!(
                    "chacha: encrypting slice: {} num_shreds: {} data_len: {}",
@@ -75,7 +75,7 @@ pub fn chacha_cbc_encrypt_ledger(
mod tests {
    use crate::chacha::chacha_cbc_encrypt_ledger;
    use crate::gen_keys::GenKeys;
-   use solana_ledger::blocktree::Blocktree;
+   use solana_ledger::blockstore::Blockstore;
    use solana_ledger::entry::Entry;
    use solana_ledger::get_tmp_ledger_path;
    use solana_sdk::hash::{hash, Hash, Hasher};
@@ -131,7 +131,7 @@ mod tests {
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 16;
        let slots_per_segment = 32;
-       let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+       let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let out_path = tmp_file_path("test_encrypt_ledger");

        let seed = [2u8; 32];
@@ -139,7 +139,7 @@ mod tests {
        let keypair = rnd.gen_keypair();

        let entries = make_tiny_deterministic_test_entries(slots_per_segment);
-       blocktree
+       blockstore
            .write_entries(
                0,
                0,
@@ -157,8 +157,14 @@ mod tests {
            "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
            abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
        );
-       chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, &out_path, &mut key)
-           .unwrap();
+       chacha_cbc_encrypt_ledger(
+           &blockstore,
+           0,
+           slots_per_segment as u64,
+           &out_path,
+           &mut key,
+       )
+       .unwrap();
        let mut out_file = File::open(&out_path).unwrap();
        let mut buf = vec![];
        let size = out_file.read_to_end(&mut buf).unwrap();
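chacha_cbc_encrypt_ledger, as shown above, walks slots one at a time, pulling each slot's data shreds into a fixed buffer before encrypting. A stripped-down sketch of that slot-walking loop; the store is a stand-in for Blockstore, the CBC step is elided, and the simplified loop condition is an assumption (the real code drives termination off get_data_shreds results):

    struct Store;

    impl Store {
        /// Stand-in for Blockstore::get_data_shreds: fills `buf`, returns
        /// (last_shred_index, bytes_written).
        fn get_data_shreds(&self, slot: u64, _start_index: u64, buf: &mut [u8]) -> (u64, usize) {
            buf[..8].copy_from_slice(&slot.to_le_bytes());
            (0, 8)
        }
    }

    fn encrypt_segment(store: &Store, start_slot: u64, slots_per_segment: u64) -> usize {
        let mut buffer = [0u8; 64];
        let mut total = 0;
        for slot in start_slot..start_slot + slots_per_segment {
            let (_last_index, size) = store.get_data_shreds(slot, 0, &mut buffer);
            // Real code would CBC-encrypt buffer[..size] and append it to out_path.
            total += size;
        }
        total
    }

    fn main() {
        assert_eq!(encrypt_segment(&Store, 0, 32), 32 * 8);
    }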
@@ -1,7 +1,7 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU

use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
@@ -13,7 +13,7 @@ use std::sync::Arc;
// Then sample each block at the offsets provided by samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
-   blocktree: &Arc<Blocktree>,
+   blockstore: &Arc<Blockstore>,
    segment: u64,
    slots_per_segment: u64,
    ivecs: &mut [u8],
@@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
        (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
    }
    loop {
-       match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
+       match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
            Ok((last_index, mut size)) => {
                debug!(
                    "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
@@ -134,9 +134,9 @@ mod tests {
        let entries = create_ticks(slots_per_segment, 0, Hash::default());
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 16;
-       let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+       let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

-       blocktree
+       blockstore
            .write_entries(
                0,
                0,
@@ -160,7 +160,7 @@ mod tests {

        let mut cpu_iv = ivecs.clone();
        chacha_cbc_encrypt_ledger(
-           &blocktree,
+           &blockstore,
            0,
            slots_per_segment as u64,
            out_path,
@@ -171,7 +171,7 @@ mod tests {
        let ref_hash = sample_file(&out_path, &samples).unwrap();

        let hashes = chacha_cbc_encrypt_file_many_keys(
-           &blocktree,
+           &blockstore,
            0,
            slots_per_segment as u64,
            &mut ivecs,
@@ -196,8 +196,8 @@ mod tests {
        let ledger_path = get_tmp_ledger_path!();
        let ticks_per_slot = 90;
        let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
-       let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
-       blocktree
+       let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
+       blockstore
            .write_entries(
                0,
                0,
@@ -224,7 +224,7 @@ mod tests {
            ivec[0] = i;
            ivecs.extend(ivec.clone().iter());
            chacha_cbc_encrypt_ledger(
-               &blocktree.clone(),
+               &blockstore.clone(),
                0,
                DEFAULT_SLOTS_PER_SEGMENT,
                out_path,
@@ -242,7 +242,7 @@ mod tests {
        }

        let hashes = chacha_cbc_encrypt_file_many_keys(
-           &blocktree,
+           &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut ivecs,
@@ -267,9 +267,9 @@ mod tests {
        let mut keys = hex!("abc123");
        let ledger_path = get_tmp_ledger_path!();
        let samples = [0];
-       let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+       let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        assert!(chacha_cbc_encrypt_file_many_keys(
-           &blocktree,
+           &blockstore,
            0,
            DEFAULT_SLOTS_PER_SEGMENT,
            &mut keys,
@@ -24,14 +24,14 @@ use crate::{
    repair_service::RepairType,
    result::{Error, Result},
    sendmmsg::{multicast, send_mmsg},
-   thread_mem_usage,
    weighted_shuffle::{weighted_best, weighted_shuffle},
};
use bincode::{serialize, serialized_size};
use core::cmp;
use itertools::Itertools;
use rand::{thread_rng, Rng};
-use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils};
+use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
+use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
    bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
@@ -67,11 +67,11 @@ pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
/// The maximum size of a bloom filter
-pub const MAX_BLOOM_SIZE: usize = 1030;
+pub const MAX_BLOOM_SIZE: usize = 1028;
/// The maximum size of a protocol payload
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
/// The largest protocol header size
-const MAX_PROTOCOL_HEADER_SIZE: u64 = 202;
+const MAX_PROTOCOL_HEADER_SIZE: u64 = 204;

#[derive(Debug, PartialEq, Eq)]
pub enum ClusterInfoError {
@@ -272,7 +272,7 @@ impl ClusterInfo {

                let ip_addr = node.gossip.ip();
                format!(
-                   "{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}\n",
+                   "{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| v{}\n",
                    if ContactInfo::is_valid_address(&node.gossip) {
                        ip_addr.to_string()
                    } else {
@@ -290,15 +290,16 @@ impl ClusterInfo {
                    addr_to_string(&ip_addr, &node.storage_addr),
                    addr_to_string(&ip_addr, &node.rpc),
                    addr_to_string(&ip_addr, &node.rpc_pubsub),
+                   node.shred_version,
                )
            })
            .collect();

        format!(
            "IP Address |Age(ms)| Node identifier \
-            |Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub\n\
+            |Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub|ShredVer\n\
            ------------------+-------+----------------------------------------------+\
-            ------+------+-------+------+-------+------+-------+------+------\n\
+            ------+------+-------+------+-------+------+-------+------+------+--------\n\
            {}\
            Nodes: {}{}{}",
            nodes.join(""),
@@ -405,13 +406,13 @@ impl ClusterInfo {
    }

    pub fn rpc_peers(&self) -> Vec<ContactInfo> {
-       let me = self.my_data().id;
+       let me = self.my_data();
        self.gossip
            .crds
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
-           .filter(|x| x.id != me)
+           .filter(|x| x.id != me.id)
            .filter(|x| ContactInfo::is_valid_address(&x.rpc))
            .cloned()
            .collect()
@@ -446,7 +447,7 @@ impl ClusterInfo {

    /// all validators that have a valid tvu port.
    pub fn tvu_peers(&self) -> Vec<ContactInfo> {
-       let me = self.my_data().id;
+       let me = self.my_data();
        self.gossip
            .crds
            .table
@@ -454,34 +455,34 @@ impl ClusterInfo {
            .filter_map(|x| x.value.contact_info())
            .filter(|x| ContactInfo::is_valid_address(&x.tvu))
            .filter(|x| !ClusterInfo::is_archiver(x))
-           .filter(|x| x.id != me)
+           .filter(|x| x.id != me.id)
            .cloned()
            .collect()
    }

    /// all peers that have a valid storage addr
    pub fn storage_peers(&self) -> Vec<ContactInfo> {
-       let me = self.my_data().id;
+       let me = self.my_data();
        self.gossip
            .crds
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
            .filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
-           .filter(|x| x.id != me)
+           .filter(|x| x.id != me.id)
            .cloned()
            .collect()
    }

    /// all peers that have a valid tvu
    pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
-       let me = self.my_data().id;
+       let me = self.my_data();
        self.gossip
            .crds
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
-           .filter(|x| x.id != me)
+           .filter(|x| x.id != me.id)
            .filter(|x| ContactInfo::is_valid_address(&x.tvu))
            .filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards))
            .cloned()
@@ -490,10 +491,10 @@ impl ClusterInfo {

    /// all tvu peers with valid gossip addrs that likely have the slot being requested
    fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
-       let me = self.my_data().id;
+       let me = self.my_data();
        ClusterInfo::tvu_peers(self)
            .into_iter()
-           .filter(|x| x.id != me)
+           .filter(|x| x.id != me.id)
            .filter(|x| ContactInfo::is_valid_address(&x.gossip))
            .filter(|x| {
                self.get_epoch_state_for_node(&x.id, None)
@@ -1113,12 +1114,12 @@ impl ClusterInfo {
    }

    fn get_data_shred_as_packet(
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        slot: Slot,
        shred_index: u64,
        dest: &SocketAddr,
    ) -> Result<Option<Packet>> {
-       let data = blocktree.get_data_shred(slot, shred_index)?;
+       let data = blockstore.get_data_shred(slot, shred_index)?;
        Ok(data.map(|data| {
            let mut packet = Packet::default();
            packet.meta.size = data.len();
@@ -1132,14 +1133,14 @@ impl ClusterInfo {
        recycler: &PacketsRecycler,
        from: &ContactInfo,
        from_addr: &SocketAddr,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        me: &ContactInfo,
        slot: Slot,
        shred_index: u64,
    ) -> Option<Packets> {
-       if let Some(blocktree) = blocktree {
+       if let Some(blockstore) = blockstore {
            // Try to find the requested index in one of the slots
-           let packet = Self::get_data_shred_as_packet(blocktree, slot, shred_index, from_addr);
+           let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);

            if let Ok(Some(packet)) = packet {
                inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
@@ -1166,17 +1167,17 @@ impl ClusterInfo {
    fn run_highest_window_request(
        recycler: &PacketsRecycler,
        from_addr: &SocketAddr,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        slot: Slot,
        highest_index: u64,
    ) -> Option<Packets> {
-       let blocktree = blocktree?;
+       let blockstore = blockstore?;
        // Try to find the requested index in one of the slots
-       let meta = blocktree.meta(slot).ok()??;
+       let meta = blockstore.meta(slot).ok()??;
        if meta.received > highest_index {
            // meta.received must be at least 1 by this point
            let packet =
-               Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr)
+               Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
                    .ok()??;
            return Some(Packets::new_with_recycler_data(
                recycler,
@@ -1190,19 +1191,19 @@ impl ClusterInfo {
    fn run_orphan(
        recycler: &PacketsRecycler,
        from_addr: &SocketAddr,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        mut slot: Slot,
        max_responses: usize,
    ) -> Option<Packets> {
        let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
-       if let Some(blocktree) = blocktree {
+       if let Some(blockstore) = blockstore {
            // Try to find the next "n" parent slots of the input slot
-           while let Ok(Some(meta)) = blocktree.meta(slot) {
+           while let Ok(Some(meta)) = blockstore.meta(slot) {
                if meta.received == 0 {
                    break;
                }
                let packet =
-                   Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr);
+                   Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
                if let Ok(Some(packet)) = packet {
                    res.packets.push(packet);
                }
@@ -1222,7 +1223,7 @@ impl ClusterInfo {
    fn handle_packets(
        me: &Arc<RwLock<Self>>,
        recycler: &PacketsRecycler,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        stakes: &HashMap<Pubkey, u64>,
        packets: Packets,
        response_sender: &PacketSender,
@@ -1330,7 +1331,8 @@ impl ClusterInfo {
                    );
                }
                _ => {
-                   let rsp = Self::handle_repair(me, recycler, &from_addr, blocktree, request);
+                   let rsp =
+                       Self::handle_repair(me, recycler, &from_addr, blockstore, request);
                    if let Some(rsp) = rsp {
                        let _ignore_disconnect = response_sender.send(rsp);
                    }
@@ -1475,7 +1477,7 @@ impl ClusterInfo {
        me: &Arc<RwLock<Self>>,
        recycler: &PacketsRecycler,
        from_addr: &SocketAddr,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        request: Protocol,
    ) -> Option<Packets> {
        let now = Instant::now();
@@ -1511,7 +1513,7 @@ impl ClusterInfo {
                        recycler,
                        from,
                        &from_addr,
-                       blocktree,
+                       blockstore,
                        &my_info,
                        *slot,
                        *shred_index,
@@ -1526,7 +1528,7 @@ impl ClusterInfo {
                    Self::run_highest_window_request(
                        recycler,
                        &from_addr,
-                       blocktree,
+                       blockstore,
                        *slot,
                        *highest_index,
                    ),
@@ -1539,7 +1541,7 @@ impl ClusterInfo {
                    Self::run_orphan(
                        recycler,
                        &from_addr,
-                       blocktree,
+                       blockstore,
                        *slot,
                        MAX_ORPHAN_REPAIR_RESPONSES,
                    ),
@@ -1559,7 +1561,7 @@ impl ClusterInfo {
    fn run_listen(
        obj: &Arc<RwLock<Self>>,
        recycler: &PacketsRecycler,
-       blocktree: Option<&Arc<Blocktree>>,
+       blockstore: Option<&Arc<Blockstore>>,
        bank_forks: Option<&Arc<RwLock<BankForks>>>,
        requests_receiver: &PacketReceiver,
        response_sender: &PacketSender,
@@ -1574,12 +1576,12 @@ impl ClusterInfo {
            None => HashMap::new(),
        };

-       Self::handle_packets(obj, &recycler, blocktree, &stakes, reqs, response_sender);
+       Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
        Ok(())
    }
    pub fn listen(
        me: Arc<RwLock<Self>>,
-       blocktree: Option<Arc<Blocktree>>,
+       blockstore: Option<Arc<Blockstore>>,
        bank_forks: Option<Arc<RwLock<BankForks>>>,
        requests_receiver: PacketReceiver,
        response_sender: PacketSender,
@@ -1593,7 +1595,7 @@ impl ClusterInfo {
                let e = Self::run_listen(
                    &me,
                    &recycler,
-                   blocktree.as_ref(),
+                   blockstore.as_ref(),
                    bank_forks.as_ref(),
                    &requests_receiver,
                    &response_sender,
@@ -1916,9 +1918,9 @@ mod tests {
    use crate::repair_service::RepairType;
    use crate::result::Error;
    use rayon::prelude::*;
-   use solana_ledger::blocktree::make_many_slot_entries;
-   use solana_ledger::blocktree::Blocktree;
-   use solana_ledger::blocktree_processor::fill_blocktree_slot_with_ticks;
+   use solana_ledger::blockstore::make_many_slot_entries;
+   use solana_ledger::blockstore::Blockstore;
+   use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
    use solana_ledger::get_tmp_ledger_path;
    use solana_ledger::shred::{
        max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
@@ -2062,7 +2064,7 @@ mod tests {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path!();
        {
-           let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+           let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            let me = ContactInfo::new(
                &Pubkey::new_rand(),
                socketaddr!("127.0.0.1:1234"),
@@ -2080,7 +2082,7 @@ mod tests {
                &recycler,
                &me,
                &socketaddr_any!(),
-               Some(&blocktree),
+               Some(&blockstore),
                &me,
                0,
                0,
@@ -2097,7 +2099,7 @@ mod tests {
                CodingShredHeader::default(),
            );

-           blocktree
+           blockstore
                .insert_shreds(vec![shred_info], None, false)
                .expect("Expect successful ledger write");

@@ -2105,7 +2107,7 @@ mod tests {
                &recycler,
                &me,
                &socketaddr_any!(),
-               Some(&blocktree),
+               Some(&blockstore),
                &me,
                2,
                1,
@@ -2121,7 +2123,7 @@ mod tests {
            assert_eq!(rv[0].slot(), 2);
        }

-       Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+       Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    /// test run_window_request: window requests respond with the right shred, and do not overrun
@@ -2131,18 +2133,18 @@ mod tests {
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path!();
        {
-           let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+           let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            let rv = ClusterInfo::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
-               Some(&blocktree),
+               Some(&blockstore),
                0,
                0,
            );
            assert!(rv.is_none());

-           let _ = fill_blocktree_slot_with_ticks(
-               &blocktree,
+           let _ = fill_blockstore_slot_with_ticks(
+               &blockstore,
                max_ticks_per_n_shreds(1) + 1,
                2,
                1,
@@ -2152,7 +2154,7 @@ mod tests {
            let rv = ClusterInfo::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
-               Some(&blocktree),
+               Some(&blockstore),
                2,
                1,
            );
@@ -2163,21 +2165,21 @@ mod tests {
                .filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
                .collect();
            assert!(!rv.is_empty());
-           let index = blocktree.meta(2).unwrap().unwrap().received - 1;
+           let index = blockstore.meta(2).unwrap().unwrap().received - 1;
            assert_eq!(rv[0].index(), index as u32);
            assert_eq!(rv[0].slot(), 2);

            let rv = ClusterInfo::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
-               Some(&blocktree),
+               Some(&blockstore),
                2,
                index + 1,
            );
            assert!(rv.is_none());
        }

-       Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+       Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
@@ -2186,25 +2188,27 @@ mod tests {
        let recycler = PacketsRecycler::default();
        let ledger_path = get_tmp_ledger_path!();
        {
-           let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
-           let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 2, 0);
+           let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
+           let rv =
+               ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
            assert!(rv.is_none());

            // Create slots 1, 2, 3 with 5 shreds apiece
            let (shreds, _) = make_many_slot_entries(1, 3, 5);

-           blocktree
+           blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expect successful ledger write");

            // We don't have slot 4, so we don't know how to service this request
-           let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 4, 5);
+           let rv =
+               ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
            assert!(rv.is_none());

            // For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
            // for this request
            let rv: Vec<_> =
-               ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 3, 5)
+               ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
                    .expect("run_orphan packets")
                    .packets
                    .iter()
@@ -2213,9 +2217,9 @@ mod tests {
            let expected: Vec<_> = (1..=3)
                .rev()
                .map(|slot| {
-                   let index = blocktree.meta(slot).unwrap().unwrap().received - 1;
+                   let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
                    ClusterInfo::get_data_shred_as_packet(
-                       &blocktree,
+                       &blockstore,
                        slot,
                        index,
                        &socketaddr_any!(),
@@ -2227,7 +2231,7 @@ mod tests {
            assert_eq!(rv, expected)
        }

-       Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+       Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    fn assert_in_range(x: u16, range: (u16, u16)) {
@@ -2572,7 +2576,7 @@ mod tests {

    #[test]
    fn test_split_messages_packet_size() {
-       // Test that if a value is smaller than payload size but too large to be wrappe in a vec
+       // Test that if a value is smaller than payload size but too large to be wrapped in a vec
        // that it is still dropped
        let payload: Vec<CrdsValue> = vec![];
        let vec_size = serialized_size(&payload).unwrap();
@@ -2586,7 +2590,7 @@ mod tests {
        }));

        let mut i = 0;
-       while value.size() < desired_size {
+       while value.size() <= desired_size {
            let slots = (0..i).collect::<BTreeSet<_>>();
            if slots.len() > 200 {
                panic!(
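The two constant bumps in cluster_info.rs above move in lockstep with the new ContactInfo field further down: a u16 shred_version adds two bytes to the serialized protocol header (202 to 204), so the bloom-filter budget gives up the same two bytes (1030 to 1028) to keep a full message inside one packet. The arithmetic, assuming the 1232-byte PACKET_DATA_SIZE used elsewhere in this codebase:

    // Worked arithmetic behind the constant changes; PACKET_DATA_SIZE = 1232
    // is an assumption taken from the rest of the codebase, not this diff.
    const PACKET_DATA_SIZE: u64 = 1232;
    const MAX_PROTOCOL_HEADER_SIZE: u64 = 204; // was 202; +2 for shred_version: u16
    const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE - MAX_PROTOCOL_HEADER_SIZE;
    const MAX_BLOOM_SIZE: usize = 1028; // was 1030; -2 so the filter still fits

    fn main() {
        assert_eq!(MAX_PROTOCOL_PAYLOAD_SIZE, 1028);
        assert_eq!(MAX_BLOOM_SIZE as u64, MAX_PROTOCOL_PAYLOAD_SIZE);
    }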
@@ -5,7 +5,7 @@ use byteorder::{ByteOrder, LittleEndian};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@@ -89,13 +89,13 @@ pub struct ClusterInfoRepairListener {

impl ClusterInfoRepairListener {
    pub fn new(
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        exit: &Arc<AtomicBool>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        epoch_schedule: EpochSchedule,
    ) -> Self {
        let exit = exit.clone();
-       let blocktree = blocktree.clone();
+       let blockstore = blockstore.clone();
        let thread = Builder::new()
            .name("solana-cluster_info_repair_listener".to_string())
            .spawn(move || {
@@ -105,7 +105,7 @@ impl ClusterInfoRepairListener {
                // 2) The latest root the peer gossiped
                let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new();
                let _ = Self::recv_loop(
-                   &blocktree,
+                   &blockstore,
                    &mut peer_infos,
                    &exit,
                    &cluster_info,
@@ -119,7 +119,7 @@ impl ClusterInfoRepairListener {
    }

    fn recv_loop(
-       blocktree: &Blocktree,
+       blockstore: &Blockstore,
        peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
        exit: &Arc<AtomicBool>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
@@ -134,7 +134,7 @@ impl ClusterInfoRepairListener {
                return Ok(());
            }

-           let lowest_slot = blocktree.lowest_slot();
+           let lowest_slot = blockstore.lowest_slot();
            let peers = cluster_info.read().unwrap().gossip_peers();
            let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new();

@@ -156,7 +156,7 @@ impl ClusterInfoRepairListener {
            // After updating all the peers, send out repairs to those that need it
            let _ = Self::serve_repairs(
                &my_pubkey,
-               blocktree,
+               blockstore,
                peer_infos,
                &peers_needing_repairs,
                &socket,
@@ -219,7 +219,7 @@ impl ClusterInfoRepairListener {

    fn serve_repairs(
        my_pubkey: &Pubkey,
-       blocktree: &Blocktree,
+       blockstore: &Blockstore,
        peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
        repairees: &HashMap<Pubkey, EpochSlots>,
        socket: &UdpSocket,
@@ -258,7 +258,7 @@ impl ClusterInfoRepairListener {
                    my_pubkey,
                    repairee_pubkey,
                    my_root,
-                   blocktree,
+                   blockstore,
                    &repairee_epoch_slots,
                    &eligible_repairmen,
                    socket,
@@ -286,7 +286,7 @@ impl ClusterInfoRepairListener {
        my_pubkey: &Pubkey,
        repairee_pubkey: &Pubkey,
        my_root: Slot,
-       blocktree: &Blocktree,
+       blockstore: &Blockstore,
        repairee_epoch_slots: &EpochSlots,
        eligible_repairmen: &[&Pubkey],
        socket: &UdpSocket,
@@ -295,7 +295,7 @@ impl ClusterInfoRepairListener {
        epoch_schedule: &EpochSchedule,
        last_repaired_slot_and_ts: (u64, u64),
    ) -> Result<Option<Slot>> {
-       let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree);
+       let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blockstore);
        if slot_iter.is_err() {
            info!(
                "Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
@@ -325,9 +325,14 @@ impl ClusterInfoRepairListener {
            if slot > my_root
                || num_slots_repaired >= num_slots_to_repair
                || slot > max_confirmed_repairee_slot
+               // Don't repair if the next rooted slot jumps, because that means
+               // we started from a snapshot and don't have the immediate next
+               // slot that the repairee needs
+               || slot_meta.is_none()
            {
                break;
            }
+           let slot_meta = slot_meta.unwrap();
            if !repairee_epoch_slots.slots.contains(&slot) {
                // Calculate the shred indexes this node is responsible for repairing. Note that
                // because we are only repairing slots that are before our root, the slot.received
@@ -338,7 +343,7 @@ impl ClusterInfoRepairListener {
                // the cluster
                let num_shreds_in_slot = slot_meta.received as usize;

-               // Check if I'm responsible for repairing this slots
+               // Check if I'm responsible for repairing this slot
                if let Some(my_repair_indexes) = Self::calculate_my_repairman_index_for_slot(
                    my_pubkey,
                    &eligible_repairmen,
@@ -361,17 +366,17 @@ impl ClusterInfoRepairListener {
                        // a database iterator over the slots because by the time this node is
                        // sending the shreds in this slot for repair, we expect these slots
                        // to be full.
-                       if let Some(shred_data) = blocktree
+                       if let Some(shred_data) = blockstore
                            .get_data_shred(slot, shred_index as u64)
-                           .expect("Failed to read data shred from blocktree")
+                           .expect("Failed to read data shred from blockstore")
                        {
                            socket.send_to(&shred_data[..], repairee_addr)?;
                            total_data_shreds_sent += 1;
                        }

-                       if let Some(coding_bytes) = blocktree
+                       if let Some(coding_bytes) = blockstore
                            .get_coding_shred(slot, shred_index as u64)
-                           .expect("Failed to read coding shred from blocktree")
+                           .expect("Failed to read coding shred from blockstore")
                        {
                            socket.send_to(&coding_bytes[..], repairee_addr)?;
                            total_coding_shreds_sent += 1;
@@ -545,7 +550,7 @@ mod tests {
    use crate::packet::Packets;
    use crate::streamer;
    use crate::streamer::PacketReceiver;
-   use solana_ledger::blocktree::make_many_slot_entries;
+   use solana_ledger::blockstore::make_many_slot_entries;
    use solana_ledger::get_tmp_ledger_path;
    use solana_perf::recycler::Recycler;
    use std::collections::BTreeSet;
@@ -694,16 +699,16 @@ mod tests {

    #[test]
    fn test_serve_same_repairs_to_repairee() {
-       let blocktree_path = get_tmp_ledger_path!();
-       let blocktree = Blocktree::open(&blocktree_path).unwrap();
+       let blockstore_path = get_tmp_ledger_path!();
+       let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let num_slots = 2;
        let (shreds, _) = make_many_slot_entries(0, num_slots, 1);
-       blocktree.insert_shreds(shreds, None, false).unwrap();
+       blockstore.insert_shreds(shreds, None, false).unwrap();

        // Write roots so that these slots will qualify to be sent by the repairman
        let last_root = num_slots - 1;
        let roots: Vec<_> = (0..=last_root).collect();
-       blocktree.set_roots(&roots).unwrap();
+       blockstore.set_roots(&roots).unwrap();

        // Set up my information
        let my_pubkey = Pubkey::new_rand();
@@ -724,7 +729,7 @@ mod tests {
            &my_pubkey,
            &mock_repairee.id,
            num_slots - 1,
-           &blocktree,
+           &blockstore,
            &repairee_epoch_slots,
            &eligible_repairmen,
            &my_socket,
@@ -744,7 +749,7 @@ mod tests {
            &my_pubkey,
            &mock_repairee.id,
            num_slots - 1,
-           &blocktree,
+           &blockstore,
            &repairee_epoch_slots,
            &eligible_repairmen,
            &my_socket,
@@ -760,20 +765,20 @@ mod tests {

    #[test]
    fn test_serve_repairs_to_repairee() {
-       let blocktree_path = get_tmp_ledger_path!();
-       let blocktree = Blocktree::open(&blocktree_path).unwrap();
+       let blockstore_path = get_tmp_ledger_path!();
+       let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let entries_per_slot = 5;
        let num_slots = 10;
        assert_eq!(num_slots % 2, 0);
        let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
        let num_shreds_per_slot = shreds.len() as u64 / num_slots;

-       // Write slots in the range [0, num_slots] to blocktree
-       blocktree.insert_shreds(shreds, None, false).unwrap();
+       // Write slots in the range [0, num_slots] to blockstore
+       blockstore.insert_shreds(shreds, None, false).unwrap();

        // Write roots so that these slots will qualify to be sent by the repairman
        let roots: Vec<_> = (0..=num_slots - 1).collect();
-       blocktree.set_roots(&roots).unwrap();
+       blockstore.set_roots(&roots).unwrap();

        // Set up my information
        let my_pubkey = Pubkey::new_rand();
@@ -804,7 +809,7 @@ mod tests {
                &repairman_pubkey,
                &mock_repairee.id,
                num_slots - 1,
-               &blocktree,
+               &blockstore,
                &repairee_epoch_slots,
                &eligible_repairmen_refs,
                &my_socket,
@@ -843,26 +848,26 @@ mod tests {

        // Shutdown
        mock_repairee.close().unwrap();
-       drop(blocktree);
-       Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+       drop(blockstore);
+       Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_no_repair_past_confirmed_epoch() {
-       let blocktree_path = get_tmp_ledger_path!();
-       let blocktree = Blocktree::open(&blocktree_path).unwrap();
+       let blockstore_path = get_tmp_ledger_path!();
+       let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let stakers_slot_offset = 16;
        let slots_per_epoch = stakers_slot_offset * 2;
        let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false);

-       // Create shreds for first two epochs and write them to blocktree
+       // Create shreds for first two epochs and write them to blockstore
        let total_slots = slots_per_epoch * 2;
        let (shreds, _) = make_many_slot_entries(0, total_slots, 1);
-       blocktree.insert_shreds(shreds, None, false).unwrap();
+       blockstore.insert_shreds(shreds, None, false).unwrap();

        // Write roots so that these slots will qualify to be sent by the repairman
        let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
-       blocktree.set_roots(&roots).unwrap();
+       blockstore.set_roots(&roots).unwrap();

        // Set up my information
        let my_pubkey = Pubkey::new_rand();
@@ -891,7 +896,7 @@ mod tests {
            &my_pubkey,
            &mock_repairee.id,
            total_slots - 1,
-           &blocktree,
+           &blockstore,
            &repairee_epoch_slots,
            &vec![&my_pubkey],
            &my_socket,
@@ -914,7 +919,7 @@ mod tests {
            &my_pubkey,
            &mock_repairee.id,
            total_slots - 1,
-           &blocktree,
+           &blockstore,
            &repairee_epoch_slots,
            &vec![&my_pubkey],
            &my_socket,
@@ -931,8 +936,8 @@ mod tests {

        // Shutdown
        mock_repairee.close().unwrap();
-       drop(blocktree);
-       Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+       drop(blockstore);
+       Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }

    #[test]
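The new slot_meta.is_none() guard above stops repair at the first rooted-slot gap, since a gap means this node booted from a snapshot and cannot serve the repairee's immediate next slot. A compact, runnable model of that guard (SlotMeta and the slot list are stand-ins for the real iterator):

    struct SlotMeta {
        received: u64,
    }

    fn serve(slots: &[(u64, Option<SlotMeta>)], my_root: u64) -> u64 {
        let mut sent = 0;
        for (slot, slot_meta) in slots {
            if *slot > my_root || slot_meta.is_none() {
                break; // don't repair across a snapshot gap
            }
            sent += slot_meta.as_ref().unwrap().received;
        }
        sent
    }

    fn main() {
        let slots = vec![
            (1, Some(SlotMeta { received: 5 })),
            (2, None), // gap: this node started from a snapshot
            (3, Some(SlotMeta { received: 5 })),
        ];
        // Stops at the gap; slot 3 is never served even though we have it.
        assert_eq!(serve(&slots, 10), 5);
    }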
@@ -432,7 +432,7 @@ impl Tower {

    fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
        if self.last_timestamp.slot == 0
-           || self.last_timestamp.slot + TIMESTAMP_SLOT_INTERVAL <= current_slot
+           || self.last_timestamp.slot < (current_slot - (current_slot % TIMESTAMP_SLOT_INTERVAL))
        {
            let timestamp = Utc::now().timestamp();
            self.last_timestamp = BlockTimestamp {
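The rewritten condition in maybe_timestamp anchors timestamps to fixed interval boundaries rather than measuring distance from the last timestamp: one is due whenever the previous timestamp predates the start of the current interval window. A sketch of just that predicate, with an illustrative interval value:

    // The interval value here is illustrative, not taken from this diff.
    const TIMESTAMP_SLOT_INTERVAL: u64 = 8;

    fn timestamp_due(last_slot: u64, current_slot: u64) -> bool {
        last_slot == 0
            || last_slot < current_slot - (current_slot % TIMESTAMP_SLOT_INTERVAL)
    }

    fn main() {
        // The window [8, 16) starts at slot 8: a timestamp from slot 7 is
        // stale at slot 9 (the old `last + INTERVAL <= current` test would
        // not have fired here)...
        assert!(timestamp_due(7, 9));
        // ...but one from slot 8, inside the current window, is fresh.
        assert!(!timestamp_due(8, 9));
    }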
@@ -31,6 +31,8 @@ pub struct ContactInfo {
    pub rpc_pubsub: SocketAddr,
    /// latest wallclock picked
    pub wallclock: u64,
+   /// node shred version
+   pub shred_version: u16,
}

impl Ord for ContactInfo {
@@ -84,6 +86,7 @@ impl Default for ContactInfo {
            rpc: socketaddr_any!(),
            rpc_pubsub: socketaddr_any!(),
            wallclock: 0,
+           shred_version: 0,
        }
    }
}
@@ -115,6 +118,7 @@ impl ContactInfo {
            rpc,
            rpc_pubsub,
            wallclock: now,
+           shred_version: 0,
        }
    }
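ContactInfo now carries a shred_version, defaulting to 0 for nodes that have not set one; the gossip table dump above prints it as a new "ShredVer" column. How peers should treat the 0 default is not spelled out in this diff, so the matching policy below is an assumption, shown only to illustrate the field:

    // Stand-in struct carrying only the new field; the policy in
    // same_shred_version is assumed, not taken from this changeset.
    #[derive(Default)]
    struct ContactInfo {
        shred_version: u16, // new in this changeset; 0 means "not set"
    }

    fn same_shred_version(me: &ContactInfo, other: &ContactInfo) -> bool {
        // Treat 0 as "unknown", so only a concrete mismatch disqualifies.
        me.shred_version == 0
            || other.shred_version == 0
            || me.shred_version == other.shred_version
    }

    fn main() {
        let me = ContactInfo { shred_version: 2 };
        assert!(same_shred_version(&me, &ContactInfo::default()));
        assert!(!same_shred_version(&me, &ContactInfo { shred_version: 3 }));
    }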
@@ -5,7 +5,7 @@ use crate::packet::PacketsRecycler;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use crate::streamer::{self, PacketReceiver, PacketSender};
-use crate::thread_mem_usage;
+use solana_measure::thread_mem_usage;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::recycler::Recycler;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
@@ -6,7 +6,7 @@ use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks;
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -24,7 +24,7 @@ pub struct GossipService {
impl GossipService {
    pub fn new(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
-       blocktree: Option<Arc<Blocktree>>,
+       blockstore: Option<Arc<Blockstore>>,
        bank_forks: Option<Arc<RwLock<BankForks>>>,
        gossip_socket: UdpSocket,
        exit: &Arc<AtomicBool>,
@@ -47,7 +47,7 @@ impl GossipService {
        let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
        let t_listen = ClusterInfo::listen(
            cluster_info.clone(),
-           blocktree,
+           blockstore,
            bank_forks.clone(),
            request_receiver,
            response_sender.clone(),
@@ -199,7 +199,6 @@ fn spy(
            .unwrap()
            .tvu_peers()
            .into_iter()
-           .filter(|node| !ClusterInfo::is_archiver(&node))
            .collect::<Vec<_>>();
        archivers = spy_ref.read().unwrap().storage_peers();
        if let Some(num) = num_nodes {
@@ -1,7 +1,7 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage

use crate::result::{Error, Result};
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@@ -28,7 +28,7 @@ pub struct LedgerCleanupService {
impl LedgerCleanupService {
    pub fn new(
        new_root_receiver: Receiver<Slot>,
-       blocktree: Arc<Blocktree>,
+       blockstore: Arc<Blockstore>,
        max_ledger_slots: u64,
        exit: &Arc<AtomicBool>,
    ) -> Self {
@@ -46,7 +46,7 @@ impl LedgerCleanupService {
                }
                if let Err(e) = Self::cleanup_ledger(
                    &new_root_receiver,
-                   &blocktree,
+                   &blockstore,
                    max_ledger_slots,
                    &mut next_purge_batch,
                ) {
@@ -63,20 +63,20 @@ impl LedgerCleanupService {

    fn cleanup_ledger(
        new_root_receiver: &Receiver<Slot>,
-       blocktree: &Arc<Blocktree>,
+       blockstore: &Arc<Blockstore>,
        max_ledger_slots: u64,
        next_purge_batch: &mut u64,
    ) -> Result<()> {
-       let disk_utilization_pre = blocktree.storage_size();
+       let disk_utilization_pre = blockstore.storage_size();

        let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
        if root > *next_purge_batch {
            //cleanup
-           blocktree.purge_slots(0, Some(root - max_ledger_slots));
+           blockstore.purge_slots(0, Some(root - max_ledger_slots));
            *next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
        }

-       let disk_utilization_post = blocktree.storage_size();
+       let disk_utilization_post = blockstore.storage_size();

        if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
            (disk_utilization_pre, disk_utilization_post)
@@ -103,39 +103,39 @@ impl LedgerCleanupService {
#[cfg(test)]
mod tests {
    use super::*;
-   use solana_ledger::blocktree::make_many_slot_entries;
+   use solana_ledger::blockstore::make_many_slot_entries;
    use solana_ledger::get_tmp_ledger_path;
    use std::sync::mpsc::channel;

    #[test]
    fn test_cleanup() {
-       let blocktree_path = get_tmp_ledger_path!();
-       let blocktree = Blocktree::open(&blocktree_path).unwrap();
+       let blockstore_path = get_tmp_ledger_path!();
+       let blockstore = Blockstore::open(&blockstore_path).unwrap();
        let (shreds, _) = make_many_slot_entries(0, 50, 5);
-       blocktree.insert_shreds(shreds, None, false).unwrap();
-       let blocktree = Arc::new(blocktree);
+       blockstore.insert_shreds(shreds, None, false).unwrap();
+       let blockstore = Arc::new(blockstore);
        let (sender, receiver) = channel();

        //send a signal to kill slots 0-40
        let mut next_purge_slot = 0;
        sender.send(50).unwrap();
-       LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10, &mut next_purge_slot)
+       LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
            .unwrap();

        //check that 0-40 don't exist
-       blocktree
+       blockstore
            .slot_meta_iterator(0)
            .unwrap()
            .for_each(|(slot, _)| assert!(slot > 40));

-       drop(blocktree);
-       Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+       drop(blockstore);
+       Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_compaction() {
-       let blocktree_path = get_tmp_ledger_path!();
-       let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
+       let blockstore_path = get_tmp_ledger_path!();
+       let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());

        let n = 10_000;
        let batch_size = 100;
@@ -144,10 +144,10 @@ mod tests {

        for i in 0..batches {
            let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
-           blocktree.insert_shreds(shreds, None, false).unwrap();
+           blockstore.insert_shreds(shreds, None, false).unwrap();
        }

-       let u1 = blocktree.storage_size().unwrap() as f64;
+       let u1 = blockstore.storage_size().unwrap() as f64;

        // send signal to cleanup slots
        let (sender, receiver) = channel();
@@ -155,7 +155,7 @@ mod tests {
        let mut next_purge_batch = 0;
        LedgerCleanupService::cleanup_ledger(
            &receiver,
-           &blocktree,
+           &blockstore,
            max_ledger_slots,
            &mut next_purge_batch,
        )
@@ -163,18 +163,18 @@ mod tests {

        thread::sleep(Duration::from_secs(2));

-       let u2 = blocktree.storage_size().unwrap() as f64;
+       let u2 = blockstore.storage_size().unwrap() as f64;

        assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);

        // check that early slots don't exist
        let max_slot = n - max_ledger_slots;
-       blocktree
+       blockstore
            .slot_meta_iterator(0)
            .unwrap()
            .for_each(|(slot, _)| assert!(slot > max_slot));

-       drop(blocktree);
-       Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+       drop(blockstore);
+       Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }
}
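cleanup_ledger above purges in batches: each root received on the channel triggers a purge of everything older than root - max_ledger_slots, but only once the root crosses the next batch boundary. A sketch of that cadence; the DEFAULT_PURGE_BATCH_SIZE name mirrors the diff, its value here is assumed:

    const DEFAULT_PURGE_BATCH_SIZE: u64 = 1000; // assumed value

    /// Returns the slot range to purge, if this root crosses a batch boundary.
    fn maybe_purge(root: u64, max_ledger_slots: u64, next_purge_batch: &mut u64) -> Option<(u64, u64)> {
        if root > *next_purge_batch {
            *next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
            Some((0, root - max_ledger_slots))
        } else {
            None
        }
    }

    fn main() {
        let mut next = 0;
        // First root past the boundary fires a purge of slots [0, 40]...
        assert_eq!(maybe_purge(50, 10, &mut next), Some((0, 40)));
        assert_eq!(next, DEFAULT_PURGE_BATCH_SIZE);
        // ...and later roots stay quiet until the next boundary is crossed.
        assert_eq!(maybe_purge(60, 10, &mut next), None);
    }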
@@ -12,7 +12,6 @@ pub mod chacha_cuda;
pub mod cluster_info_vote_listener;
pub mod commitment;
pub mod shred_fetch_stage;
-pub mod thread_mem_usage;
#[macro_use]
pub mod contact_info;
pub mod archiver;
@@ -84,10 +83,3 @@ extern crate solana_metrics;
#[cfg(test)]
#[macro_use]
extern crate matches;

-#[cfg(unix)]
-extern crate jemallocator;
-
-#[cfg(unix)]
-#[global_allocator]
-static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
@@ -11,7 +11,7 @@
 //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
 //!
 use crate::result::{Error, Result};
-use solana_ledger::blocktree::Blocktree;
+use solana_ledger::blockstore::Blockstore;
 use solana_ledger::entry::Entry;
 use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
 use solana_ledger::poh::Poh;
@@ -60,7 +60,7 @@ pub struct PohRecorder {
     leader_last_tick_height: u64, // zero if none
     grace_ticks: u64,
     id: Pubkey,
-    blocktree: Arc<Blocktree>,
+    blockstore: Arc<Blockstore>,
     leader_schedule_cache: Arc<LeaderScheduleCache>,
     poh_config: Arc<PohConfig>,
     ticks_per_slot: u64,
@@ -74,7 +74,7 @@ impl PohRecorder {
             &self.id,
             bank.slot(),
             &bank,
-            Some(&self.blocktree),
+            Some(&self.blockstore),
         );
         assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
         let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =
@@ -401,7 +401,7 @@ impl PohRecorder {
         next_leader_slot: Option<(Slot, Slot)>,
         ticks_per_slot: u64,
         id: &Pubkey,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         clear_bank_signal: Option<SyncSender<bool>>,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
         poh_config: &Arc<PohConfig>,
@@ -427,7 +427,7 @@ impl PohRecorder {
                 leader_last_tick_height,
                 grace_ticks,
                 id: *id,
-                blocktree: blocktree.clone(),
+                blockstore: blockstore.clone(),
                 leader_schedule_cache: leader_schedule_cache.clone(),
                 ticks_per_slot,
                 poh_config: poh_config.clone(),
@@ -446,7 +446,7 @@ impl PohRecorder {
         next_leader_slot: Option<(Slot, Slot)>,
         ticks_per_slot: u64,
         id: &Pubkey,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
         poh_config: &Arc<PohConfig>,
     ) -> (Self, Receiver<WorkingBankEntry>) {
@@ -457,7 +457,7 @@ impl PohRecorder {
             next_leader_slot,
             ticks_per_slot,
             id,
-            blocktree,
+            blockstore,
             None,
             leader_schedule_cache,
             poh_config,
@@ -469,7 +469,7 @@
 mod tests {
     use super::*;
     use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
-    use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
     use solana_perf::test_tx::test_tx;
     use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
     use solana_sdk::hash::hash;
@@ -480,8 +480,8 @@ mod tests {
         let prev_hash = Hash::default();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");

             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
@@ -490,7 +490,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -499,7 +499,7 @@ mod tests {
             assert_eq!(poh_recorder.tick_cache[0].1, 1);
             assert_eq!(poh_recorder.tick_height, 1);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
@@ -507,8 +507,8 @@ mod tests {
         let prev_hash = Hash::default();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");

             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
@@ -517,7 +517,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -527,15 +527,15 @@ mod tests {
             assert_eq!(poh_recorder.tick_cache[1].1, 2);
             assert_eq!(poh_recorder.tick_height, 2);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_reset_clears_cache() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
                 Hash::default(),
@@ -543,7 +543,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -552,15 +552,15 @@ mod tests {
             poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_clear() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -571,7 +571,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -586,15 +586,15 @@ mod tests {
             poh_recorder.clear_bank();
             assert!(poh_recorder.working_bank.is_none());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_tick_sent_after_min() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -605,7 +605,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -635,15 +635,15 @@ mod tests {
             assert_eq!(num_entries, 3);
             assert!(poh_recorder.working_bank.is_none());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_tick_sent_upto_and_including_max() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -654,7 +654,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -682,15 +682,15 @@ mod tests {
             }
             assert_eq!(num_entries, 3);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_record_to_early() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -701,7 +701,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -720,15 +720,15 @@ mod tests {
                 .is_err());
             assert!(entry_receiver.try_recv().is_err());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_record_bad_slot() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -739,7 +739,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -760,15 +760,15 @@ mod tests {
                 Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
             );
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_record_at_min_passes() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -779,7 +779,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -806,15 +806,15 @@ mod tests {
             let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2");
             assert!(!e.is_tick());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_recorder_record_at_max_fails() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -825,7 +825,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -850,15 +850,15 @@ mod tests {
             let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
             assert!(entry.is_tick());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_poh_cache_on_disconnect() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -869,7 +869,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -888,15 +888,15 @@ mod tests {
             assert!(poh_recorder.working_bank.is_none());
             assert_eq!(poh_recorder.tick_cache.len(), 3);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_reset_current() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
                 Hash::default(),
@@ -904,7 +904,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -915,15 +915,15 @@ mod tests {
             poh_recorder.reset(hash, 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_reset_with_cached() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
                 Hash::default(),
@@ -931,7 +931,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -941,7 +941,7 @@ mod tests {
             poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
@@ -950,8 +950,8 @@ mod tests {

         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
                 0,
                 Hash::default(),
@@ -959,7 +959,7 @@ mod tests {
                 Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
             );
@@ -974,15 +974,15 @@ mod tests {
             poh_recorder.tick();
             assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_reset_clear_bank() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
@@ -992,7 +992,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1005,15 +1005,15 @@ mod tests {
             poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
             assert!(poh_recorder.working_bank.is_none());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
    }

     #[test]
     pub fn test_clear_signal() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let (sender, receiver) = sync_channel(1);
@@ -1024,7 +1024,7 @@ mod tests {
                 None,
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 Some(sender),
                 &Arc::new(LeaderScheduleCache::default()),
                 &Arc::new(PohConfig::default()),
@@ -1033,7 +1033,7 @@ mod tests {
             poh_recorder.clear_bank();
             assert!(receiver.try_recv().is_ok());
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
@@ -1041,8 +1041,8 @@ mod tests {
         solana_logger::setup();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let ticks_per_slot = 5;
             let GenesisConfigInfo {
                 mut genesis_config, ..
@@ -1058,7 +1058,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1085,7 +1085,7 @@ mod tests {
             // Make sure the starting slot is updated
             assert_eq!(poh_recorder.start_slot, end_slot);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
@@ -1094,8 +1094,8 @@ mod tests {

         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -1106,7 +1106,7 @@ mod tests {
                 None,
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1207,15 +1207,15 @@ mod tests {
             assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot());
             assert_eq!(leader_slot, 9);
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }

     #[test]
     fn test_would_be_leader_soon() {
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let prev_hash = bank.last_blockhash();
@@ -1226,7 +1226,7 @@ mod tests {
                 None,
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
@@ -1281,8 +1281,8 @@ mod tests {
         let ledger_path = get_tmp_ledger_path!();
         {
             // test that virtual ticks are flushed into a newly set bank asap
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
             let bank = Arc::new(Bank::new(&genesis_config));
             let genesis_hash = bank.last_blockhash();
@@ -1294,7 +1294,7 @@ mod tests {
                 Some((2, 2)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &Arc::new(PohConfig::default()),
             );
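The PohRecorder hunks above swap only the ledger handle type; the constructor shape is unchanged. A sketch of driving the renamed type, assuming the same crates and reading the constructor's leading arguments as tick height, last entry hash, and start slot, as the unit tests suggest (the `(4, 4)` leader-slot window is the test value, not a requirement):

```rust
use std::sync::Arc;

use solana_core::poh_recorder::PohRecorder;
use solana_ledger::{
    blockstore::Blockstore, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache,
};
use solana_sdk::{
    clock::DEFAULT_TICKS_PER_SLOT, hash::Hash, poh_config::PohConfig, pubkey::Pubkey,
};

fn main() {
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&ledger_path).expect("open blockstore");

    // Only the ledger-handle parameter changed in the rename.
    let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
        0,                      // tick height
        Hash::default(),        // last entry hash
        0,                      // start slot
        Some((4, 4)),           // next leader slot window
        DEFAULT_TICKS_PER_SLOT,
        &Pubkey::default(),
        &Arc::new(blockstore),
        &Arc::new(LeaderScheduleCache::default()),
        &Arc::new(PohConfig::default()),
    );
    poh_recorder.tick();

    // Release the blockstore handle before destroying the ledger directory.
    drop(poh_recorder);
    Blockstore::destroy(&ledger_path).unwrap();
}
```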
@@ -124,7 +124,7 @@ mod tests {
     use crate::poh_recorder::WorkingBank;
     use crate::result::Result;
     use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
-    use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
     use solana_perf::test_tx::test_tx;
     use solana_runtime::bank::Bank;
     use solana_sdk::hash::hash;
@@ -138,8 +138,8 @@ mod tests {
         let prev_hash = bank.last_blockhash();
         let ledger_path = get_tmp_ledger_path!();
         {
-            let blocktree =
-                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+            let blockstore = Blockstore::open(&ledger_path)
+                .expect("Expected to be able to open database ledger");
             let poh_config = Arc::new(PohConfig {
                 hashes_per_tick: Some(2),
                 target_tick_duration: Duration::from_millis(42),
@@ -152,7 +152,7 @@ mod tests {
                 Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
-                &Arc::new(blocktree),
+                &Arc::new(blockstore),
                 &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
                 &poh_config,
             );
@@ -231,6 +231,6 @@ mod tests {
             let _ = poh_service.join().unwrap();
             let _ = entry_producer.join().unwrap();
         }
-        Blocktree::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(&ledger_path).unwrap();
     }
 }
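The PoH service test above overrides the default tick parameters. A small illustrative snippet of that configuration pattern, assuming only `solana-sdk`; the 42ms tick duration and two hashes per tick come straight from the test:

```rust
use std::{sync::Arc, time::Duration};

use solana_sdk::poh_config::PohConfig;

fn main() {
    // Same override as the test: two hashes per tick, 42ms target tick time;
    // remaining fields keep their defaults via struct-update syntax.
    let poh_config = Arc::new(PohConfig {
        hashes_per_tick: Some(2),
        target_tick_duration: Duration::from_millis(42),
        ..PohConfig::default()
    });
    assert_eq!(poh_config.hashes_per_tick, Some(2));
}
```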
@@ -6,7 +6,7 @@ use crate::{
 };
 use solana_ledger::{
     bank_forks::BankForks,
-    blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta},
+    blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
 };
 use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
 use std::{
@@ -71,7 +71,7 @@ pub struct RepairService {

 impl RepairService {
     pub fn new(
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         exit: Arc<AtomicBool>,
         repair_socket: Arc<UdpSocket>,
         cluster_info: Arc<RwLock<ClusterInfo>>,
@@ -81,7 +81,7 @@ impl RepairService {
             RepairStrategy::RepairAll {
                 ref epoch_schedule, ..
             } => Some(ClusterInfoRepairListener::new(
-                &blocktree,
+                &blockstore,
                 &exit,
                 cluster_info.clone(),
                 *epoch_schedule,
@@ -94,7 +94,7 @@ impl RepairService {
             .name("solana-repair-service".to_string())
             .spawn(move || {
                 Self::run(
-                    &blocktree,
+                    &blockstore,
                     &exit,
                     &repair_socket,
                     &cluster_info,
@@ -110,7 +110,7 @@ impl RepairService {
     }

     fn run(
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         exit: &Arc<AtomicBool>,
         repair_socket: &Arc<UdpSocket>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
@@ -123,10 +123,10 @@ impl RepairService {
             ref epoch_schedule, ..
         } = repair_strategy
         {
-            current_root = blocktree.last_root();
+            current_root = blockstore.last_root();
             Self::initialize_epoch_slots(
                 id,
-                blocktree,
+                blockstore,
                 &mut epoch_slots,
                 current_root,
                 epoch_schedule,
@@ -143,7 +143,7 @@ impl RepairService {
                     RepairStrategy::RepairRange(ref repair_slot_range) => {
                         // Strategy used by archivers
                         Self::generate_repairs_in_range(
-                            blocktree,
+                            blockstore,
                             MAX_REPAIR_LENGTH,
                             repair_slot_range,
                         )
@@ -153,8 +153,8 @@ impl RepairService {
                         ref completed_slots_receiver,
                         ..
                     } => {
-                        let new_root = blocktree.last_root();
-                        let lowest_slot = blocktree.lowest_slot();
+                        let new_root = blockstore.last_root();
+                        let lowest_slot = blockstore.lowest_slot();
                         Self::update_epoch_slots(
                             id,
                             new_root,
@@ -164,7 +164,7 @@ impl RepairService {
                             &cluster_info,
                             completed_slots_receiver,
                         );
-                        Self::generate_repairs(blocktree, new_root, MAX_REPAIR_LENGTH)
+                        Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
                     }
                 }
             };
@@ -195,7 +195,7 @@ impl RepairService {

     // Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end
     pub fn generate_repairs_in_range(
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         max_repairs: usize,
         repair_range: &RepairSlotRange,
     ) -> Result<Vec<RepairType>> {
@@ -206,7 +206,7 @@ impl RepairService {
                 break;
             }

-            let meta = blocktree
+            let meta = blockstore
                 .meta(slot)
                 .expect("Unable to lookup slot meta")
                 .unwrap_or(SlotMeta {
@@ -215,7 +215,7 @@ impl RepairService {
                 });

             let new_repairs = Self::generate_repairs_for_slot(
-                blocktree,
+                blockstore,
                 slot,
                 &meta,
                 max_repairs - repairs.len(),
@@ -227,18 +227,18 @@ impl RepairService {
     }

     fn generate_repairs(
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         root: Slot,
         max_repairs: usize,
     ) -> Result<Vec<RepairType>> {
         // Slot height and shred indexes for shreds we want to repair
         let mut repairs: Vec<RepairType> = vec![];
-        Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root);
+        Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root);

         // TODO: Incorporate gossip to determine priorities for repair?

-        // Try to resolve orphans in blocktree
-        let mut orphans = blocktree.get_orphans(Some(MAX_ORPHANS));
+        // Try to resolve orphans in blockstore
+        let mut orphans = blockstore.get_orphans(Some(MAX_ORPHANS));
         orphans.retain(|x| *x > root);

         Self::generate_repairs_for_orphans(&orphans[..], &mut repairs);
@@ -246,7 +246,7 @@ impl RepairService {
     }

     fn generate_repairs_for_slot(
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         slot: Slot,
         slot_meta: &SlotMeta,
         max_repairs: usize,
@@ -256,7 +256,7 @@ impl RepairService {
         } else if slot_meta.consumed == slot_meta.received {
             vec![RepairType::HighestShred(slot, slot_meta.received)]
         } else {
-            let reqs = blocktree.find_missing_data_indexes(
+            let reqs = blockstore.find_missing_data_indexes(
                 slot,
                 slot_meta.first_shred_timestamp,
                 slot_meta.consumed,
@@ -275,7 +275,7 @@ impl RepairService {

     /// Repairs any fork starting at the input slot
     fn generate_repairs_for_fork(
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         repairs: &mut Vec<RepairType>,
         max_repairs: usize,
         slot: Slot,
@@ -283,9 +283,9 @@ impl RepairService {
         let mut pending_slots = vec![slot];
         while repairs.len() < max_repairs && !pending_slots.is_empty() {
             let slot = pending_slots.pop().unwrap();
-            if let Some(slot_meta) = blocktree.meta(slot).unwrap() {
+            if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
                 let new_repairs = Self::generate_repairs_for_slot(
-                    blocktree,
+                    blockstore,
                     slot,
                     &slot_meta,
                     max_repairs - repairs.len(),
@@ -300,7 +300,7 @@ impl RepairService {
     }

     fn get_completed_slots_past_root(
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         slots_in_gossip: &mut BTreeSet<Slot>,
         root: Slot,
         epoch_schedule: &EpochSchedule,
@@ -308,7 +308,7 @@ impl RepairService {
         let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root);
         let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch);

-        let meta_iter = blocktree
+        let meta_iter = blockstore
             .slot_meta_iterator(root + 1)
             .expect("Couldn't get db iterator");

@@ -324,22 +324,22 @@ impl RepairService {

     fn initialize_epoch_slots(
         id: Pubkey,
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         slots_in_gossip: &mut BTreeSet<Slot>,
         root: Slot,
         epoch_schedule: &EpochSchedule,
         cluster_info: &RwLock<ClusterInfo>,
     ) {
-        Self::get_completed_slots_past_root(blocktree, slots_in_gossip, root, epoch_schedule);
+        Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule);

         // Safe to set into gossip because by this time, the leader schedule cache should
-        // also be updated with the latest root (done in blocktree_processor) and thus
+        // also be updated with the latest root (done in blockstore_processor) and thus
         // will provide a schedule to window_service for any incoming shreds up to the
         // last_confirmed_epoch.
         cluster_info.write().unwrap().push_epoch_slots(
             id,
             root,
-            blocktree.lowest_slot(),
+            blockstore.lowest_slot(),
             slots_in_gossip.clone(),
         );
     }
@@ -409,60 +409,60 @@ mod test {
     use itertools::Itertools;
     use rand::seq::SliceRandom;
     use rand::{thread_rng, Rng};
-    use solana_ledger::blocktree::{
+    use solana_ledger::blockstore::{
         make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
     };
     use solana_ledger::shred::max_ticks_per_n_shreds;
-    use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
     use std::sync::mpsc::channel;
     use std::thread::Builder;

     #[test]
     pub fn test_repair_orphan() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             // Create some orphan slots
             let (mut shreds, _) = make_slot_entries(1, 0, 1);
             let (shreds2, _) = make_slot_entries(5, 2, 1);
             shreds.extend(shreds2);
-            blocktree.insert_shreds(shreds, None, false).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();
             assert_eq!(
-                RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
+                RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
                 vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
             );
         }

-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_repair_empty_slot() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             let (shreds, _) = make_slot_entries(2, 0, 1);

             // Write this shred to slot 2, should chain to slot 0, which we haven't received
             // any shreds for
-            blocktree.insert_shreds(shreds, None, false).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();

             // Check that repair tries to patch the empty slot
             assert_eq!(
-                RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
+                RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
                 vec![RepairType::HighestShred(0, 0)]
             );
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_generate_repairs() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             let nth = 3;
             let num_slots = 2;
@@ -483,7 +483,7 @@ mod test {
                     missing_indexes_per_slot.insert(0, index);
                 }
             }
-            blocktree
+            blockstore
                 .insert_shreds(shreds_to_write, None, false)
                 .unwrap();
             // sleep so that the holes are ready for repair
@@ -497,23 +497,23 @@ mod test {
                 .collect();

             assert_eq!(
-                RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
+                RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
                 expected
             );

             assert_eq!(
-                RepairService::generate_repairs(&blocktree, 0, expected.len() - 2).unwrap()[..],
+                RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..],
                 expected[0..expected.len() - 2]
             );
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_generate_highest_repair() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             let num_entries_per_slot = 100;

@@ -524,25 +524,25 @@ mod test {
             // Remove last shred (which is also last in slot) so that slot is not complete
             shreds.pop();

-            blocktree.insert_shreds(shreds, None, false).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();

             // We didn't get the last shred for this slot, so ask for the highest shred for that slot
             let expected: Vec<RepairType> =
                 vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];

             assert_eq!(
-                RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
+                RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
                 expected
             );
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_repair_range() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
             let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
@@ -550,7 +550,7 @@ mod test {
             let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
             for (mut slot_shreds, _) in shreds.into_iter() {
                 slot_shreds.remove(0);
-                blocktree.insert_shreds(slot_shreds, None, false).unwrap();
+                blockstore.insert_shreds(slot_shreds, None, false).unwrap();
             }
             // sleep to make slot eligible for repair
             sleep(Duration::from_secs(1));
@@ -574,7 +574,7 @@ mod test {

                 assert_eq!(
                     RepairService::generate_repairs_in_range(
-                        &blocktree,
+                        &blockstore,
                         std::usize::MAX,
                         &repair_slot_range
                     )
@@ -584,14 +584,14 @@ mod test {
                 }
             }
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_repair_range_highest() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();

             let num_entries_per_slot = 10;

@@ -603,7 +603,7 @@ mod test {
                 let parent = if i > 0 { i - 1 } else { 0 };
                 let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64);

-                blocktree.insert_shreds(shreds, None, false).unwrap();
+                blockstore.insert_shreds(shreds, None, false).unwrap();
             }

             let end = 4;
@@ -619,7 +619,7 @@ mod test {

             assert_eq!(
                 RepairService::generate_repairs_in_range(
-                    &blocktree,
+                    &blockstore,
                     std::usize::MAX,
                     &repair_slot_range
                 )
@@ -627,14 +627,14 @@ mod test {
                 expected
             );
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_get_completed_slots_past_root() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            let blocktree = Blocktree::open(&blocktree_path).unwrap();
+            let blockstore = Blockstore::open(&blockstore_path).unwrap();
             let num_entries_per_slot = 10;
             let root = 10;

@@ -656,8 +656,8 @@ mod test {
                 .collect();
             let mut full_slots = BTreeSet::new();

-            blocktree.insert_shreds(fork1_shreds, None, false).unwrap();
-            blocktree
+            blockstore.insert_shreds(fork1_shreds, None, false).unwrap();
+            blockstore
                 .insert_shreds(fork2_incomplete_shreds, None, false)
                 .unwrap();

@@ -665,7 +665,7 @@ mod test {
             let epoch_schedule = EpochSchedule::custom(32, 32, false);

             RepairService::get_completed_slots_past_root(
-                &blocktree,
+                &blockstore,
                 &mut full_slots,
                 root,
                 &epoch_schedule,
@@ -682,9 +682,9 @@ mod test {
                 .into_iter()
                 .flat_map(|(shreds, _)| shreds)
                 .collect();
-            blocktree.insert_shreds(fork3_shreds, None, false).unwrap();
+            blockstore.insert_shreds(fork3_shreds, None, false).unwrap();
             RepairService::get_completed_slots_past_root(
-                &blocktree,
+                &blockstore,
                 &mut full_slots,
                 root,
                 &epoch_schedule,
@@ -692,25 +692,25 @@ mod test {
             expected.insert(last_slot);
             assert_eq!(full_slots, expected);
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
     pub fn test_update_epoch_slots() {
-        let blocktree_path = get_tmp_ledger_path!();
+        let blockstore_path = get_tmp_ledger_path!();
         {
-            // Create blocktree
-            let (blocktree, _, completed_slots_receiver) =
-                Blocktree::open_with_signal(&blocktree_path).unwrap();
+            // Create blockstore
+            let (blockstore, _, completed_slots_receiver) =
+                Blockstore::open_with_signal(&blockstore_path).unwrap();

-            let blocktree = Arc::new(blocktree);
+            let blockstore = Arc::new(blockstore);

             let mut root = 0;
             let num_slots = 100;
             let entries_per_slot = 5;
-            let blocktree_ = blocktree.clone();
+            let blockstore_ = blockstore.clone();

-            // Spin up thread to write to blocktree
+            // Spin up thread to write to blockstore
             let writer = Builder::new()
                 .name("writer".to_string())
                 .spawn(move || {
@@ -729,7 +729,7 @@ mod test {
                         let step = rng.gen_range(1, max_step + 1) as usize;
                         let step = std::cmp::min(step, num_shreds - i);
                         let shreds_to_insert = shreds.drain(..step).collect_vec();
-                        blocktree_
+                        blockstore_
                             .insert_shreds(shreds_to_insert, None, false)
                             .unwrap();
                         sleep(Duration::from_millis(repair_interval_ms));
@@ -748,7 +748,7 @@ mod test {
                 RepairService::update_epoch_slots(
                     Pubkey::default(),
                     root,
-                    blocktree.lowest_slot(),
+                    blockstore.lowest_slot(),
                     &mut root.clone(),
                     &mut completed_slots,
                     &cluster_info,
@@ -762,7 +762,7 @@ mod test {
             // Update with new root, should filter out the slots <= root
             root = num_slots / 2;
             let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
-            blocktree.insert_shreds(shreds, None, false).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();
             RepairService::update_epoch_slots(
                 Pubkey::default(),
                 root,
@@ -777,7 +777,7 @@ mod test {
             assert_eq!(completed_slots, expected);
             writer.join().unwrap();
         }
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
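The repair-service changes above are the same find-and-replace. A sketch of the one public entry point visible in these hunks, `generate_repairs_in_range`, on an empty ledger; it assumes `RepairSlotRange` exposes `start`/`end` fields and that `RepairType` implements `Debug`, neither of which is shown in the diff itself:

```rust
use solana_core::repair_service::{RepairService, RepairSlotRange};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};

fn main() {
    let path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&path).unwrap();

    // With no shreds inserted, every slot in the window needs its highest shred.
    let repair_slot_range = RepairSlotRange { start: 0, end: 3 };
    let repairs = RepairService::generate_repairs_in_range(
        &blockstore,
        std::usize::MAX,
        &repair_slot_range,
    )
    .unwrap();
    println!("requested repairs: {:?}", repairs);

    drop(blockstore);
    Blockstore::destroy(&path).unwrap();
}
```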
@ -7,18 +7,17 @@ use crate::{
|
||||
poh_recorder::PohRecorder,
|
||||
result::{Error, Result},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
thread_mem_usage,
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
block_error::BlockError,
|
||||
blocktree::{Blocktree, BlocktreeError},
|
||||
blocktree_processor::{self, TransactionStatusSender},
|
||||
blockstore::{Blockstore, BlockstoreError},
|
||||
blockstore_processor::{self, TransactionStatusSender},
|
||||
entry::{Entry, EntrySlice, VerifyRecyclers},
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::{measure::Measure, thread_mem_usage};
|
||||
use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
@ -180,7 +179,7 @@ impl ReplayStage {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(
|
||||
config: ReplayStageConfig,
|
||||
blocktree: Arc<Blocktree>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
ledger_signal_receiver: Receiver<bool>,
|
||||
@ -237,7 +236,7 @@ impl ReplayStage {
|
||||
|
||||
let start = allocated.get();
|
||||
Self::generate_new_bank_forks(
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
@ -255,7 +254,7 @@ impl ReplayStage {
|
||||
|
||||
let start = allocated.get();
|
||||
let did_complete_bank = Self::replay_active_banks(
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&my_pubkey,
|
||||
&mut progress,
|
||||
@ -311,7 +310,7 @@ impl ReplayStage {
|
||||
&vote_account,
|
||||
&voting_keypair,
|
||||
&cluster_info,
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&leader_schedule_cache,
|
||||
&root_bank_sender,
|
||||
stats.total_staked,
|
||||
@ -328,7 +327,7 @@ impl ReplayStage {
|
||||
if last_reset != bank.last_blockhash() {
|
||||
Self::reset_poh_recorder(
|
||||
&my_pubkey,
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&bank,
|
||||
&poh_recorder,
|
||||
&leader_schedule_cache,
|
||||
@ -409,7 +408,7 @@ impl ReplayStage {
|
||||
match result {
|
||||
Err(RecvTimeoutError::Timeout) => continue,
|
||||
Err(_) => break,
|
||||
Ok(_) => trace!("blocktree signal"),
|
||||
Ok(_) => trace!("blockstore signal"),
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
@ -535,15 +534,15 @@ impl ReplayStage {
|
||||
!Bank::can_commit(&tx_error)
|
||||
}
|
||||
Err(Error::BlockError(_)) => true,
|
||||
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
|
||||
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the replay result and the number of replayed transactions
|
||||
fn replay_blocktree_into_bank(
|
||||
fn replay_blockstore_into_bank(
|
||||
bank: &Arc<Bank>,
|
||||
blocktree: &Blocktree,
|
||||
blockstore: &Blockstore,
|
||||
bank_progress: &mut ForkProgress,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
@ -551,7 +550,7 @@ impl ReplayStage {
|
||||
let mut tx_count = 0;
|
||||
let now = Instant::now();
|
||||
let load_result =
|
||||
Self::load_blocktree_entries_with_shred_info(bank, blocktree, bank_progress);
|
||||
Self::load_blockstore_entries_with_shred_info(bank, blockstore, bank_progress);
|
||||
let fetch_entries_elapsed = now.elapsed().as_micros();
|
||||
if load_result.is_err() {
|
||||
bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64;
|
||||
@ -590,17 +589,17 @@ impl ReplayStage {
|
||||
("error", format!("error: {:?}", replay_result), String),
|
||||
("slot", bank.slot(), i64)
|
||||
);
|
||||
Self::mark_dead_slot(bank.slot(), blocktree, bank_progress);
|
||||
Self::mark_dead_slot(bank.slot(), blockstore, bank_progress);
|
||||
}
|
||||
|
||||
(replay_result, tx_count)
|
||||
}
|
||||
|
||||
fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) {
|
||||
fn mark_dead_slot(slot: Slot, blockstore: &Blockstore, bank_progress: &mut ForkProgress) {
|
||||
bank_progress.is_dead = true;
|
||||
blocktree
|
||||
blockstore
|
||||
.set_dead_slot(slot)
|
||||
.expect("Failed to mark slot as dead in blocktree");
|
||||
.expect("Failed to mark slot as dead in blockstore");
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
@ -612,7 +611,7 @@ impl ReplayStage {
|
||||
vote_account: &Pubkey,
|
||||
voting_keypair: &Option<Arc<Keypair>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
blocktree: &Arc<Blocktree>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
|
||||
total_staked: u64,
|
||||
@ -636,12 +635,12 @@ impl ReplayStage {
|
||||
let mut rooted_banks = root_bank.parents();
|
||||
rooted_banks.push(root_bank);
|
||||
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
|
||||
// Call leader schedule_cache.set_root() before blocktree.set_root() because
|
||||
// Call leader schedule_cache.set_root() before blockstore.set_root() because
|
||||
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
|
||||
// get shreds for repair on gossip before we update leader schedule, otherwise they may
|
||||
// get dropped.
|
||||
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
|
||||
blocktree
|
||||
blockstore
|
||||
.set_roots(&rooted_slots)
|
||||
.expect("Ledger set roots failed");
|
||||
bank_forks
|
||||
@ -698,13 +697,17 @@ impl ReplayStage {
|
||||
|
||||
fn reset_poh_recorder(
|
||||
my_pubkey: &Pubkey,
|
||||
blocktree: &Blocktree,
|
||||
blockstore: &Blockstore,
|
||||
bank: &Arc<Bank>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
) {
|
||||
let next_leader_slot =
|
||||
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree));
|
||||
let next_leader_slot = leader_schedule_cache.next_leader_slot(
|
||||
&my_pubkey,
|
||||
bank.slot(),
|
||||
&bank,
|
||||
Some(blockstore),
|
||||
);
|
||||
poh_recorder
|
||||
.lock()
|
||||
.unwrap()
|
||||
@ -726,7 +729,7 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
fn replay_active_banks(
|
||||
blocktree: &Arc<Blocktree>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
my_pubkey: &Pubkey,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
@ -755,9 +758,9 @@ impl ReplayStage {
|
||||
.entry(bank.slot())
|
||||
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
|
||||
if bank.collector_id() != my_pubkey {
|
||||
let (replay_result, replay_tx_count) = Self::replay_blocktree_into_bank(
|
||||
let (replay_result, replay_tx_count) = Self::replay_blockstore_into_bank(
|
||||
&bank,
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
bank_progress,
|
||||
transaction_status_sender.clone(),
|
||||
verify_recyclers,
|
||||
@ -958,12 +961,12 @@ impl ReplayStage {
|
||||
}
|
||||
}
|
||||
|
||||
fn load_blocktree_entries_with_shred_info(
|
||||
fn load_blockstore_entries_with_shred_info(
|
||||
bank: &Bank,
|
||||
blocktree: &Blocktree,
|
||||
blockstore: &Blockstore,
|
||||
bank_progress: &mut ForkProgress,
|
||||
) -> Result<(Vec<Entry>, usize, bool)> {
|
||||
blocktree
|
||||
blockstore
|
||||
.get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64)
|
||||
.map_err(|err| err.into())
|
||||
}
|
||||
@ -1073,7 +1076,7 @@ impl ReplayStage {
|
||||
|
||||
let mut replay_elapsed = Measure::start("replay_elapsed");
|
||||
let res =
|
||||
blocktree_processor::process_entries(bank, entries, true, transaction_status_sender);
|
||||
blockstore_processor::process_entries(bank, entries, true, transaction_status_sender);
|
||||
replay_elapsed.stop();
|
||||
bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
|
||||
|
||||
@ -1103,7 +1106,6 @@ impl ReplayStage {
|
||||
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
||||
) {
|
||||
bank.freeze();
|
||||
info!("bank frozen {}", bank.slot());
|
||||
slot_full_senders.iter().for_each(|sender| {
|
||||
if let Err(e) = sender.send((bank.slot(), *bank.collector_id())) {
|
||||
trace!("{} slot_full alert failed: {:?}", my_pubkey, e);
|
||||
@ -1112,7 +1114,7 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
fn generate_new_bank_forks(
|
||||
blocktree: &Blocktree,
|
||||
blockstore: &Blockstore,
|
||||
forks_lock: &RwLock<BankForks>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
@ -1121,7 +1123,7 @@ impl ReplayStage {
|
||||
let forks = forks_lock.read().unwrap();
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
||||
let next_slots = blocktree
|
||||
let next_slots = blockstore
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
// Filter out what we've already seen
|
||||
@ -1182,9 +1184,10 @@ pub(crate) mod tests {
|
||||
transaction_status_service::TransactionStatusService,
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
|
||||
use solana_ledger::{
|
||||
blocktree::make_slot_entries,
|
||||
blocktree::{entries_to_test_shreds, BlocktreeError},
|
||||
blockstore::make_slot_entries,
|
||||
blockstore::{entries_to_test_shreds, BlockstoreError},
|
||||
create_new_tmp_ledger,
|
||||
entry::{self, next_entry},
|
||||
get_tmp_ledger_path,
|
||||
@ -1194,22 +1197,21 @@ pub(crate) mod tests {
|
||||
},
|
||||
};
|
||||
use solana_runtime::genesis_utils::GenesisConfigInfo;
|
||||
use solana_sdk::account::Account;
|
||||
use solana_sdk::rent::Rent;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
hash::{hash, Hash},
|
||||
instruction::InstructionError,
|
||||
packet::PACKET_DATA_SIZE,
|
||||
rent::Rent,
|
||||
signature::{Keypair, KeypairUtil, Signature},
|
||||
system_transaction,
|
||||
transaction::TransactionError,
|
||||
};
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state;
|
||||
use solana_vote_program::vote_state::{Vote, VoteState};
|
||||
use std::iter;
|
||||
use solana_vote_program::vote_state::{self, Vote, VoteState};
|
||||
use std::{
|
||||
fs::remove_dir_all,
|
||||
iter,
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
@ -1494,8 +1496,9 @@ pub(crate) mod tests {
|
||||
fn test_child_slots_of_same_parent() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blocktree = Arc::new(
|
||||
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
|
||||
let genesis_config = create_genesis_config(10_000).genesis_config;
|
||||
@ -1507,11 +1510,11 @@ pub(crate) mod tests {
|
||||
|
||||
// Insert shred for slot 1, generate new forks, check result
|
||||
let (shreds, _) = make_slot_entries(1, 0, 8);
|
||||
blocktree.insert_shreds(shreds, None, false).unwrap();
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks.get(1).is_none());
|
||||
let bank_forks = RwLock::new(bank_forks);
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
@ -1520,10 +1523,10 @@ pub(crate) mod tests {
|
||||
|
||||
// Insert shred for slot 3, generate new forks, check result
|
||||
let (shreds, _) = make_slot_entries(2, 0, 8);
|
||||
blocktree.insert_shreds(shreds, None, false).unwrap();
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
assert!(bank_forks.read().unwrap().get(2).is_none());
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blocktree,
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
@ -1745,7 +1748,7 @@ pub(crate) mod tests {
|
||||
|
||||
assert_matches!(
|
||||
res,
|
||||
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_)))
|
||||
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_)))
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1757,8 +1760,9 @@ pub(crate) mod tests {
    {
        let ledger_path = get_tmp_ledger_path!();
        let res = {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            let blockstore = Arc::new(
                Blockstore::open(&ledger_path)
                    .expect("Expected to be able to open database ledger"),
            );
            let GenesisConfigInfo {
                mut genesis_config,
@@ -1773,10 +1777,10 @@
                .entry(bank0.slot())
                .or_insert_with(|| ForkProgress::new(0, last_blockhash));
            let shreds = shred_to_insert(&mint_keypair, bank0.clone());
            blocktree.insert_shreds(shreds, None, false).unwrap();
            let (res, _tx_count) = ReplayStage::replay_blocktree_into_bank(
            blockstore.insert_shreds(shreds, None, false).unwrap();
            let (res, _tx_count) = ReplayStage::replay_blockstore_into_bank(
                &bank0,
                &blocktree,
                &blockstore,
                &mut bank0_progress,
                None,
                &VerifyRecyclers::default(),
@@ -1788,8 +1792,8 @@
                .map(|b| b.is_dead)
                .unwrap_or(false));

            // Check that the erroring bank was marked as dead in blocktree
            assert!(blocktree.is_dead(bank0.slot()));
            // Check that the erroring bank was marked as dead in blockstore
            assert!(blockstore.is_dead(bank0.slot()));
            res
        };
        let _ignored = remove_dir_all(&ledger_path);
@@ -1897,11 +1901,11 @@
        );
    }

    pub fn create_test_transactions_and_populate_blocktree(
    pub fn create_test_transactions_and_populate_blockstore(
        keypairs: Vec<&Keypair>,
        previous_slot: Slot,
        bank: Arc<Bank>,
        blocktree: Arc<Blocktree>,
        blockstore: Arc<Blockstore>,
    ) -> Vec<Signature> {
        let mint_keypair = keypairs[0];
        let keypair1 = keypairs[1];
@@ -1928,19 +1932,19 @@
        let entries = vec![entry_1, entry_2, entry_3];

        let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0);
        blocktree.insert_shreds(shreds, None, false).unwrap();
        blocktree.set_roots(&[slot]).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots(&[slot]).unwrap();

        let (transaction_status_sender, transaction_status_receiver) = unbounded();
        let transaction_status_service = TransactionStatusService::new(
            transaction_status_receiver,
            blocktree.clone(),
            blockstore.clone(),
            &Arc::new(AtomicBool::new(false)),
        );

        // Check that process_entries successfully writes can_commit transactions statuses, and
        // that they are matched properly by get_confirmed_block
        let _result = blocktree_processor::process_entries(
        let _result = blockstore_processor::process_entries(
            &bank,
            &entries,
            true,
@@ -1961,9 +1965,9 @@
        } = create_genesis_config(1000);
        let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
        {
            let blocktree = Blocktree::open(&ledger_path)
            let blockstore = Blockstore::open(&ledger_path)
                .expect("Expected to successfully open database ledger");
            let blocktree = Arc::new(blocktree);
            let blockstore = Arc::new(blockstore);

            let keypair1 = Keypair::new();
            let keypair2 = Keypair::new();
@@ -1977,32 +1981,36 @@
            let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
            let slot = bank1.slot();

            let signatures = create_test_transactions_and_populate_blocktree(
            let signatures = create_test_transactions_and_populate_blockstore(
                vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
                bank0.slot(),
                bank1,
                blocktree.clone(),
                blockstore.clone(),
            );

            let confirmed_block = blocktree.get_confirmed_block(slot).unwrap();
            let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
            assert_eq!(confirmed_block.transactions.len(), 3);

            for (transaction, result) in confirmed_block.transactions.into_iter() {
                if transaction.signatures[0] == signatures[0] {
                    assert_eq!(result.unwrap().status, Ok(()));
                } else if transaction.signatures[0] == signatures[1] {
                    assert_eq!(
                        result.unwrap().status,
                        Err(TransactionError::InstructionError(
                            0,
                            InstructionError::CustomError(1)
                        ))
                    );
                } else {
                    assert_eq!(result, None);
            for RpcTransactionWithStatusMeta { transaction, meta } in
                confirmed_block.transactions.into_iter()
            {
                if let RpcEncodedTransaction::Json(transaction) = transaction {
                    if transaction.signatures[0] == signatures[0].to_string() {
                        assert_eq!(meta.unwrap().status, Ok(()));
                    } else if transaction.signatures[0] == signatures[1].to_string() {
                        assert_eq!(
                            meta.unwrap().status,
                            Err(TransactionError::InstructionError(
                                0,
                                InstructionError::CustomError(1)
                            ))
                        );
                    } else {
                        assert_eq!(meta, None);
                    }
                }
            }
        }
        Blocktree::destroy(&ledger_path).unwrap();
        Blockstore::destroy(&ledger_path).unwrap();
    }
}

@@ -3,7 +3,7 @@
use crate::cluster_info;
use crate::poh_recorder;
use solana_ledger::block_error;
use solana_ledger::blocktree;
use solana_ledger::blockstore;
use solana_ledger::snapshot_utils;
use solana_sdk::transaction;
use std::any::Any;
@@ -27,7 +27,7 @@ pub enum Error {
    SendError,
    PohRecorderError(poh_recorder::PohRecorderError),
    BlockError(block_error::BlockError),
    BlocktreeError(blocktree::BlocktreeError),
    BlockstoreError(blockstore::BlockstoreError),
    FsExtra(fs_extra::error::Error),
    SnapshotError(snapshot_utils::SnapshotError),
}
@@ -127,9 +127,9 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
        Error::PohRecorderError(e)
    }
}
impl std::convert::From<blocktree::BlocktreeError> for Error {
    fn from(e: blocktree::BlocktreeError) -> Error {
        Error::BlocktreeError(e)
impl std::convert::From<blockstore::BlockstoreError> for Error {
    fn from(e: blockstore::BlockstoreError) -> Error {
        Error::BlockstoreError(e)
    }
}
impl std::convert::From<snapshot_utils::SnapshotError> for Error {
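An aside on the hunk above: the rename is mechanical because the enum variant and its `From` conversion move together, so `?` at every call site keeps compiling. A minimal stand-in sketch of the same pattern, using local types rather than the real solana_ledger ones:

#[derive(Debug)]
enum BlockstoreError {
    InvalidShredData(String),
}

#[derive(Debug)]
enum Error {
    BlockstoreError(BlockstoreError),
}

impl From<BlockstoreError> for Error {
    fn from(e: BlockstoreError) -> Error {
        Error::BlockstoreError(e)
    }
}

fn insert(ok: bool) -> Result<(), BlockstoreError> {
    if ok {
        Ok(())
    } else {
        Err(BlockstoreError::InvalidShredData("bad shred".to_string()))
    }
}

fn replay() -> Result<(), Error> {
    insert(false)?; // the BlockstoreError converts into Error automatically
    Ok(())
}

fn main() {
    // Prints the wrapped error, matching the assert_matches! pattern above.
    println!("{:?}", replay());
}
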
@@ -12,7 +12,7 @@ use crate::{
use crossbeam_channel::Receiver as CrossbeamReceiver;
use solana_ledger::{
    bank_forks::BankForks,
    blocktree::{Blocktree, CompletedSlotsReceiver},
    blockstore::{Blockstore, CompletedSlotsReceiver},
    leader_schedule_cache::LeaderScheduleCache,
    staking_utils,
};
@@ -205,7 +205,7 @@ impl RetransmitStage {
    pub fn new(
        bank_forks: Arc<RwLock<BankForks>>,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        blocktree: Arc<Blocktree>,
        blockstore: Arc<Blockstore>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        retransmit_sockets: Arc<Vec<UdpSocket>>,
        repair_socket: Arc<UdpSocket>,
@@ -234,7 +234,7 @@ impl RetransmitStage {
        };
        let leader_schedule_cache = leader_schedule_cache.clone();
        let window_service = WindowService::new(
            blocktree,
            blockstore,
            cluster_info.clone(),
            verified_receiver,
            retransmit_sender,
@@ -281,7 +281,7 @@ mod tests {
    use crate::contact_info::ContactInfo;
    use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
    use crate::packet::{self, Meta, Packet, Packets};
    use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions};
    use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
    use solana_ledger::create_new_tmp_ledger;
    use solana_net_utils::find_available_port_in_range;
    use solana_sdk::pubkey::Pubkey;
@@ -290,13 +290,13 @@ mod tests {
    fn test_skip_repair() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
        let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let opts = ProcessOptions {
            full_leader_cache: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, _, cached_leader_schedule) =
            process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
            process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
        let leader_schedule_cache = Arc::new(cached_leader_schedule);
        let bank_forks = Arc::new(RwLock::new(bank_forks));

345 core/src/rpc.rs
@@ -11,21 +11,21 @@ use crate::{
use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_request::{
    Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule,
    RpcResponseContext, RpcVersionInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
use solana_client::rpc_response::{
    Response, RpcAccount, RpcBlockCommitment, RpcBlockhashFeeCalculator, RpcConfirmedBlock,
    RpcContactInfo, RpcEpochInfo, RpcKeyedAccount, RpcLeaderSchedule, RpcResponseContext,
    RpcSignatureConfirmation, RpcStorageTurn, RpcTransactionEncoding, RpcVersionInfo,
    RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{
    bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator,
    bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
    account::Account,
    clock::{Slot, UnixTimestamp},
    commitment_config::{CommitmentConfig, CommitmentLevel},
    epoch_schedule::EpochSchedule,
    fee_calculator::FeeCalculator,
    hash::Hash,
    inflation::Inflation,
    pubkey::Pubkey,
@@ -68,7 +68,7 @@ impl Default for JsonRpcConfig {
pub struct JsonRpcRequestProcessor {
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    blocktree: Arc<Blocktree>,
    blockstore: Arc<Blockstore>,
    config: JsonRpcConfig,
    storage_state: StorageState,
    validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
@@ -93,7 +93,7 @@ impl JsonRpcRequestProcessor {
        config: JsonRpcConfig,
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        blocktree: Arc<Blocktree>,
        blockstore: Arc<Blockstore>,
        storage_state: StorageState,
        validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
    ) -> Self {
@@ -101,7 +101,7 @@ impl JsonRpcRequestProcessor {
            config,
            bank_forks,
            block_commitment_cache,
            blocktree,
            blockstore,
            storage_state,
            validator_exit,
        }
@@ -111,10 +111,10 @@ impl JsonRpcRequestProcessor {
        &self,
        pubkey: Result<Pubkey>,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<Option<Account>> {
    ) -> RpcResponse<Option<RpcAccount>> {
        let bank = &*self.bank(commitment);
        match pubkey {
            Ok(key) => new_response(bank, bank.get_account(&key)),
            Ok(key) => new_response(bank, bank.get_account(&key).map(RpcAccount::encode)),
            Err(e) => Err(e),
        }
    }
@@ -133,12 +133,15 @@ impl JsonRpcRequestProcessor {
        &self,
        program_id: &Pubkey,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Vec<(String, Account)>> {
    ) -> Result<Vec<RpcKeyedAccount>> {
        Ok(self
            .bank(commitment)
            .get_program_accounts(&program_id)
            .into_iter()
            .map(|(pubkey, account)| (pubkey.to_string(), account))
            .map(|(pubkey, account)| RpcKeyedAccount {
                pubkey: pubkey.to_string(),
                account: RpcAccount::encode(account),
            })
            .collect())
    }

@@ -167,10 +170,16 @@
    fn get_recent_blockhash(
        &self,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<(String, FeeCalculator)> {
    ) -> RpcResponse<RpcBlockhashFeeCalculator> {
        let bank = &*self.bank(commitment);
        let (blockhash, fee_calculator) = bank.confirmed_last_blockhash();
        new_response(bank, (blockhash.to_string(), fee_calculator))
        new_response(
            bank,
            RpcBlockhashFeeCalculator {
                blockhash: blockhash.to_string(),
                fee_calculator,
            },
        )
    }

    pub fn confirm_transaction(
@@ -191,21 +200,25 @@ impl JsonRpcRequestProcessor {
        }
    }

    fn get_block_commitment(&self, block: Slot) -> (Option<BlockCommitment>, u64) {
    fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitment> {
        let r_block_commitment = self.block_commitment_cache.read().unwrap();
        (
            r_block_commitment.get_block_commitment(block).cloned(),
            r_block_commitment.total_stake(),
        )
        RpcBlockCommitment {
            commitment: r_block_commitment.get_block_commitment(block).cloned(),
            total_stake: r_block_commitment.total_stake(),
        }
    }

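For reference, getBlockCommitment now returns a named struct rather than a bare tuple. A local stand-in mirroring the field names in the diff; the camelCase wire casing is an assumption, and the commitment payload is simplified to a Vec<u64> instead of the real BlockCommitment type:

// Requires serde = "1" (with "derive") and serde_json = "1".
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
struct RpcBlockCommitment<T> {
    commitment: Option<T>,
    total_stake: u64,
}

fn main() {
    let resp = RpcBlockCommitment { commitment: Some(vec![0u64, 42]), total_stake: 42 };
    let json = serde_json::to_string(&resp).unwrap();
    // Field order follows the struct; "totalStake" casing is assumed here.
    assert_eq!(json, r#"{"commitment":[0,42],"totalStake":42}"#);
    let back: RpcBlockCommitment<Vec<u64>> = serde_json::from_str(&json).unwrap();
    assert_eq!(back, resp);
}

Named fields make the response self-describing on the wire, which is the design thread running through the rest of this diff.
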
    pub fn get_signature_confirmation_status(
        &self,
        signature: Signature,
        commitment: Option<CommitmentConfig>,
    ) -> Option<(usize, transaction::Result<()>)> {
    ) -> Option<RpcSignatureConfirmation> {
        self.bank(commitment)
            .get_signature_confirmation_status(&signature)
            .map(|(confirmations, status)| RpcSignatureConfirmation {
                confirmations,
                status,
            })
    }

    fn get_slot(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
@@ -282,21 +295,25 @@ impl JsonRpcRequestProcessor {
        Ok(self.storage_state.get_storage_turn_rate())
    }

    fn get_storage_turn(&self) -> Result<(String, u64)> {
        Ok((
            self.storage_state.get_storage_blockhash().to_string(),
            self.storage_state.get_slot(),
        ))
    fn get_storage_turn(&self) -> Result<RpcStorageTurn> {
        Ok(RpcStorageTurn {
            blockhash: self.storage_state.get_storage_blockhash().to_string(),
            slot: self.storage_state.get_slot(),
        })
    }

    fn get_slots_per_segment(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
        Ok(self.bank(commitment).slots_per_segment())
    }

    fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result<Vec<Pubkey>> {
        Ok(self
    fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result<Vec<String>> {
        let pubkeys: Vec<String> = self
            .storage_state
            .get_pubkeys_for_slot(slot, &self.bank_forks))
            .get_pubkeys_for_slot(slot, &self.bank_forks)
            .iter()
            .map(|pubkey| pubkey.to_string())
            .collect();
        Ok(pubkeys)
    }

    pub fn validator_exit(&self) -> Result<bool> {
@@ -312,8 +329,12 @@ impl JsonRpcRequestProcessor {
        }
    }

    pub fn get_confirmed_block(&self, slot: Slot) -> Result<Option<RpcConfirmedBlock>> {
        Ok(self.blocktree.get_confirmed_block(slot).ok())
    pub fn get_confirmed_block(
        &self,
        slot: Slot,
        encoding: Option<RpcTransactionEncoding>,
    ) -> Result<Option<RpcConfirmedBlock>> {
        Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
    }

    pub fn get_confirmed_blocks(
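The new optional `encoding` argument surfaces directly as a second positional parameter in the JSON-RPC request. A sketch of the two request shapes exercised by the tests later in this diff, built with plain serde_json and no RPC client:

use serde_json::json;

fn main() {
    // Default (JSON-encoded transactions): the encoding parameter is simply omitted.
    let json_request = json!({
        "jsonrpc": "2.0", "id": 1, "method": "getConfirmedBlock", "params": [0]
    });
    // Base-58 binary payloads, as asserted in the "binary" test case below.
    let binary_request = json!({
        "jsonrpc": "2.0", "id": 1, "method": "getConfirmedBlock", "params": [0, "binary"]
    });
    println!("{}\n{}", json_request, binary_request);
}
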
@@ -326,9 +347,9 @@ impl JsonRpcRequestProcessor {
            return Ok(vec![]);
        }

        let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot));
        let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
        if let Some(start_slot) = start_slot {
            let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree)
            let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
                .unwrap()
                .map(|(slot, _)| slot)
                .collect();
@@ -344,14 +365,14 @@ impl JsonRpcRequestProcessor {
        // genesis (ie. that this bank's slot_per_year will be applicable to any rooted slot being
        // queried). If these values will be variable in the future, those timing parameters will
        // need to be stored persistently, and the slot_duration calculation will likely need to be
        // moved upstream into blocktree. Also, an explicit commitment level will need to be set.
        // moved upstream into blockstore. Also, an explicit commitment level will need to be set.
        let bank = self.bank(None);
        let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
        let epoch = bank.epoch_schedule().get_epoch(slot);
        let stakes = HashMap::new();
        let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);

        Ok(self.blocktree.get_block_time(slot, slot_duration, stakes))
        Ok(self.blockstore.get_block_time(slot, slot_duration, stakes))
    }
}

@@ -394,7 +415,7 @@ pub trait RpcSol {
        meta: Self::Metadata,
        pubkey_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<Option<Account>>;
    ) -> RpcResponse<Option<RpcAccount>>;

    #[rpc(meta, name = "getProgramAccounts")]
    fn get_program_accounts(
@@ -402,7 +423,7 @@ pub trait RpcSol {
        meta: Self::Metadata,
        program_id_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Vec<(String, Account)>>;
    ) -> Result<Vec<RpcKeyedAccount>>;

    #[rpc(meta, name = "getMinimumBalanceForRentExemption")]
    fn get_minimum_balance_for_rent_exemption(
@@ -445,7 +466,7 @@ pub trait RpcSol {
        &self,
        meta: Self::Metadata,
        block: Slot,
    ) -> Result<(Option<BlockCommitment>, u64)>;
    ) -> Result<RpcBlockCommitment<BlockCommitment>>;

    #[rpc(meta, name = "getGenesisHash")]
    fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>;
@@ -463,7 +484,7 @@ pub trait RpcSol {
        &self,
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<(String, FeeCalculator)>;
    ) -> RpcResponse<RpcBlockhashFeeCalculator>;

    #[rpc(meta, name = "getSignatureStatus")]
    fn get_signature_status(
@@ -520,7 +541,7 @@ pub trait RpcSol {
    fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result<u64>;

    #[rpc(meta, name = "getStorageTurn")]
    fn get_storage_turn(&self, meta: Self::Metadata) -> Result<(String, u64)>;
    fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn>;

    #[rpc(meta, name = "getSlotsPerSegment")]
    fn get_slots_per_segment(
@@ -530,7 +551,7 @@ pub trait RpcSol {
    ) -> Result<u64>;

    #[rpc(meta, name = "getStoragePubkeysForSlot")]
    fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<Pubkey>>;
    fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<String>>;

    #[rpc(meta, name = "validatorExit")]
    fn validator_exit(&self, meta: Self::Metadata) -> Result<bool>;
@@ -549,7 +570,7 @@ pub trait RpcSol {
        meta: Self::Metadata,
        signature_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<(usize, transaction::Result<()>)>>;
    ) -> Result<Option<RpcSignatureConfirmation>>;

    #[rpc(meta, name = "getVersion")]
    fn get_version(&self, meta: Self::Metadata) -> Result<RpcVersionInfo>;
@@ -562,6 +583,7 @@ pub trait RpcSol {
        &self,
        meta: Self::Metadata,
        slot: Slot,
        encoding: Option<RpcTransactionEncoding>,
    ) -> Result<Option<RpcConfirmedBlock>>;

    #[rpc(meta, name = "getBlockTime")]
@@ -599,7 +621,7 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        pubkey_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<Option<Account>> {
    ) -> RpcResponse<Option<RpcAccount>> {
        debug!("get_account_info rpc request received: {:?}", pubkey_str);
        let pubkey = verify_pubkey(pubkey_str);
        meta.request_processor
@@ -629,7 +651,7 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        program_id_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Vec<(String, Account)>> {
    ) -> Result<Vec<RpcKeyedAccount>> {
        debug!(
            "get_program_accounts rpc request received: {:?}",
            program_id_str
@@ -728,7 +750,7 @@ impl RpcSol for RpcSolImpl {
        &self,
        meta: Self::Metadata,
        block: Slot,
    ) -> Result<(Option<BlockCommitment>, u64)> {
    ) -> Result<RpcBlockCommitment<BlockCommitment>> {
        Ok(meta
            .request_processor
            .read()
@@ -772,7 +794,7 @@ impl RpcSol for RpcSolImpl {
        &self,
        meta: Self::Metadata,
        commitment: Option<CommitmentConfig>,
    ) -> RpcResponse<(String, FeeCalculator)> {
    ) -> RpcResponse<RpcBlockhashFeeCalculator> {
        debug!("get_recent_blockhash rpc request received");
        meta.request_processor
            .read()
@@ -787,7 +809,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<transaction::Result<()>>> {
        self.get_signature_confirmation(meta, signature_str, commitment)
            .map(|res| res.map(|x| x.1))
            .map(|res| res.map(|x| x.status))
    }

    fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
@@ -801,7 +823,7 @@ impl RpcSol for RpcSolImpl {
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<usize>> {
        self.get_signature_confirmation(meta, signature_str, commitment)
            .map(|res| res.map(|x| x.0))
            .map(|res| res.map(|x| x.confirmations))
    }

    fn get_signature_confirmation(
@@ -809,7 +831,7 @@ impl RpcSol for RpcSolImpl {
        meta: Self::Metadata,
        signature_str: String,
        commitment: Option<CommitmentConfig>,
    ) -> Result<Option<(usize, transaction::Result<()>)>> {
    ) -> Result<Option<RpcSignatureConfirmation>> {
        debug!(
            "get_signature_confirmation rpc request received: {:?}",
            signature_str
@@ -909,7 +931,7 @@ impl RpcSol for RpcSolImpl {
            .read()
            .unwrap()
            .get_signature_confirmation_status(signature, commitment.clone())
            .map(|x| x.1);
            .map(|x| x.status);

        if signature_status == Some(Ok(())) {
            info!("airdrop signature ok");
@@ -986,7 +1008,7 @@ impl RpcSol for RpcSolImpl {
            .get_storage_turn_rate()
    }

    fn get_storage_turn(&self, meta: Self::Metadata) -> Result<(String, u64)> {
    fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn> {
        meta.request_processor.read().unwrap().get_storage_turn()
    }

@@ -1005,7 +1027,7 @@ impl RpcSol for RpcSolImpl {
        &self,
        meta: Self::Metadata,
        slot: Slot,
    ) -> Result<Vec<Pubkey>> {
    ) -> Result<Vec<String>> {
        meta.request_processor
            .read()
            .unwrap()
@@ -1031,11 +1053,12 @@ impl RpcSol for RpcSolImpl {
        &self,
        meta: Self::Metadata,
        slot: Slot,
        encoding: Option<RpcTransactionEncoding>,
    ) -> Result<Option<RpcConfirmedBlock>> {
        meta.request_processor
            .read()
            .unwrap()
            .get_confirmed_block(slot)
            .get_confirmed_block(slot, encoding)
    }

    fn get_confirmed_blocks(
@@ -1061,11 +1084,13 @@ pub mod tests {
    use crate::{
        contact_info::ContactInfo,
        genesis_utils::{create_genesis_config, GenesisConfigInfo},
        replay_stage::tests::create_test_transactions_and_populate_blocktree,
        replay_stage::tests::create_test_transactions_and_populate_blockstore,
    };
    use bincode::deserialize;
    use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
    use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
    use solana_ledger::{
        blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks,
        blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
        entry::next_entry_mut, get_tmp_ledger_path,
    };
    use solana_sdk::{
@@ -1103,12 +1128,12 @@ pub mod tests {
    }

    fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
        start_rpc_handler_with_tx_and_blocktree(pubkey, vec![], 0)
        start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0)
    }

    fn start_rpc_handler_with_tx_and_blocktree(
    fn start_rpc_handler_with_tx_and_blockstore(
        pubkey: &Pubkey,
        blocktree_roots: Vec<Slot>,
        blockstore_roots: Vec<Slot>,
        default_timestamp: i64,
    ) -> RpcHandler {
        let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
@@ -1126,21 +1151,21 @@ pub mod tests {
        let block_commitment_cache =
            Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blocktree = Arc::new(blocktree);
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let blockstore = Arc::new(blockstore);

        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        bank.transfer(4, &alice, &keypair2.pubkey()).unwrap();
        let confirmed_block_signatures = create_test_transactions_and_populate_blocktree(
        let confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
            vec![&alice, &keypair1, &keypair2, &keypair3],
            0,
            bank.clone(),
            blocktree.clone(),
            blockstore.clone(),
        );

        // Add timestamp vote to blocktree
        // Add timestamp vote to blockstore
        let vote = Vote {
            slots: vec![1],
            hash: Hash::default(),
@@ -1163,10 +1188,10 @@ pub mod tests {
            true,
            0,
        );
        blocktree.insert_shreds(shreds, None, false).unwrap();
        blocktree.set_roots(&[1]).unwrap();
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots(&[1]).unwrap();

        let mut roots = blocktree_roots.clone();
        let mut roots = blockstore_roots.clone();
        if !roots.is_empty() {
            roots.retain(|&x| x > 1);
            let mut parent_bank = bank;
@@ -1177,9 +1202,9 @@ pub mod tests {
            parent_bank.squash();
            bank_forks.write().unwrap().set_root(*root, &None);
            let parent = if i > 0 { roots[i - 1] } else { 1 };
            fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default());
            fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default());
        }
        blocktree.set_roots(&roots).unwrap();
        blockstore.set_roots(&roots).unwrap();
        let new_bank = Bank::new_from_parent(
            &parent_bank,
            parent_bank.collector_id(),
@@ -1205,7 +1230,7 @@ pub mod tests {
            JsonRpcConfig::default(),
            bank_forks.clone(),
            block_commitment_cache.clone(),
            blocktree,
            blockstore,
            StorageState::default(),
            validator_exit,
        )));
@@ -1252,12 +1277,12 @@ pub mod tests {
        let bank = bank_forks.read().unwrap().working_bank();
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let request_processor = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            bank_forks,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            StorageState::default(),
            validator_exit,
        );
@@ -1525,11 +1550,11 @@ pub mod tests {
            "result": {
                "context":{"slot":0},
                "value":{
                "owner": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
                    "owner": "11111111111111111111111111111111",
                    "lamports": 20,
                "data": [],
                    "data": "",
                    "executable": false,
                "rent_epoch": 0
                    "rentEpoch": 0
                },
            },
            "id": 1,
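The expected JSON above is the new RpcAccount wire shape: base-58 strings for owner and data, camelCase keys. A sketch of parsing it with a local stand-in struct that mirrors those exact keys (not an import of the real solana_client type):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct RpcAccount {
    owner: String,   // base-58 pubkey string, replacing the old 32-byte array
    lamports: u64,
    data: String,    // base-58 account data, replacing the old byte vector
    executable: bool,
    rent_epoch: u64, // serialized as "rentEpoch"
}

fn main() {
    let value = r#"{"owner":"11111111111111111111111111111111","lamports":20,"data":"","executable":false,"rentEpoch":0}"#;
    let account: RpcAccount = serde_json::from_str(value).unwrap();
    assert_eq!(account.lamports, 20);
    println!("{:?}", account);
}
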
@@ -1563,17 +1588,22 @@ pub mod tests {
        let expected = format!(
            r#"{{
            "jsonrpc":"2.0",
            "result":[["{}", {{
                "owner": {:?},
                "lamports": 20,
                "data": [],
                "executable": false,
                "rent_epoch": 0
            }}]],
            "result":[
                {{
                    "pubkey": "{}",
                    "account": {{
                        "owner": "{}",
                        "lamports": 20,
                        "data": "",
                        "executable": false,
                        "rentEpoch": 0
                    }}
                }}
            ],
            "id":1}}
        "#,
            bob.pubkey(),
            new_program_id.as_ref()
            new_program_id
        );
        let expected: Response =
            serde_json::from_str(&expected).expect("expected response deserialization");
@@ -1700,14 +1730,17 @@ pub mod tests {
            "jsonrpc": "2.0",
            "result": {
                "context":{"slot":0},
                "value":[ blockhash.to_string(), {
                    "burnPercent": DEFAULT_BURN_PERCENT,
                    "lamportsPerSignature": 0,
                    "maxLamportsPerSignature": 0,
                    "minLamportsPerSignature": 0,
                    "targetLamportsPerSignature": 0,
                    "targetSignaturesPerSlot": 0
                }]},
                "value":{
                    "blockhash": blockhash.to_string(),
                    "feeCalculator": {
                        "burnPercent": DEFAULT_BURN_PERCENT,
                        "lamportsPerSignature": 0,
                        "maxLamportsPerSignature": 0,
                        "minLamportsPerSignature": 0,
                        "targetLamportsPerSignature": 0,
                        "targetSignaturesPerSlot": 0
                    }
                }},
            "id": 1
        });
        let expected: Response =
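getRecentBlockhash likewise moves from a `[blockhash, feeCalculator]` array to a named object. A minimal stand-in deserializer assuming the keys shown in the expected JSON above; the blockhash string and the single fee field are placeholders, and serde ignores the remaining fee fields by default:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct FeeCalculator {
    #[serde(rename = "lamportsPerSignature")]
    lamports_per_signature: u64,
}

#[derive(Deserialize, Debug)]
struct RpcBlockhashFeeCalculator {
    blockhash: String,
    #[serde(rename = "feeCalculator")]
    fee_calculator: FeeCalculator,
}

fn main() {
    // Placeholder blockhash string, not a real cluster value.
    let value = r#"{"blockhash":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","feeCalculator":{"lamportsPerSignature":0}}"#;
    let parsed: RpcBlockhashFeeCalculator = serde_json::from_str(value).unwrap();
    println!("{} costs {} lamports/sig", parsed.blockhash, parsed.fee_calculator.lamports_per_signature);
}
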
@@ -1743,7 +1776,7 @@ pub mod tests {
        let validator_exit = create_validator_exit(&exit);
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();

        let mut io = MetaIoHandler::default();
        let rpc = RpcSolImpl;
@@ -1754,7 +1787,7 @@
            JsonRpcConfig::default(),
            new_bank_forks().0,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            StorageState::default(),
            validator_exit,
        );
@@ -1847,12 +1880,12 @@
        let validator_exit = create_validator_exit(&exit);
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let request_processor = JsonRpcRequestProcessor::new(
            JsonRpcConfig::default(),
            new_bank_forks().0,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            StorageState::default(),
            validator_exit,
        );
@@ -1866,14 +1899,14 @@
        let validator_exit = create_validator_exit(&exit);
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let mut config = JsonRpcConfig::default();
        config.enable_validator_exit = true;
        let request_processor = JsonRpcRequestProcessor::new(
            config,
            new_bank_forks().0,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            StorageState::default(),
            validator_exit,
        );
@@ -1918,7 +1951,7 @@
        let block_commitment_cache =
            Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();

        let mut config = JsonRpcConfig::default();
        config.enable_validator_exit = true;
@@ -1926,19 +1959,31 @@
            config,
            new_bank_forks().0,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            StorageState::default(),
            validator_exit,
        );
        assert_eq!(
            request_processor.get_block_commitment(0),
            (Some(commitment_slot0), 42)
            RpcBlockCommitment {
                commitment: Some(commitment_slot0),
                total_stake: 42,
            }
        );
        assert_eq!(
            request_processor.get_block_commitment(1),
            (Some(commitment_slot1), 42)
            RpcBlockCommitment {
                commitment: Some(commitment_slot1),
                total_stake: 42,
            }
        );
        assert_eq!(
            request_processor.get_block_commitment(2),
            RpcBlockCommitment {
                commitment: None,
                total_stake: 42,
            }
        );
        assert_eq!(request_processor.get_block_commitment(2), (None, 42));
    }

    #[test]
@@ -1956,16 +2001,18 @@
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Response = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let (commitment, total_staked): (Option<BlockCommitment>, u64) =
            if let Response::Single(res) = result {
                if let Output::Success(res) = res {
                    serde_json::from_value(res.result).unwrap()
                } else {
                    panic!("Expected success");
                }
        let RpcBlockCommitment {
            commitment,
            total_stake,
        } = if let Response::Single(res) = result {
            if let Output::Success(res) = res {
                serde_json::from_value(res.result).unwrap()
            } else {
                panic!("Expected single response");
            };
                panic!("Expected success");
            }
        } else {
            panic!("Expected single response");
        };
        assert_eq!(
            commitment,
            block_commitment_cache
@@ -1974,14 +2021,14 @@
                .get_block_commitment(0)
                .cloned()
        );
        assert_eq!(total_staked, 42);
        assert_eq!(total_stake, 42);

        let req =
            format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}}"#);
        let res = io.handle_request_sync(&req, meta);
        let result: Response = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let (commitment, total_staked): (Option<BlockCommitment>, u64) =
        let commitment_response: RpcBlockCommitment<BlockCommitment> =
            if let Response::Single(res) = result {
                if let Output::Success(res) = res {
                    serde_json::from_value(res.result).unwrap()
@@ -1991,8 +2038,8 @@
            } else {
                panic!("Expected single response");
            };
        assert_eq!(commitment, None);
        assert_eq!(total_staked, 42);
        assert_eq!(commitment_response.commitment, None);
        assert_eq!(commitment_response.total_stake, 42);
    }

    #[test]
@@ -2008,6 +2055,38 @@

        let req =
            format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0]}}"#);
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let confirmed_block: Option<RpcConfirmedBlock> =
            serde_json::from_value(result["result"].clone()).unwrap();
        let confirmed_block = confirmed_block.unwrap();
        assert_eq!(confirmed_block.transactions.len(), 3);

        for RpcTransactionWithStatusMeta { transaction, meta } in
            confirmed_block.transactions.into_iter()
        {
            if let RpcEncodedTransaction::Json(transaction) = transaction {
                if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
                    assert_eq!(transaction.message.recent_blockhash, blockhash.to_string());
                    assert_eq!(meta.unwrap().status, Ok(()));
                } else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() {
                    assert_eq!(
                        meta.unwrap().status,
                        Err(TransactionError::InstructionError(
                            0,
                            InstructionError::CustomError(1)
                        ))
                    );
                } else {
                    assert_eq!(meta, None);
                }
            }
        }

        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0, "binary"]}}"#
        );
        let res = io.handle_request_sync(&req, meta);
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
@@ -2016,20 +2095,26 @@
        let confirmed_block = confirmed_block.unwrap();
        assert_eq!(confirmed_block.transactions.len(), 3);

        for (transaction, result) in confirmed_block.transactions.into_iter() {
            if transaction.signatures[0] == confirmed_block_signatures[0] {
                assert_eq!(transaction.message.recent_blockhash, blockhash);
                assert_eq!(result.unwrap().status, Ok(()));
            } else if transaction.signatures[0] == confirmed_block_signatures[1] {
                assert_eq!(
                    result.unwrap().status,
                    Err(TransactionError::InstructionError(
                        0,
                        InstructionError::CustomError(1)
                    ))
                );
            } else {
                assert_eq!(result, None);
        for RpcTransactionWithStatusMeta { transaction, meta } in
            confirmed_block.transactions.into_iter()
        {
            if let RpcEncodedTransaction::Binary(transaction) = transaction {
                let decoded_transaction: Transaction =
                    deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap();
                if decoded_transaction.signatures[0] == confirmed_block_signatures[0] {
                    assert_eq!(decoded_transaction.message.recent_blockhash, blockhash);
                    assert_eq!(meta.unwrap().status, Ok(()));
                } else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] {
                    assert_eq!(
                        meta.unwrap().status,
                        Err(TransactionError::InstructionError(
                            0,
                            InstructionError::CustomError(1)
                        ))
                    );
                } else {
                    assert_eq!(meta, None);
                }
            }
        }
    }
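The "binary" branch above base-58-decodes the payload and bincode-deserializes it back into a typed transaction. A self-contained round-trip of that path with a stand-in payload type, using the same bs58 and bincode calls as the test (bincode 1.x, bs58 0.3-style APIs):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Payload {
    signature: [u8; 8], // stand-in for a real 64-byte transaction signature
    lamports: u64,
}

fn main() {
    let original = Payload { signature: [7; 8], lamports: 42 };
    // Server side: serialize and base-58 encode, as the node does for "binary".
    let encoded = bs58::encode(bincode::serialize(&original).unwrap()).into_string();
    // Client side: decode the string back into the typed value, as in the test.
    let decoded: Payload =
        bincode::deserialize(&bs58::decode(&encoded).into_vec().unwrap()).unwrap();
    assert_eq!(decoded, original);
}
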
@@ -2039,7 +2124,7 @@ pub mod tests {
        let bob_pubkey = Pubkey::new_rand();
        let roots = vec![0, 1, 3, 4, 8];
        let RpcHandler { io, meta, .. } =
            start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone(), 0);
            start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0);

        let req =
            format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@@ -2086,7 +2171,7 @@
    fn test_get_block_time() {
        let bob_pubkey = Pubkey::new_rand();
        let base_timestamp = 1576183541;
        let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree(
        let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore(
            &bob_pubkey,
            vec![1, 2, 3, 4, 5, 6, 7],
            base_timestamp,

@@ -3,12 +3,9 @@
use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::typed::Subscriber;
use jsonrpc_pubsub::{Session, SubscriptionId};
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::transaction;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
use solana_sdk::{pubkey::Pubkey, signature::Signature, transaction};
use std::sync::{atomic, Arc};

// Suppress needless_return due to
@@ -28,10 +25,10 @@ pub trait RpcSolPubSub {
    )]
    fn account_subscribe(
        &self,
        _: Self::Metadata,
        _: Subscriber<Account>,
        _: String,
        _: Option<Confirmations>,
        meta: Self::Metadata,
        subscriber: Subscriber<RpcAccount>,
        pubkey_str: String,
        confirmations: Option<Confirmations>,
    );

    // Unsubscribe from account notification subscription.
@@ -40,7 +37,8 @@ pub trait RpcSolPubSub {
        unsubscribe,
        name = "accountUnsubscribe"
    )]
    fn account_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
    fn account_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
        -> Result<bool>;

    // Get notification every time account data owned by a particular program is changed
    // Accepts pubkey parameter as base-58 encoded string
@@ -51,10 +49,10 @@ pub trait RpcSolPubSub {
    )]
    fn program_subscribe(
        &self,
        _: Self::Metadata,
        _: Subscriber<(String, Account)>,
        _: String,
        _: Option<Confirmations>,
        meta: Self::Metadata,
        subscriber: Subscriber<RpcKeyedAccount>,
        pubkey_str: String,
        confirmations: Option<Confirmations>,
    );

    // Unsubscribe from account notification subscription.
@@ -63,7 +61,8 @@ pub trait RpcSolPubSub {
        unsubscribe,
        name = "programUnsubscribe"
    )]
    fn program_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
    fn program_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId)
        -> Result<bool>;

    // Get notification when signature is verified
    // Accepts signature parameter as base-58 encoded string
@@ -74,10 +73,10 @@ pub trait RpcSolPubSub {
    )]
    fn signature_subscribe(
        &self,
        _: Self::Metadata,
        _: Subscriber<transaction::Result<()>>,
        _: String,
        _: Option<Confirmations>,
        meta: Self::Metadata,
        subscriber: Subscriber<transaction::Result<()>>,
        signature_str: String,
        confirmations: Option<Confirmations>,
    );

    // Unsubscribe from signature notification subscription.
@@ -86,11 +85,15 @@ pub trait RpcSolPubSub {
        unsubscribe,
        name = "signatureUnsubscribe"
    )]
    fn signature_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
    fn signature_unsubscribe(
        &self,
        meta: Option<Self::Metadata>,
        id: SubscriptionId,
    ) -> Result<bool>;

    // Get notification when slot is encountered
    #[pubsub(subscription = "slotNotification", subscribe, name = "slotSubscribe")]
    fn slot_subscribe(&self, _: Self::Metadata, _: Subscriber<SlotInfo>);
    fn slot_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<SlotInfo>);

    // Unsubscribe from slot notification subscription.
    #[pubsub(
@@ -98,7 +101,7 @@ pub trait RpcSolPubSub {
        unsubscribe,
        name = "slotUnsubscribe"
    )]
    fn slot_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
    fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
}

#[derive(Default)]
@@ -130,7 +133,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
    fn account_subscribe(
        &self,
        _meta: Self::Metadata,
        subscriber: Subscriber<Account>,
        subscriber: Subscriber<RpcAccount>,
        pubkey_str: String,
        confirmations: Option<Confirmations>,
    ) {
@@ -168,7 +171,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
    fn program_subscribe(
        &self,
        _meta: Self::Metadata,
        subscriber: Subscriber<(String, Account)>,
        subscriber: Subscriber<RpcKeyedAccount>,
        pubkey_str: String,
        confirmations: Option<Confirmations>,
    ) {
@@ -277,21 +280,18 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
mod tests {
    use super::*;
    use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
    use jsonrpc_core::futures::sync::mpsc;
    use jsonrpc_core::Response;
    use jsonrpc_core::{futures::sync::mpsc, Response};
    use jsonrpc_pubsub::{PubSubHandler, Session};
    use solana_budget_program;
    use solana_budget_program::budget_instruction;
    use solana_budget_program::{self, budget_instruction};
    use solana_ledger::bank_forks::BankForks;
    use solana_runtime::bank::Bank;
    use solana_sdk::pubkey::Pubkey;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_program;
    use solana_sdk::system_transaction;
    use solana_sdk::transaction::{self, Transaction};
    use std::sync::RwLock;
    use std::thread::sleep;
    use std::time::Duration;
    use solana_sdk::{
        pubkey::Pubkey,
        signature::{Keypair, KeypairUtil},
        system_program, system_transaction,
        transaction::{self, Transaction},
    };
    use std::{sync::RwLock, thread::sleep, time::Duration};
    use tokio::prelude::{Async, Stream};

    fn process_transaction_and_notify(
@@ -467,11 +467,11 @@ mod tests {
            "method": "accountNotification",
            "params": {
                "result": {
                    "owner": budget_program_id,
                    "owner": budget_program_id.to_string(),
                    "lamports": 51,
                    "data": expected_data,
                    "data": bs58::encode(expected_data).into_string(),
                    "executable": false,
                    "rent_epoch": 1,
                    "rentEpoch": 1,
                },
                "subscription": 0,
            }
@@ -614,11 +614,11 @@ mod tests {
            "method": "accountNotification",
            "params": {
                "result": {
                    "owner": system_program::id(),
                    "owner": system_program::id().to_string(),
                    "lamports": 100,
                    "data": [],
                    "data": "",
                    "executable": false,
                    "rent_epoch": 1,
                    "rentEpoch": 1,
                },
                "subscription": 0,
            }
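For orientation, the `#[pubsub(...)]` attributes above register the method names a websocket client uses. Illustrative frames only, built with serde_json; the exact parameter lists beyond the pubkey are assumptions:

use serde_json::json;

fn main() {
    // Subscribe to an account by base-58 pubkey; the reply carries a subscription id.
    let subscribe = json!({
        "jsonrpc": "2.0", "id": 1, "method": "accountSubscribe",
        "params": ["11111111111111111111111111111111"]
    });
    // Notifications now carry RpcAccount fields ("rentEpoch", base-58 "owner"/"data").
    let unsubscribe = json!({
        "jsonrpc": "2.0", "id": 2, "method": "accountUnsubscribe", "params": [0]
    });
    println!("{}\n{}", subscribe, unsubscribe);
}
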
@@ -9,7 +9,7 @@ use jsonrpc_http_server::{
    hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware,
    RequestMiddlewareAction, ServerBuilder,
};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::hash::Hash;
use std::{
    net::SocketAddr,
@@ -91,7 +91,7 @@ impl JsonRpcService {
        config: JsonRpcConfig,
        bank_forks: Arc<RwLock<BankForks>>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        blocktree: Arc<Blocktree>,
        blockstore: Arc<Blockstore>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        genesis_hash: Hash,
        ledger_path: &Path,
@@ -104,7 +104,7 @@ impl JsonRpcService {
            config,
            bank_forks,
            block_commitment_cache,
            blocktree,
            blockstore,
            storage_state,
            validator_exit.clone(),
        )));
@@ -204,13 +204,13 @@ mod tests {
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let ledger_path = get_tmp_ledger_path!();
        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let blockstore = Blockstore::open(&ledger_path).unwrap();
        let mut rpc_service = JsonRpcService::new(
            rpc_addr,
            JsonRpcConfig::default(),
            bank_forks,
            block_commitment_cache,
            Arc::new(blocktree),
            Arc::new(blockstore),
            cluster_info,
            Hash::default(),
            &PathBuf::from("farf"),

@@ -4,14 +4,17 @@ use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{typed::Sink, SubscriptionId};
use serde::Serialize;
use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use solana_sdk::{
    account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

pub type Confirmations = usize;

@@ -23,9 +26,9 @@ pub struct SlotInfo {
}

type RpcAccountSubscriptions =
    RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<Account>, Confirmations)>>>;
    RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcAccount>, Confirmations)>>>;
type RpcProgramSubscriptions =
    RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<(String, Account)>, Confirmations)>>>;
    RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcKeyedAccount>, Confirmations)>>>;
type RpcSignatureSubscriptions = RwLock<
    HashMap<Signature, HashMap<SubscriptionId, (Sink<transaction::Result<()>>, Confirmations)>>,
>;
@@ -127,13 +130,10 @@ fn check_confirmations_and_notify<K, S, F, N, X>(
    }
}

fn notify_account<S>(result: Option<(S, Slot)>, sink: &Sink<S>, root: Slot)
where
    S: Clone + Serialize,
{
fn notify_account(result: Option<(Account, Slot)>, sink: &Sink<RpcAccount>, root: Slot) {
    if let Some((account, fork)) = result {
        if fork >= root {
            sink.notify(Ok(account)).wait().unwrap();
            sink.notify(Ok(RpcAccount::encode(account))).wait().unwrap();
        }
    }
}
@@ -147,11 +147,14 @@ where
    }
}

fn notify_program(accounts: Vec<(Pubkey, Account)>, sink: &Sink<(String, Account)>, _root: Slot) {
fn notify_program(accounts: Vec<(Pubkey, Account)>, sink: &Sink<RpcKeyedAccount>, _root: Slot) {
    for (pubkey, account) in accounts.iter() {
        sink.notify(Ok((pubkey.to_string(), account.clone())))
            .wait()
            .unwrap();
        sink.notify(Ok(RpcKeyedAccount {
            pubkey: pubkey.to_string(),
            account: RpcAccount::encode(account.clone()),
        }))
        .wait()
        .unwrap();
    }
}

@@ -231,7 +234,7 @@ impl RpcSubscriptions {
        pubkey: &Pubkey,
        confirmations: Option<Confirmations>,
        sub_id: &SubscriptionId,
        sink: &Sink<Account>,
        sink: &Sink<RpcAccount>,
    ) {
        let mut subscriptions = self.account_subscriptions.write().unwrap();
        add_subscription(&mut subscriptions, pubkey, confirmations, sub_id, sink);
@@ -247,7 +250,7 @@ impl RpcSubscriptions {
        program_id: &Pubkey,
        confirmations: Option<Confirmations>,
        sub_id: &SubscriptionId,
        sink: &Sink<(String, Account)>,
        sink: &Sink<RpcKeyedAccount>,
    ) {
        let mut subscriptions = self.program_subscriptions.write().unwrap();
        add_subscription(&mut subscriptions, program_id, confirmations, sub_id, sink);
@@ -328,8 +331,10 @@ mod tests {
    use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
    use jsonrpc_pubsub::typed::Subscriber;
    use solana_budget_program;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_transaction;
    use solana_sdk::{
        signature::{Keypair, KeypairUtil},
        system_transaction,
    };
    use tokio::prelude::{Async, Stream};

    #[test]
@@ -376,7 +381,7 @@ mod tests {
        let string = transport_receiver.poll();
        if let Async::Ready(Some(response)) = string.unwrap() {
            let expected = format!(
                r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0],"rent_epoch":1}},"subscription":0}}}}"#
                r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"subscription":0}}}}"#
            );
            assert_eq!(expected, response);
        }
@@ -433,7 +438,7 @@ mod tests {
        let string = transport_receiver.poll();
        if let Async::Ready(Some(response)) = string.unwrap() {
            let expected = format!(
                r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[2,203,81,223,225,24,34,35,203,214,138,130,144,208,35,77,63,16,87,51,47,198,115,123,98,188,19,160,0,0,0,0],"rent_epoch":1}}],"subscription":0}}}}"#,
                r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{{"account":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"pubkey":"{:?}"}},"subscription":0}}}}"#,
                alice.pubkey()
            );
            assert_eq!(expected, response);
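The subscription tables these sinks live in are two-level maps, as the type aliases above show: watched pubkey on the outside, subscription id to (sink, confirmations) on the inside. A sketch of that shape with std types only, using channel senders in place of jsonrpc_pubsub sinks:

use std::collections::HashMap;
use std::sync::mpsc::{channel, Sender};
use std::sync::RwLock;

type Confirmations = usize;
// Pubkey keyed as a String here; the real aliases use solana_sdk::pubkey::Pubkey.
type Subscriptions<V> = RwLock<HashMap<String, HashMap<u64, (Sender<V>, Confirmations)>>>;

fn main() {
    let subs: Subscriptions<String> = RwLock::new(HashMap::new());
    let (tx, rx) = channel();
    subs.write()
        .unwrap()
        .entry("Budget1111111111111111111111111111111111111".to_string())
        .or_default()
        .insert(0, (tx, 1));
    // Notify every sink registered for every pubkey, as notify_program does above.
    for (sink, _confirmations) in subs.read().unwrap().values().flat_map(|m| m.values()) {
        sink.send("updated account".to_string()).unwrap();
    }
    println!("{}", rx.recv().unwrap());
}
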
@@ -10,7 +10,7 @@ use crate::{
};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{
    account::Account,
@@ -177,7 +177,7 @@ impl StorageStage {
    pub fn new(
        storage_state: &StorageState,
        bank_receiver: Receiver<Vec<Arc<Bank>>>,
        blocktree: Option<Arc<Blocktree>>,
        blockstore: Option<Arc<Blockstore>>,
        keypair: &Arc<Keypair>,
        storage_keypair: &Arc<Keypair>,
        exit: &Arc<AtomicBool>,
@@ -197,12 +197,12 @@ impl StorageStage {
        let mut current_key = 0;
        let mut storage_slots = StorageSlots::default();
        loop {
            if let Some(ref some_blocktree) = blocktree {
            if let Some(ref some_blockstore) = blockstore {
                if let Err(e) = Self::process_entries(
                    &storage_keypair,
                    &storage_state_inner,
                    &bank_receiver,
                    &some_blocktree,
                    &some_blockstore,
                    &mut storage_slots,
                    &mut current_key,
                    slots_per_turn,
@@ -368,7 +368,7 @@ impl StorageStage {
    fn process_turn(
        storage_keypair: &Arc<Keypair>,
        state: &Arc<RwLock<StorageStateInner>>,
        blocktree: &Arc<Blocktree>,
        blockstore: &Arc<Blockstore>,
        blockhash: Hash,
        slot: Slot,
        slots_per_segment: u64,
@@ -431,7 +431,7 @@ impl StorageStage {
        let mut statew = state.write().unwrap();

        match chacha_cbc_encrypt_file_many_keys(
            blocktree,
            blockstore,
            segment as u64,
            statew.slots_per_segment,
            &mut statew.storage_keys,
@@ -502,7 +502,7 @@ impl StorageStage {
        storage_keypair: &Arc<Keypair>,
        storage_state: &Arc<RwLock<StorageStateInner>>,
        bank_receiver: &Receiver<Vec<Arc<Bank>>>,
        blocktree: &Arc<Blocktree>,
        blockstore: &Arc<Blockstore>,
        storage_slots: &mut StorageSlots,
        current_key_idx: &mut usize,
        slots_per_turn: u64,
@@ -541,7 +541,7 @@ impl StorageStage {
        let _ignored = Self::process_turn(
            &storage_keypair,
            &storage_state,
            &blocktree,
            &blockstore,
            bank.last_blockhash(),
            bank.slot(),
            bank.slots_per_segment(),

@@ -4,7 +4,7 @@
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use crate::result::{Error, Result};
use crate::thread_mem_usage;
use solana_measure::thread_mem_usage;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};

@@ -12,7 +12,7 @@ use crate::{
     sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
 };
 use crossbeam_channel::unbounded;
-use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusSender};
+use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
 use std::{
     net::UdpSocket,
     sync::{
@@ -42,7 +42,7 @@ impl Tpu {
         broadcast_sockets: Vec<UdpSocket>,
         sigverify_disabled: bool,
         transaction_status_sender: Option<TransactionStatusSender>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         broadcast_type: &BroadcastStageType,
         exit: &Arc<AtomicBool>,
         shred_version: u16,
@@ -87,7 +87,7 @@ impl Tpu {
             cluster_info.clone(),
             entry_receiver,
             &exit,
-            blocktree,
+            blockstore,
             shred_version,
         );

@@ -1,7 +1,7 @@
 use crate::result::{Error, Result};
 use crossbeam_channel::{Receiver, RecvTimeoutError};
-use solana_client::rpc_request::RpcTransactionStatus;
-use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusBatch};
+use solana_client::rpc_response::RpcTransactionStatus;
+use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch};
 use solana_runtime::bank::{Bank, HashAgeKind};
 use std::{
     sync::{
@@ -20,7 +20,7 @@ impl TransactionStatusService {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(
         write_transaction_status_receiver: Receiver<TransactionStatusBatch>,
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         exit: &Arc<AtomicBool>,
     ) -> Self {
         let exit = exit.clone();
@@ -32,7 +32,7 @@ impl TransactionStatusService {
                 }
                 if let Err(e) = Self::write_transaction_status_batch(
                     &write_transaction_status_receiver,
-                    &blocktree,
+                    &blockstore,
                 ) {
                     match e {
                         Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@@ -47,7 +47,7 @@ impl TransactionStatusService {

     fn write_transaction_status_batch(
         write_transaction_status_receiver: &Receiver<TransactionStatusBatch>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
     ) -> Result<()> {
         let TransactionStatusBatch {
             bank,
@@ -64,7 +64,7 @@ impl TransactionStatusService {
             .zip(balances.post_balances)
         {
             if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
-                let fee_hash = if let Some(HashAgeKind::DurableNonce) = hash_age_kind {
+                let fee_hash = if let Some(HashAgeKind::DurableNonce(_, _)) = hash_age_kind {
                     bank.last_blockhash()
                 } else {
                     transaction.message().recent_blockhash
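The pattern change above tracks HashAgeKind::DurableNonce becoming a data-carrying variant, so the bare pattern no longer matches. A self-contained sketch with a stand-in enum; the payload types here are made up for illustration and are not the runtime's real definition:

    enum HashAgeKind {
        Extant,
        DurableNonce(u64, u64), // stand-in payload, not the real field types
    }

    fn pick_fee_hash(kind: &Option<HashAgeKind>) -> &'static str {
        // `DurableNonce(_, _)` ignores the payload but must acknowledge it,
        // which is why the bare `DurableNonce` pattern had to change.
        if let Some(HashAgeKind::DurableNonce(_, _)) = kind {
            "bank.last_blockhash()"
        } else {
            "transaction.message().recent_blockhash"
        }
    }

    fn main() {
        assert_eq!(pick_fee_hash(&Some(HashAgeKind::DurableNonce(0, 0))), "bank.last_blockhash()");
        assert_eq!(pick_fee_hash(&Some(HashAgeKind::Extant)), "transaction.message().recent_blockhash");
    }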
@@ -73,7 +73,7 @@ impl TransactionStatusService {
                     .get_fee_calculator(&fee_hash)
                     .expect("FeeCalculator must exist");
                 let fee = fee_calculator.calculate_fee(transaction.message());
-                blocktree
+                blockstore
                     .write_transaction_status(
                         (slot, transaction.signatures[0]),
                         &RpcTransactionStatus {
@@ -21,8 +21,8 @@ use crossbeam_channel::unbounded;
 use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
 use solana_ledger::{
     bank_forks::BankForks,
-    blocktree::{Blocktree, CompletedSlotsReceiver},
-    blocktree_processor::TransactionStatusSender,
+    blockstore::{Blockstore, CompletedSlotsReceiver},
+    blockstore_processor::TransactionStatusSender,
 };
 use solana_sdk::{
     pubkey::Pubkey,
@@ -63,7 +63,7 @@ impl Tvu {
     /// # Arguments
     /// * `cluster_info` - The cluster_info state.
     /// * `sockets` - fetch, repair, and retransmit sockets
-    /// * `blocktree` - the ledger itself
+    /// * `blockstore` - the ledger itself
     #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
     pub fn new(
         vote_account: &Pubkey,
@@ -72,7 +72,7 @@ impl Tvu {
         bank_forks: &Arc<RwLock<BankForks>>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         sockets: Sockets,
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         storage_state: &StorageState,
         blockstream_unix_socket: Option<&PathBuf>,
         max_ledger_slots: Option<u64>,
@@ -133,7 +133,7 @@ impl Tvu {
         let retransmit_stage = RetransmitStage::new(
             bank_forks.clone(),
             leader_schedule_cache,
-            blocktree.clone(),
+            blockstore.clone(),
             &cluster_info,
             Arc::new(retransmit_sockets),
             repair_socket,
@@ -175,7 +175,7 @@ impl Tvu {

         let (replay_stage, root_bank_receiver) = ReplayStage::new(
             replay_stage_config,
-            blocktree.clone(),
+            blockstore.clone(),
             bank_forks.clone(),
             cluster_info.clone(),
             ledger_signal_receiver,
@@ -185,7 +185,7 @@ impl Tvu {
         let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket {
             let blockstream_service = BlockstreamService::new(
                 blockstream_slot_receiver,
-                blocktree.clone(),
+                blockstore.clone(),
                 blockstream_unix_socket,
                 &exit,
             );
@@ -197,7 +197,7 @@ impl Tvu {
         let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| {
             LedgerCleanupService::new(
                 ledger_cleanup_slot_receiver,
-                blocktree.clone(),
+                blockstore.clone(),
                 max_ledger_slots,
                 &exit,
             )
@@ -206,7 +206,7 @@ impl Tvu {
         let storage_stage = StorageStage::new(
             storage_state,
             root_bank_receiver,
-            Some(blocktree),
+            Some(blockstore),
             &keypair,
             storage_keypair,
             &exit,
@@ -272,14 +272,14 @@ pub mod tests {
         cluster_info1.insert_info(leader.info.clone());
         let cref1 = Arc::new(RwLock::new(cluster_info1));

-        let (blocktree_path, _) = create_new_tmp_ledger!(&genesis_config);
-        let (blocktree, l_receiver, completed_slots_receiver) =
-            Blocktree::open_with_signal(&blocktree_path)
+        let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
+        let (blockstore, l_receiver, completed_slots_receiver) =
+            Blockstore::open_with_signal(&blockstore_path)
                 .expect("Expected to successfully open ledger");
-        let blocktree = Arc::new(blocktree);
+        let blockstore = Arc::new(blockstore);
         let bank = bank_forks.working_bank();
         let (exit, poh_recorder, poh_service, _entry_receiver) =
-            create_test_recorder(&bank, &blocktree, None);
+            create_test_recorder(&bank, &blockstore, None);
         let voting_keypair = Keypair::new();
         let storage_keypair = Arc::new(Keypair::new());
         let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
@@ -298,7 +298,7 @@ pub mod tests {
                     forwards: target1.sockets.tvu_forwards,
                 }
             },
-            blocktree,
+            blockstore,
             &StorageState::default(),
             None,
             None,

@@ -23,8 +23,8 @@ use crossbeam_channel::unbounded;
 use solana_ledger::{
     bank_forks::{BankForks, SnapshotConfig},
     bank_forks_utils,
-    blocktree::{Blocktree, CompletedSlotsReceiver},
-    blocktree_processor::{self, BankForksInfo},
+    blockstore::{Blockstore, CompletedSlotsReceiver},
+    blockstore_processor::{self, BankForksInfo},
     create_new_tmp_ledger,
     leader_schedule::FixedSchedule,
     leader_schedule_cache::LeaderScheduleCache,
@@ -48,7 +48,8 @@ use std::{
     sync::atomic::{AtomicBool, Ordering},
     sync::mpsc::Receiver,
     sync::{Arc, Mutex, RwLock},
-    thread::Result,
+    thread::{sleep, Result},
     time::Duration,
 };

 #[derive(Clone, Debug)]
@@ -67,6 +68,7 @@ pub struct ValidatorConfig {
     pub broadcast_stage_type: BroadcastStageType,
     pub partition_cfg: Option<PartitionCfg>,
     pub fixed_leader_schedule: Option<FixedSchedule>,
+    pub wait_for_supermajority: bool,
 }

 impl Default for ValidatorConfig {
@@ -86,6 +88,7 @@ impl Default for ValidatorConfig {
             broadcast_stage_type: BroadcastStageType::Standard,
             partition_cfg: None,
             fixed_leader_schedule: None,
+            wait_for_supermajority: false,
         }
     }
 }
@@ -138,32 +141,10 @@ impl Validator {

         warn!("identity pubkey: {:?}", id);
         warn!("vote pubkey: {:?}", vote_account);
-        warn!(
-            "CUDA is {}abled",
-            if solana_perf::perf_libs::api().is_some() {
-                "en"
-            } else {
-                "dis"
-            }
-        );
-
-        // Validator binaries built on a machine with AVX support will generate invalid opcodes
-        // when run on machines without AVX causing a non-obvious process abort. Instead detect
-        // the mismatch and error cleanly.
-        #[target_feature(enable = "avx")]
-        {
-            if is_x86_feature_detected!("avx") {
-                info!("AVX detected");
-            } else {
-                error!("Your machine does not have AVX support, please rebuild from source on your machine");
-                process::exit(1);
-            }
-        }
+        report_target_features();

         info!("entrypoint: {:?}", entrypoint_info_option);

-        Self::print_node_info(&node);
-
         info!("Initializing sigverify, this could take a while...");
         sigverify::init();
         info!("Done.");
@@ -173,12 +154,12 @@ impl Validator {
             genesis_hash,
             bank_forks,
             bank_forks_info,
-            blocktree,
+            blockstore,
             ledger_signal_receiver,
             completed_slots_receiver,
             leader_schedule_cache,
             poh_config,
-        ) = new_banks_from_blocktree(
+        ) = new_banks_from_blockstore(
             config.expected_genesis_hash,
             ledger_path,
             config.account_paths.clone(),
@@ -194,8 +175,6 @@ impl Validator {
         let bank = bank_forks[bank_info.bank_slot].clone();
         let bank_forks = Arc::new(RwLock::new(bank_forks));
         let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
-        // The version used by shreds, derived from genesis
-        let shred_version = Shred::version_from_hash(&genesis_hash);

         let mut validator_exit = ValidatorExit::default();
         let exit_ = exit.clone();
@@ -203,6 +182,9 @@ impl Validator {
         let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));

         node.info.wallclock = timestamp();
+        node.info.shred_version = Shred::version_from_hash(&genesis_hash);
+        Self::print_node_info(&node);
+
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
             node.info.clone(),
             keypair.clone(),
@@ -214,7 +196,7 @@ impl Validator {
             bank.slots_per_segment(),
         );

-        let blocktree = Arc::new(blocktree);
+        let blockstore = Arc::new(blockstore);

         let rpc_service = if node.info.rpc.port() == 0 {
             None
@@ -224,7 +206,7 @@ impl Validator {
                 config.rpc_config.clone(),
                 bank_forks.clone(),
                 block_commitment_cache.clone(),
-                blocktree.clone(),
+                blockstore.clone(),
                 cluster_info.clone(),
                 genesis_hash,
                 ledger_path,
@@ -254,7 +236,7 @@ impl Validator {
                 Some(transaction_status_sender),
                 Some(TransactionStatusService::new(
                     transaction_status_receiver,
-                    blocktree.clone(),
+                    blockstore.clone(),
                     &exit,
                 )),
             )
@@ -282,31 +264,24 @@ impl Validator {
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
+            leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blockstore)),
             bank.ticks_per_slot(),
             &id,
-            &blocktree,
-            blocktree.new_shreds_signals.first().cloned(),
+            &blockstore,
+            blockstore.new_shreds_signals.first().cloned(),
             &leader_schedule_cache,
             &poh_config,
         );
         if config.snapshot_config.is_some() {
             poh_recorder.set_bank(&bank);
         }

         let poh_recorder = Arc::new(Mutex::new(poh_recorder));
-        let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
-        assert_eq!(
-            blocktree.new_shreds_signals.len(),
-            1,
-            "New shred signal for the TVU should be the same as the clear bank signal."
-        );

         let ip_echo_server = solana_net_utils::ip_echo_server(node.sockets.ip_echo.unwrap());

         let gossip_service = GossipService::new(
             &cluster_info,
-            Some(blocktree.clone()),
+            Some(blockstore.clone()),
             Some(bank_forks.clone()),
             node.sockets.gossip,
             &exit,
@@ -321,6 +296,22 @@ impl Validator {
                 .set_entrypoint(entrypoint_info.clone());
         }

+        if config.wait_for_supermajority {
+            info!(
+                "Waiting for more than 66% of activated stake at slot {} to be in gossip...",
+                bank.slot()
+            );
+            loop {
+                let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
+
+                info!("{}% of activated stake in gossip", gossip_stake_percent,);
+                if gossip_stake_percent > 66 {
+                    break;
+                }
+                sleep(Duration::new(1, 0));
+            }
+        }
+
         let sockets = Sockets {
             repair: node
                 .sockets
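The supermajority gate added above is a simple poll-and-sleep loop. A minimal sketch of the same pattern in isolation, with a closure standing in for get_stake_percent_in_gossip (which samples gossip via ClusterInfo in the real code):

    use std::{thread::sleep, time::Duration};

    // Poll once per second until strictly more than 66% of activated stake
    // is visible, mirroring the `gossip_stake_percent > 66` check above.
    fn wait_for_supermajority(mut sample_percent: impl FnMut() -> u64) {
        loop {
            let pct = sample_percent();
            println!("{}% of activated stake in gossip", pct);
            if pct > 66 {
                break;
            }
            sleep(Duration::new(1, 0));
        }
    }

    fn main() {
        let mut pct = 60;
        // Simulated: stake becomes visible in gossip a little at a time.
        wait_for_supermajority(move || {
            pct += 5;
            pct
        });
    }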
@@ -353,6 +344,13 @@ impl Validator {
             Some(voting_keypair.clone())
         };

+        let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
+        assert_eq!(
+            blockstore.new_shreds_signals.len(),
+            1,
+            "New shred signal for the TVU should be the same as the clear bank signal."
+        );
+
         let tvu = Tvu::new(
             vote_account,
             voting_keypair,
@@ -360,7 +358,7 @@ impl Validator {
             &bank_forks,
             &cluster_info,
             sockets,
-            blocktree.clone(),
+            blockstore.clone(),
             &storage_state,
             config.blockstream_unix_socket.as_ref(),
             config.max_ledger_slots,
@@ -373,7 +371,7 @@ impl Validator {
             block_commitment_cache,
             config.dev_sigverify_disabled,
             config.partition_cfg.clone(),
-            shred_version,
+            node.info.shred_version,
             transaction_status_sender.clone(),
         );

@@ -390,10 +388,10 @@ impl Validator {
             node.sockets.broadcast,
             config.dev_sigverify_disabled,
             transaction_status_sender,
-            &blocktree,
+            &blockstore,
             &config.broadcast_stage_type,
             &exit,
-            shred_version,
+            node.info.shred_version,
         );

         datapoint_info!("validator-new", ("id", id.to_string(), String));
@@ -471,9 +469,9 @@ impl Validator {
     }
 }

-pub fn new_banks_from_blocktree(
+pub fn new_banks_from_blockstore(
     expected_genesis_hash: Option<Hash>,
-    blocktree_path: &Path,
+    blockstore_path: &Path,
     account_paths: Vec<PathBuf>,
     snapshot_config: Option<SnapshotConfig>,
     poh_verify: bool,
@@ -483,14 +481,14 @@ pub fn new_banks_from_blockstore(
     Hash,
     BankForks,
     Vec<BankForksInfo>,
-    Blocktree,
+    Blockstore,
     Receiver<bool>,
     CompletedSlotsReceiver,
     LeaderScheduleCache,
     PohConfig,
 ) {
-    let genesis_config = GenesisConfig::load(blocktree_path).unwrap_or_else(|err| {
-        error!("Failed to load genesis from {:?}: {}", blocktree_path, err);
+    let genesis_config = GenesisConfig::load(blockstore_path).unwrap_or_else(|err| {
+        error!("Failed to load genesis from {:?}: {}", blockstore_path, err);
         process::exit(1);
     });
     let genesis_hash = genesis_config.hash();
@@ -501,24 +499,24 @@ pub fn new_banks_from_blockstore(
             error!("genesis hash mismatch: expected {}", expected_genesis_hash);
             error!(
                 "Delete the ledger directory to continue: {:?}",
-                blocktree_path
+                blockstore_path
             );
             process::exit(1);
         }
     }

-    let (blocktree, ledger_signal_receiver, completed_slots_receiver) =
-        Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database");
+    let (blockstore, ledger_signal_receiver, completed_slots_receiver) =
+        Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");

-    let process_options = blocktree_processor::ProcessOptions {
+    let process_options = blockstore_processor::ProcessOptions {
         poh_verify,
         dev_halt_at_slot,
-        ..blocktree_processor::ProcessOptions::default()
+        ..blockstore_processor::ProcessOptions::default()
     };

     let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
         &genesis_config,
-        &blocktree,
+        &blockstore,
         account_paths,
         snapshot_config.as_ref(),
         process_options,
@@ -536,7 +534,7 @@ pub fn new_banks_from_blockstore(
         genesis_hash,
         bank_forks,
         bank_forks_info,
-        blocktree,
+        blockstore,
         ledger_signal_receiver,
         completed_slots_receiver,
         leader_schedule_cache,
@@ -584,6 +582,63 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
     (node, contact_info, mint_keypair, ledger_path)
 }

+fn report_target_features() {
+    warn!(
+        "CUDA is {}abled",
+        if solana_perf::perf_libs::api().is_some() {
+            "en"
+        } else {
+            "dis"
+        }
+    );
+
+    // Validator binaries built on a machine with AVX support will generate invalid opcodes
+    // when run on machines without AVX causing a non-obvious process abort. Instead detect
+    // the mismatch and error cleanly.
+    #[target_feature(enable = "avx")]
+    {
+        if is_x86_feature_detected!("avx") {
+            info!("AVX detected");
+        } else {
+            error!("Your machine does not have AVX support, please rebuild from source on your machine");
+            process::exit(1);
+        }
+    }
+}
+
+// Get the activated stake percentage (based on the provided bank) that is visible in gossip
+fn get_stake_percent_in_gossip(
+    bank: &Arc<solana_runtime::bank::Bank>,
+    cluster_info: &Arc<RwLock<ClusterInfo>>,
+) -> u64 {
+    let mut gossip_stake = 0;
+    let mut total_activated_stake = 0;
+    let tvu_peers = cluster_info.read().unwrap().tvu_peers();
+    let me = cluster_info.read().unwrap().my_data();
+
+    for (activated_stake, vote_account) in bank.vote_accounts().values() {
+        let vote_state =
+            solana_vote_program::vote_state::VoteState::from(&vote_account).unwrap_or_default();
+        total_activated_stake += activated_stake;
+        if tvu_peers
+            .iter()
+            .filter(|peer| peer.shred_version == me.shred_version)
+            .any(|peer| peer.id == vote_state.node_pubkey)
+        {
+            trace!(
+                "observed {} in gossip, (activated_stake={})",
+                vote_state.node_pubkey,
+                activated_stake
+            );
+            gossip_stake += activated_stake;
+        } else if vote_state.node_pubkey == cluster_info.read().unwrap().id() {
+            gossip_stake += activated_stake;
+        }
+    }
+
+    gossip_stake * 100 / total_activated_stake
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
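Note that get_stake_percent_in_gossip returns gossip_stake * 100 / total_activated_stake in u64, so the result floors. Combined with the strict `> 66` gate in the wait loop added by this change, exactly two-thirds of the stake is not enough. A small worked example:

    // Same integer arithmetic as the function above: u64 division floors.
    fn stake_percent(gossip_stake: u64, total_activated_stake: u64) -> u64 {
        gossip_stake * 100 / total_activated_stake
    }

    fn main() {
        assert_eq!(stake_percent(67, 100), 67); // 67 > 66: the wait loop exits
        assert_eq!(stake_percent(2, 3), 66);    // 66.67% floors to 66: keep waiting
        println!("ok");
    }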
@@ -1,5 +1,5 @@
 //! `window_service` handles the data plane incoming shreds, storing them in
-//!   blocktree and retransmitting where required
+//!   blockstore and retransmitting where required
 //!
 use crate::cluster_info::ClusterInfo;
 use crate::packet::Packets;
@@ -13,7 +13,7 @@ use rayon::iter::IntoParallelRefMutIterator;
 use rayon::iter::ParallelIterator;
 use rayon::ThreadPool;
 use solana_ledger::bank_forks::BankForks;
-use solana_ledger::blocktree::{self, Blocktree};
+use solana_ledger::blockstore::{self, Blockstore};
 use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
 use solana_ledger::shred::Shred;
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@@ -30,7 +30,7 @@ use std::time::{Duration, Instant};
 fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
     if shred.is_data() {
         // Only data shreds have parent information
-        blocktree::verify_shred_slots(shred.slot(), shred.parent(), root)
+        blockstore::verify_shred_slots(shred.slot(), shred.parent(), root)
     } else {
         // Filter out outdated coding shreds
         shred.slot() >= root
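The rename above leaves verify_shred_slot's logic untouched: data shreds are validated against their parent slot, coding shreds only against the root. A self-contained sketch with stand-in types; the exact chaining rule inside verify_shred_slots is an assumption here, not taken from the solana_ledger source:

    struct Shred {
        slot: u64,
        parent: u64,
        is_data: bool,
    }

    fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
        if shred.is_data {
            // Assumed rule: the parent must sit at or above the root and
            // strictly below the shred's own slot.
            root <= shred.parent && shred.parent < shred.slot
        } else {
            // Coding shreds carry no parent; filter out anything below root.
            shred.slot >= root
        }
    }

    fn main() {
        assert!(verify_shred_slot(&Shred { slot: 5, parent: 4, is_data: true }, 3));
        assert!(!verify_shred_slot(&Shred { slot: 2, parent: 1, is_data: false }, 3));
        println!("ok");
    }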
@@ -72,7 +72,7 @@ pub fn should_retransmit_and_persist(

 fn run_insert(
     shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
-    blocktree: &Arc<Blocktree>,
+    blockstore: &Arc<Blockstore>,
     leader_schedule_cache: &Arc<LeaderScheduleCache>,
 ) -> Result<()> {
     let timer = Duration::from_millis(200);
@@ -82,15 +82,15 @@ fn run_insert(
         shreds.append(&mut more_shreds)
     }

-    let blocktree_insert_metrics =
-        blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
-    blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");
+    let blockstore_insert_metrics =
+        blockstore.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
+    blockstore_insert_metrics.report_metrics("recv-window-insert-shreds");

     Ok(())
 }

 fn recv_window<F>(
-    blocktree: &Arc<Blocktree>,
+    blockstore: &Arc<Blockstore>,
     insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
     my_pubkey: &Pubkey,
     verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
@@ -114,7 +114,7 @@ where
     let now = Instant::now();
     inc_new_counter_debug!("streamer-recv_window-recv", total_packets);

-    let last_root = blocktree.last_root();
+    let last_root = blockstore.last_root();
     let shreds: Vec<_> = thread_pool.install(|| {
         packets
             .par_iter_mut()
@@ -195,7 +195,7 @@ pub struct WindowService {
 impl WindowService {
     #[allow(clippy::too_many_arguments)]
     pub fn new<F>(
-        blocktree: Arc<Blocktree>,
+        blockstore: Arc<Blockstore>,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         verified_receiver: CrossbeamReceiver<Vec<Packets>>,
         retransmit: PacketSender,
@@ -217,7 +217,7 @@ impl WindowService {
         };

         let repair_service = RepairService::new(
-            blocktree.clone(),
+            blockstore.clone(),
             exit.clone(),
             repair_socket,
             cluster_info.clone(),
@@ -228,7 +228,7 @@ impl WindowService {

         let t_insert = Self::start_window_insert_thread(
             exit,
-            &blocktree,
+            &blockstore,
             leader_schedule_cache,
             insert_receiver,
         );
@@ -236,7 +236,7 @@ impl WindowService {
         let t_window = Self::start_recv_window_thread(
             cluster_info.read().unwrap().id(),
             exit,
-            &blocktree,
+            &blockstore,
             insert_sender,
             verified_receiver,
             shred_filter,
@@ -253,12 +253,12 @@ impl WindowService {

     fn start_window_insert_thread(
         exit: &Arc<AtomicBool>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
         insert_receiver: CrossbeamReceiver<Vec<Shred>>,
     ) -> JoinHandle<()> {
         let exit = exit.clone();
-        let blocktree = blocktree.clone();
+        let blockstore = blockstore.clone();
         let leader_schedule_cache = leader_schedule_cache.clone();
         let mut handle_timeout = || {};
         let handle_error = || {
@@ -271,7 +271,7 @@ impl WindowService {
                     break;
                 }

-                if let Err(e) = run_insert(&insert_receiver, &blocktree, &leader_schedule_cache) {
+                if let Err(e) = run_insert(&insert_receiver, &blockstore, &leader_schedule_cache) {
                     if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
                         break;
                     }
@@ -283,7 +283,7 @@ impl WindowService {
     fn start_recv_window_thread<F>(
         id: Pubkey,
         exit: &Arc<AtomicBool>,
-        blocktree: &Arc<Blocktree>,
+        blockstore: &Arc<Blockstore>,
         insert_sender: CrossbeamSender<Vec<Shred>>,
         verified_receiver: CrossbeamReceiver<Vec<Packets>>,
         shred_filter: F,
@@ -297,7 +297,7 @@ impl WindowService {
         + std::marker::Sync,
     {
         let exit = exit.clone();
-        let blocktree = blocktree.clone();
+        let blockstore = blockstore.clone();
         Builder::new()
             .name("solana-window".to_string())
             .spawn(move || {
@@ -324,7 +324,7 @@ impl WindowService {
                     }
                 };
                 if let Err(e) = recv_window(
-                    &blocktree,
+                    &blockstore,
                     &insert_sender,
                     &id,
                     &verified_receiver,
@@ -391,7 +391,7 @@ mod test {
     use rand::thread_rng;
     use solana_ledger::shred::DataShredHeader;
     use solana_ledger::{
-        blocktree::{make_many_slot_entries, Blocktree},
+        blockstore::{make_many_slot_entries, Blockstore},
         entry::{create_ticks, Entry},
         get_tmp_ledger_path,
         shred::Shredder,
@@ -424,23 +424,23 @@ mod test {

     #[test]
     fn test_process_shred() {
-        let blocktree_path = get_tmp_ledger_path!();
-        let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
+        let blockstore_path = get_tmp_ledger_path!();
+        let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
         let num_entries = 10;
         let original_entries = create_ticks(num_entries, 0, Hash::default());
         let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
         shreds.reverse();
-        blocktree
+        blockstore
             .insert_shreds(shreds, None, false)
             .expect("Expect successful processing of shred");

         assert_eq!(
-            blocktree.get_slot_entries(0, 0, None).unwrap(),
+            blockstore.get_slot_entries(0, 0, None).unwrap(),
             original_entries
         );

-        drop(blocktree);
-        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        drop(blockstore);
+        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
     }

     #[test]
@@ -519,18 +519,18 @@ mod test {
         verified_receiver: CrossbeamReceiver<Vec<Packets>>,
         exit: Arc<AtomicBool>,
     ) -> WindowService {
-        let blocktree_path = get_tmp_ledger_path!();
-        let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path)
+        let blockstore_path = get_tmp_ledger_path!();
+        let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path)
             .expect("Expected to be able to open database ledger");

-        let blocktree = Arc::new(blocktree);
+        let blockstore = Arc::new(blockstore);
         let (retransmit_sender, _retransmit_receiver) = channel();
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
             ContactInfo::new_localhost(&Pubkey::default(), 0),
         )));
         let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
         let window = WindowService::new(
-            blocktree,
+            blockstore,
             cluster_info,
             verified_receiver,
             retransmit_sender,
@@ -3,7 +3,7 @@
 #[cfg(test)]
 mod tests {
     use solana_core::ledger_cleanup_service::LedgerCleanupService;
-    use solana_ledger::blocktree::{make_many_slot_entries, Blocktree};
+    use solana_ledger::blockstore::{make_many_slot_entries, Blockstore};
     use solana_ledger::get_tmp_ledger_path;
     use solana_ledger::shred::Shred;
     use std::collections::VecDeque;
@@ -33,7 +33,7 @@ mod tests {
         pub stop_size_bytes: u64,
         pub stop_size_iterations: u64,
         pub pre_generate_data: bool,
-        pub cleanup_blocktree: bool,
+        pub cleanup_blockstore: bool,
         pub emit_cpu_info: bool,
         pub assert_compaction: bool,
     }
@@ -150,7 +150,7 @@ mod tests {
         let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES);
         let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS);
         let pre_generate_data = read_env("PRE_GENERATE_DATA", false);
-        let cleanup_blocktree = read_env("CLEANUP_BLOCKTREE", true);
+        let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true);
         let emit_cpu_info = read_env("EMIT_CPU_INFO", true);
         // set default to `true` once compaction is merged
         let assert_compaction = read_env("ASSERT_COMPACTION", false);
@@ -163,7 +163,7 @@ mod tests {
             stop_size_bytes,
             stop_size_iterations,
             pre_generate_data,
-            cleanup_blocktree,
+            cleanup_blockstore,
             emit_cpu_info,
             assert_compaction,
         }
@@ -181,11 +181,11 @@ mod tests {
         batch_size: u64,
         entries: u64,
         max_slots: i64,
-        blocktree: &Blocktree,
+        blockstore: &Blockstore,
         cpu: &CpuStatsInner,
     ) {
         let time_now = Instant::now();
-        let storage_now = blocktree.storage_size().unwrap_or(0);
+        let storage_now = blockstore.storage_size().unwrap_or(0);
         let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle);

         println!(
@@ -209,11 +209,11 @@ mod tests {

     #[test]
     fn test_ledger_cleanup_compaction() {
-        let blocktree_path = get_tmp_ledger_path!();
-        let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
+        let blockstore_path = get_tmp_ledger_path!();
+        let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
         let config = get_benchmark_config();
         eprintln!("BENCHMARK CONFIG: {:?}", config);
-        eprintln!("LEDGER_PATH: {:?}", &blocktree_path);
+        eprintln!("LEDGER_PATH: {:?}", &blockstore_path);

         let benchmark_slots = config.benchmark_slots;
         let batch_size = config.batch_size;
@@ -227,7 +227,7 @@ mod tests {
         let (sender, receiver) = channel();
         let exit = Arc::new(AtomicBool::new(false));
         let cleaner =
-            LedgerCleanupService::new(receiver, blocktree.clone(), max_ledger_slots, &exit);
+            LedgerCleanupService::new(receiver, blockstore.clone(), max_ledger_slots, &exit);

         let exit_cpu = Arc::new(AtomicBool::new(false));
         let sys = CpuStatsUpdater::new(&exit_cpu);
@@ -259,7 +259,7 @@ mod tests {
             0,
             0,
             0,
-            &blocktree,
+            &blockstore,
             &sys.get_stats(),
         );

@@ -272,7 +272,7 @@ mod tests {
                 make_many_slot_entries(x, batch_size, entries_per_slot).0
             };

-            blocktree.insert_shreds(shreds, None, false).unwrap();
+            blockstore.insert_shreds(shreds, None, false).unwrap();
             sender.send(x).unwrap();

             emit_stats(
@@ -283,7 +283,7 @@ mod tests {
                 batch_size,
                 batch_size,
                 max_ledger_slots as i64,
-                &blocktree,
+                &blockstore,
                 &sys.get_stats(),
             );

@@ -313,7 +313,7 @@ mod tests {
             0,
             0,
             max_ledger_slots as i64,
-            &blocktree,
+            &blockstore,
             &sys.get_stats(),
         );

@@ -329,7 +329,7 @@ mod tests {
             0,
             0,
             max_ledger_slots as i64,
-            &blocktree,
+            &blockstore,
             &sys.get_stats(),
         );

@@ -349,9 +349,10 @@ mod tests {
             assert!(u2 < u1, "expected compaction! pre={},post={}", u1, u2);
         }

-        if config.cleanup_blocktree {
-            drop(blocktree);
-            Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
+        if config.cleanup_blockstore {
+            drop(blockstore);
+            Blockstore::destroy(&blockstore_path)
+                .expect("Expected successful database destruction");
         }
     }
 }
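The benchmark above reads every knob through read_env, e.g. CLEANUP_BLOCKSTORE. A minimal sketch of that pattern, under the assumption that read_env parses an environment variable and falls back to a default; the real helper in the test module may differ in details:

    use std::{env, str::FromStr};

    fn read_env<T: FromStr>(key: &str, default: T) -> T {
        // Unset or unparsable variables fall back to the default.
        env::var(key)
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or(default)
    }

    fn main() {
        let cleanup_blockstore: bool = read_env("CLEANUP_BLOCKSTORE", true);
        let stop_size_bytes: u64 = read_env("STOP_SIZE_BYTES", 0);
        println!("cleanup={} stop_bytes={}", cleanup_blockstore, stop_size_bytes);
    }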
@@ -34,7 +34,7 @@ fn test_rpc_send_tx() {
         .send()
         .unwrap();
     let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
-    let blockhash: Hash = json["result"]["value"][0]
+    let blockhash: Hash = json["result"]["value"]["blockhash"]
         .as_str()
         .unwrap()
         .parse()
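The test fix above tracks the RPC result's `value` changing from a tuple-like array to an object keyed by "blockhash". A sketch of the new access path, assuming the serde_json crate and a made-up response body:

    fn main() {
        let body = r#"{"jsonrpc":"2.0","id":1,"result":{"context":{"slot":1},
            "value":{"blockhash":"Placeholder11111111111111111111111111111111",
                     "feeCalculator":{"lamportsPerSignature":0}}}}"#;
        let json: serde_json::Value = serde_json::from_str(body).unwrap();
        // Old access path: json["result"]["value"][0]
        let blockhash = json["result"]["value"]["blockhash"].as_str().unwrap();
        println!("blockhash = {}", blockhash);
    }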
@@ -7,9 +7,9 @@ mod tests {
     use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
     use solana_core::storage_stage::{StorageStage, StorageState};
     use solana_ledger::bank_forks::BankForks;
-    use solana_ledger::blocktree_processor;
+    use solana_ledger::blockstore_processor;
     use solana_ledger::entry;
-    use solana_ledger::{blocktree::Blocktree, create_new_tmp_ledger};
+    use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
     use solana_runtime::bank::Bank;
     use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
     use solana_sdk::hash::Hash;
@@ -44,7 +44,7 @@ mod tests {
             .push(solana_storage_program::solana_storage_program!());
         let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

-        let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

         let bank = Bank::new(&genesis_config);
         let bank = Arc::new(bank);
@@ -63,7 +63,7 @@ mod tests {
         let storage_stage = StorageStage::new(
             &storage_state,
             bank_receiver,
-            Some(blocktree.clone()),
+            Some(blockstore.clone()),
             &keypair,
             &storage_keypair,
             &exit.clone(),
@@ -109,7 +109,7 @@ mod tests {

         let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2));
         //register ticks so the program reports a different segment
-        blocktree_processor::process_entries(
+        blockstore_processor::process_entries(
             &next_bank,
             &entry::create_ticks(
                 DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1,
@@ -164,7 +164,7 @@ mod tests {
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
         let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

-        let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
+        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
         let slot = 1;
         let bank = Arc::new(Bank::new(&genesis_config));
         let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
@@ -182,7 +182,7 @@ mod tests {
         let storage_stage = StorageStage::new(
             &storage_state,
             bank_receiver,
-            Some(blocktree.clone()),
+            Some(blockstore.clone()),
             &keypair,
             &storage_keypair,
             &exit.clone(),
@@ -203,7 +203,7 @@ mod tests {
         let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1)
             .map(|i| {
                 let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i));
-                blocktree_processor::process_entries(
+                blockstore_processor::process_entries(
                     &bank,
                     &entry::create_ticks(64, 0, bank.last_blockhash()),
                     true,
@@ -1,6 +1,6 @@
 [package]
 name = "solana-crate-features"
-version = "0.22.0"
+version = "0.22.3"
 description = "Solana Crate Features"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -1,6 +1,6 @@
 [package]
 name = "solana-faucet"
-version = "0.22.0"
+version = "0.22.3"
 description = "Solana Faucet"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"

@@ -19,10 +19,10 @@ clap = "2.33"
 log = "0.4.8"
 serde = "1.0.104"
 serde_derive = "1.0.103"
-solana-clap-utils = { path = "../clap-utils", version = "0.22.0" }
-solana-logger = { path = "../logger", version = "0.22.0" }
-solana-metrics = { path = "../metrics", version = "0.22.0" }
-solana-sdk = { path = "../sdk", version = "0.22.0" }
+solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
+solana-logger = { path = "../logger", version = "0.22.3" }
+solana-metrics = { path = "../metrics", version = "0.22.3" }
+solana-sdk = { path = "../sdk", version = "0.22.3" }
 tokio = "0.1"
 tokio-codec = "0.1"
Some files were not shown because too many files have changed in this diff