Compare commits
277 Commits
Author | SHA1 | Date | |
---|---|---|---|
9c90e29a00 | |||
c01789d2a8 | |||
a0f9d968fe | |||
888072d4c2 | |||
af1010cfd3 | |||
fe419db5b4 | |||
a86dc44c96 | |||
ebda293dc4 | |||
6acfc2cf0f | |||
a863e82741 | |||
52da207f83 | |||
ef8eff69e4 | |||
1abdeca4c1 | |||
6e82978931 | |||
4e827af392 | |||
f6b63a7dbc | |||
6bb22902cc | |||
881a6dc0f7 | |||
877e7a3893 | |||
bb80116605 | |||
0ffe7a9c8f | |||
9b8d59d2e9 | |||
f7bd7a41d2 | |||
3fc5009ef2 | |||
bde4ba04af | |||
f1ad69c84e | |||
97ea75a890 | |||
52f6da5cee | |||
aeaa0feb61 | |||
1207664bbb | |||
19d16e75c6 | |||
51cf559ce1 | |||
63d62c33c6 | |||
919c066e5a | |||
4125d01668 | |||
087c43b9ef | |||
c18ea3ccc9 | |||
564b590c89 | |||
d36ecb5c91 | |||
e2d6f01ad3 | |||
5034331131 | |||
faafee6b42 | |||
80f618f011 | |||
84f763d079 | |||
0dc0594aaa | |||
d651cb7a25 | |||
f18aa4e423 | |||
ab4f370e15 | |||
d6f824abc0 | |||
3450b9a44d | |||
afaf95cf53 | |||
8c371dd2fb | |||
bb558acdf0 | |||
159e518671 | |||
4798e7fa73 | |||
f4534ef12d | |||
8e0f41a790 | |||
b1203da82c | |||
e366fb6328 | |||
32de5e6e7a | |||
93ae98812b | |||
2c2de12e88 | |||
bd193535c9 | |||
d4d1e5e15b | |||
f7a670596f | |||
a8b82a0b68 | |||
bb25a06baa | |||
8b7cca986a | |||
626e16a177 | |||
814af378a7 | |||
a252acf539 | |||
01eb7600d9 | |||
52c2191545 | |||
25403e61ed | |||
f402b477b2 | |||
8df8f84701 | |||
ccee6241a6 | |||
4d13d3871d | |||
bb0c9d6145 | |||
8d105042ea | |||
84304cb0fc | |||
89fe297416 | |||
d853b20d7f | |||
b28407d98a | |||
4fa795b026 | |||
c298474e6f | |||
d925902b3f | |||
99eeb63f71 | |||
ff95f6dcfa | |||
8258532791 | |||
e73cbdda61 | |||
94f1132fb6 | |||
4ee212ae4c | |||
d5fb493aa4 | |||
88ea950652 | |||
e4519d6447 | |||
471bc73a23 | |||
75a2b74751 | |||
4e69408f54 | |||
38602d60b3 | |||
1fe1550a30 | |||
827f2b3a5c | |||
a948c9b7f9 | |||
1363841f32 | |||
4688f9821f | |||
0c90c889cd | |||
9f6c9c428b | |||
fd443d85c4 | |||
b4f0f4abcc | |||
d22848f9b1 | |||
79416381dc | |||
d791c70d90 | |||
802537564b | |||
1d0608200c | |||
cd14a940d8 | |||
58d4e32c97 | |||
1b6a200d6f | |||
08f6a2ea3e | |||
97d57d168b | |||
2b219228ce | |||
07d11be6ab | |||
7981431f09 | |||
a43922ccbf | |||
687818aad6 | |||
b7a5136136 | |||
0fde19239b | |||
771d1a78fd | |||
a8eb0409b7 | |||
b6151b5200 | |||
c68ebbb0a6 | |||
1b84092b94 | |||
b1d43ace14 | |||
6085109171 | |||
cd89f280b7 | |||
54f4d13350 | |||
799d3b1575 | |||
b3b782988c | |||
5e128f8cc2 | |||
c8c0815144 | |||
d59aae4849 | |||
342733be54 | |||
2da7601084 | |||
958c345f0c | |||
fe83c66686 | |||
5884469d11 | |||
9ee5f36068 | |||
c02373493b | |||
4090600717 | |||
8a4179da67 | |||
ed093f86f9 | |||
07a049aa59 | |||
7b77fbd525 | |||
e1e295e1b6 | |||
5b4ee36cfd | |||
784943ecab | |||
4f86c0b74a | |||
5b4f24eabd | |||
a2986d3b6b | |||
032d523737 | |||
238aa2133d | |||
eaf1b91148 | |||
4ae48b56f3 | |||
8c15214923 | |||
7a603d72bf | |||
5b51bb27b6 | |||
8231d2b672 | |||
6597c71e23 | |||
e30ca01999 | |||
12bb05c320 | |||
8aa7a851ca | |||
2a17e90b7b | |||
f154a53e5e | |||
7911895b67 | |||
d6aaab0b2c | |||
be9fa22db7 | |||
b72c5689c9 | |||
9dcf3347f5 | |||
72e9492ca6 | |||
572e942413 | |||
3ae9357a36 | |||
1dbb5c8647 | |||
06d8c06119 | |||
cc0e455a51 | |||
a01520e694 | |||
c524d62ce0 | |||
dd4640e1ed | |||
42c7d57fc0 | |||
efd09ecd37 | |||
14f6d5c82b | |||
c7710fdd24 | |||
b5aa03dd7c | |||
a81dd80d60 | |||
09ca92d416 | |||
56ed033233 | |||
e56efe237c | |||
3f0ff45de0 | |||
3709dc6558 | |||
6ec0318bae | |||
92e419f1c7 | |||
ccc0f2d956 | |||
80bb0158b7 | |||
f12592826f | |||
8d38777c1f | |||
832dfd4ab0 | |||
04d2db4dbb | |||
6f269e5a0e | |||
eb3991b9ba | |||
aee63f15c2 | |||
aced847735 | |||
e360e63b74 | |||
a6c4525998 | |||
77b196a226 | |||
b6b9c2cf56 | |||
59d900977d | |||
0f5acb86d3 | |||
911dee24c5 | |||
f03e066ec5 | |||
f7d3f55566 | |||
4298b1f595 | |||
870503ee36 | |||
4d14abbd04 | |||
5212b2716c | |||
97c0573c7d | |||
43cc9fcb1d | |||
47b5ba44e9 | |||
e95397e0a8 | |||
c7cdf8ba93 | |||
6ee734e1b4 | |||
3ab1b46ef7 | |||
22891b39d6 | |||
b6ce7ec782 | |||
a41c7451f1 | |||
6cb2040a1b | |||
937f9ad049 | |||
c2fc0f2418 | |||
9278201198 | |||
149a63100d | |||
d09afdbefe | |||
1d6bafbc77 | |||
01d2b4e952 | |||
05f3437601 | |||
f859243191 | |||
9ddc25283c | |||
388d4a8592 | |||
0b0b679120 | |||
3b752876ac | |||
9b8b7dbfd7 | |||
c209e14e40 | |||
6df1f6450f | |||
6d7cb23c61 | |||
bd7e269280 | |||
b05b42d74d | |||
af733a678a | |||
8a5045f05c | |||
4a336eb5ff | |||
b7e08052ae | |||
f6a4acfac3 | |||
68eff230f0 | |||
c78db6a94b | |||
294d9288d2 | |||
7dc5cc26a6 | |||
d7a2b790dc | |||
a7a10e12c7 | |||
8d243221f0 | |||
84368697af | |||
4a57cd3300 | |||
2214d2dbb5 | |||
50a991fdf9 | |||
4e093525c7 | |||
506b305959 | |||
e83efcfc80 | |||
4f1c881227 | |||
a642168369 | |||
8d296d0969 | |||
68b11c1c29 | |||
c209718a6f | |||
b8835312bb |
@ -1,4 +1,3 @@
|
||||
os: Visual Studio 2017
|
||||
version: '{build}'
|
||||
|
||||
branches:
|
||||
@ -16,7 +15,7 @@ build_script:
|
||||
notifications:
|
||||
- provider: Slack
|
||||
incoming_webhook:
|
||||
secure: 6HnLbeS6/Iv7JSMrrHQ7V9OSIjH/3KFzvZiinNWgQqEN0e9A6zaE4MwEXUYDWbcvVJiQneWit6dswY8Scoms2rS1PWEN5N6sjgLgyzroptc=
|
||||
secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
|
||||
channel: ci-status
|
||||
on_build_success: false
|
||||
on_build_failure: true
|
||||
@ -25,16 +24,16 @@ notifications:
|
||||
deploy:
|
||||
- provider: S3
|
||||
access_key_id:
|
||||
secure: G6uzyGqbkMCXS2+sCeBCT/+s/11AHLWXCuGayfKcMEE=
|
||||
secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
|
||||
secret_access_key:
|
||||
secure: Lc+aVrbcPSXoDV7h2J7gqKT+HX0n3eEzp3JIrSP2pcKxbAikGnCtOogCiHO9/er2
|
||||
secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
|
||||
bucket: release.solana.com
|
||||
region: us-west-1
|
||||
set_public: true
|
||||
|
||||
- provider: GitHub
|
||||
auth_token:
|
||||
secure: JdggY+mrznklWDcV0yvetHhD9eRcNdc627q6NcZdZAJsDidYcGgZ/tgYJiXb9D1A
|
||||
secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
|
@ -10,7 +10,13 @@
|
||||
set -e
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
buildkite-agent pipeline upload ci/buildkite.yml
|
||||
if [[ -n $BUILDKITE_TAG ]]; then
|
||||
buildkite-agent annotate --style info --context release-tag \
|
||||
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
|
||||
buildkite-agent pipeline upload ci/buildkite-release.yml
|
||||
else
|
||||
buildkite-agent pipeline upload ci/buildkite.yml
|
||||
fi
|
||||
|
||||
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
|
||||
# Add helpful link back to the corresponding Github Pull Request
|
||||
|
24
.github/stale.yml
vendored
Normal file
24
.github/stale.yml
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
only: pulls
|
||||
|
||||
# Number of days of inactivity before a pull request becomes stale
|
||||
daysUntilStale: 30
|
||||
|
||||
# Number of days of inactivity before a stale pull request is closed
|
||||
daysUntilClose: 7
|
||||
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- security
|
||||
|
||||
# Label to use when marking a pull request as stale
|
||||
staleLabel: stale
|
||||
|
||||
# Comment to post when marking a pull request as stale. Set to `false` to disable
|
||||
markComment: >
|
||||
This pull request has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs.
|
||||
|
||||
# Comment to post when closing a stale pull request. Set to `false` to disable
|
||||
closeComment: >
|
||||
This stale pull request has been automatically closed.
|
||||
Thank you for your contributions.
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -11,10 +11,7 @@
|
||||
**/*.rs.bk
|
||||
.cargo
|
||||
|
||||
# node config that is rsynced
|
||||
/config/
|
||||
# node config that remains local
|
||||
/config-local/
|
||||
|
||||
# log files
|
||||
*.log
|
||||
|
32
.mergify.yml
32
.mergify.yml
@ -43,3 +43,35 @@ pull_request_rules:
|
||||
backport:
|
||||
branches:
|
||||
- v0.18
|
||||
- name: v0.19 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v0.19
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.19
|
||||
- name: v0.20 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v0.20
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.20
|
||||
- name: v0.21 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v0.21
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.21
|
||||
- name: v0.22 backport
|
||||
conditions:
|
||||
- base=master
|
||||
- label=v0.22
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.22
|
||||
|
@ -4,7 +4,7 @@ os:
|
||||
language: rust
|
||||
cache: cargo
|
||||
rust:
|
||||
- 1.36.0
|
||||
- 1.37.0
|
||||
|
||||
install:
|
||||
- source ci/rust-version.sh
|
||||
|
1977
Cargo.lock
generated
1977
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
11
Cargo.toml
11
Cargo.toml
@ -3,23 +3,23 @@ members = [
|
||||
"bench-exchange",
|
||||
"bench-streamer",
|
||||
"bench-tps",
|
||||
"sdk-c",
|
||||
"chacha-sys",
|
||||
"client",
|
||||
"core",
|
||||
"drone",
|
||||
"validator",
|
||||
"genesis",
|
||||
"genesis_programs",
|
||||
"gossip",
|
||||
"install",
|
||||
"keygen",
|
||||
"kvstore",
|
||||
"ledger-tool",
|
||||
"local_cluster",
|
||||
"logger",
|
||||
"merkle-tree",
|
||||
"measure",
|
||||
"metrics",
|
||||
"netutil",
|
||||
"programs/bpf",
|
||||
"programs/bpf_loader_api",
|
||||
"programs/bpf_loader_program",
|
||||
@ -27,11 +27,13 @@ members = [
|
||||
"programs/budget_program",
|
||||
"programs/config_api",
|
||||
"programs/config_program",
|
||||
"programs/config_tests",
|
||||
"programs/exchange_api",
|
||||
"programs/exchange_program",
|
||||
"programs/failure_program",
|
||||
"programs/move_loader_api",
|
||||
"programs/move_loader_program",
|
||||
"programs/librapay_api",
|
||||
"programs/noop_program",
|
||||
"programs/stake_api",
|
||||
"programs/stake_program",
|
||||
@ -45,10 +47,13 @@ members = [
|
||||
"replicator",
|
||||
"runtime",
|
||||
"sdk",
|
||||
"sdk-c",
|
||||
"upload-perf",
|
||||
"validator-info",
|
||||
"utils/netutil",
|
||||
"utils/fixed_buf",
|
||||
"vote-signer",
|
||||
"wallet",
|
||||
"cli",
|
||||
]
|
||||
|
||||
exclude = [
|
||||
|
@ -78,7 +78,7 @@ $ source $HOME/.cargo/env
|
||||
$ rustup component add rustfmt
|
||||
```
|
||||
|
||||
If your rustc version is lower than 1.34.0, please update it:
|
||||
If your rustc version is lower than 1.37.0, please update it:
|
||||
|
||||
```bash
|
||||
$ rustup update
|
||||
@ -240,5 +240,3 @@ problem is solved by this code?" On the other hand, if a test does fail and you
|
||||
better way to solve the same problem, a Pull Request with your solution would most certainly be
|
||||
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
|
||||
send us that patch!
|
||||
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,33 +10,34 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
bs58 = "0.2.0"
|
||||
bs58 = "0.2.4"
|
||||
clap = "2.32.0"
|
||||
env_logger = "0.6.2"
|
||||
itertools = "0.8.0"
|
||||
log = "0.4.7"
|
||||
log = "0.4.8"
|
||||
num-derive = "0.2"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rayon = "1.1.0"
|
||||
serde = "1.0.97"
|
||||
serde_derive = "1.0.97"
|
||||
serde = "1.0.99"
|
||||
serde_derive = "1.0.99"
|
||||
serde_json = "1.0.40"
|
||||
serde_yaml = "0.8.9"
|
||||
# solana-runtime = { path = "../solana/runtime"}
|
||||
solana = { path = "../core", version = "0.17.0" }
|
||||
solana-client = { path = "../client", version = "0.17.0" }
|
||||
solana-drone = { path = "../drone", version = "0.17.0" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.17.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.17.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.17.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.17.0" }
|
||||
solana-core = { path = "../core", version = "0.18.0" }
|
||||
solana-local-cluster = { path = "../local_cluster", version = "0.18.0" }
|
||||
solana-client = { path = "../client", version = "0.18.0" }
|
||||
solana-drone = { path = "../drone", version = "0.18.0" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.0" }
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.18.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.18.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.18.0" }
|
||||
untrusted = "0.7.0"
|
||||
ws = "0.8.1"
|
||||
ws = "0.9.0"
|
||||
|
||||
[features]
|
||||
cuda = ["solana/cuda"]
|
||||
cuda = ["solana-core/cuda"]
|
||||
|
||||
|
@ -23,7 +23,7 @@ demo demonstrates one way to host an exchange on the Solana blockchain by
|
||||
emulating a currency exchange.
|
||||
|
||||
The assets are virtual tokens held by investors who may post order requests to
|
||||
the exchange. A Swapper monitors the exchange and posts swap requests for
|
||||
the exchange. A Matcher monitors the exchange and posts swap requests for
|
||||
matching orders. All the transactions can execute concurrently.
|
||||
|
||||
## Premise
|
||||
@ -42,30 +42,26 @@ matching orders. All the transactions can execute concurrently.
|
||||
- A request to create a token account
|
||||
- Token request
|
||||
- A request to deposit tokens of a particular type into a token account.
|
||||
- Token pair
|
||||
- A unique ordered list of two tokens. For the four types of tokens used in
|
||||
this demo, the valid pairs are AB, AC, AD, BC, BD, CD.
|
||||
- Direction of trade
|
||||
- Describes which token in the pair the investor wants to sell and buy and can
|
||||
be either "To" or "From". For example, if an investor issues a "To" trade
|
||||
for "AB" then they which to exchange A tokens to B tokens. A "From" order
|
||||
would read the other way, A tokens from B tokens.
|
||||
- Asset pair
|
||||
- A struct with fields Base and Quote, representing the two assets which make up a
|
||||
trading pair, which themselves are Tokens. The Base or 'primary' asset is the
|
||||
numerator and the Quote is the denominator for pricing purposes.
|
||||
- Order side
|
||||
- Describes which side of the market an investor wants to place a trade on. Options
|
||||
are "Bid" or "Ask", where a bid represents an offer to purchase the Base asset of
|
||||
the AssetPair for a sum of the Quote Asset and an Ask is an offer to sell Base asset
|
||||
for the Quote asset.
|
||||
- Price ratio
|
||||
- An expression of the relative prices of two tokens. They consist of the
|
||||
price of the primary token and the price of the secondary token. For
|
||||
simplicity sake, the primary token's price is always 1, which forces the
|
||||
secondary to be the common denominator. For example, if token A was worth
|
||||
2 and token B was worth 6, the price ratio would be 1:3 or just 3. Price
|
||||
ratios are represented as fixed point numbers. The fixed point scaler is
|
||||
defined in
|
||||
- An expression of the relative prices of two tokens. Calculated with the Base
|
||||
Asset as the numerator and the Quote Asset as the denominator. Ratios are
|
||||
represented as fixed point numbers. The fixed point scaler is defined in
|
||||
[exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
|
||||
- Order request
|
||||
- A Solana transaction executed by the exchange requesting the trade of one
|
||||
type of token for another. order requests are made up of the token pair,
|
||||
the direction of the trade, quantity of the primary token, the price ratio,
|
||||
and the two token accounts to be credited/deducted. An example trade
|
||||
request looks like "T AB 5 2" which reads "Exchange 5 A tokens to B tokens
|
||||
at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
|
||||
- A Solana transaction sent by a trader to the exchange to submit an order.
|
||||
Order requests are made up of the token pair, the order side (bid or ask),
|
||||
quantity of the primary token, the price ratio, and the two token accounts
|
||||
to be credited/deducted. An example trade request looks like "T AB 5 2"
|
||||
which reads "Exchange 5 A tokens to B tokens at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
|
||||
deducted and 10 B tokens credited to the trade initiator's token accounts.
|
||||
Successful order requests result in an order.
|
||||
- Order
|
||||
@ -75,59 +71,62 @@ matching orders. All the transactions can execute concurrently.
|
||||
contain the same information as the order request.
|
||||
- Price spread
|
||||
- The difference between the two matching orders. The spread is the
|
||||
profit of the Swapper initiating the swap request.
|
||||
- Swap requirements
|
||||
profit of the Matcher initiating the swap request.
|
||||
- Match requirements
|
||||
- Policies that result in a successful trade swap.
|
||||
- Swap request
|
||||
- A request to exchange tokens between to orders
|
||||
- Trade swap
|
||||
- A successful trade. A swap consists of two matching orders that meet
|
||||
swap requirements. A trade swap may not wholly satisfy one or both of the
|
||||
orders in which case the orders are adjusted appropriately. As
|
||||
long as the swap requirements are met there will be an exchange of tokens
|
||||
between accounts. Any price spread is deposited into the Swapper's profit
|
||||
account. All trade swaps are recorded in a new account for posterity.
|
||||
- Match request
|
||||
- A request to fill two complementary orders (bid/ask), resulting if successful,
|
||||
in a trade being created.
|
||||
- Trade
|
||||
- A successful trade is created from two matching orders that meet
|
||||
swap requirements which are submitted in a Match Request by a Matcher and
|
||||
executed by the exchange. A trade may not wholly satisfy one or both of the
|
||||
orders in which case the orders are adjusted appropriately. Upon execution,
|
||||
tokens are distributed to the traders' accounts and any overlap or
|
||||
"negative spread" between orders is deposited into the Matcher's profit
|
||||
account. All successful trades are recorded in the data of a new solana
|
||||
account for posterity.
|
||||
- Investor
|
||||
- Individual investors who hold a number of tokens and wish to trade them on
|
||||
the exchange. Investors operate as Solana thin clients who own a set of
|
||||
accounts containing tokens and/or order requests. Investors post
|
||||
transactions to the exchange in order to request tokens and post or cancel
|
||||
order requests.
|
||||
- Swapper
|
||||
- An agent who facilitates trading between investors. Swappers operate as
|
||||
- Matcher
|
||||
- An agent who facilitates trading between investors. Matchers operate as
|
||||
Solana thin clients who monitor all the orders looking for a trade
|
||||
match. Once found, the Swapper issues a swap request to the exchange.
|
||||
Swappers are the engine of the exchange and are rewarded for their efforts by
|
||||
accumulating the price spreads of the swaps they initiate. Swappers also
|
||||
match. Once found, the Matcher issues a swap request to the exchange.
|
||||
Matchers are the engine of the exchange and are rewarded for their efforts by
|
||||
accumulating the price spreads of the swaps they initiate. Matchers also
|
||||
provide current bid/ask price and OHLCV (Open, High, Low, Close, Volume)
|
||||
information on demand via a public network port.
|
||||
- Transaction fees
|
||||
- Solana transaction fees are paid for by the transaction submitters who are
|
||||
the Investors and Swappers.
|
||||
the Investors and Matchers.
|
||||
|
||||
## Exchange startup
|
||||
|
||||
The exchange is up and running when it reaches a state where it can take
|
||||
investor's trades and Swapper's swap requests. To achieve this state the
|
||||
investors' trades and Matchers' match requests. To achieve this state the
|
||||
following must occur in order:
|
||||
|
||||
- Start the Solana blockchain
|
||||
- Start the Swapper thin-client
|
||||
- The Swapper subscribes to change notifications for all the accounts owned by
|
||||
- Start the thin-client
|
||||
- The Matcher subscribes to change notifications for all the accounts owned by
|
||||
the exchange program id. The subscription is managed via Solana's JSON RPC
|
||||
interface.
|
||||
- The Swapper starts responding to queries for bid/ask price and OHLCV
|
||||
- The Matcher starts responding to queries for bid/ask price and OHLCV
|
||||
|
||||
The Swapper responding successfully to price and OHLCV requests is the signal to
|
||||
The Matcher responding successfully to price and OHLCV requests is the signal to
|
||||
the investors that trades submitted after that point will be analyzed. <!--This
|
||||
is not ideal, and instead investors should be able to submit trades at any time,
|
||||
and the Swapper could come and go without missing a trade. One way to achieve
|
||||
this is for the Swapper to read the current state of all accounts looking for all
|
||||
and the Matcher could come and go without missing a trade. One way to achieve
|
||||
this is for the Matcher to read the current state of all accounts looking for all
|
||||
open orders.-->
|
||||
|
||||
Investors will initially query the exchange to discover their current balance
|
||||
for each type of token. If the investor does not already have an account for
|
||||
each type of token, they will submit account requests. Swappers as well will
|
||||
each type of token, they will submit account requests. Matcher as well will
|
||||
request accounts to hold the tokens they earn by initiating trade swaps.
|
||||
|
||||
```rust
|
||||
@ -165,7 +164,7 @@ pub struct TokenAccountInfo {
|
||||
}
|
||||
```
|
||||
|
||||
For this demo investors or Swappers can request more tokens from the exchange at
|
||||
For this demo investors or Matcher can request more tokens from the exchange at
|
||||
any time by submitting token requests. In non-demos, an exchange of this type
|
||||
would provide another way to exchange a 3rd party asset into tokens.
|
||||
|
||||
@ -269,10 +268,10 @@ pub enum ExchangeInstruction {
|
||||
|
||||
## Trade swaps
|
||||
|
||||
The Swapper is monitoring the accounts assigned to the exchange program and
|
||||
The Matcher is monitoring the accounts assigned to the exchange program and
|
||||
building a trade-order table. The order table is used to identify
|
||||
matching orders which could be fulfilled. When a match is found the
|
||||
Swapper should issue a swap request. Swap requests may not satisfy the entirety
|
||||
Matcher should issue a swap request. Swap requests may not satisfy the entirety
|
||||
of either order, but the exchange will greedily fulfill it. Any leftover tokens
|
||||
in either account will keep the order valid for further swap requests in
|
||||
the future.
|
||||
@ -310,14 +309,14 @@ whole for clarity.
|
||||
| 5 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
As part of a successful swap request, the exchange will credit tokens to the
|
||||
Swapper's account equal to the difference in the price ratios or the two orders.
|
||||
These tokens are considered the Swapper's profit for initiating the trade.
|
||||
Matcher's account equal to the difference in the price ratios or the two orders.
|
||||
These tokens are considered the Matcher's profit for initiating the trade.
|
||||
|
||||
The Swapper would initiate the following swap on the order table above:
|
||||
The Matcher would initiate the following swap on the order table above:
|
||||
|
||||
- Row 1, To: Investor 1 trades 2 A tokens to 8 B tokens
|
||||
- Row 1, From: Investor 2 trades 2 A tokens from 8 B tokens
|
||||
- Swapper takes 8 B tokens as profit
|
||||
- Matcher takes 8 B tokens as profit
|
||||
|
||||
Both row 1 trades are fully realized, table becomes:
|
||||
|
||||
@ -328,11 +327,11 @@ Both row 1 trades are fully realized, table becomes:
|
||||
| 3 | 1 T AB 2 8 | 2 F AB 3 6 |
|
||||
| 4 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
The Swapper would initiate the following swap:
|
||||
The Matcher would initiate the following swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 1 A token to 4 B tokens
|
||||
- Row 1, From: Investor 2 trades 1 A token from 4 B tokens
|
||||
- Swapper takes 4 B tokens as profit
|
||||
- Matcher takes 4 B tokens as profit
|
||||
|
||||
Row 1 From is not fully realized, table becomes:
|
||||
|
||||
@ -343,11 +342,11 @@ Row 1 From is not fully realized, table becomes:
|
||||
| 3 | 1 T AB 2 10 | 2 F AB 3 6 |
|
||||
| 4 | | 2 F AB 1 5 |
|
||||
|
||||
The Swapper would initiate the following swap:
|
||||
The Matcher would initiate the following swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 1 A token to 6 B tokens
|
||||
- Row 1, From: Investor 2 trades 1 A token from 6 B tokens
|
||||
- Swapper takes 2 B tokens as profit
|
||||
- Matcher takes 2 B tokens as profit
|
||||
|
||||
Row 1 To is now fully realized, table becomes:
|
||||
|
||||
@ -357,11 +356,11 @@ Row 1 To is now fully realized, table becomes:
|
||||
| 2 | 1 T AB 2 8 | 2 F AB 3 5 |
|
||||
| 3 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
The Swapper would initiate the following last swap:
|
||||
The Matcher would initiate the following last swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 2 A token to 12 B tokens
|
||||
- Row 1, From: Investor 2 trades 2 A token from 12 B tokens
|
||||
- Swapper takes 4 B tokens as profit
|
||||
- Matcher takes 4 B tokens as profit
|
||||
|
||||
Table becomes:
|
||||
|
||||
@ -383,7 +382,7 @@ pub enum ExchangeInstruction {
|
||||
/// key 3 - `From` order
|
||||
/// key 4 - Token account associated with the To Trade
|
||||
/// key 5 - Token account associated with From trade
|
||||
/// key 6 - Token account in which to deposit the Swappers profit from the swap.
|
||||
/// key 6 - Token account in which to deposit the Matcher profit from the swap.
|
||||
SwapRequest,
|
||||
}
|
||||
|
||||
@ -442,14 +441,14 @@ pub enum ExchangeInstruction {
|
||||
/// key 3 - `From` order
|
||||
/// key 4 - Token account associated with the To Trade
|
||||
/// key 5 - Token account associated with From trade
|
||||
/// key 6 - Token account in which to deposit the Swappers profit from the swap.
|
||||
/// key 6 - Token account in which to deposit the Matcher profit from the swap.
|
||||
SwapRequest,
|
||||
}
|
||||
```
|
||||
|
||||
## Quotes and OHLCV
|
||||
|
||||
The Swapper will provide current bid/ask price quotes based on trade actively and
|
||||
The Matcher will provide current bid/ask price quotes based on trade actively and
|
||||
also provide OHLCV based on some time window. The details of how the bid/ask
|
||||
price quotes are calculated are yet to be decided.
|
||||
|
||||
|
@ -5,8 +5,8 @@ use itertools::izip;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana::gen_keys::GenKeys;
|
||||
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_drone::drone::request_airdrop_transaction;
|
||||
use solana_exchange_api::exchange_instruction;
|
||||
use solana_exchange_api::exchange_state::*;
|
||||
@ -527,21 +527,21 @@ fn trader<T>(
|
||||
let mut trade_infos = vec![];
|
||||
let start = account_group * batch_size as usize;
|
||||
let end = account_group * batch_size as usize + batch_size as usize;
|
||||
let mut direction = Direction::To;
|
||||
let mut side = OrderSide::Ask;
|
||||
for (signer, trade, src) in izip!(
|
||||
signers[start..end].iter(),
|
||||
trade_keys,
|
||||
srcs[start..end].iter(),
|
||||
) {
|
||||
direction = if direction == Direction::To {
|
||||
Direction::From
|
||||
side = if side == OrderSide::Ask {
|
||||
OrderSide::Bid
|
||||
} else {
|
||||
Direction::To
|
||||
OrderSide::Ask
|
||||
};
|
||||
let order_info = OrderInfo {
|
||||
/// Owner of the trade order
|
||||
owner: Pubkey::default(), // don't care
|
||||
direction,
|
||||
side,
|
||||
pair,
|
||||
tokens,
|
||||
price,
|
||||
@ -551,7 +551,7 @@ fn trader<T>(
|
||||
trade_account: trade.pubkey(),
|
||||
order_info,
|
||||
});
|
||||
trades.push((signer, trade.pubkey(), direction, src));
|
||||
trades.push((signer, trade.pubkey(), side, src));
|
||||
}
|
||||
account_group = (account_group + 1) % account_groups as usize;
|
||||
|
||||
@ -562,7 +562,7 @@ fn trader<T>(
|
||||
trades.chunks(chunk_size).for_each(|chunk| {
|
||||
let trades_txs: Vec<_> = chunk
|
||||
.par_iter()
|
||||
.map(|(signer, trade, direction, src)| {
|
||||
.map(|(signer, trade, side, src)| {
|
||||
let s: &Keypair = &signer;
|
||||
let owner = &signer.pubkey();
|
||||
let space = mem::size_of::<ExchangeState>() as u64;
|
||||
@ -571,7 +571,7 @@ fn trader<T>(
|
||||
vec![
|
||||
system_instruction::create_account(owner, trade, 1, space, &id()),
|
||||
exchange_instruction::trade_request(
|
||||
owner, trade, *direction, pair, tokens, price, src,
|
||||
owner, trade, *side, pair, tokens, price, src,
|
||||
),
|
||||
],
|
||||
blockhash,
|
||||
@ -660,7 +660,7 @@ fn verify_funding_transfer<T: SyncClient + ?Sized>(
|
||||
false
|
||||
}
|
||||
|
||||
pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
|
||||
pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
|
||||
let total = lamports * (dests.len() as u64 + 1);
|
||||
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
|
||||
let mut notfunded: Vec<&Arc<Keypair>> = dests.iter().collect();
|
||||
@ -778,7 +778,7 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_token_accounts(client: &Client, signers: &[Arc<Keypair>], accounts: &[Pubkey]) {
|
||||
pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Pubkey]) {
|
||||
let mut notfunded: Vec<(&Arc<Keypair>, &Pubkey)> = signers.iter().zip(accounts).collect();
|
||||
|
||||
while !notfunded.is_empty() {
|
||||
@ -908,7 +908,7 @@ fn generate_keypairs(num: u64) -> Vec<Keypair> {
|
||||
rnd.gen_n_keypairs(num)
|
||||
}
|
||||
|
||||
pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair, amount: u64) {
|
||||
pub fn airdrop_lamports(client: &dyn Client, drone_addr: &SocketAddr, id: &Keypair, amount: u64) {
|
||||
let balance = client.get_balance(&id.pubkey());
|
||||
let balance = balance.unwrap_or(0);
|
||||
if balance >= amount {
|
||||
@ -963,11 +963,11 @@ pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair,
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana::local_cluster::{ClusterConfig, LocalCluster};
|
||||
use solana::validator::ValidatorConfig;
|
||||
use solana_core::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_core::validator::ValidatorConfig;
|
||||
use solana_drone::drone::run_local_drone;
|
||||
use solana_exchange_api::exchange_processor::process_instruction;
|
||||
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_client::BankClient;
|
||||
use solana_sdk::genesis_block::create_genesis_block;
|
||||
|
@ -1,5 +1,5 @@
|
||||
use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, ArgMatches};
|
||||
use solana::gen_keys::GenKeys;
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_drone::drone::DRONE_PORT;
|
||||
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
|
||||
use std::net::SocketAddr;
|
||||
|
@ -8,7 +8,7 @@ extern crate solana_exchange_program;
|
||||
|
||||
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
|
||||
use log::*;
|
||||
use solana::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_core::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_sdk::signature::KeypairUtil;
|
||||
|
||||
fn main() {
|
||||
|
@ -96,12 +96,12 @@ impl OrderBook {
|
||||
// Ok(())
|
||||
// }
|
||||
pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
|
||||
check_trade(info.direction, info.tokens, info.price)?;
|
||||
match info.direction {
|
||||
Direction::To => {
|
||||
check_trade(info.side, info.tokens, info.price)?;
|
||||
match info.side {
|
||||
OrderSide::Ask => {
|
||||
self.to_ab.push(ToOrder { pubkey, info });
|
||||
}
|
||||
Direction::From => {
|
||||
OrderSide::Bid => {
|
||||
self.from_ab.push(FromOrder { pubkey, info });
|
||||
}
|
||||
}
|
||||
|
@ -2,17 +2,17 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana = { path = "../core", version = "0.17.0" }
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-core = { path = "../core", version = "0.18.0" }
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
|
||||
[features]
|
||||
cuda = ["solana/cuda"]
|
||||
cuda = ["solana-core/cuda"]
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
use clap::{crate_description, crate_name, crate_version, App, Arg};
|
||||
use solana::packet::PacketsRecycler;
|
||||
use solana::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
|
||||
use solana::result::Result;
|
||||
use solana::streamer::{receiver, PacketReceiver};
|
||||
use solana_core::packet::PacketsRecycler;
|
||||
use solana_core::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
|
||||
use solana_core::result::Result;
|
||||
use solana_core::streamer::{receiver, PacketReceiver};
|
||||
use std::cmp::max;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
|
@ -2,28 +2,34 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
clap = "2.33.0"
|
||||
log = "0.4.7"
|
||||
log = "0.4.8"
|
||||
rayon = "1.1.0"
|
||||
serde = "1.0.97"
|
||||
serde_derive = "1.0.97"
|
||||
serde = "1.0.99"
|
||||
serde_derive = "1.0.99"
|
||||
serde_json = "1.0.40"
|
||||
serde_yaml = "0.8.9"
|
||||
solana = { path = "../core", version = "0.17.0" }
|
||||
solana-client = { path = "../client", version = "0.17.0" }
|
||||
solana-drone = { path = "../drone", version = "0.17.0" }
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.17.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.17.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.17.0" }
|
||||
solana-core = { path = "../core", version = "0.18.0" }
|
||||
solana-local-cluster = { path = "../local_cluster", version = "0.18.0" }
|
||||
solana-client = { path = "../client", version = "0.18.0" }
|
||||
solana-drone = { path = "../drone", version = "0.18.0" }
|
||||
solana-librapay-api = { path = "../programs/librapay_api", version = "0.18.0" }
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.18.0" }
|
||||
solana-measure = { path = "../measure", version = "0.18.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.18.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.18.0" }
|
||||
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.0" }
|
||||
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.0" }
|
||||
|
||||
[features]
|
||||
cuda = ["solana/cuda"]
|
||||
cuda = ["solana-core/cuda"]
|
||||
|
||||
|
@ -1,13 +1,17 @@
|
||||
use solana_metrics;
|
||||
|
||||
use bincode;
|
||||
use log::*;
|
||||
use rayon::prelude::*;
|
||||
use solana::gen_keys::GenKeys;
|
||||
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_drone::drone::request_airdrop_transaction;
|
||||
use solana_librapay_api::{create_genesis, upload_mint_program, upload_payment_program};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_sdk::client::Client;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_instruction;
|
||||
use solana_sdk::system_transaction;
|
||||
@ -24,6 +28,8 @@ use std::thread::Builder;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
use solana_librapay_api::librapay_transaction;
|
||||
|
||||
pub const MAX_SPENDS_PER_TX: u64 = 4;
|
||||
pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
|
||||
|
||||
@ -43,6 +49,7 @@ pub struct Config {
|
||||
pub duration: Duration,
|
||||
pub tx_count: usize,
|
||||
pub sustained: bool,
|
||||
pub use_move: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@ -54,15 +61,19 @@ impl Default for Config {
|
||||
duration: Duration::new(std::u64::MAX, 0),
|
||||
tx_count: 500_000,
|
||||
sustained: false,
|
||||
use_move: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
|
||||
|
||||
pub fn do_bench_tps<T>(
|
||||
clients: Vec<T>,
|
||||
config: Config,
|
||||
gen_keypairs: Vec<Keypair>,
|
||||
keypair0_balance: u64,
|
||||
libra_args: Option<LibraKeys>,
|
||||
) -> u64
|
||||
where
|
||||
T: 'static + Client + Send + Sync,
|
||||
@ -74,6 +85,7 @@ where
|
||||
duration,
|
||||
tx_count,
|
||||
sustained,
|
||||
..
|
||||
} = config;
|
||||
|
||||
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
|
||||
@ -165,6 +177,7 @@ where
|
||||
&keypairs[len..],
|
||||
threads,
|
||||
reclaim_lamports_back_to_source_account,
|
||||
&libra_args,
|
||||
);
|
||||
// In sustained mode overlap the transfers with generation
|
||||
// this has higher average performance but lower peak performance
|
||||
@ -221,6 +234,74 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
|
||||
);
|
||||
}
|
||||
|
||||
fn generate_move_txs(
|
||||
source: &[Keypair],
|
||||
dest: &[Keypair],
|
||||
reclaim: bool,
|
||||
move_keypairs: &[Keypair],
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_id: &Pubkey,
|
||||
blockhash: &Hash,
|
||||
) -> Vec<(Transaction, u64)> {
|
||||
let count = move_keypairs.len() / 2;
|
||||
let source_move = &move_keypairs[..count];
|
||||
let dest_move = &move_keypairs[count..];
|
||||
let pairs: Vec<_> = if !reclaim {
|
||||
source_move
|
||||
.iter()
|
||||
.zip(dest_move.iter())
|
||||
.zip(source.iter())
|
||||
.collect()
|
||||
} else {
|
||||
dest_move
|
||||
.iter()
|
||||
.zip(source_move.iter())
|
||||
.zip(dest.iter())
|
||||
.collect()
|
||||
};
|
||||
|
||||
pairs
|
||||
.par_iter()
|
||||
.map(|((from, to), payer)| {
|
||||
(
|
||||
librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
libra_mint_id,
|
||||
&payer,
|
||||
&from,
|
||||
&to.pubkey(),
|
||||
1,
|
||||
*blockhash,
|
||||
),
|
||||
timestamp(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_system_txs(
|
||||
source: &[Keypair],
|
||||
dest: &[Keypair],
|
||||
reclaim: bool,
|
||||
blockhash: &Hash,
|
||||
) -> Vec<(Transaction, u64)> {
|
||||
let pairs: Vec<_> = if !reclaim {
|
||||
source.iter().zip(dest.iter()).collect()
|
||||
} else {
|
||||
dest.iter().zip(source.iter()).collect()
|
||||
};
|
||||
|
||||
pairs
|
||||
.par_iter()
|
||||
.map(|(from, to)| {
|
||||
(
|
||||
system_transaction::create_user_account(from, &to.pubkey(), 1, *blockhash),
|
||||
timestamp(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_txs(
|
||||
shared_txs: &SharedTransactions,
|
||||
blockhash: &Hash,
|
||||
@ -228,25 +309,31 @@ fn generate_txs(
|
||||
dest: &[Keypair],
|
||||
threads: usize,
|
||||
reclaim: bool,
|
||||
libra_args: &Option<LibraKeys>,
|
||||
) {
|
||||
let tx_count = source.len();
|
||||
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
|
||||
let signing_start = Instant::now();
|
||||
|
||||
let pairs: Vec<_> = if !reclaim {
|
||||
source.iter().zip(dest.iter()).collect()
|
||||
let transactions = if let Some((
|
||||
libra_genesis_keypair,
|
||||
libra_pay_program_id,
|
||||
_libra_mint_program_id,
|
||||
libra_keys,
|
||||
)) = libra_args
|
||||
{
|
||||
generate_move_txs(
|
||||
source,
|
||||
dest,
|
||||
reclaim,
|
||||
&libra_keys,
|
||||
libra_pay_program_id,
|
||||
&libra_genesis_keypair.pubkey(),
|
||||
blockhash,
|
||||
)
|
||||
} else {
|
||||
dest.iter().zip(source.iter()).collect()
|
||||
generate_system_txs(source, dest, reclaim, blockhash)
|
||||
};
|
||||
let transactions: Vec<_> = pairs
|
||||
.par_iter()
|
||||
.map(|(id, keypair)| {
|
||||
(
|
||||
system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
|
||||
timestamp(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
@ -353,7 +440,12 @@ pub fn fund_keys<T: Client>(
|
||||
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
|
||||
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
|
||||
|
||||
println!("funding keys {}", dests.len());
|
||||
println!(
|
||||
"funding keys {} with lamports: {:?} total: {}",
|
||||
dests.len(),
|
||||
client.get_balance(&source.pubkey()),
|
||||
total
|
||||
);
|
||||
while !notfunded.is_empty() {
|
||||
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
|
||||
let mut to_fund = vec![];
|
||||
@ -392,13 +484,10 @@ pub fn fund_keys<T: Client>(
|
||||
let mut to_fund_txs: Vec<_> = chunk
|
||||
.par_iter()
|
||||
.map(|(k, m)| {
|
||||
(
|
||||
k.clone(),
|
||||
Transaction::new_unsigned_instructions(system_instruction::transfer_many(
|
||||
&k.pubkey(),
|
||||
&m,
|
||||
)),
|
||||
)
|
||||
let tx = Transaction::new_unsigned_instructions(
|
||||
system_instruction::transfer_many(&k.pubkey(), &m),
|
||||
);
|
||||
(k.clone(), tx)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@ -602,15 +691,170 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
|
||||
(rnd.gen_n_keypairs(total_keys), extra)
|
||||
}
|
||||
|
||||
fn fund_move_keys<T: Client>(
|
||||
client: &T,
|
||||
funding_key: &Keypair,
|
||||
keypairs: &[Keypair],
|
||||
total: u64,
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_program_id: &Pubkey,
|
||||
libra_mint_key: &Keypair,
|
||||
) {
|
||||
let (mut blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||
|
||||
info!("creating the libra funding account..");
|
||||
let libra_funding_key = Keypair::new();
|
||||
let tx = librapay_transaction::create_account(
|
||||
funding_key,
|
||||
&libra_funding_key.pubkey(),
|
||||
1,
|
||||
blockhash,
|
||||
);
|
||||
client.send_message(&[funding_key], tx.message).unwrap();
|
||||
|
||||
info!("minting to funding keypair");
|
||||
let tx = librapay_transaction::mint_tokens(
|
||||
&libra_mint_program_id,
|
||||
funding_key,
|
||||
libra_mint_key,
|
||||
&libra_funding_key.pubkey(),
|
||||
total,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_message(&[funding_key, libra_mint_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("creating {} move accounts...", keypairs.len());
|
||||
let create_len = 8;
|
||||
let mut funding_time = Measure::start("funding_time");
|
||||
for (i, keys) in keypairs.chunks(create_len).enumerate() {
|
||||
if client.get_balance(&keys[0].pubkey()).unwrap_or(0) > 0 {
|
||||
// already created these accounts.
|
||||
break;
|
||||
}
|
||||
|
||||
let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
|
||||
let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
|
||||
|
||||
let ser_size = bincode::serialized_size(&tx).unwrap();
|
||||
|
||||
client.send_message(&[funding_key], tx.message).unwrap();
|
||||
|
||||
if i % 10 == 0 {
|
||||
info!(
|
||||
"size: {} created {} accounts of {}",
|
||||
ser_size,
|
||||
i,
|
||||
(keypairs.len() / create_len),
|
||||
);
|
||||
}
|
||||
}
|
||||
funding_time.stop();
|
||||
info!("funding accounts {}ms", funding_time.as_ms());
|
||||
|
||||
const NUM_FUNDING_KEYS: usize = 4;
|
||||
let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
let pubkey_amounts: Vec<_> = funding_keys
|
||||
.iter()
|
||||
.map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
|
||||
.collect();
|
||||
let tx = Transaction::new_signed_instructions(
|
||||
&[funding_key],
|
||||
system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
|
||||
blockhash,
|
||||
);
|
||||
client.send_message(&[funding_key], tx.message).unwrap();
|
||||
let mut balance = 0;
|
||||
for _ in 0..20 {
|
||||
balance = client.get_balance(&funding_keys[0].pubkey()).unwrap();
|
||||
if balance > 0 {
|
||||
break;
|
||||
} else {
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
assert!(balance > 0);
|
||||
info!("funded multiple funding accounts.. {:?}", balance);
|
||||
|
||||
let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
for (i, key) in libra_funding_keys.iter().enumerate() {
|
||||
let tx =
|
||||
librapay_transaction::create_account(&funding_keys[i], &key.pubkey(), 1, blockhash);
|
||||
client
|
||||
.send_message(&[&funding_keys[i]], tx.message)
|
||||
.unwrap();
|
||||
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_mint_key.pubkey(),
|
||||
&funding_keys[i],
|
||||
&libra_funding_key,
|
||||
&key.pubkey(),
|
||||
total / NUM_FUNDING_KEYS as u64,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_message(&[&funding_keys[i], &libra_funding_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("funded libra funding key {}", i);
|
||||
}
|
||||
|
||||
let tx_count = keypairs.len();
|
||||
let amount = total / (tx_count as u64);
|
||||
for (i, keys) in keypairs[..tx_count].chunks(NUM_FUNDING_KEYS).enumerate() {
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_mint_key.pubkey(),
|
||||
&funding_keys[j],
|
||||
&libra_funding_keys[j],
|
||||
&key.pubkey(),
|
||||
amount,
|
||||
blockhash,
|
||||
);
|
||||
|
||||
let _sig = client
|
||||
.async_send_transaction(tx.clone())
|
||||
.expect("create_account in generate_and_fund_keypairs");
|
||||
}
|
||||
|
||||
info!("sent... checking balance {}", i);
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let mut times = 0;
|
||||
loop {
|
||||
let balance =
|
||||
librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
|
||||
if balance >= amount {
|
||||
break;
|
||||
} else if times > 20 {
|
||||
info!("timed out.. {} key: {} balance: {}", i, j, balance);
|
||||
break;
|
||||
} else {
|
||||
times += 1;
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("funded: {} of {}", i, keypairs.len() / NUM_FUNDING_KEYS);
|
||||
blockhash = client.get_recent_blockhash().unwrap().0;
|
||||
}
|
||||
|
||||
info!("done funding keys..");
|
||||
}
|
||||
|
||||
pub fn generate_and_fund_keypairs<T: Client>(
|
||||
client: &T,
|
||||
drone_addr: Option<SocketAddr>,
|
||||
funding_pubkey: &Keypair,
|
||||
funding_key: &Keypair,
|
||||
tx_count: usize,
|
||||
lamports_per_account: u64,
|
||||
) -> Result<(Vec<Keypair>, u64)> {
|
||||
use_move: bool,
|
||||
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
|
||||
info!("Creating {} keypairs...", tx_count * 2);
|
||||
let (mut keypairs, extra) = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
|
||||
let (mut keypairs, extra) = generate_keypairs(funding_key, tx_count as u64 * 2);
|
||||
info!("Get lamports...");
|
||||
|
||||
// Sample the first keypair, see if it has lamports, if so then resume.
|
||||
@ -619,19 +863,60 @@ pub fn generate_and_fund_keypairs<T: Client>(
|
||||
.get_balance(&keypairs[tx_count * 2 - 1].pubkey())
|
||||
.unwrap_or(0);
|
||||
|
||||
let mut move_keypairs_ret = None;
|
||||
|
||||
if lamports_per_account > last_keypair_balance {
|
||||
let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||
let (_blockhash, fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||
let account_desired_balance =
|
||||
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
|
||||
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
|
||||
let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
|
||||
if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
|
||||
airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
|
||||
let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
|
||||
if use_move {
|
||||
total *= 3;
|
||||
}
|
||||
info!("adding more lamports {}", account_desired_balance);
|
||||
|
||||
println!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
|
||||
last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
|
||||
account_desired_balance, total
|
||||
);
|
||||
|
||||
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
|
||||
airdrop_lamports(client, &drone_addr.unwrap(), funding_key, total)?;
|
||||
}
|
||||
|
||||
if use_move {
|
||||
let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
|
||||
let libra_mint_program_id = upload_mint_program(&funding_key, client);
|
||||
let libra_pay_program_id = upload_payment_program(&funding_key, client);
|
||||
|
||||
// Generate another set of keypairs for move accounts.
|
||||
// Still fund the solana ones which will be used for fees.
|
||||
let seed = [0u8; 32];
|
||||
let mut rnd = GenKeys::new(seed);
|
||||
let move_keypairs = rnd.gen_n_keypairs(tx_count as u64 * 2);
|
||||
fund_move_keys(
|
||||
client,
|
||||
funding_key,
|
||||
&move_keypairs,
|
||||
total / 3,
|
||||
&libra_pay_program_id,
|
||||
&libra_mint_program_id,
|
||||
&libra_genesis_keypair,
|
||||
);
|
||||
move_keypairs_ret = Some((
|
||||
libra_genesis_keypair,
|
||||
libra_pay_program_id,
|
||||
libra_mint_program_id,
|
||||
move_keypairs,
|
||||
));
|
||||
|
||||
// Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
|
||||
total /= 3;
|
||||
}
|
||||
|
||||
fund_keys(
|
||||
client,
|
||||
funding_pubkey,
|
||||
funding_key,
|
||||
&keypairs,
|
||||
total,
|
||||
fee_calculator.max_lamports_per_signature,
|
||||
@ -642,17 +927,18 @@ pub fn generate_and_fund_keypairs<T: Client>(
|
||||
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
|
||||
keypairs.truncate(2 * tx_count);
|
||||
|
||||
Ok((keypairs, last_keypair_balance))
|
||||
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use solana::cluster_info::FULLNODE_PORT_RANGE;
|
||||
use solana::local_cluster::{ClusterConfig, LocalCluster};
|
||||
use solana::validator::ValidatorConfig;
|
||||
use solana_client::thin_client::create_client;
|
||||
use solana_core::cluster_info::FULLNODE_PORT_RANGE;
|
||||
use solana_core::validator::ValidatorConfig;
|
||||
use solana_drone::drone::run_local_drone;
|
||||
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_client::BankClient;
|
||||
use solana_sdk::client::SyncClient;
|
||||
@ -675,47 +961,68 @@ mod tests {
|
||||
assert_eq!(should_switch_directions(20, 101), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_local_cluster() {
|
||||
fn test_bench_tps_local_cluster(config: Config) {
|
||||
solana_logger::setup();
|
||||
const NUM_NODES: usize = 1;
|
||||
let cluster = LocalCluster::new(&ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 2_000_000,
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
|
||||
native_instruction_processors: vec![solana_move_loader_program!()],
|
||||
..ClusterConfig::default()
|
||||
});
|
||||
|
||||
let drone_keypair = Keypair::new();
|
||||
cluster.transfer(&cluster.funding_keypair, &drone_keypair.pubkey(), 1_000_000);
|
||||
|
||||
let (addr_sender, addr_receiver) = channel();
|
||||
run_local_drone(drone_keypair, addr_sender, None);
|
||||
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
|
||||
let mut config = Config::default();
|
||||
config.tx_count = 100;
|
||||
config.duration = Duration::from_secs(5);
|
||||
cluster.transfer(
|
||||
&cluster.funding_keypair,
|
||||
&drone_keypair.pubkey(),
|
||||
100_000_000,
|
||||
);
|
||||
|
||||
let client = create_client(
|
||||
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
|
||||
FULLNODE_PORT_RANGE,
|
||||
);
|
||||
|
||||
let (addr_sender, addr_receiver) = channel();
|
||||
run_local_drone(drone_keypair, addr_sender, None);
|
||||
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
|
||||
let lamports_per_account = 100;
|
||||
let (keypairs, _keypair_balance) = generate_and_fund_keypairs(
|
||||
|
||||
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
|
||||
&client,
|
||||
Some(drone_addr),
|
||||
&config.id,
|
||||
config.tx_count,
|
||||
lamports_per_account,
|
||||
config.use_move,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let total = do_bench_tps(vec![client], config, keypairs, 0);
|
||||
let total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
|
||||
assert!(total > 100);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_local_cluster_solana() {
|
||||
let mut config = Config::default();
|
||||
config.tx_count = 100;
|
||||
config.duration = Duration::from_secs(10);
|
||||
|
||||
test_bench_tps_local_cluster(config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_local_cluster_move() {
|
||||
let mut config = Config::default();
|
||||
config.tx_count = 100;
|
||||
config.duration = Duration::from_secs(20);
|
||||
config.use_move = true;
|
||||
|
||||
test_bench_tps_local_cluster(config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_bank_client() {
|
||||
let (genesis_block, id) = create_genesis_block(10_000);
|
||||
@ -727,10 +1034,11 @@ mod tests {
|
||||
config.tx_count = 10;
|
||||
config.duration = Duration::from_secs(5);
|
||||
|
||||
let (keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20).unwrap();
|
||||
let (keypairs, _move_keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, false)
|
||||
.unwrap();
|
||||
|
||||
do_bench_tps(clients, config, keypairs, 0);
|
||||
do_bench_tps(clients, config, keypairs, 0, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -741,8 +1049,8 @@ mod tests {
|
||||
let tx_count = 10;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
|
||||
let (keypairs, _move_keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
|
||||
@ -759,8 +1067,8 @@ mod tests {
|
||||
let tx_count = 10;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
|
||||
let (keypairs, _move_keypairs, _keypair_balance) =
|
||||
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
|
||||
|
||||
let max_fee = client
|
||||
.get_recent_blockhash()
|
||||
|
@ -22,6 +22,7 @@ pub struct Config {
|
||||
pub write_to_client_file: bool,
|
||||
pub read_from_client_file: bool,
|
||||
pub target_lamports_per_signature: u64,
|
||||
pub use_move: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@ -40,6 +41,7 @@ impl Default for Config {
|
||||
write_to_client_file: false,
|
||||
read_from_client_file: false,
|
||||
target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
|
||||
use_move: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -100,6 +102,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
|
||||
.long("sustained")
|
||||
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("use-move")
|
||||
.long("use-move")
|
||||
.help("Use Move language transactions to perform transfers."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("tx_count")
|
||||
.long("tx_count")
|
||||
@ -211,5 +218,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
|
||||
args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
|
||||
}
|
||||
|
||||
args.use_move = matches.is_present("use-move");
|
||||
|
||||
args
|
||||
}
|
||||
|
@ -1,10 +1,14 @@
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate solana_move_loader_program;
|
||||
|
||||
mod bench;
|
||||
mod cli;
|
||||
|
||||
use crate::bench::{
|
||||
do_bench_tps, generate_and_fund_keypairs, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
|
||||
};
|
||||
use solana::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_core::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_sdk::fee_calculator::FeeCalculator;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use std::collections::HashMap;
|
||||
@ -17,7 +21,7 @@ use std::process::exit;
|
||||
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
||||
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
solana_logger::setup_with_filter("solana=info");
|
||||
solana_metrics::set_panic_hook("bench-tps");
|
||||
|
||||
let matches = cli::build_args().get_matches();
|
||||
@ -37,6 +41,7 @@ fn main() {
|
||||
write_to_client_file,
|
||||
read_from_client_file,
|
||||
target_lamports_per_signature,
|
||||
use_move,
|
||||
} = cli_config;
|
||||
|
||||
if write_to_client_file {
|
||||
@ -78,7 +83,7 @@ fn main() {
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let (keypairs, keypair_balance) = if read_from_client_file {
|
||||
let (keypairs, move_keypairs, keypair_balance) = if read_from_client_file && !use_move {
|
||||
let path = Path::new(&client_ids_and_stake_file);
|
||||
let file = File::open(path).unwrap();
|
||||
|
||||
@ -95,7 +100,7 @@ fn main() {
|
||||
// This prevents the amount of storage needed for bench-tps accounts from creeping up
|
||||
// across multiple runs.
|
||||
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
|
||||
(keypairs, last_balance)
|
||||
(keypairs, None, last_balance)
|
||||
} else {
|
||||
generate_and_fund_keypairs(
|
||||
&client,
|
||||
@ -103,6 +108,7 @@ fn main() {
|
||||
&id,
|
||||
tx_count,
|
||||
NUM_LAMPORTS_PER_ACCOUNT,
|
||||
use_move,
|
||||
)
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("Error could not fund keys: {:?}", e);
|
||||
@ -117,7 +123,14 @@ fn main() {
|
||||
duration,
|
||||
tx_count,
|
||||
sustained,
|
||||
use_move,
|
||||
};
|
||||
|
||||
do_bench_tps(vec![client], config, keypairs, keypair_balance);
|
||||
do_bench_tps(
|
||||
vec![client],
|
||||
config,
|
||||
keypairs,
|
||||
keypair_balance,
|
||||
move_keypairs,
|
||||
);
|
||||
}
|
||||
|
@ -6,8 +6,7 @@
|
||||
|
||||
- [Getting Started](getting-started.md)
|
||||
- [Testnet Participation](testnet-participation.md)
|
||||
- [Testnet Replicator](testnet-replicator.md)
|
||||
- [Example: Web Wallet](webwallet.md)
|
||||
- [Example Client: Web Wallet](webwallet.md)
|
||||
|
||||
- [Programming Model](programs.md)
|
||||
- [Example: Tic-Tac-Toe](tictactoe.md)
|
||||
@ -30,16 +29,29 @@
|
||||
- [Blocktree](blocktree.md)
|
||||
- [Gossip Service](gossip.md)
|
||||
- [The Runtime](runtime.md)
|
||||
|
||||
|
||||
- [Anatomy of a Transaction](transaction.md)
|
||||
|
||||
- [Running a Validator](running-validator.md)
|
||||
- [Hardware Requirements](validator-hardware.md)
|
||||
- [Choosing a Testnet](validator-testnet.md)
|
||||
- [Installing the Validator Software](validator-software.md)
|
||||
- [Starting a Validator](validator-start.md)
|
||||
- [Staking](validator-stake.md)
|
||||
- [Monitoring a Validator](validator-monitor.md)
|
||||
- [Publishing Validator Info](validator-info.md)
|
||||
- [Troubleshooting](validator-troubleshoot.md)
|
||||
- [FAQ](validator-faq.md)
|
||||
|
||||
- [Running a Replicator](running-replicator.md)
|
||||
|
||||
- [API Reference](api-reference.md)
|
||||
- [Transaction](transaction-api.md)
|
||||
- [Instruction](instruction-api.md)
|
||||
- [Blockstreamer](blockstreamer.md)
|
||||
- [JSON RPC API](jsonrpc-api.md)
|
||||
- [JavaScript API](javascript-api.md)
|
||||
- [solana-wallet CLI](wallet.md)
|
||||
- [solana CLI](cli.md)
|
||||
|
||||
- [Accepted Design Proposals](proposals.md)
|
||||
- [Ledger Replication](ledger-replication-to-implement.md)
|
||||
@ -59,10 +71,8 @@
|
||||
- [Economic Design MVP](ed_mvp.md)
|
||||
- [References](ed_references.md)
|
||||
- [Cluster Test Framework](cluster-test-framework.md)
|
||||
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
|
||||
- [Validator](validator-proposal.md)
|
||||
- [Simple Payment and State Verification](simple-payment-and-state-verification.md)
|
||||
- [Embedding the Move Langauge](embedding-move.md)
|
||||
- [Cross-Program Invocation](cross-program-invocation.md)
|
||||
|
||||
- [Implemented Design Proposals](implemented-proposals.md)
|
||||
@ -75,5 +85,7 @@
|
||||
- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
|
||||
- [Persistent Account Storage](persistent-account-storage.md)
|
||||
- [Reliable Vote Transmission](reliable-vote-transmission.md)
|
||||
- [Repair Service](repair-service.md)
|
||||
- [Repair Service](repair-service.md)
|
||||
- [Testing Programs](testing-programs.md)
|
||||
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
|
||||
- [Embedding the Move Langauge](embedding-move.md)
|
||||
|
@ -1,6 +1,6 @@
|
||||
## solana-wallet CLI
|
||||
## solana CLI
|
||||
|
||||
The [solana crate](https://crates.io/crates/solana) is distributed with a command-line interface tool
|
||||
The [solana-cli crate](https://crates.io/crates/solana-cli) provides a command-line interface tool for Solana
|
||||
|
||||
### Examples
|
||||
|
||||
@ -8,7 +8,7 @@ The [solana crate](https://crates.io/crates/solana) is distributed with a comman
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet address
|
||||
$ solana address
|
||||
|
||||
// Return
|
||||
<PUBKEY>
|
||||
@ -18,7 +18,7 @@ $ solana-wallet address
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet airdrop 123
|
||||
$ solana airdrop 123
|
||||
|
||||
// Return
|
||||
"Your balance is: 123"
|
||||
@ -28,7 +28,7 @@ $ solana-wallet airdrop 123
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet balance
|
||||
$ solana balance
|
||||
|
||||
// Return
|
||||
"Your balance is: 123"
|
||||
@ -38,7 +38,7 @@ $ solana-wallet balance
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet confirm <TX_SIGNATURE>
|
||||
$ solana confirm <TX_SIGNATURE>
|
||||
|
||||
// Return
|
||||
"Confirmed" / "Not found" / "Transaction failed with error <ERR>"
|
||||
@ -48,7 +48,7 @@ $ solana-wallet confirm <TX_SIGNATURE>
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet deploy <PATH>
|
||||
$ solana deploy <PATH>
|
||||
|
||||
// Return
|
||||
<PROGRAM_ID>
|
||||
@ -58,7 +58,7 @@ $ solana-wallet deploy <PATH>
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123
|
||||
$ solana pay <PUBKEY> 123
|
||||
|
||||
// Return
|
||||
<TX_SIGNATURE>
|
||||
@ -68,7 +68,7 @@ $ solana-wallet pay <PUBKEY> 123
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123 \
|
||||
$ solana pay <PUBKEY> 123 \
|
||||
--after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY>
|
||||
|
||||
// Return
|
||||
@ -81,7 +81,7 @@ $ solana-wallet pay <PUBKEY> 123 \
|
||||
A third party must send a signature to unlock the lamports.
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123 \
|
||||
$ solana pay <PUBKEY> 123 \
|
||||
--require-signature-from <PUBKEY>
|
||||
|
||||
// Return
|
||||
@ -92,7 +92,7 @@ $ solana-wallet pay <PUBKEY> 123 \
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123 \
|
||||
$ solana pay <PUBKEY> 123 \
|
||||
--after 2018-12-24T23:59 --require-timestamp-from <PUBKEY> \
|
||||
--require-signature-from <PUBKEY>
|
||||
|
||||
@ -104,7 +104,7 @@ $ solana-wallet pay <PUBKEY> 123 \
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123 \
|
||||
$ solana pay <PUBKEY> 123 \
|
||||
--require-signature-from <PUBKEY> \
|
||||
--require-signature-from <PUBKEY>
|
||||
|
||||
@ -116,7 +116,7 @@ $ solana-wallet pay <PUBKEY> 123 \
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet pay <PUBKEY> 123 \
|
||||
$ solana pay <PUBKEY> 123 \
|
||||
--require-signature-from <PUBKEY> \
|
||||
--cancelable
|
||||
|
||||
@ -128,7 +128,7 @@ $ solana-wallet pay <PUBKEY> 123 \
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet cancel <PROCESS_ID>
|
||||
$ solana cancel <PROCESS_ID>
|
||||
|
||||
// Return
|
||||
<TX_SIGNATURE>
|
||||
@ -138,7 +138,7 @@ $ solana-wallet cancel <PROCESS_ID>
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet send-signature <PUBKEY> <PROCESS_ID>
|
||||
$ solana send-signature <PUBKEY> <PROCESS_ID>
|
||||
|
||||
// Return
|
||||
<TX_SIGNATURE>
|
||||
@ -149,7 +149,7 @@ $ solana-wallet send-signature <PUBKEY> <PROCESS_ID>
|
||||
Use the current system time:
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID>
|
||||
$ solana send-timestamp <PUBKEY> <PROCESS_ID>
|
||||
|
||||
// Return
|
||||
<TX_SIGNATURE>
|
||||
@ -159,7 +159,7 @@ Or specify some other arbitrary timestamp:
|
||||
|
||||
```sh
|
||||
// Command
|
||||
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
|
||||
$ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
|
||||
|
||||
// Return
|
||||
<TX_SIGNATURE>
|
||||
@ -168,10 +168,10 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
|
||||
### Usage
|
||||
|
||||
```manpage
|
||||
solana-wallet 0.12.0
|
||||
solana 0.12.0
|
||||
|
||||
USAGE:
|
||||
solana-wallet [FLAGS] [OPTIONS] [SUBCOMMAND]
|
||||
solana [FLAGS] [OPTIONS] [SUBCOMMAND]
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -201,11 +201,11 @@ SUBCOMMANDS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-address
|
||||
solana-address
|
||||
Get your public key
|
||||
|
||||
USAGE:
|
||||
solana-wallet address
|
||||
solana address
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -213,11 +213,11 @@ FLAGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-airdrop
|
||||
solana-airdrop
|
||||
Request a batch of lamports
|
||||
|
||||
USAGE:
|
||||
solana-wallet airdrop <NUM>
|
||||
solana airdrop <NUM>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -228,11 +228,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-balance
|
||||
solana-balance
|
||||
Get your balance
|
||||
|
||||
USAGE:
|
||||
solana-wallet balance
|
||||
solana balance
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -240,11 +240,11 @@ FLAGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-cancel
|
||||
solana-cancel
|
||||
Cancel a transfer
|
||||
|
||||
USAGE:
|
||||
solana-wallet cancel <PROCESS_ID>
|
||||
solana cancel <PROCESS_ID>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -255,11 +255,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-confirm
|
||||
solana-confirm
|
||||
Confirm transaction by signature
|
||||
|
||||
USAGE:
|
||||
solana-wallet confirm <SIGNATURE>
|
||||
solana confirm <SIGNATURE>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -270,11 +270,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-deploy
|
||||
solana-deploy
|
||||
Deploy a program
|
||||
|
||||
USAGE:
|
||||
solana-wallet deploy <PATH>
|
||||
solana deploy <PATH>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -285,11 +285,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-fees
|
||||
solana-fees
|
||||
Display current cluster fees
|
||||
|
||||
USAGE:
|
||||
solana-wallet fees
|
||||
solana fees
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -297,11 +297,11 @@ FLAGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-get-transaction-count
|
||||
solana-get-transaction-count
|
||||
Get current transaction count
|
||||
|
||||
USAGE:
|
||||
solana-wallet get-transaction-count
|
||||
solana get-transaction-count
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -309,11 +309,11 @@ FLAGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-pay
|
||||
solana-pay
|
||||
Send a payment
|
||||
|
||||
USAGE:
|
||||
solana-wallet pay [FLAGS] [OPTIONS] <PUBKEY> <NUM>
|
||||
solana pay [FLAGS] [OPTIONS] <PUBKEY> <NUM>
|
||||
|
||||
FLAGS:
|
||||
--cancelable
|
||||
@ -331,11 +331,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-send-signature
|
||||
solana-send-signature
|
||||
Send a signature to authorize a transfer
|
||||
|
||||
USAGE:
|
||||
solana-wallet send-signature <PUBKEY> <PROCESS_ID>
|
||||
solana send-signature <PUBKEY> <PROCESS_ID>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
@ -347,11 +347,11 @@ ARGS:
|
||||
```
|
||||
|
||||
```manpage
|
||||
solana-wallet-send-timestamp
|
||||
solana-send-timestamp
|
||||
Send a timestamp to unlock a transfer
|
||||
|
||||
USAGE:
|
||||
solana-wallet send-timestamp [OPTIONS] <PUBKEY> <PROCESS_ID>
|
||||
solana send-timestamp [OPTIONS] <PUBKEY> <PROCESS_ID>
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
@ -2,15 +2,15 @@
|
||||
|
||||
Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients and replication-clients. Their contributions to the network, state validation and data storage respectively, and their requisite remittance mechanisms are discussed below.
|
||||
|
||||
The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are protocol-derived issuances from a network-controlled reserve of tokens (sometimes referred to as the ‘mining pool’). These rewards will constitute the total reward delivered to replication clients and a portion of the total rewards for validation clients, the remaining sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on predefined issuance schedule, will drive the majority of participant incentives to join the network.
|
||||
The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are protocol-derived issuances from a protocol-defined, global inflation rate. These rewards will constitute the total reward delivered to replication clients and a portion of the total rewards for validation clients, the remaining sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on predefined issuance schedule, will drive the majority of participant incentives to join the network.
|
||||
|
||||
These protocol-based rewards, to be distributed to participating validation and replication clients, are to be specified as annual interest rates calculated per, real-time, Solana epoch [DEFINITION]. As discussed further below, the issuance rates are determined as a function of total network validator staked percentage and total replication provided by replicators in each previous epoch. The choice for validator and replicator client rewards to be based on participation rates, rather than a global fixed inflation or interest rate, emphasizes a protocol priority of overall economic security, rather than monetary supply predictability. Due to Solana’s hard total supply cap of 1B tokens and the bounds of client participant rates in the protocol, we believe that global interest, and supply issuance, scenarios should be able to be modeled with reasonable uncertainties.
|
||||
These protocol-based rewards, to be distributed to participating validation and replication clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability which supports long term economic stability and security.
|
||||
|
||||
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction (be it a state execution or proof-of-replication verification). A mechanism for continuous and long-term funding of the mining pool through a pre-dedicated portion of transaction fees is also discussed below.
|
||||
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction (be it a state execution or proof-of-replication verification). A mechanism for continuous and long-term economic stability through partial burning of each transaction fee is also discussed below.
|
||||
|
||||
A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics.md), [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. The [Replication-client Economics](ed_replication_client_economics.md) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics ([Storage-replication rewards](ed_rce_storage_replication_rewards.md)) along with a replicator-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md). The [Economic Sustainability](ed_economic_sustainability.md) section dives deeper into Solana’s design for long-term economic sustainability and outlines the constraints and conditions for a self-sustaining economy. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
|
||||
A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics.md), [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rend_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics.md) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics ([Storage-replication rewards](ed_rce_storage_replication_rewards.md)) along with a replicator-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md). The [Economic Sustainability](ed_economic_sustainability.md) section dives deeper into Solana’s design for long-term economic sustainability and outlines the constraints and conditions for a self-sustaining economy. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
|
||||
|
||||
<!--  -->
|
||||
<p style="text-align:center;"><img src="img/solana_economic_design.png" alt="== Solana Economic Design Diagram ==" width="800"/></p>
|
||||
<p style="text-align:center;"><img src="img/economic_design_infl_230719.png" alt="== Solana Economic Design Diagram ==" width="800"/></p>
|
||||
|
||||
**Figure 1**: Schematic overview of Solana economic incentive design.
|
||||
|
@ -1,3 +1,3 @@
|
||||
## Validation-client Economics
|
||||
|
||||
Validator-clients are eligible to receive protocol-based (i.e. via mining pool) rewards issued via stake-based annual interest rates by providing compute (CPU+GPU) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic schedule as a function of total amount of Solana tokens staked in the system and duration since network launch (genesis block). Additionally, these clients may earn revenue through two types of transaction fees: state-validation transaction fees and pooled Proof-of-Replication (PoRep) transaction fees. The distribution of these two types of transaction fees to the participating validation set are designed independently as economic goals and attack vectors are unique between the state- generation/validation mechanism and the ledger replication/validation mechanism. For clarity, we separately describe the design and motivation of the three types of potential revenue streams for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and PoRep-validation transaction fees.
|
||||
Validator-clients are eligible to receive protocol-based (i.e. via inflation) rewards issued via stake-based annual interest rates (calculated per epoch) by providing compute (CPU+GPU) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of total amount of circulating tokens. Additionally, these clients may earn revenue through fees via state-validation transactions and Proof-of-Replication (PoRep) transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees.
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by replicator-clients. In this case, validators are providing compute (CPU/GPU) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block.
|
||||
|
||||
While replication-clients are incentivized and rewarded through protocol-based rewards schedule (see [Replication-client Economics](ed_replication_client_economics.md)), validator-clients will be incentivized to include and validate PoReps in PoH through the distribution of the transaction fees associated with the submitted PoRep. As will be described in detail in the Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy (e.g. 10x data redundancy). It was chosen not to include a distribution of these rewards to PoRep validators, and to rely only on the collection of PoRep attached transaction fees, due to the fact that the confluence of two participation incentive modes (state-validation inflation rate via global staked % and replication-validation rewards based on global redundancy factor) on the incentives of a single network participant (a validator-client) potentially opened up a significant incentive-driven attack surface area.
|
||||
While replication-clients are incentivized and rewarded through protocol-based rewards schedule (see [Replication-client Economics](ed_replication_client_economics.md)), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. As will be described in detail in the Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy (e.g. 10x data redundancy).
|
||||
|
||||
The validation of PoReps by validation-clients is computationally more expensive than state-validation (detail in the [Economic Sustainability](ed_economic_sustainability.md) chapter), thus the transaction fees are expected to be proportionally higher. However, because replication-client rewards are distributed in proportion to and only after submitted PoReps are validated, they are uniquely motivated for the inclusion and validation of their proofs. This pressure is expected to generate an adequate market economy between replication-clients and validation-clients. Additionally, transaction fees submitted with PoReps have no minimum amount pre-allocated to the mining pool, as do state-validation transaction fees.
|
||||
The validation of PoReps by validation-clients is computationally more expensive than state-validation (detail in the [Economic Sustainability](ed_economic_sustainability.md) chapter), thus the transaction fees are expected to be proportionally higher.
|
||||
|
||||
There are various attack vectors available for colluding validation and replication clients, as described in detail below in [Economic Sustainability](ed_economic_sustainability). To protect against various collusion attack vectors, for a given epoch, PoRep transaction fees are pooled, and redistributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of invalidated PoReps [DIAGRAM]. This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps (note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid).
|
||||
There are various attack vectors available for colluding validation and replication clients, as described in detail below in [Economic Sustainability](ed_economic_sustainability.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the replicator's challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps (note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid).
|
||||
|
@ -1,46 +1,40 @@
|
||||
### State-validation protocol-based rewards
|
||||
|
||||
Validator-clients have two functional roles in the Solana network
|
||||
Validator-clients have two functional roles in the Solana network:
|
||||
|
||||
* Validate (vote) the current global state of that PoH along with any Proofs-of-Replication (see [Replication Client Economics](ed_replication_client_economics.md)) that they are eligible to validate
|
||||
* Validate (vote) the current global state of that PoH along with any Proofs-of-Replication (see [Replication Client Economics](ed_replication_client_economics.md)) that they are eligible to validate.
|
||||
|
||||
* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity.
|
||||
|
||||
Validator-client rewards for these services are to be distributed at the end of each Solana epoch. Compensation for validator-clients is provided via a protocol-based annual interest rate dispersed in proportion to the stake-weight of each validator (see below) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each non-PoRep transaction fee, less a protocol-specified amount that is returned to the mining pool (see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)). PoRep transaction fees are not collected directly by the leader client but pooled and returned to the validator set in proportion to the number of successfully validated PoReps. (see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md))
|
||||
Validator-client rewards for these services are to be distributed at the end of each Solana epoch. Compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator (see below) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed (see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)). PoRep transaction fees are also collected by the leader client and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch a replicator's challenge. (see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md))
|
||||
|
||||
|
||||
The protocol-based annual interest-rate (%) per epoch to be distributed to validation-clients is to be a function of:
|
||||
The effective protocol-based annual interest rate (%) per epoch to be distributed to validation-clients is to be a function of:
|
||||
|
||||
* the current fraction of staked SOLs out of the current total circulating supply,
|
||||
* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule
|
||||
|
||||
* the global time since the genesis block instantiation
|
||||
* the fraction of staked SOLs out of the current total circulating supply,
|
||||
|
||||
* the up-time/participation [% of available slots/blocks that validator had opportunity to vote on?] of a given validator over the previous epoch.
|
||||
* the up-time/participation [% of available slots that validator had opportunity to vote on] of a given validator over the previous epoch.
|
||||
|
||||
The first two factors are protocol parameters only (i.e. independent of validator behavior in a given epoch) and describe a global validation reward schedule designed to both incentivize early participation and optimal security in the network. This schedule sets a maximum annual validator-client interest rate per epoch.
|
||||
The first factor is a function of protocol parameters only (i.e. independent of validator behavior in a given epoch) and results in a global validation reward schedule designed to incentivize early participation, provide clear monetary stability and provide optimal security in the network.
|
||||
|
||||
At any given point in time, this interest rate is pegged to a defined value given a specific % staked SOL out of the circulating supply (e.g. 10% interest rate when 66% of circulating SOL is staked). The interest rate adjusts as the square-root [TBD] of the % staked, leading to higher validation-client interest rates as the % staked drops below the targeted goal, thus incentivizing more participation leading to more security in the network. An example of such a schedule, for a specified point in time (e.g. network launch) is shown in **Table 1**.
|
||||
At any given point in time, a specific validator's interest rate can be determined based on the proportion of circulating supply that is staked by the network and the validator's uptime/activity in the previous epoch. For an illustrative example, consider a hypothetical instance of the network with an initial circulating token supply of 250MM tokens with an additional 250MM vesting over 3 years. Additionally an inflation rate is specified at network launch of 7.5%, and a disinflationary schedule of 20% decrease in inflation rate per year (the actual rates to be implemented are to be worked out during the testnet experimentation phase of mainnet launch). With these broad assumptions, the 10-year inflation rate (adjusted daily for this example) is shown in **Figure 2**, while the total circulating token supply is illustrated in **Figure 3**. Neglected in this toy-model is the inflation suppression due to the portion of each transaction fee that is to be destroyed.
|
||||
|
||||
| Percentage circulating supply staked [%] | Annual validator-client interest rate [%] |
|
||||
| ---: | ---: |
|
||||
| 5 | 13.87 |
|
||||
| 15 | 13.31 |
|
||||
| 25 | 12.73 |
|
||||
| 35 | 12.12 |
|
||||
| 45 | 11.48 |
|
||||
| 55 | 10.80 |
|
||||
| **66** | **10.00** |
|
||||
| 75 | 9.29 |
|
||||
| 85 | 8.44 |
|
||||
<p style="text-align:center;"><img src="img/p_ex_schedule.png" alt="drawing" width="800"/></p>
|
||||
**Figure 2:** In this example schedule, the annual inflation rate [%] reduces at around 20% per year, until it reaches the long-term, fixed, 1.5% rate.
|
||||
|
||||
**Table 1:** Example interest rate schedule based on % SOL staked out of circulating supply. In this case, interest rates are fixed at 10% for 66% of staked circulating supply
|
||||
<p style="text-align:center;"><img src="img/p_ex_supply.png" alt="drawing" width="800"/></p>
|
||||
**Figure 3:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 2**
|
||||
|
||||
Over time, the interest rate, at any network staked percentage, will drop as described by an algorithmic schedule. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. This mining-pool provided interest rate will reduce over time until a network-chosen baseline value is reached. This is a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for both state-validation and ledger storage replication (PoReps) are not accounted for here. A validation-client interest rate schedule as a function of % network staked and time is shown in** Figure 2**.
|
||||
Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2% which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for both state-validation and ledger storage replication (PoReps) are not accounted for here.
|
||||
|
||||
Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and replicator nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in **Figure 4**.
|
||||
|
||||
<!--  -->
|
||||
|
||||
<p style="text-align:center;"><img src="img/validation_client_interest_rates.png" alt="drawing" width="800"/></p>
|
||||
<p style="text-align:center;"><img src="img/p_ex_interest.png" alt="drawing" width="800"/></p>
|
||||
|
||||
**Figure 2:** In this example schedule, the annual interest rate [%] reduces at around 16.7% per year, until it reaches the long-term, fixed, 4% rate.
|
||||
**Figure 4:** Shown here are example validator interest rates over time, neglecting transaction fees, segmented by fraction of total circulating supply bonded as stake.
|
||||
|
||||
This epoch-specific protocol-defined interest rate sets an upper limit of *protocol-generated* annual interest rate (not absolute total interest rate) possible to be delivered to any validator-client per epoch. The distributed interest rate per epoch is then discounted from this value based on the participation of the validator-client during the previous epoch. Each epoch is comprised of XXX slots. The protocol-defined interest rate is then discounted by the log [TBD] of the % of slots a given validator submitted a vote on a PoH branch during that epoch, see **Figure XX**
|
||||
This epoch-specific protocol-defined interest rate sets an upper limit of *protocol-generated* annual interest rate (not absolute total interest rate) possible to be delivered to any validator-client per epoch. The distributed interest rate per epoch is then discounted from this value based on the participation of the validator-client during the previous epoch.
|
||||
|
@ -1,6 +1,6 @@
|
||||
### State-validation Transaction Fees
|
||||
|
||||
Each message sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, must contain a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they:
|
||||
Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, must contain a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they:
|
||||
|
||||
* provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction,
|
||||
|
||||
@ -10,11 +10,11 @@ Each message sent through the network, to be processed by the current leader val
|
||||
|
||||
* and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below.
|
||||
|
||||
Many current blockchain economies (e.g. Bitcoin, Ethereum), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is sent to the mining pool, with the resulting fee going to the current leader processing the transaction. These pooled fees, then re-enter the system through rewards distributed to validation-clients, through the process described above, and replication-clients, as discussed below.
|
||||
Many current blockchain economies (e.g. Bitcoin, Ethereum), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above, and replication-clients, as discussed below.
|
||||
|
||||
The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing a redistribution avenue that protects against "tax evasion" attacks (i.e. side-channel fee payments)<sup>[1](ed_referenced.md)</sup>. Constraints on the fixed portion of transaction fees going to the mining pool, to establish long-term economic sustainability, are established and discussed in detail in the [Economic Sustainability](ed_economic_sustainability.md) section.
|
||||
Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilisation. By monitoring a protocol specified gas usage with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level.
|
||||
|
||||
This minimum, protocol-earmarked, portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilisation. By monitoring a protocol specified gas usage with respect to a desired, target usage amount (e.g. 50% of a block's capacity), the minimum fee can be raised/lowered which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level.
|
||||
As mentioned, a fixed proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation-limiting mechanism that protects against "tax evasion" attacks (i.e. side-channel fee payments)<sup>[1](ed_referenced.md)</sup>.
|
||||
|
||||
Additionally, the minimum protocol captured fee can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total procotol captured fee to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the fees on their fork themselves, thus potentially reducing the incentive to censor in the first place.
|
||||
Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place.
|
||||
|
||||
|
BIN
book/src/img/economic_design_infl_230719.png
Normal file
BIN
book/src/img/economic_design_infl_230719.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 64 KiB |
BIN
book/src/img/p_ex_schedule.png
Normal file
BIN
book/src/img/p_ex_schedule.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 256 KiB |
BIN
book/src/img/p_ex_supply.png
Normal file
BIN
book/src/img/p_ex_supply.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 269 KiB |
Binary file not shown.
Before Width: | Height: | Size: 120 KiB |
@ -12,7 +12,7 @@ updates is managed using an on-chain update manifest program.
|
||||
#### Fetch and run a pre-built installer using a bootstrap curl/shell script
|
||||
The easiest install method for supported platforms:
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh
|
||||
```
|
||||
|
||||
This script will check github for the latest tagged release and download and run the
|
||||
@ -23,7 +23,7 @@ If additional arguments need to be specified during the installation, the
|
||||
following shell syntax is used:
|
||||
```bash
|
||||
$ init_args=.... # arguments for `solana-install-init ...`
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s - ${init_args}
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - ${init_args}
|
||||
```
|
||||
|
||||
#### Fetch and run a pre-built installer from a Github release
|
||||
@ -31,7 +31,7 @@ With a well-known release URL, a pre-built binary can be obtained for supported
|
||||
platforms:
|
||||
|
||||
```bash
|
||||
$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.16.0/solana-install-init-x86_64-apple-darwin
|
||||
$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.18.0/solana-install-init-x86_64-apple-darwin
|
||||
$ chmod +x ./solana-install-init
|
||||
$ ./solana-install-init --help
|
||||
```
|
||||
@ -130,7 +130,7 @@ FLAGS:
|
||||
-V, --version Prints version information
|
||||
|
||||
OPTIONS:
|
||||
-c, --config <PATH> Configuration file to use [default: /Users/mvines/Library/Preferences/solana/install.yml]
|
||||
-c, --config <PATH> Configuration file to use [default: .../Library/Preferences/solana/install.yml]
|
||||
|
||||
SUBCOMMANDS:
|
||||
deploy deploys a new update
|
||||
@ -152,7 +152,7 @@ FLAGS:
|
||||
-h, --help Prints help information
|
||||
|
||||
OPTIONS:
|
||||
-d, --data_dir <PATH> Directory to store install data [default: /Users/mvines/Library/Application Support/solana]
|
||||
-d, --data_dir <PATH> Directory to store install data [default: .../Library/Application Support/solana]
|
||||
-u, --url <URL> JSON RPC URL for the solana cluster [default: http://testnet.solana.com:8899]
|
||||
-p, --pubkey <PUBKEY> Public key of the update manifest [default: 9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp]
|
||||
```
|
||||
|
@ -26,10 +26,12 @@ Methods
|
||||
* [getBalance](#getbalance)
|
||||
* [getClusterNodes](#getclusternodes)
|
||||
* [getEpochInfo](#getepochinfo)
|
||||
* [getGenesisBlockhash](#getgenesisblockhash)
|
||||
* [getLeaderSchedule](#getleaderschedule)
|
||||
* [getProgramAccounts](#getprogramaccounts)
|
||||
* [getRecentBlockhash](#getrecentblockhash)
|
||||
* [getSignatureStatus](#getsignaturestatus)
|
||||
* [getSlot](#getslot)
|
||||
* [getSlotLeader](#getslotleader)
|
||||
* [getSlotsPerSegment](#getslotspersegment)
|
||||
* [getStorageTurn](#getstorageturn)
|
||||
@ -37,7 +39,8 @@ Methods
|
||||
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
|
||||
* [getTransactionCount](#gettransactioncount)
|
||||
* [getTotalSupply](#gettotalsupply)
|
||||
* [getEpochVoteAccounts](#getepochvoteaccounts)
|
||||
* [getVersion](#getversion)
|
||||
* [getVoteAccounts](#getvoteaccounts)
|
||||
* [requestAirdrop](#requestairdrop)
|
||||
* [sendTransaction](#sendtransaction)
|
||||
* [startSubscriptionChannel](#startsubscriptionchannel)
|
||||
@ -195,6 +198,25 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
|
||||
```
|
||||
|
||||
---
|
||||
### getGenesisBlockhash
|
||||
Returns the genesis block hash
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
* `string` - a Hash as base-58 encoded string
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getGenesisBlockhash"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### getLeaderSchedule
|
||||
@ -226,7 +248,7 @@ Returns all accounts owned by the provided program Pubkey
|
||||
|
||||
##### Results:
|
||||
The result field will be an array of arrays. Each sub array will contain:
|
||||
* `string` - a the account Pubkey as base-58 encoded string
|
||||
* `string` - the account Pubkey as base-58 encoded string
|
||||
and a JSON object, with the following sub fields:
|
||||
|
||||
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
|
||||
@ -293,6 +315,25 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
|
||||
|
||||
-----
|
||||
|
||||
### getSlot
|
||||
Returns the current slot the node is processing
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
* `u64` - Current slot
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"1234","id":1}
|
||||
```
|
||||
-----
|
||||
|
||||
### getSlotLeader
|
||||
Returns the current slot leader
|
||||
|
||||
@ -433,30 +474,52 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
|
||||
---
|
||||
|
||||
### getEpochVoteAccounts
|
||||
Returns the account info and associated stake for all the voting accounts in the current epoch.
|
||||
### getVersion
|
||||
Returns the current solana versions running on the node
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
The result field will be an array of JSON objects, each with the following sub fields:
|
||||
* `votePubkey` - Vote account public key, as base-58 encoded string
|
||||
* `nodePubkey` - Node public key, as base-58 encoded string
|
||||
* `stake` - the stake, in lamports, delegated to this vote account
|
||||
* `commission`, a 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
|
||||
The result field will be a JSON object with the following sub fields:
|
||||
* `solana-core`, software version of solana-core
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochVoteAccounts"}' http://localhost:8899
|
||||
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":[{"commission":0,"nodePubkey":"Et2RaZJdJRTzTkodUwiHr4H6sLkVmijBFv8tkd7oSSFY","stake":42,"votePubkey":"B4CdWq3NBSoH2wYsVE1CaZSWPo2ZtopE4SJipQhZ3srF"}],"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "0.17.2"},"id":1}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### getVoteAccounts
|
||||
Returns the account info and associated stake for all the voting accounts in the current bank.
|
||||
|
||||
##### Parameters:
|
||||
None
|
||||
|
||||
##### Results:
|
||||
The result field will be a JSON object of `current` and `delinquent` accounts,
|
||||
each containing an array of JSON objects with the following sub fields:
|
||||
* `votePubkey` - Vote account public key, as base-58 encoded string
|
||||
* `nodePubkey` - Node public key, as base-58 encoded string
|
||||
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
|
||||
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
|
||||
* `commission`, an 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
|
||||
* `lastVote` - Most recent slot voted on by this vote account
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### requestAirdrop
|
||||
Requests an airdrop of lamports to a Pubkey
|
||||
|
@ -7,14 +7,16 @@ confirmed by super majority of the cluster (Confirmation Time).
|
||||
Each cluster node maintains various counters that are incremented on certain events.
|
||||
These counters are periodically uploaded to a cloud based database. Solana's metrics
|
||||
dashboard fetches these counters, and computes the performance metrics and displays
|
||||
it on the dashboard.
|
||||
it on the dashboard.
|
||||
|
||||
## TPS
|
||||
|
||||
The leader node's banking stage maintains a count of transactions that it recorded.
|
||||
The dashboard displays the count averaged over 2 second period in the TPS time series
|
||||
graph. The dashboard also shows per second mean, maximum and total TPS as a running
|
||||
counter.
|
||||
Each node's bank runtime maintains a count of transactions that it has processed.
|
||||
The dashboard first calculates the median count of transactions across all metrics
|
||||
enabled nodes in the cluster. The median cluster transaction count is then averaged
|
||||
over a 2 second period and displayed in the TPS time series graph. The dashboard also
|
||||
shows the Mean TPS, Max TPS and Total Transaction Count stats which are all calculated from
|
||||
the median transaction count.
|
||||
|
||||
## Confirmation Time
|
||||
|
||||
@ -26,4 +28,4 @@ super majority vote, and when one of its children forks is frozen.
|
||||
The node assigns a timestamp to every new fork, and computes the time it took to confirm
|
||||
the fork. This time is reflected as validator confirmation time in performance metrics.
|
||||
The performance dashboard displays the average of each validator node's confirmation time
|
||||
as a time series graph.
|
||||
as a time series graph.
|
||||
|
@ -1,4 +1,4 @@
|
||||
## Testnet Replicator
|
||||
## Running a Replicator
|
||||
This document describes how to set up a replicator in the testnet
|
||||
|
||||
Please note some of the information and instructions described here may change
|
||||
@ -53,8 +53,7 @@ software.
|
||||
|
||||
##### Linux and mac OS
|
||||
```bash
|
||||
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
|
||||
```
|
||||
|
||||
Alternatively build the `solana-install` program from source and run the
|
||||
@ -130,10 +129,10 @@ $ export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
|
||||
```
|
||||
Then set up the storage accounts for your replicator by running:
|
||||
```bash
|
||||
$ solana-wallet --keypair replicator-keypair.json airdrop 100000
|
||||
$ solana-wallet --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
|
||||
$ solana --keypair replicator-keypair.json airdrop 100000
|
||||
$ solana --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
|
||||
```
|
||||
Note: Every time the testnet restarts, run the wallet steps to setup the replicator accounts again.
|
||||
Note: Every time the testnet restarts, run the steps to setup the replicator accounts again.
|
||||
|
||||
To start the replicator:
|
||||
```bash
|
||||
@ -147,8 +146,8 @@ gossip network by running:
|
||||
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
|
||||
```
|
||||
|
||||
Provide the **storage account pubkey** to the `solana-wallet show-storage-account` command to view
|
||||
Provide the **storage account pubkey** to the `solana show-storage-account` command to view
|
||||
the recent mining activity from your replicator:
|
||||
```bash
|
||||
$ solana-wallet --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
|
||||
$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
|
||||
```
|
35
book/src/running-validator.md
Normal file
35
book/src/running-validator.md
Normal file
@ -0,0 +1,35 @@
|
||||
# Running a Validator
|
||||
This document describes how to participate in the Solana testnet as a
|
||||
validator node.
|
||||
|
||||
Please note some of the information and instructions described here may change
|
||||
in future releases, and documentation will be updated for mainnet participation.
|
||||
|
||||
## Overview
|
||||
Solana currently maintains several testnets, each featuring a validator that can
|
||||
serve as the entrypoint to the cluster for your validator.
|
||||
|
||||
Current testnet entrypoints:
|
||||
- Stable, testnet.solana.com
|
||||
- Beta, beta.testnet.solana.com
|
||||
- Edge, edge.testnet.solana.com
|
||||
|
||||
Solana may launch special testnets for validator participation; we will provide
|
||||
you with a specific entrypoint URL to use.
|
||||
|
||||
Prior to mainnet, the testnets may be running different versions of solana
|
||||
software, which may feature breaking changes. For information on choosing a
|
||||
testnet and finding software version info, jump to
|
||||
[Choosing a Testnet](validator-testnet.md).
|
||||
|
||||
The testnets are configured to reset the ledger daily, or sooner,
|
||||
should the hourly automated cluster sanity test fail.
|
||||
|
||||
There is a network explorer that shows the status of solana testnets available
|
||||
at [http://explorer.solana.com/](https://explorer.solana.com/).
|
||||
|
||||
There is a **#validator-support** Discord channel available to reach other
|
||||
testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq).
|
||||
|
||||
Also we'd love it if you choose to register your validator node with us at
|
||||
[https://forms.gle/LfFscZqJELbuUP139](https://forms.gle/LfFscZqJELbuUP139).
|
@ -131,7 +131,9 @@ stake account lamports.
|
||||
|
||||
* `account[1]` - R - The VoteState instance.
|
||||
|
||||
* `account[2]` - R - syscall::current account, carries information about current Bank epoch
|
||||
* `account[2]` - R - sysvar::current account, carries information about current Bank epoch
|
||||
|
||||
* `account[3]` - R - stake_api::Config account, carries warmup, cooldown, and slashing configuration
|
||||
|
||||
### StakeInstruction::RedeemVoteCredits
|
||||
|
||||
@ -146,10 +148,11 @@ lamport, rewards paid are proportional to the number of lamports staked.
|
||||
* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
|
||||
* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
|
||||
* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request (picked at random).
|
||||
* `account[3]` - R - syscall::rewards account from the Bank that carries point value.
|
||||
* `account[3]` - R - sysvar::rewards account from the Bank that carries point value.
|
||||
* `account[4]` - R - sysvar::stake_history account from the Bank that carries stake warmup/cooldown history
|
||||
|
||||
Reward is paid out for the difference between `VoteState::credits` to
|
||||
`StakeState::Stake::credits_observed`, multiplied by `syscall::rewards::Rewards::validator_point_value`.
|
||||
`StakeState::Stake::credits_observed`, multiplied by `sysvar::rewards::Rewards::validator_point_value`.
|
||||
`StakeState::Stake::credits_observed` is updated to `VoteState::credits`. The commission is deposited into the Vote account token
|
||||
balance, and the reward is deposited to the Stake account token balance.
|
||||
|
||||
@ -167,7 +170,8 @@ stake_state.credits_observed = vote_state.credits;
|
||||
A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake instance that is deactivating, the transaction must be signed by this key.
|
||||
* `account[1]` - R - syscall::current account from the Bank that carries current epoch
|
||||
* `account[1]` - R - The VoteState instance to which this stake is delegated, required in case of slashing
|
||||
* `account[2]` - R - sysvar::current account from the Bank that carries current epoch
|
||||
|
||||
StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by
|
||||
that epoch, and Account::lamports will be available for withdrawal.
|
||||
@ -178,7 +182,8 @@ Lamports build up over time in a Stake account and any excess over activated sta
|
||||
|
||||
* `account[0]` - RW - The StakeState::Stake from which to withdraw, the transaction must be signed by this key.
|
||||
* `account[1]` - RW - Account that should be credited with the withdrawn lamports.
|
||||
* `account[2]` - R - syscall::current account from the Bank that carries current epoch, to calculate stake.
|
||||
* `account[2]` - R - sysvar::current account from the Bank that carries current epoch, to calculate stake.
|
||||
* `account[3]` - R - sysvar::stake_history account from the Bank that carries stake warmup/cooldown history
|
||||
|
||||
|
||||
## Benefits of the design
|
||||
@ -195,3 +200,106 @@ stake.
|
||||
## Example Callflow
|
||||
|
||||
<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>
|
||||
|
||||
## Staking Rewards
|
||||
|
||||
The specific mechanics and rules of the validator rewards regime is outlined
|
||||
here. Rewards are earned by delegating stake to a validator that is voting
|
||||
correctly. Voting incorrectly exposes that validator's stakes to
|
||||
[slashing](staking-and-rewards.md).
|
||||
|
||||
### Basics
|
||||
|
||||
The network pays rewards from a portion of network [inflation](inflation.md).
|
||||
The number of lamports available to pay rewards for an epoch is fixed and
|
||||
must be evenly divided among all staked nodes according to their relative stake
|
||||
weight and participation. The weighting unit is called a
|
||||
[point](terminology.md#point).
|
||||
|
||||
Rewards for an epoch are not available until the end of that epoch.
|
||||
|
||||
At the end of each epoch, the total number of points earned during the epoch is
|
||||
summed and used to divide the rewards portion of epoch inflation to arrive at a
|
||||
point value. This value is recorded in the bank in a
|
||||
[sysvar](terminology.md#sysvar) that maps epochs to point values.
|
||||
|
||||
During redemption, the stake program counts the points earned by the stake for
|
||||
each epoch, multiplies that by the epoch's point value, and transfers lamports in
|
||||
that amount from a rewards account into the stake and vote accounts according to
|
||||
the vote account's commission setting.
|
||||
|
||||
### Economics
|
||||
|
||||
Point value for an epoch depends on aggregate network participation. If participation
|
||||
in an epoch drops off, point values are higher for those that do participate.
|
||||
|
||||
### Earning credits
|
||||
|
||||
Validators earn one vote credit for every correct vote that exceeds maximum
|
||||
lockout, i.e. every time the validator's vote account retires a slot from its
|
||||
lockout list, making that vote a root for the node.
|
||||
|
||||
Stakers who have delegated to that validator earn points in proportion to their
|
||||
stake. Points earned is the product of vote credits and stake.
|
||||
|
||||
### Stake warmup, cooldown, withdrawal
|
||||
|
||||
Stakes, once delegated, do not become effective immediately. They must first
|
||||
pass through a warm up period. During this period some portion of the stake is
|
||||
considered "effective", the rest is considered "activating". Changes occur on
|
||||
epoch boundaries.
|
||||
|
||||
The stake program limits the rate of change to total network stake, reflected
|
||||
in the stake program's `config::warmup_rate` (typically 15% per epoch).
|
||||
|
||||
The amount of stake that can be warmed up each epoch is a function of the
|
||||
previous epoch's total effective stake, total activating stake, and the stake
|
||||
program's configured warmup rate.
|
||||
|
||||
Cooldown works the same way. Once a stake is deactivated, some part of it
|
||||
is considered "effective", and also "deactivating". As the stake cools
|
||||
down, it continues to earn rewards and be exposed to slashing, but it also
|
||||
becomes available for withdrawal.
|
||||
|
||||
Bootstrap stakes are not subject to warmup.
|
||||
|
||||
Rewards are paid against the "effective" portion of the stake for that epoch.
|
||||
|
||||
#### Warmup example
|
||||
|
||||
Consider the situation of a single stake of 1,000 activated at epoch N, with
|
||||
network warmup rate of 20%, and a quiescent total network stake at epoch N of 2,000.
|
||||
|
||||
At epoch N+1, the amount available to be activated for the network is 400 (20%
|
||||
of 2,000), and at epoch N, this example stake is the only stake activating, and so
|
||||
is entitled to all of the warmup room available.
|
||||
|
||||
|
||||
|epoch | effective | activating | total effective | total activating|
|
||||
|------|----------:|-----------:|----------------:|----------------:|
|
||||
|N-1 | | | 2,000 | 0 |
|
||||
|N | 0 | 1,000 | 2,000 | 1,000 |
|
||||
|N+1 | 400 | 600 | 2,400 | 600 |
|
||||
|N+2 | 880 | 120 | 2,880 | 120 |
|
||||
|N+3 | 1000 | 0 | 3,000 | 0 |
|
||||
|
||||
|
||||
Were 2 stakes (X and Y) to activate at epoch N, they would be awarded a portion of the 20%
|
||||
in proportion to their stakes. At each epoch effective and activating for each stake is
|
||||
a function of the previous epoch's state.
|
||||
|
||||
|epoch | X eff | X act | Y eff | Y act | total effective | total activating|
|
||||
|------|----------:|-----------:|----------:|-----------:|----------------:|----------------:|
|
||||
|N-1 | | | | | 2,000 | 0 |
|
||||
|N | 0 | 1,000 | 0 | 200 | 2,000 | 1,200 |
|
||||
|N+1 | 320 | 680 | 80 | 120 | 2,400 | 800 |
|
||||
|N+2 | 728 | 272 | 152 | 48 | 2,880 | 320 |
|
||||
|N+3 | 1000 | 0 | 200 | 0 | 3,200 | 0 |
|
||||
|
||||
|
||||
### Withdrawal
|
||||
|
||||
As rewards are earned lamports can be withdrawn from a stake account. Only
|
||||
lamports in excess of effective+activating stake may be withdrawn at any time.
|
||||
This means that during warmup, effectively no stake can be withdrawn. During
|
||||
cooldown, any tokens in excess of effective stake may be withdrawn (activating == 0);
|
||||
|
@ -58,6 +58,17 @@ with a ledger interpretation that matches the leader's.
|
||||
|
||||
A gossip network connecting all [nodes](#node) of a [cluster](#cluster).
|
||||
|
||||
#### cooldown period
|
||||
|
||||
Some number of epochs after stake has been deactivated while it progressively
|
||||
becomes available for withdrawal. During this period, the stake is considered to
|
||||
be "deactivating". More info about:
|
||||
[warmup and cooldown](stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal)
|
||||
|
||||
#### credit
|
||||
|
||||
See [vote credit](#vote-credit).
|
||||
|
||||
#### data plane
|
||||
|
||||
A multicast network used to efficiently validate [entries](#entry) and gain
|
||||
@ -193,6 +204,10 @@ The number of [fullnodes](#fullnode) participating in a [cluster](#cluster).
|
||||
|
||||
See [Proof of History](#proof-of-history).
|
||||
|
||||
#### point
|
||||
|
||||
A weighted [credit](#credit) in a rewards regime. In the validator [rewards regime](staking-rewards.md), the number of points owed to a stake during redemption is the product of the [vote credits](#vote-credit) earned and the number of lamports staked.
|
||||
|
||||
#### program
|
||||
|
||||
The code that interprets [instructions](#instruction).
|
||||
@ -276,6 +291,11 @@ hash values and a bit which says if this hash is valid or fake.
|
||||
|
||||
The number of keys and samples that a validator can verify each storage epoch.
|
||||
|
||||
#### sysvar
|
||||
|
||||
A synthetic [account](#account) provided by the runtime to allow programs to
|
||||
access network state such as current tick height, rewards [points](#point) values, etc.
|
||||
|
||||
#### thin client
|
||||
|
||||
A type of [client](#client) that trusts it is communicating with a valid
|
||||
@ -323,3 +343,15 @@ that it ran, which can then be verified in less time than it took to produce.
|
||||
#### vote
|
||||
|
||||
See [ledger vote](#ledger-vote).
|
||||
|
||||
#### vote credit
|
||||
|
||||
A reward tally for validators. A vote credit is awarded to a validator in its
|
||||
vote account when the validator reaches a [root](#root).
|
||||
|
||||
#### warmup period
|
||||
|
||||
Some number of epochs after stake has been delegated while it progressively
|
||||
becomes effective. During this period, the stake is considered to be
|
||||
"activating". More info about:
|
||||
[warmup and cooldown](stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal)
|
||||
|
@ -1,284 +1,5 @@
|
||||
## Testnet Participation
|
||||
This document describes how to participate in the testnet as a
|
||||
validator node.
|
||||
|
||||
Please note some of the information and instructions described here may change
|
||||
in future releases.
|
||||
|
||||
### Overview
|
||||
The testnet features a validator running at testnet.solana.com, which
|
||||
serves as the entrypoint to the cluster for your validator.
|
||||
|
||||
Additionally there is a blockexplorer available at
|
||||
[http://testnet.solana.com/](http://testnet.solana.com/).
|
||||
|
||||
The testnet is configured to reset the ledger daily, or sooner
|
||||
should the hourly automated cluster sanity test fail.
|
||||
|
||||
There is a **#validator-support** Discord channel available to reach other
|
||||
testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq).
|
||||
|
||||
Also we'd love it if you choose to register your validator node with us at
|
||||
[https://forms.gle/LfFscZqJELbuUP139](https://forms.gle/LfFscZqJELbuUP139).
|
||||
|
||||
### Machine Requirements
|
||||
Since the testnet is not intended for stress testing of max transaction
|
||||
throughput, a higher-end machine with a GPU is not necessary to participate.
|
||||
|
||||
However ensure the machine used is not behind a residential NAT to avoid NAT
|
||||
traversal issues. A cloud-hosted machine works best. **Ensure that IP ports
|
||||
8000 through 10000 are not blocked for Internet inbound and outbound traffic.**
|
||||
|
||||
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
|
||||
MacOS or WSL users may build from source.
|
||||
|
||||
For a performance testnet with many transactions we have some preliminary recommended setups:
|
||||
|
||||
| | Low end | Medium end | High end | Notes |
|
||||
| --- | ---------|------------|----------| -- |
|
||||
| CPU | AMD Threadripper 1900x | AMD Threadripper 2920x | AMD Threadripper 2950x | Consider a 10Gb-capable motherboard with as many PCIe lanes and m.2 slots as possible. |
|
||||
| RAM | 16GB | 32GB | 64GB | |
|
||||
| OS Drive | Samsung 860 Evo 2TB | Samsung 860 Evo 4TB | Samsung 860 Evo 4TB | Or equivalent SSD |
|
||||
| Accounts Drive(s) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
|
||||
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. |
|
||||
|
||||
#### GPU Requirements
|
||||
CUDA is required to make use of the GPU on your system. The provided Solana
|
||||
release binaries are built on Ubuntu 18.04 with <a
|
||||
href="https://developer.nvidia.com/cuda-toolkit-archive">CUDA Toolkit 10.1
|
||||
update 1</a>. If your machine is using a different CUDA version then you will
|
||||
need to rebuild from source.
|
||||
|
||||
#### Confirm The Testnet Is Reachable
|
||||
Before attaching a validator node, sanity check that the cluster is accessible
|
||||
to your machine by running some simple commands. If any of the commands fail,
|
||||
please retry 5-10 minutes later to confirm the testnet is not just restarting
|
||||
itself before debugging further.
|
||||
|
||||
Fetch the current transaction count over JSON RPC:
|
||||
```bash
|
||||
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
|
||||
```
|
||||
|
||||
Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
|
||||
|
||||
View the [metrics dashboard](
|
||||
https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
|
||||
for more detail on cluster activity.
|
||||
|
||||
### Validator Setup
|
||||
#### Obtaining The Software
|
||||
##### Bootstrap with `solana-install`
|
||||
|
||||
The `solana-install` tool can be used to easily install and upgrade the cluster
|
||||
software on Linux x86_64 and mac OS systems.
|
||||
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.5/install/solana-install-init.sh | sh -s
|
||||
```
|
||||
|
||||
Alternatively build the `solana-install` program from source and run the
|
||||
following command to obtain the same result:
|
||||
```bash
|
||||
$ solana-install init
|
||||
```
|
||||
|
||||
After a successful install, `solana-install update` may be used to easily update the cluster
|
||||
software to a newer version at any time.
|
||||
|
||||
##### Download Prebuilt Binaries
|
||||
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.
|
||||
|
||||
###### Linux
|
||||
Download the binaries by navigating to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
|
||||
archive:
|
||||
```bash
|
||||
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
||||
$ cd solana-release/
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
###### mac OS
|
||||
Download the binaries by navigating to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
|
||||
archive:
|
||||
```bash
|
||||
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
|
||||
$ cd solana-release/
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
##### Build From Source
|
||||
If you are unable to use the prebuilt binaries or prefer to build it yourself
|
||||
from source, navigate to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
and download the **Source Code** archive. Extract the code and build the
|
||||
binaries with:
|
||||
```bash
|
||||
$ ./scripts/cargo-install-all.sh .
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
If building for CUDA (Linux only), fetch the perf-libs first then include the
|
||||
`cuda` feature flag when building:
|
||||
```bash
|
||||
$ ./fetch-perf-libs.sh
|
||||
$ source target/perf-libs/env.sh
|
||||
$ ./scripts/cargo-install-all.sh . cuda
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
### Starting The Validator
|
||||
Sanity check that you are able to interact with the cluster by receiving a small
|
||||
airdrop of lamports from the testnet drone:
|
||||
```bash
|
||||
$ solana-wallet airdrop 123
|
||||
$ solana-wallet balance
|
||||
```
|
||||
|
||||
Also try running following command to join the gossip network and view all the other nodes in the cluster:
|
||||
```bash
|
||||
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
|
||||
# Press ^C to exit
|
||||
```
|
||||
|
||||
Now configure a key pair for your validator by running:
|
||||
```bash
|
||||
$ solana-keygen new -o ~/validator-keypair.json
|
||||
```
|
||||
|
||||
Then use one of the following commands, depending on your installation
|
||||
choice, to start the node:
|
||||
|
||||
If this is a `solana-install`-installation:
|
||||
```bash
|
||||
$ validator.sh --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
|
||||
```
|
||||
|
||||
Alternatively, the `solana-install run` command can be used to run the validator
|
||||
node while periodically checking for and applying software updates:
|
||||
```bash
|
||||
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
|
||||
```
|
||||
|
||||
If you built from source:
|
||||
```bash
|
||||
$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
|
||||
```
|
||||
|
||||
#### Enabling CUDA
|
||||
By default CUDA is disabled. If your machine has a GPU with CUDA installed,
|
||||
define the SOLANA_CUDA flag in your environment *before* running any of the
|
||||
previously mentioned commands:
|
||||
```bash
|
||||
$ export SOLANA_CUDA=1
|
||||
```
|
||||
|
||||
When your validator is started look for the following log message to indicate that CUDA is enabled:
|
||||
`"[<timestamp> solana::validator] CUDA is enabled"`
|
||||
|
||||
#### Controlling local network port allocation
|
||||
By default the validator will dynamically select available network ports in the
|
||||
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
|
||||
example, `validator.sh --dynamic-port-range 11000-11010 ...` will restrict the
|
||||
validator to ports 11000-11010.
|
||||
|
||||
### Validator Monitoring
|
||||
When `validator.sh` starts, it will output a validator configuration that looks
|
||||
similar to:
|
||||
```bash
|
||||
======================[ validator configuration ]======================
|
||||
identity pubkey: 4ceWXsL3UJvn7NYZiRkw7NsryMpviaKBDYr8GK7J61Dm
|
||||
vote pubkey: 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
|
||||
ledger: ...
|
||||
accounts: ...
|
||||
======================================================================
|
||||
```
|
||||
|
||||
The **identity pubkey** for your validator can also be found by running:
|
||||
```bash
|
||||
$ solana-keygen pubkey ~/validator-keypair.json
|
||||
```
|
||||
|
||||
From another console, confirm the IP address and **identity pubkey** of your validator is visible in the
|
||||
gossip network by running:
|
||||
```bash
|
||||
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
|
||||
```
|
||||
|
||||
Provide the **vote pubkey** to the `solana-wallet show-vote-account` command to view
|
||||
the recent voting activity from your validator:
|
||||
```bash
|
||||
$ solana-wallet show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
|
||||
```
|
||||
|
||||
The vote pubkey for the validator can also be found by running:
|
||||
```bash
|
||||
# If this is a `solana-install`-installation run:
|
||||
$ solana-keygen pubkey ~/.local/share/solana/install/active_release/config-local/validator-vote-keypair.json
|
||||
# Otherwise run:
|
||||
$ solana-keygen pubkey ./config-local/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
|
||||
#### Validator Metrics
|
||||
Metrics are available for local monitoring of your validator.
|
||||
|
||||
Docker must be installed and the current user added to the docker group. Then
|
||||
download `solana-metrics.tar.bz2` from the Github Release and run
|
||||
```bash
|
||||
$ tar jxf solana-metrics.tar.bz2
|
||||
$ cd solana-metrics/
|
||||
$ ./start.sh
|
||||
```
|
||||
|
||||
A local InfluxDB and Grafana instance is now running on your machine. Define
|
||||
`SOLANA_METRICS_CONFIG` in your environment as described at the end of the
|
||||
`start.sh` output and restart your validator.
|
||||
|
||||
Metrics should now be streaming and visible from your local Grafana dashboard.
|
||||
|
||||
#### Timezone For Log Messages
|
||||
Log messages emitted by your validator include a timestamp. When sharing logs
|
||||
with others to help triage issues, that timestamp can cause confusion as it does
|
||||
not contain timezone information.
|
||||
|
||||
To make it easier to compare logs between different sources we request that
|
||||
everybody use Pacific Time on their validator nodes. In Linux this can be
|
||||
accomplished by running:
|
||||
```bash
|
||||
$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
|
||||
```
|
||||
|
||||
#### Publishing Validator Info
|
||||
|
||||
You can publish your validator information to the chain to be publicly visible
|
||||
to other users.
|
||||
|
||||
Run the solana-validator-info CLI to populate a validator-info account:
|
||||
```bash
|
||||
$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
|
||||
```
|
||||
Optional fields for VALIDATOR_INFO_ARGS:
|
||||
* Website
|
||||
* Keybase Username
|
||||
* Details
|
||||
|
||||
##### Keybase
|
||||
|
||||
Including a Keybase username allows client applications (like the Solana Network
|
||||
Explorer) to automatically pull in your validator public profile, including
|
||||
cryptographic proofs, brand identity, etc. To connect your validator pubkey with
|
||||
Keybase:
|
||||
|
||||
1. Join https://keybase.io/ and complete the profile for your validator
|
||||
2. Add your validator **identity pubkey** to Keybase:
|
||||
* Create an empty file on your local computer called `validator-<PUBKEY>`
|
||||
* In Keybase, navigate to the Files section, and upload your pubkey file to
|
||||
a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
|
||||
* To check your pubkey, ensure you can successfully browse to
|
||||
`https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<PUBKEY>`
|
||||
3. Add or update your `solana-validator-info` with your Keybase username. The
|
||||
CLI will verify the `validator-<PUBKEY>` file
|
||||
Participate in our testnet:
|
||||
* [Running a Validator](running-validator.md)
|
||||
* [Running a Replicator](running-replicator.md)
|
||||
|
2
book/src/validator-faq.md
Normal file
2
book/src/validator-faq.md
Normal file
@ -0,0 +1,2 @@
|
||||
# Validator FAQ
|
||||
Coming soon...
|
28
book/src/validator-hardware.md
Normal file
28
book/src/validator-hardware.md
Normal file
@ -0,0 +1,28 @@
|
||||
# Validator Hardware Requirements
|
||||
Since the testnet is not intended for stress testing of max transaction
|
||||
throughput, a higher-end machine with a GPU is not necessary to participate.
|
||||
|
||||
However ensure the machine used is not behind a residential NAT to avoid NAT
|
||||
traversal issues. A cloud-hosted machine works best. **Ensure that IP ports
|
||||
8000 through 10000 are not blocked for Internet inbound and outbound traffic.**
|
||||
|
||||
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
|
||||
MacOS or WSL users may build from source.
|
||||
|
||||
## Recommended Setups
|
||||
For a performance testnet with many transactions we have some preliminary recommended setups:
|
||||
|
||||
| | Low end | Medium end | High end | Notes |
|
||||
| --- | ---------|------------|----------| -- |
|
||||
| CPU | AMD Threadripper 1900x | AMD Threadripper 2920x | AMD Threadripper 2950x | Consider a 10Gb-capable motherboard with as many PCIe lanes and m.2 slots as possible. |
|
||||
| RAM | 16GB | 32GB | 64GB | |
|
||||
| OS Drive | Samsung 860 Evo 2TB | Samsung 860 Evo 4TB | Samsung 860 Evo 4TB | Or equivalent SSD |
|
||||
| Accounts Drive(s) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
|
||||
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. |
|
||||
|
||||
## GPU Requirements
|
||||
CUDA is required to make use of the GPU on your system. The provided Solana
|
||||
release binaries are built on Ubuntu 18.04 with <a
|
||||
href="https://developer.nvidia.com/cuda-toolkit-archive">CUDA Toolkit 10.1
|
||||
update 1</a>. If your machine is using a different CUDA version then you will
|
||||
need to rebuild from source.
|
31
book/src/validator-info.md
Normal file
31
book/src/validator-info.md
Normal file
@ -0,0 +1,31 @@
|
||||
# Publishing Validator Info
|
||||
|
||||
You can publish your validator information to the chain to be publicly visible
|
||||
to other users.
|
||||
|
||||
## Run solana-validator-info
|
||||
Run the solana-validator-info CLI to populate a validator-info account:
|
||||
```bash
|
||||
$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
|
||||
```
|
||||
Optional fields for VALIDATOR_INFO_ARGS:
|
||||
* Website
|
||||
* Keybase Username
|
||||
* Details
|
||||
|
||||
## Keybase
|
||||
|
||||
Including a Keybase username allows client applications (like the Solana Network
|
||||
Explorer) to automatically pull in your validator public profile, including
|
||||
cryptographic proofs, brand identity, etc. To connect your validator pubkey with
|
||||
Keybase:
|
||||
|
||||
1. Join https://keybase.io/ and complete the profile for your validator
|
||||
2. Add your validator **identity pubkey** to Keybase:
|
||||
* Create an empty file on your local computer called `validator-<PUBKEY>`
|
||||
* In Keybase, navigate to the Files section, and upload your pubkey file to
|
||||
a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
|
||||
* To check your pubkey, ensure you can successfully browse to
|
||||
`https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<PUBKEY>`
|
||||
3. Add or update your `solana-validator-info` with your Keybase username. The
|
||||
CLI will verify the `validator-<PUBKEY>` file
|
106
book/src/validator-monitor.md
Normal file
106
book/src/validator-monitor.md
Normal file
@ -0,0 +1,106 @@
|
||||
# Validator Monitoring
|
||||
When `validator.sh` starts, it will output a validator configuration that looks
|
||||
similar to:
|
||||
```bash
|
||||
======================[ validator configuration ]======================
|
||||
identity pubkey: 4ceWXsL3UJvn7NYZiRkw7NsryMpviaKBDYr8GK7J61Dm
|
||||
vote pubkey: 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
|
||||
ledger: ...
|
||||
accounts: ...
|
||||
======================================================================
|
||||
```
|
||||
|
||||
## Check Gossip
|
||||
The **identity pubkey** for your validator can also be found by running:
|
||||
```bash
|
||||
$ solana-keygen pubkey ~/validator-keypair.json
|
||||
```
|
||||
|
||||
From another console, confirm the IP address and **identity pubkey** of your
|
||||
validator is visible in the gossip network by running:
|
||||
```bash
|
||||
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
|
||||
```
|
||||
|
||||
## Check Vote Activity
|
||||
The vote pubkey for the validator can be found by running:
|
||||
```bash
|
||||
$ solana-keygen pubkey ~/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
Provide the **vote pubkey** to the `solana show-vote-account` command to view
|
||||
the recent voting activity from your validator:
|
||||
```bash
|
||||
$ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
|
||||
```
|
||||
|
||||
## Check Your Balance
|
||||
Your lamport balance should decrease by the transaction fee amount as your
|
||||
validator submits votes, and increase after serving as the leader:
|
||||
```bash
|
||||
$ solana balance
|
||||
```
|
||||
|
||||
## Check Slot Number
|
||||
After your validator boots, it may take some time to catch up with the cluster.
|
||||
Use the `get-slot` command to view the current slot that the cluster is
|
||||
processing:
|
||||
```bash
|
||||
$ solana get-slot
|
||||
```
|
||||
|
||||
The current slot that your validator is processing can then be seen with:
|
||||
```bash
|
||||
$ solana --url http://127.0.0.1:8899 get-slot
|
||||
```
|
||||
|
||||
Until your validator has caught up, it will not be able to vote successfully and
|
||||
stake cannot be delegated to it.
|
||||
|
||||
Also if you find the cluster's slot advancing faster than yours, you will likely
|
||||
never catch up. This typically implies some kind of networking issue between
|
||||
your validator and the rest of the cluster.
|
||||
|
||||
## Get Cluster Info
|
||||
There are several useful JSON-RPC endpoints for monitoring your validator on the
|
||||
cluster, as well as the health of the cluster:
|
||||
|
||||
```bash
|
||||
# Similar to solana-gossip, you should see your validator in the list of cluster nodes
|
||||
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
|
||||
# If your validator is properly voting, it should appear in the list of `current` vote accounts. If staked, `stake` should be > 0
|
||||
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
|
||||
# Returns the current leader schedule
|
||||
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
|
||||
# Returns info about the current epoch. slotIndex should progress on subsequent calls.
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://testnet.solana.com:8899
|
||||
```
|
||||
|
||||
## Validator Metrics
|
||||
Metrics are available for local monitoring of your validator.
|
||||
|
||||
Docker must be installed and the current user added to the docker group. Then
|
||||
download `solana-metrics.tar.bz2` from the Github Release and run
|
||||
```bash
|
||||
$ tar jxf solana-metrics.tar.bz2
|
||||
$ cd solana-metrics/
|
||||
$ ./start.sh
|
||||
```
|
||||
|
||||
A local InfluxDB and Grafana instance is now running on your machine. Define
|
||||
`SOLANA_METRICS_CONFIG` in your environment as described at the end of the
|
||||
`start.sh` output and restart your validator.
|
||||
|
||||
Metrics should now be streaming and visible from your local Grafana dashboard.
|
||||
|
||||
## Timezone For Log Messages
|
||||
Log messages emitted by your validator include a timestamp. When sharing logs
|
||||
with others to help triage issues, that timestamp can cause confusion as it does
|
||||
not contain timezone information.
|
||||
|
||||
To make it easier to compare logs between different sources we request that
|
||||
everybody use Pacific Time on their validator nodes. In Linux this can be
|
||||
accomplished by running:
|
||||
```bash
|
||||
$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
|
||||
```
|
63
book/src/validator-software.md
Normal file
63
book/src/validator-software.md
Normal file
@ -0,0 +1,63 @@
|
||||
# Installing the Validator Software
|
||||
|
||||
## Bootstrap with `solana-install`
|
||||
|
||||
The `solana-install` tool can be used to easily install and upgrade the validator
|
||||
software on Linux x86_64 and mac OS systems.
|
||||
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
|
||||
```
|
||||
|
||||
Alternatively build the `solana-install` program from source and run the
|
||||
following command to obtain the same result:
|
||||
```bash
|
||||
$ solana-install init
|
||||
```
|
||||
|
||||
After a successful install, `solana-install update` may be used to easily update the cluster
|
||||
software to a newer version at any time.
|
||||
|
||||
## Download Prebuilt Binaries
|
||||
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.
|
||||
|
||||
### Linux
|
||||
Download the binaries by navigating to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
|
||||
archive:
|
||||
```bash
|
||||
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
|
||||
$ cd solana-release/
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
### mac OS
|
||||
Download the binaries by navigating to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
|
||||
archive:
|
||||
```bash
|
||||
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
|
||||
$ cd solana-release/
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
## Build From Source
|
||||
If you are unable to use the prebuilt binaries or prefer to build it yourself
|
||||
from source, navigate to
|
||||
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
|
||||
and download the **Source Code** archive. Extract the code and build the
|
||||
binaries with:
|
||||
```bash
|
||||
$ ./scripts/cargo-install-all.sh .
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
If building for CUDA (Linux only), fetch the perf-libs first then include the
|
||||
`cuda` feature flag when building:
|
||||
```bash
|
||||
$ ./fetch-perf-libs.sh
|
||||
$ source target/perf-libs/env.sh
|
||||
$ ./scripts/cargo-install-all.sh . cuda
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
41
book/src/validator-stake.md
Normal file
41
book/src/validator-stake.md
Normal file
@ -0,0 +1,41 @@
|
||||
# Staking a Validator
|
||||
When your validator starts, it will have no stake, which means it will be
|
||||
ineligible to become leader.
|
||||
|
||||
Adding stake can be accomplished by using the `solana` CLI
|
||||
|
||||
First create a stake account keypair with `solana-keygen`:
|
||||
```bash
|
||||
$ solana-keygen new -o ~/validator-config/stake-keypair.json
|
||||
```
|
||||
and use the cli's `delegate-stake` command to stake your validator with 42 lamports:
|
||||
```bash
|
||||
$ solana delegate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json 42
|
||||
```
|
||||
|
||||
Note that stakes need to warm up, and warmup increments are applied at Epoch boundaries, so it can take an hour
|
||||
or more for the change to fully take effect.
|
||||
|
||||
Assuming your node is voting, now you're up and running and generating validator rewards. You'll want
|
||||
to periodically redeem/claim your rewards:
|
||||
|
||||
```bash
|
||||
$ solana-wallet redeem-vote-credits ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
The rewards lamports earned are split between your stake account and the vote account according to the
|
||||
commission rate set in the vote account.
|
||||
|
||||
Stake can be deactivated by running:
|
||||
```bash
|
||||
$ solana deactivate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
|
||||
```
|
||||
|
||||
The stake will cool down and deactivate over time. While cooling down, your stake will continue to earn
|
||||
rewards.
|
||||
|
||||
Note that a stake account may only be used once, so after deactivation, use the
|
||||
cli's `withdraw-stake` command to recover the previously staked lamports.
|
||||
|
||||
Be sure to redeem your credits before withdrawing all your lamports.
|
||||
Once the account is fully withdrawn, the account is destroyed.
|
112
book/src/validator-start.md
Normal file
112
book/src/validator-start.md
Normal file
@ -0,0 +1,112 @@
|
||||
# Starting a Validator
|
||||
|
||||
## Confirm The Testnet Is Reachable
|
||||
Before attaching a validator node, sanity check that the cluster is accessible
|
||||
to your machine by running some simple commands. If any of the commands fail,
|
||||
please retry 5-10 minutes later to confirm the testnet is not just restarting
|
||||
itself before debugging further.
|
||||
|
||||
Fetch the current transaction count over JSON RPC:
|
||||
```bash
|
||||
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
|
||||
```
|
||||
|
||||
Inspect the network explorer at
|
||||
[https://explorer.solana.com/](https://explorer.solana.com/) for activity.
|
||||
|
||||
View the [metrics dashboard](
|
||||
https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
|
||||
for more detail on cluster activity.
|
||||
|
||||
## Confirm your Installation
|
||||
Sanity check that you are able to interact with the cluster by receiving a small
|
||||
airdrop of lamports from the testnet drone:
|
||||
```bash
|
||||
$ solana set --url http://testnet.solana.com:8899
|
||||
$ solana get
|
||||
$ solana airdrop 123
|
||||
$ solana balance
|
||||
```
|
||||
|
||||
Also try running following command to join the gossip network and view all the
|
||||
other nodes in the cluster:
|
||||
```bash
|
||||
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
|
||||
# Press ^C to exit
|
||||
```
|
||||
|
||||
## Start your Validator
|
||||
Create an identity keypair for your validator by running:
|
||||
```bash
|
||||
$ solana-keygen new -o ~/validator-keypair.json
|
||||
```
|
||||
|
||||
### Wallet Configuration
|
||||
You can set solana configuration to use your validator keypair for all
|
||||
following commands:
|
||||
```bash
|
||||
$ solana set --keypair ~/validator-keypair.json
|
||||
```
|
||||
|
||||
**All following solana commands assume you have set `--keypair` config to
|
||||
your validator identity keypair.**
|
||||
If you haven't, you will need to add the `--keypair` argument to each command, like:
|
||||
```bash
|
||||
$ solana --keypair ~/validator-keypair.json airdrop 1000
|
||||
```
|
||||
(You can always override the set configuration by explicitly passing the
|
||||
`--keypair` argument with a command.)
|
||||
|
||||
### Validator Start
|
||||
Airdrop yourself some lamports to get started:
|
||||
```bash
|
||||
$ solana airdrop 1000
|
||||
```
|
||||
|
||||
Your validator will need a vote account. Create it now with the following
|
||||
commands:
|
||||
```bash
|
||||
$ solana-keygen new -o ~/validator-vote-keypair.json
|
||||
$ solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json 1
|
||||
```
|
||||
|
||||
Then use one of the following commands, depending on your installation
|
||||
choice, to start the node:
|
||||
|
||||
If this is a `solana-install`-installation:
|
||||
```bash
|
||||
$ validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block --entrypoint testnet.solana.com
|
||||
```
|
||||
|
||||
Alternatively, the `solana-install run` command can be used to run the validator
|
||||
node while periodically checking for and applying software updates:
|
||||
```bash
|
||||
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block --entrypoint testnet.solana.com
|
||||
```
|
||||
|
||||
If you built from source:
|
||||
```bash
|
||||
$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --poll-for-new-genesis-block --entrypoint testnet.solana.com
|
||||
```
|
||||
|
||||
### Enabling CUDA
|
||||
By default CUDA is disabled. If your machine has a GPU with CUDA installed,
|
||||
define the SOLANA_CUDA flag in your environment *before* running any of the
|
||||
previously mentioned commands:
|
||||
```bash
|
||||
$ export SOLANA_CUDA=1
|
||||
```
|
||||
|
||||
When your validator is started look for the following log message to indicate that CUDA is enabled:
|
||||
`"[<timestamp> solana::validator] CUDA is enabled"`
|
||||
|
||||
### Controlling local network port allocation
|
||||
By default the validator will dynamically select available network ports in the
|
||||
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
|
||||
example, `validator.sh --dynamic-port-range 11000-11010 ...` will restrict the
|
||||
validator to ports 11000-11010.
|
||||
|
||||
### Limiting ledger size to conserve disk space
|
||||
By default the validator will retain the full ledger. To conserve disk space
|
||||
start the validator with the `--limit-ledger-size`, which will instruct the
|
||||
validator to only retain the last couple hours of ledger.
|
72
book/src/validator-testnet.md
Normal file
72
book/src/validator-testnet.md
Normal file
@ -0,0 +1,72 @@
|
||||
# Choosing a Testnet
|
||||
As noted in the overview, solana currently maintains several testnets, each featuring a validator that can serve as the entrypoint to the cluster for your validator.
|
||||
|
||||
Current testnet entrypoints:
|
||||
- Stable, testnet.solana.com
|
||||
- Beta, beta.testnet.solana.com
|
||||
- Edge, edge.testnet.solana.com
|
||||
|
||||
Prior to mainnet, the testnets may be running different versions of solana
|
||||
software, which may feature breaking changes. Generally, the edge testnet tracks
|
||||
the tip of master, beta tracks the latest tagged minor release, and stable
|
||||
tracks the most stable tagged release.
|
||||
|
||||
### Get Testnet Version
|
||||
You can submit a JSON-RPC request to see the specific version of the cluster.
|
||||
```bash
|
||||
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
|
||||
{"jsonrpc":"2.0","result":{"solana-core":"0.18.0-pre1"},"id":1}
|
||||
```
|
||||
|
||||
## Using a Different Testnet
|
||||
This guide is written in the context of testnet.solana.com, our most stable
|
||||
cluster. To participate in another testnet, you will need to modify some of the
|
||||
commands in the following pages.
|
||||
|
||||
### Downloading Software
|
||||
If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.
|
||||
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
|
||||
```
|
||||
|
||||
```bash
|
||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
|
||||
```
|
||||
|
||||
Similarly, you can add this argument to the `solana-install` command if you've built the program from source:
|
||||
```bash
|
||||
$ solana-install init 0.18.0
|
||||
```
|
||||
|
||||
If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.
|
||||
|
||||
### Validator Commands
|
||||
Solana CLI tools like solana and solana-validator-info point at
|
||||
testnet.solana.com by default. Include a `--url` argument to point at a
|
||||
different testnet. For instance:
|
||||
```bash
|
||||
$ solana --url http://beta.testnet.solana.com:8899 balance
|
||||
```
|
||||
|
||||
The solana cli includes `get` and `set` configuration commands to automatically
|
||||
set the `--url` argument for future wallet commands.
|
||||
For example:
|
||||
```bash
|
||||
$ solana set --url http://beta.testnet.solana.com:8899
|
||||
$ solana balance # Same result as command above
|
||||
```
|
||||
(You can always override the set configuration by explicitly passing the `--url`
|
||||
argument with a command.)
|
||||
|
||||
Solana-gossip and solana-validator commands already require an explicit
|
||||
`--entrypoint` argument. Simply replace testnet.solana.com in the examples with
|
||||
an alternate url to interact with a different testnet. For example:
|
||||
```bash
|
||||
$ validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block beta.testnet.solana.com
|
||||
```
|
||||
|
||||
You can also submit JSON-RPC requests to a different testnet, like:
|
||||
```bash
|
||||
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
|
||||
```
|
2
book/src/validator-troubleshoot.md
Normal file
2
book/src/validator-troubleshoot.md
Normal file
@ -0,0 +1,2 @@
|
||||
# Troubleshooting Validator Issues
|
||||
Coming soon...
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-chacha-sys"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
description = "Solana chacha-sys"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -9,4 +9,4 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[build-dependencies]
|
||||
cc = "1.0.38"
|
||||
cc = "1.0.40"
|
||||
|
15
ci/buildkite-release.yml
Normal file
15
ci/buildkite-release.yml
Normal file
@ -0,0 +1,15 @@
|
||||
# Build steps that run on a release tag
|
||||
#
|
||||
# All the steps in `buildkite.yml` are skipped and we jump directly to the
|
||||
# secondary build steps since it's assumed the commit that was tagged is known
|
||||
# to be good so there's no need to rebuild and retest it.
|
||||
steps:
|
||||
- trigger: "solana-secondary"
|
||||
branches: "!pull/*"
|
||||
async: true
|
||||
build:
|
||||
message: "${BUILDKITE_MESSAGE}"
|
||||
commit: "${BUILDKITE_COMMIT}"
|
||||
branch: "${BUILDKITE_BRANCH}"
|
||||
env:
|
||||
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
|
@ -1,16 +1,19 @@
|
||||
#
|
||||
# Build steps that run after the primary pipeline on pushes and tags.
|
||||
# Pull requests do not run these steps.
|
||||
steps:
|
||||
- command: "sdk/docker-solana/build.sh"
|
||||
timeout_in_minutes: 40
|
||||
timeout_in_minutes: 60
|
||||
name: "publish docker"
|
||||
- command: "ci/publish-crate.sh"
|
||||
timeout_in_minutes: 90
|
||||
timeout_in_minutes: 120
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
- command: "ci/publish-bpf-sdk.sh"
|
||||
timeout_in_minutes: 5
|
||||
name: "publish bpf sdk"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
timeout_in_minutes: 45
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball"
|
||||
- command: "ci/publish-book.sh"
|
||||
timeout_in_minutes: 15
|
||||
|
@ -1,3 +1,6 @@
|
||||
# Build steps that run on pushes and pull requests.
|
||||
#
|
||||
# Release tags use buildkite-release.yml instead
|
||||
steps:
|
||||
- command: "ci/shellcheck.sh"
|
||||
name: "shellcheck"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/rust-version.sh to pick up the new image tag
|
||||
FROM rust:1.36.0
|
||||
FROM rust:1.37.0
|
||||
|
||||
# Add Google Protocol Buffers for Libra's metrics library.
|
||||
ENV PROTOC_VERSION 3.8.0
|
||||
@ -10,9 +10,7 @@ RUN set -x \
|
||||
&& apt update \
|
||||
&& apt-get install apt-transport-https \
|
||||
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
|
||||
&& echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list \
|
||||
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
|
||||
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
|
||||
&& apt update \
|
||||
&& apt install -y \
|
||||
buildkite-agent \
|
||||
@ -20,7 +18,6 @@ RUN set -x \
|
||||
cmake \
|
||||
lcov \
|
||||
libclang-common-7-dev \
|
||||
llvm-7 \
|
||||
mscgen \
|
||||
rsync \
|
||||
sudo \
|
||||
|
@ -74,20 +74,23 @@ source scripts/configure-metrics.sh
|
||||
nodes=(
|
||||
"multinode-demo/drone.sh"
|
||||
"multinode-demo/bootstrap-leader.sh \
|
||||
--enable-rpc-exit \
|
||||
--no-restart \
|
||||
--init-complete-file init-complete-node1.log"
|
||||
--init-complete-file init-complete-node1.log \
|
||||
--dynamic-port-range 8000-8019"
|
||||
"multinode-demo/validator.sh \
|
||||
--enable-rpc-exit \
|
||||
--no-restart \
|
||||
--dynamic-port-range 8020-8039
|
||||
--init-complete-file init-complete-node2.log \
|
||||
--rpc-port 18899"
|
||||
)
|
||||
|
||||
for i in $(seq 1 $extraNodes); do
|
||||
portStart=$((8040 + i * 20))
|
||||
portEnd=$((portStart + 19))
|
||||
nodes+=(
|
||||
"multinode-demo/validator.sh \
|
||||
--no-restart \
|
||||
--dynamic-port-range $portStart-$portEnd
|
||||
--label dyn$i \
|
||||
--init-complete-file init-complete-node$((2 + i)).log"
|
||||
)
|
||||
@ -267,7 +270,7 @@ verifyLedger() {
|
||||
(
|
||||
source multinode-demo/common.sh
|
||||
set -x
|
||||
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/$ledger-ledger verify
|
||||
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/$ledger verify
|
||||
) || flag_error
|
||||
done
|
||||
}
|
||||
|
@ -101,7 +101,8 @@ echo --- Creating tarball
|
||||
set -e
|
||||
cd "$(dirname "$0")"/..
|
||||
export USE_INSTALL=1
|
||||
export REQUIRE_CONFIG_DIR=1
|
||||
export REQUIRE_LEDGER_DIR=1
|
||||
export REQUIRE_KEYPAIRS=1
|
||||
exec multinode-demo/validator.sh "$@"
|
||||
EOF
|
||||
chmod +x solana-release/bin/validator.sh
|
||||
|
@ -13,8 +13,8 @@
|
||||
# $ source ci/rust-version.sh
|
||||
#
|
||||
|
||||
stable_version=1.36.0
|
||||
nightly_version=2019-07-19
|
||||
stable_version=1.37.0
|
||||
nightly_version=2019-08-21
|
||||
|
||||
export rust_stable="$stable_version"
|
||||
export rust_stable_docker_image=solanalabs/rust:"$stable_version"
|
||||
|
@ -10,42 +10,30 @@ source ci/rust-version.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
do_bpf_check() {
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
_ cargo +"$rust_nightly" test --all
|
||||
_ cargo +"$rust_nightly" clippy --version
|
||||
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
|
||||
_ cargo +"$rust_stable" audit
|
||||
}
|
||||
|
||||
(
|
||||
(
|
||||
cd sdk/bpf/rust/rust-no-std
|
||||
do_bpf_check
|
||||
)
|
||||
(
|
||||
cd sdk/bpf/rust/rust-utils
|
||||
do_bpf_check
|
||||
)
|
||||
(
|
||||
cd sdk/bpf/rust/rust-test
|
||||
do_bpf_check
|
||||
)
|
||||
for project in programs/bpf/rust/*/ ; do
|
||||
(
|
||||
cd "$project"
|
||||
do_bpf_check
|
||||
)
|
||||
done
|
||||
)
|
||||
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
|
||||
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
|
||||
# See https://github.com/solana-labs/solana/issues/5503
|
||||
_ cargo +"$rust_stable" clippy --version
|
||||
_ cargo +"$rust_stable" clippy --all -- --deny=warnings
|
||||
_ cargo +"$rust_stable" audit --version
|
||||
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0011 # https://github.com/solana-labs/solana/issues/5207
|
||||
_ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
|
||||
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
|
||||
|
||||
# _ cargo +"$rust_stable" audit --version ### cargo-audit stopped supporting --version?? https://github.com/RustSec/cargo-audit/issues/100
|
||||
_ cargo +"$rust_stable" audit
|
||||
_ ci/nits.sh
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ book/build.sh
|
||||
|
||||
for project in sdk/bpf/rust/{rust-no-std,rust-utils,rust-test} programs/bpf/rust/*/ ; do
|
||||
echo "+++ do_bpf_check $project"
|
||||
(
|
||||
cd "$project"
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
_ cargo +"$rust_nightly" test --all
|
||||
_ cargo +"$rust_nightly" clippy --version
|
||||
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
|
||||
_ cargo +"$rust_stable" audit
|
||||
)
|
||||
done
|
||||
|
||||
echo --- ok
|
||||
|
@ -90,7 +90,7 @@ Deploys a CD testnet
|
||||
- Attempt to generate a TLS certificate using this DNS name
|
||||
--fullnode-additional-disk-size-gb [number]
|
||||
- Size of additional disk in GB for all fullnodes
|
||||
--no-snapshot
|
||||
--no-snapshot-fetch
|
||||
- If set, disables booting validators from a snapshot
|
||||
|
||||
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
|
||||
@ -137,7 +137,7 @@ while [[ -n $1 ]]; do
|
||||
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
|
||||
shortArgs+=("$1")
|
||||
shift
|
||||
elif [[ $1 = --no-snapshot ]]; then
|
||||
elif [[ $1 = --no-snapshot-fetch ]]; then
|
||||
maybeNoSnapshot="$1"
|
||||
shift 1
|
||||
else
|
||||
|
@ -142,8 +142,6 @@ testnet-beta|testnet-beta-perf)
|
||||
testnet)
|
||||
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
|
||||
CHANNEL_BRANCH=$STABLE_CHANNEL
|
||||
: "${EC2_NODE_COUNT:=10}"
|
||||
: "${GCE_NODE_COUNT:=}"
|
||||
;;
|
||||
testnet-perf)
|
||||
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
|
||||
@ -156,8 +154,9 @@ testnet-demo)
|
||||
: "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
|
||||
;;
|
||||
tds)
|
||||
CHANNEL_OR_TAG=beta
|
||||
CHANNEL_BRANCH=$BETA_CHANNEL
|
||||
: "${TDS_CHANNEL_OR_TAG:=edge}"
|
||||
CHANNEL_OR_TAG="$TDS_CHANNEL_OR_TAG"
|
||||
CHANNEL_BRANCH="$CI_BRANCH"
|
||||
;;
|
||||
*)
|
||||
echo "Error: Invalid TESTNET=$TESTNET"
|
||||
@ -178,11 +177,11 @@ for val in "${GCE_LOW_QUOTA_ZONES[@]}"; do
|
||||
GCE_LOW_QUOTA_ZONE_ARGS+=("-z $val")
|
||||
done
|
||||
|
||||
if [[ -n $TESTNET_DB_HOST ]]; then
|
||||
SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
|
||||
if [[ -z $TESTNET_DB_HOST ]]; then
|
||||
TESTNET_DB_HOST="https://metrics.solana.com:8086"
|
||||
fi
|
||||
|
||||
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
|
||||
export SOLANA_METRICS_CONFIG="db=$TESTNET,host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
|
||||
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
|
||||
source scripts/configure-metrics.sh
|
||||
|
||||
@ -203,7 +202,6 @@ steps:
|
||||
TESTNET: "$TESTNET"
|
||||
TESTNET_OP: "$TESTNET_OP"
|
||||
TESTNET_DB_HOST: "$TESTNET_DB_HOST"
|
||||
EC2_NODE_COUNT: "$EC2_NODE_COUNT"
|
||||
GCE_NODE_COUNT: "$GCE_NODE_COUNT"
|
||||
GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
|
||||
EOF
|
||||
@ -220,7 +218,8 @@ sanity() {
|
||||
set -x
|
||||
NO_INSTALL_CHECK=1 \
|
||||
NO_LEDGER_VERIFY=1 \
|
||||
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
|
||||
NO_VALIDATOR_SANITY=1 \
|
||||
ci/testnet-sanity.sh edge-testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet-edge-perf)
|
||||
@ -237,7 +236,8 @@ sanity() {
|
||||
set -x
|
||||
NO_INSTALL_CHECK=1 \
|
||||
NO_LEDGER_VERIFY=1 \
|
||||
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
|
||||
NO_VALIDATOR_SANITY=1 \
|
||||
ci/testnet-sanity.sh beta-testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet-beta-perf)
|
||||
@ -252,19 +252,9 @@ sanity() {
|
||||
testnet)
|
||||
(
|
||||
set -x
|
||||
|
||||
ok=true
|
||||
if [[ -n $EC2_NODE_COUNT ]]; then
|
||||
NO_LEDGER_VERIFY=1 \
|
||||
ci/testnet-sanity.sh testnet-solana-com ec2 "${EC2_ZONES[0]}" || ok=false
|
||||
elif [[ -n $GCE_NODE_COUNT ]]; then
|
||||
NO_LEDGER_VERIFY=1 \
|
||||
ci/testnet-sanity.sh testnet-solana-com gce "${GCE_ZONES[0]}" || ok=false
|
||||
else
|
||||
echo "Error: no EC2 or GCE nodes"
|
||||
ok=false
|
||||
fi
|
||||
$ok
|
||||
NO_LEDGER_VERIFY=1 \
|
||||
NO_VALIDATOR_SANITY=1 \
|
||||
ci/testnet-sanity.sh testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
@ -334,9 +324,9 @@ deploy() {
|
||||
testnet-edge)
|
||||
(
|
||||
set -x
|
||||
ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
|
||||
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
|
||||
-a eipalloc-0ccd4f2239886fa94 --letsencrypt edge.testnet.solana.com \
|
||||
ci/testnet-deploy.sh -p edge-testnet-solana-com -C gce -z us-west1-b \
|
||||
-t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
|
||||
-a edge-testnet-solana-com --letsencrypt edge.testnet.solana.com \
|
||||
${skipCreate:+-e} \
|
||||
${skipStart:+-s} \
|
||||
${maybeStop:+-S} \
|
||||
@ -361,9 +351,9 @@ deploy() {
|
||||
(
|
||||
set -x
|
||||
NO_VALIDATOR_SANITY=1 \
|
||||
ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
|
||||
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
|
||||
-a eipalloc-0f286cf8a0771ce35 --letsencrypt beta.testnet.solana.com \
|
||||
ci/testnet-deploy.sh -p beta-testnet-solana-com -C gce -z us-west1-b \
|
||||
-t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
|
||||
-a beta-testnet-solana-com --letsencrypt beta.testnet.solana.com \
|
||||
${skipCreate:+-e} \
|
||||
${skipStart:+-s} \
|
||||
${maybeStop:+-S} \
|
||||
@ -387,30 +377,14 @@ deploy() {
|
||||
testnet)
|
||||
(
|
||||
set -x
|
||||
|
||||
if [[ -n $GCE_NODE_COUNT ]] || [[ -n $skipStart ]]; then
|
||||
maybeSkipStart="skip"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2068
|
||||
ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
|
||||
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f \
|
||||
-a eipalloc-0fa502bf95f6f18b2 --letsencrypt testnet.solana.com \
|
||||
${skipCreate:+-e} \
|
||||
${maybeSkipStart:+-s} \
|
||||
${maybeStop:+-S} \
|
||||
${maybeDelete:+-D}
|
||||
|
||||
if [[ -n $GCE_NODE_COUNT ]]; then
|
||||
# shellcheck disable=SC2068
|
||||
ci/testnet-deploy.sh -p testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
|
||||
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -f \
|
||||
NO_VALIDATOR_SANITY=1 \
|
||||
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
|
||||
-t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
|
||||
-a testnet-solana-com --letsencrypt testnet.solana.com \
|
||||
${skipCreate:+-e} \
|
||||
${skipStart:+-s} \
|
||||
${maybeStop:+-S} \
|
||||
${maybeDelete:+-D} \
|
||||
-x
|
||||
fi
|
||||
${maybeDelete:+-D}
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
@ -580,8 +554,7 @@ deploy() {
|
||||
${maybeExternalAccountsFile} \
|
||||
${maybeLamports} \
|
||||
${maybeAdditionalDisk} \
|
||||
--skip-deploy-update \
|
||||
--no-snapshot
|
||||
--skip-deploy-update
|
||||
)
|
||||
;;
|
||||
*)
|
||||
|
0
netutil/.gitignore → cli/.gitignore
vendored
0
netutil/.gitignore → cli/.gitignore
vendored
50
cli/Cargo.toml
Normal file
50
cli/Cargo.toml
Normal file
@ -0,0 +1,50 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.18.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
bs58 = "0.2.4"
|
||||
chrono = { version = "0.4.7", features = ["serde"] }
|
||||
clap = "2.33.0"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.3", features = ["termination"] }
|
||||
console = "0.7.7"
|
||||
dirs = "2.0.2"
|
||||
lazy_static = "1.3.0"
|
||||
log = "0.4.8"
|
||||
num-traits = "0.2"
|
||||
pretty-hex = "0.1.0"
|
||||
serde = "1.0.99"
|
||||
serde_derive = "1.0.99"
|
||||
serde_json = "1.0.40"
|
||||
serde_yaml = "0.8.9"
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.18.0" }
|
||||
solana-client = { path = "../client", version = "0.18.0" }
|
||||
solana-drone = { path = "../drone", version = "0.18.0" }
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.18.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.18.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.18.0" }
|
||||
url = "2.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "0.18.0" }
|
||||
solana-budget-program = { path = "../programs/budget_program", version = "0.18.0" }
|
||||
|
||||
[features]
|
||||
cuda = []
|
||||
|
||||
[[bin]]
|
||||
name = "solana"
|
||||
path = "src/main.rs"
|
@ -3,11 +3,11 @@ set -e
|
||||
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
cargo build --package solana-wallet
|
||||
cargo build --package solana-cli
|
||||
export PATH=$PWD/target/debug:$PATH
|
||||
|
||||
echo "\`\`\`manpage"
|
||||
solana-wallet --help
|
||||
solana --help
|
||||
echo "\`\`\`"
|
||||
echo ""
|
||||
|
||||
@ -15,7 +15,7 @@ commands=(address airdrop balance cancel confirm deploy fees get-transaction-cou
|
||||
|
||||
for x in "${commands[@]}"; do
|
||||
echo "\`\`\`manpage"
|
||||
solana-wallet "${x}" --help
|
||||
solana "${x}" --help
|
||||
echo "\`\`\`"
|
||||
echo ""
|
||||
done
|
49
cli/src/config.rs
Normal file
49
cli/src/config.rs
Normal file
@ -0,0 +1,49 @@
|
||||
// Wallet settings that can be configured for long-term use
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::fs::{create_dir_all, File};
|
||||
use std::io::{self, Write};
|
||||
use std::path::Path;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref CONFIG_FILE: Option<String> = {
|
||||
dirs::home_dir().map(|mut path| {
|
||||
path.extend(&[".config", "solana", "wallet", "config.yml"]);
|
||||
path.to_str().unwrap().to_string()
|
||||
})
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
|
||||
pub struct Config {
|
||||
pub url: String,
|
||||
pub keypair: String,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn new(url: &str, keypair: &str) -> Self {
|
||||
Self {
|
||||
url: url.to_string(),
|
||||
keypair: keypair.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load(config_file: &str) -> Result<Self, io::Error> {
|
||||
let file = File::open(config_file.to_string())?;
|
||||
let config = serde_yaml::from_reader(file)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn save(&self, config_file: &str) -> Result<(), io::Error> {
|
||||
let serialized = serde_yaml::to_string(self)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
|
||||
|
||||
if let Some(outdir) = Path::new(&config_file).parent() {
|
||||
create_dir_all(outdir)?;
|
||||
}
|
||||
let mut file = File::create(config_file)?;
|
||||
file.write_all(&serialized.into_bytes())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
11
cli/src/display.rs
Normal file
11
cli/src/display.rs
Normal file
@ -0,0 +1,11 @@
|
||||
use console::style;
|
||||
|
||||
// Pretty print a "name value"
|
||||
pub fn println_name_value(name: &str, value: &str) {
|
||||
let styled_value = if value == "" {
|
||||
style("(not set)").italic()
|
||||
} else {
|
||||
style(value)
|
||||
};
|
||||
println!("{} {}", style(name).bold(), styled_value);
|
||||
}
|
6
cli/src/lib.rs
Normal file
6
cli/src/lib.rs
Normal file
@ -0,0 +1,6 @@
|
||||
// Crate root for the solana-cli library: exposes the modules used by the
// `solana` binary (see src/main.rs).

// `lazy_static!` is used in `config` to compute the default config file path.
#[macro_use]
extern crate lazy_static;

pub mod config; // persistent wallet settings (YAML load/save)
pub mod display; // terminal output helpers
pub mod wallet; // wallet command parsing and execution
|
237
cli/src/main.rs
Normal file
237
cli/src/main.rs
Normal file
@ -0,0 +1,237 @@
|
||||
use clap::{crate_description, crate_name, crate_version, Arg, ArgGroup, ArgMatches, SubCommand};
|
||||
use console::style;
|
||||
use solana_cli::config::{self, Config};
|
||||
use solana_cli::display::println_name_value;
|
||||
use solana_cli::wallet::{app, parse_command, process_command, WalletConfig, WalletError};
|
||||
use solana_sdk::signature::{gen_keypair_file, read_keypair, KeypairUtil};
|
||||
use std::error;
|
||||
|
||||
/// Handle the `get` and `set` configuration subcommands, which operate only on
/// the local config file and never contact the cluster.
///
/// Returns `Ok(true)` when the invocation was NOT a settings subcommand and
/// normal wallet argument parsing should proceed; `Ok(false)` when a settings
/// subcommand was fully handled here.
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
    let parse_args = match matches.subcommand() {
        ("get", Some(subcommand_matches)) => {
            if let Some(config_file) = matches.value_of("config_file") {
                // A missing or unreadable file falls back to an empty default config.
                let config = Config::load(config_file).unwrap_or_default();
                if let Some(field) = subcommand_matches.value_of("specific_setting") {
                    let value = match field {
                        "url" => config.url,
                        "keypair" => config.keypair,
                        // clap restricts this argument to the values matched above.
                        _ => unreachable!(),
                    };
                    println_name_value(&format!("* {}:", field), &value);
                } else {
                    // No specific field requested: print the whole config.
                    println_name_value("Wallet Config:", config_file);
                    println_name_value("* url:", &config.url);
                    println_name_value("* keypair:", &config.keypair);
                }
            } else {
                println!("{} Either provide the `--config` arg or ensure home directory exists to use the default config location", style("No config file found.").bold());
            }
            false
        }
        ("set", Some(subcommand_matches)) => {
            if let Some(config_file) = matches.value_of("config_file") {
                // Start from the existing config (or defaults) and overwrite only
                // the fields that were passed on the command line.
                let mut config = Config::load(config_file).unwrap_or_default();
                if let Some(url) = subcommand_matches.value_of("url") {
                    config.url = url.to_string();
                }
                if let Some(keypair) = subcommand_matches.value_of("keypair") {
                    config.keypair = keypair.to_string();
                }
                config.save(config_file)?;
                println_name_value("Wallet Config Updated:", config_file);
                println_name_value("* url:", &config.url);
                println_name_value("* keypair:", &config.keypair);
            } else {
                println!("{} Either provide the `--config` arg or ensure home directory exists to use the default config location", style("No config file found.").bold());
            }
            false
        }
        // Any other subcommand is a real wallet command; let the caller parse it.
        _ => true,
    };
    Ok(parse_args)
}
|
||||
|
||||
/// Build a `WalletConfig` from the parsed command line, falling back to the
/// config file and then to `WalletConfig::default()` for unset values.
pub fn parse_args(matches: &ArgMatches<'_>) -> Result<WalletConfig, Box<dyn error::Error>> {
    // Load the persisted config when a config file path is known.
    let config = if let Some(config_file) = matches.value_of("config_file") {
        Config::load(config_file).unwrap_or_default()
    } else {
        Config::default()
    };
    // RPC URL precedence: --url flag, then config file, then built-in default.
    let json_rpc_url = if let Some(url) = matches.value_of("json_rpc_url") {
        url.to_string()
    } else if config.url != "" {
        config.url
    } else {
        let default = WalletConfig::default();
        default.json_rpc_url
    };

    // Optional drone host override; None means "same host as --url"
    // (per the --drone-host help text).
    let drone_host = if let Some(drone_host) = matches.value_of("drone_host") {
        Some(solana_netutil::parse_host(drone_host).or_else(|err| {
            Err(WalletError::BadParameter(format!(
                "Invalid drone host: {:?}",
                err
            )))
        })?)
    } else {
        None
    };

    // drone_port has a clap default value, so value_of() is always Some.
    let drone_port = matches
        .value_of("drone_port")
        .unwrap()
        .parse()
        .or_else(|err| {
            Err(WalletError::BadParameter(format!(
                "Invalid drone port: {:?}",
                err
            )))
        })?;

    // Keypair path precedence: --keypair flag, then config file, then
    // ~/.config/solana/id.json (generated on first use).
    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else if config.keypair != "" {
        &config.keypair
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        if !path.exists() {
            gen_keypair_file(path.to_str().unwrap())?;
            println!("New keypair generated at: {}", path.to_str().unwrap());
        }

        path.to_str().unwrap()
    };
    let keypair = read_keypair(id_path).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open keypair file: {}",
            err, id_path
        )))
    })?;

    let command = parse_command(&keypair.pubkey(), &matches)?;

    Ok(WalletConfig {
        command,
        drone_host,
        drone_port,
        json_rpc_url,
        keypair,
        rpc_client: None,
    })
}
|
||||
|
||||
// Return an error if a url cannot be parsed.
|
||||
fn is_url(string: String) -> Result<(), String> {
|
||||
match url::Url::parse(&string) {
|
||||
Ok(url) => {
|
||||
if url.has_host() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err("no host provided".to_string())
|
||||
}
|
||||
}
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Entry point for the `solana` CLI: builds the clap argument tree, handles
/// the `get`/`set` config subcommands, and otherwise runs a wallet command.
fn main() -> Result<(), Box<dyn error::Error>> {
    solana_logger::setup();

    // clap default values must be &str, so format the port up front.
    let default = WalletConfig::default();
    let default_drone_port = format!("{}", default.drone_port);

    let matches = app(crate_name!(), crate_description!(), crate_version!())
        .arg({
            // Only offer a default --config path when CONFIG_FILE resolved
            // (i.e. a home directory exists).
            let arg = Arg::with_name("config_file")
                .short("c")
                .long("config")
                .value_name("PATH")
                .takes_value(true)
                .help("Configuration file to use");
            if let Some(ref config_file) = *config::CONFIG_FILE {
                arg.default_value(&config_file)
            } else {
                arg
            }
        })
        .arg(
            Arg::with_name("json_rpc_url")
                .short("u")
                .long("url")
                .value_name("URL")
                .takes_value(true)
                .validator(is_url)
                .help("JSON RPC URL for the solana cluster"),
        )
        .arg(
            Arg::with_name("drone_host")
                .long("drone-host")
                .value_name("HOST")
                .takes_value(true)
                .help("Drone host to use [default: same as the --url host]"),
        )
        .arg(
            Arg::with_name("drone_port")
                .long("drone-port")
                .value_name("PORT")
                .takes_value(true)
                .default_value(&default_drone_port)
                .help("Drone port to use"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/id.json"),
        )
        // `get` prints the saved config (handled by parse_settings).
        .subcommand(
            SubCommand::with_name("get")
                .about("Get wallet config settings")
                .arg(
                    Arg::with_name("specific_setting")
                        .index(1)
                        .value_name("CONFIG_FIELD")
                        .takes_value(true)
                        .possible_values(&["url", "keypair"])
                        .help("Return a specific config setting"),
                ),
        )
        // `set` updates the saved config; at least one of --url/--keypair
        // is required via the arg group below.
        .subcommand(
            SubCommand::with_name("set")
                .about("Set a wallet config setting")
                .arg(
                    Arg::with_name("url")
                        .short("u")
                        .long("url")
                        .value_name("URL")
                        .takes_value(true)
                        .validator(is_url)
                        .help("Set default JSON RPC URL to query"),
                )
                .arg(
                    Arg::with_name("keypair")
                        .short("k")
                        .long("keypair")
                        .value_name("PATH")
                        .takes_value(true)
                        .help("/path/to/id.json"),
                )
                .group(
                    ArgGroup::with_name("config_settings")
                        .args(&["url", "keypair"])
                        .multiple(true)
                        .required(true),
                ),
        )
        .get_matches();

    // parse_settings returns false when a get/set subcommand was handled;
    // only run a wallet command otherwise.
    if parse_settings(&matches)? {
        let config = parse_args(&matches)?;
        let result = process_command(&config)?;
        println!("{}", result);
    }
    Ok(())
}
|
File diff suppressed because it is too large
Load Diff
@ -1,10 +1,10 @@
|
||||
use serde_json::{json, Value};
|
||||
use solana::validator::new_validator_for_tests;
|
||||
use solana_cli::wallet::{process_command, WalletCommand, WalletConfig};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_client::rpc_request::RpcRequest;
|
||||
use solana_core::validator::new_validator_for_tests;
|
||||
use solana_drone::drone::run_local_drone;
|
||||
use solana_sdk::bpf_loader;
|
||||
use solana_wallet::wallet::{process_command, WalletCommand, WalletConfig};
|
||||
use std::fs::{remove_dir_all, File};
|
||||
use std::io::Read;
|
||||
use std::path::PathBuf;
|
@ -1,17 +1,17 @@
|
||||
use chrono::prelude::*;
|
||||
use serde_json::Value;
|
||||
use solana_cli::wallet::{
|
||||
process_command, request_and_confirm_airdrop, WalletCommand, WalletConfig,
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_drone::drone::run_local_drone;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::KeypairUtil;
|
||||
use solana_wallet::wallet::{
|
||||
process_command, request_and_confirm_airdrop, WalletCommand, WalletConfig,
|
||||
};
|
||||
use std::fs::remove_dir_all;
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[cfg(test)]
|
||||
use solana::validator::new_validator_for_tests;
|
||||
use solana_core::validator::new_validator_for_tests;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -57,6 +57,14 @@ fn test_wallet_timestamp_tx() {
|
||||
.unwrap();
|
||||
check_balance(50, &rpc_client, &config_payer.keypair.pubkey());
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_witness.keypair.pubkey(),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Make transaction (from config_payer to bob_pubkey) requiring timestamp from config_witness
|
||||
let date_string = "\"2018-09-19T17:30:59Z\"";
|
||||
let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
|
||||
@ -120,6 +128,13 @@ fn test_wallet_witness_tx() {
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &drone_addr, &config_payer.keypair.pubkey(), 50)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_witness.keypair.pubkey(),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Make transaction (from config_payer to bob_pubkey) requiring witness signature from config_witness
|
||||
config_payer.command = WalletCommand::Pay(
|
@ -1,8 +1,8 @@
|
||||
use solana::validator::new_validator_for_tests;
|
||||
use solana_cli::wallet::{process_command, WalletCommand, WalletConfig};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::validator::new_validator_for_tests;
|
||||
use solana_drone::drone::run_local_drone;
|
||||
use solana_sdk::signature::KeypairUtil;
|
||||
use solana_wallet::wallet::{process_command, WalletCommand, WalletConfig};
|
||||
use std::fs::remove_dir_all;
|
||||
use std::sync::mpsc::channel;
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,19 +10,19 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
bs58 = "0.2.0"
|
||||
jsonrpc-core = "12.1.0"
|
||||
log = "0.4.7"
|
||||
bs58 = "0.2.4"
|
||||
jsonrpc-core = "13.0.0"
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
rayon = "1.1.0"
|
||||
reqwest = "0.9.19"
|
||||
serde = "1.0.97"
|
||||
serde_derive = "1.0.97"
|
||||
reqwest = "0.9.20"
|
||||
serde = "1.0.99"
|
||||
serde_derive = "1.0.99"
|
||||
serde_json = "1.0.40"
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.17.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.18.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
jsonrpc-core = "12.1.0"
|
||||
jsonrpc-http-server = "12.1.0"
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
jsonrpc-core = "13.0.0"
|
||||
jsonrpc-http-server = "13.0.0"
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
|
@ -11,7 +11,7 @@ use solana_sdk::fee_calculator::FeeCalculator;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{KeypairUtil, Signature};
|
||||
use solana_sdk::timing::{DEFAULT_NUM_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
|
||||
use solana_sdk::timing::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
|
||||
use solana_sdk::transaction::{self, Transaction, TransactionError};
|
||||
use std::error;
|
||||
use std::io;
|
||||
@ -20,7 +20,7 @@ use std::thread::sleep;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
pub struct RpcClient {
|
||||
client: Box<GenericRpcClientRequest + Send + Sync>,
|
||||
client: Box<dyn GenericRpcClientRequest + Send + Sync>,
|
||||
}
|
||||
|
||||
impl RpcClient {
|
||||
@ -94,6 +94,25 @@ impl RpcClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_version(&self) -> io::Result<String> {
|
||||
let response = self
|
||||
.client
|
||||
.send(&RpcRequest::GetVersion, None, 0)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetVersion request failure: {:?}", err),
|
||||
)
|
||||
})?;
|
||||
|
||||
serde_json::to_string(&response).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetVersion parse failure: {}", err),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction<T: KeypairUtil>(
|
||||
&self,
|
||||
transaction: &mut Transaction,
|
||||
@ -116,7 +135,7 @@ impl RpcClient {
|
||||
if cfg!(not(test)) {
|
||||
// Retry ~twice during a slot
|
||||
sleep(Duration::from_millis(
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_NUM_TICKS_PER_SECOND,
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
|
||||
));
|
||||
}
|
||||
};
|
||||
@ -162,7 +181,7 @@ impl RpcClient {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_NUM_TICKS_PER_SECOND));
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let signature = self.send_transaction(&transaction).ok();
|
||||
@ -176,7 +195,7 @@ impl RpcClient {
|
||||
if cfg!(not(test)) {
|
||||
// Retry ~twice during a slot
|
||||
sleep(Duration::from_millis(
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_NUM_TICKS_PER_SECOND,
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
|
||||
));
|
||||
}
|
||||
|
||||
@ -350,7 +369,7 @@ impl RpcClient {
|
||||
let blockhash = blockhash.parse().map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetRecentBlockhash parse failure: {:?}", err),
|
||||
format!("GetRecentBlockhash hash parse failure: {:?}", err),
|
||||
)
|
||||
})?;
|
||||
Ok((blockhash, fee_calculator))
|
||||
@ -368,7 +387,7 @@ impl RpcClient {
|
||||
|
||||
// Retry ~twice during a slot
|
||||
sleep(Duration::from_millis(
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_NUM_TICKS_PER_SECOND,
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
|
||||
));
|
||||
num_retries -= 1;
|
||||
}
|
||||
@ -378,6 +397,33 @@ impl RpcClient {
|
||||
))
|
||||
}
|
||||
|
||||
pub fn get_genesis_blockhash(&self) -> io::Result<Hash> {
|
||||
let response = self
|
||||
.client
|
||||
.send(&RpcRequest::GetGenesisBlockhash, None, 0)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetGenesisBlockhash request failure: {:?}", err),
|
||||
)
|
||||
})?;
|
||||
|
||||
let blockhash = serde_json::from_value::<String>(response).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetGenesisBlockhash parse failure: {:?}", err),
|
||||
)
|
||||
})?;
|
||||
|
||||
let blockhash = blockhash.parse().map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("GetGenesisBlockhash hash parse failure: {:?}", err),
|
||||
)
|
||||
})?;
|
||||
Ok(blockhash)
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
|
@ -4,7 +4,7 @@ use crate::rpc_request::{RpcError, RpcRequest};
|
||||
use log::*;
|
||||
use reqwest;
|
||||
use reqwest::header::CONTENT_TYPE;
|
||||
use solana_sdk::timing::{DEFAULT_NUM_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
|
||||
use solana_sdk::timing::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -73,7 +73,7 @@ impl GenericRpcClientRequest for RpcClientRequest {
|
||||
|
||||
// Sleep for approximately half a slot
|
||||
sleep(Duration::from_millis(
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_NUM_TICKS_PER_SECOND,
|
||||
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
@ -9,18 +9,20 @@ pub enum RpcRequest {
|
||||
GetAccountInfo,
|
||||
GetBalance,
|
||||
GetClusterNodes,
|
||||
GetGenesisBlockhash,
|
||||
GetNumBlocksSinceSignatureConfirmation,
|
||||
GetProgramAccounts,
|
||||
GetRecentBlockhash,
|
||||
GetSignatureStatus,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
GetEpochVoteAccounts,
|
||||
GetStorageTurn,
|
||||
GetStorageTurnRate,
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
GetVoteAccounts,
|
||||
RegisterNode,
|
||||
RequestAirdrop,
|
||||
SendTransaction,
|
||||
@ -37,6 +39,7 @@ impl RpcRequest {
|
||||
RpcRequest::GetAccountInfo => "getAccountInfo",
|
||||
RpcRequest::GetBalance => "getBalance",
|
||||
RpcRequest::GetClusterNodes => "getClusterNodes",
|
||||
RpcRequest::GetGenesisBlockhash => "getGenesisBlockhash",
|
||||
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
|
||||
"getNumBlocksSinceSignatureConfirmation"
|
||||
}
|
||||
@ -45,12 +48,13 @@ impl RpcRequest {
|
||||
RpcRequest::GetSignatureStatus => "getSignatureStatus",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts",
|
||||
RpcRequest::GetStorageTurn => "getStorageTurn",
|
||||
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
RpcRequest::GetVoteAccounts => "getVoteAccounts",
|
||||
RpcRequest::RegisterNode => "registerNode",
|
||||
RpcRequest::RequestAirdrop => "requestAirdrop",
|
||||
RpcRequest::SendTransaction => "sendTransaction",
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana"
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.17.0"
|
||||
version = "0.18.0"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -19,59 +19,61 @@ kvstore = ["solana-kvstore"]
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.1.4"
|
||||
bs58 = "0.2.0"
|
||||
bs58 = "0.2.4"
|
||||
byteorder = "1.3.2"
|
||||
bzip2 = "0.3.3"
|
||||
chrono = { version = "0.4.7", features = ["serde"] }
|
||||
core_affinity = "0.5.9"
|
||||
crc = { version = "1.8.1", optional = true }
|
||||
crossbeam-channel = "0.3"
|
||||
hashbrown = "0.2.0"
|
||||
dir-diff = "0.3.1"
|
||||
fs_extra = "1.1.0"
|
||||
indexmap = "1.0"
|
||||
itertools = "0.8.0"
|
||||
jsonrpc-core = "12.1.0"
|
||||
jsonrpc-derive = "12.1.0"
|
||||
jsonrpc-http-server = "12.1.0"
|
||||
jsonrpc-pubsub = "12.0.0"
|
||||
jsonrpc-ws-server = "12.1.0"
|
||||
libc = "0.2.58"
|
||||
log = "0.4.7"
|
||||
jsonrpc-core = "13.0.0"
|
||||
jsonrpc-derive = "13.0.0"
|
||||
jsonrpc-http-server = "13.0.0"
|
||||
jsonrpc-pubsub = "13.0.0"
|
||||
jsonrpc-ws-server = "13.0.0"
|
||||
libc = "0.2.62"
|
||||
log = "0.4.8"
|
||||
memmap = { version = "0.7.0", optional = true }
|
||||
nix = "0.14.1"
|
||||
nix = "0.15.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rayon = "1.1.0"
|
||||
reqwest = "0.9.19"
|
||||
rocksdb = "0.11.0"
|
||||
serde = "1.0.97"
|
||||
serde_derive = "1.0.97"
|
||||
reqwest = "0.9.20"
|
||||
serde = "1.0.99"
|
||||
serde_derive = "1.0.99"
|
||||
serde_json = "1.0.40"
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.17.0" }
|
||||
solana-budget-program = { path = "../programs/budget_program", version = "0.17.0" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "0.17.0" }
|
||||
solana-client = { path = "../client", version = "0.17.0" }
|
||||
solana-config-program = { path = "../programs/config_program", version = "0.17.0" }
|
||||
solana-drone = { path = "../drone", version = "0.17.0" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.18.0" }
|
||||
solana-budget-program = { path = "../programs/budget_program", version = "0.18.0" }
|
||||
solana-chacha-sys = { path = "../chacha-sys", version = "0.18.0" }
|
||||
solana-client = { path = "../client", version = "0.18.0" }
|
||||
solana-drone = { path = "../drone", version = "0.18.0" }
|
||||
solana-ed25519-dalek = "0.2.0"
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
|
||||
solana-kvstore = { path = "../kvstore", version = "0.17.0", optional = true }
|
||||
solana-logger = { path = "../logger", version = "0.17.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.17.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.17.0" }
|
||||
solana-measure = { path = "../measure", version = "0.17.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.17.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.17.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.17.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.17.0" }
|
||||
solana-stake-program = { path = "../programs/stake_program", version = "0.17.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.17.0" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.17.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.17.0" }
|
||||
solana-vote-program = { path = "../programs/vote_program", version = "0.17.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.17.0" }
|
||||
solana-kvstore = { path = "../kvstore", version = "0.18.0", optional = true }
|
||||
solana-logger = { path = "../logger", version = "0.18.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.18.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.18.0" }
|
||||
solana-measure = { path = "../measure", version = "0.18.0" }
|
||||
solana-netutil = { path = "../utils/netutil", version = "0.18.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.18.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.18.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.18.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.18.0" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.18.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.18.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.18.0" }
|
||||
symlink = "0.1.0"
|
||||
sys-info = "0.5.7"
|
||||
tar = "0.4.26"
|
||||
tempfile = "3.1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
untrusted = "0.7.0"
|
||||
|
||||
# reed-solomon-erasure's simd_c feature fails to build for x86_64-pc-windows-msvc, use pure-rust
|
||||
@ -80,6 +82,13 @@ reed-solomon-erasure = { version = "3.1.1", features = ["pure-rust"] }
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
reed-solomon-erasure = "3.1.1"
|
||||
|
||||
[dependencies.rocksdb]
|
||||
# Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts
|
||||
# when also using the bzip2 crate
|
||||
version = "0.11.0"
|
||||
default-features = false
|
||||
features = ["lz4"]
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.2.0"
|
||||
matches = "0.1.6"
|
||||
|
@ -2,21 +2,21 @@
|
||||
|
||||
extern crate test;
|
||||
#[macro_use]
|
||||
extern crate solana;
|
||||
extern crate solana_core;
|
||||
|
||||
use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana::banking_stage::{create_test_recorder, BankingStage};
|
||||
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
|
||||
use solana::cluster_info::ClusterInfo;
|
||||
use solana::cluster_info::Node;
|
||||
use solana::genesis_utils::{create_genesis_block, GenesisBlockInfo};
|
||||
use solana::packet::to_packets_chunked;
|
||||
use solana::poh_recorder::WorkingBankEntries;
|
||||
use solana::service::Service;
|
||||
use solana::test_tx::test_tx;
|
||||
use solana_core::banking_stage::{create_test_recorder, BankingStage};
|
||||
use solana_core::blocktree::{get_tmp_ledger_path, Blocktree};
|
||||
use solana_core::cluster_info::ClusterInfo;
|
||||
use solana_core::cluster_info::Node;
|
||||
use solana_core::genesis_utils::{create_genesis_block, GenesisBlockInfo};
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::poh_recorder::WorkingBankEntries;
|
||||
use solana_core::service::Service;
|
||||
use solana_core::test_tx::test_tx;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
|
@ -4,19 +4,20 @@ use rand;
|
||||
extern crate test;
|
||||
|
||||
#[macro_use]
|
||||
extern crate solana;
|
||||
extern crate solana_core;
|
||||
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
|
||||
use solana::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
|
||||
use solana::packet::{Blob, BLOB_HEADER_SIZE};
|
||||
use solana_core::blocktree::{get_tmp_ledger_path, Blocktree};
|
||||
use solana_core::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
|
||||
use solana_core::packet::{Blob, BLOB_HEADER_SIZE};
|
||||
use std::path::Path;
|
||||
use test::Bencher;
|
||||
|
||||
// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
|
||||
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &str) {
|
||||
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &Path) {
|
||||
let blocktree =
|
||||
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
|
||||
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
|
||||
|
||||
let num_blobs = blobs.len();
|
||||
|
||||
@ -36,7 +37,7 @@ fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &s
|
||||
}
|
||||
});
|
||||
|
||||
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
Blocktree::destroy(ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
// Insert some blobs into the ledger in preparation for read benchmarks
|
||||
@ -110,7 +111,7 @@ fn bench_read_sequential(bench: &mut Bencher) {
|
||||
// Generate random starting point in the range [0, total_blobs - 1], read num_reads blobs sequentially
|
||||
let start_index = rng.gen_range(0, num_small_blobs + num_large_blobs);
|
||||
for i in start_index..start_index + num_reads {
|
||||
let _ = blocktree.get_data_blob(slot, i as u64 % total_blobs);
|
||||
let _ = blocktree.get_data_shred_as_blob(slot, i as u64 % total_blobs);
|
||||
}
|
||||
});
|
||||
|
||||
@ -141,7 +142,7 @@ fn bench_read_random(bench: &mut Bencher) {
|
||||
.collect();
|
||||
bench.iter(move || {
|
||||
for i in indexes.iter() {
|
||||
let _ = blocktree.get_data_blob(slot, *i as u64);
|
||||
let _ = blocktree.get_data_shred_as_blob(slot, *i as u64);
|
||||
}
|
||||
});
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
//#![feature(test)]
|
||||
//
|
||||
//extern crate solana;
|
||||
//extern crate solana_core;
|
||||
//extern crate test;
|
||||
//
|
||||
//use solana::chacha::chacha_cbc_encrypt_files;
|
||||
//use solana_core::chacha::chacha_cbc_encrypt_files;
|
||||
//use std::fs::remove_file;
|
||||
//use std::fs::File;
|
||||
//use std::io::Write;
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use solana::gen_keys::GenKeys;
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use solana::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
|
||||
use solana_core::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
|
@ -1,10 +1,10 @@
|
||||
// This bench attempts to justify the value of `solana::poh_service::NUM_HASHES_PER_BATCH`
|
||||
// This bench attempts to justify the value of `solana_core::poh_service::NUM_HASHES_PER_BATCH`
|
||||
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use solana::poh::Poh;
|
||||
use solana::poh_service::NUM_HASHES_PER_BATCH;
|
||||
use solana_core::poh::Poh;
|
||||
use solana_core::poh_service::NUM_HASHES_PER_BATCH;
|
||||
use solana_sdk::hash::Hash;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
@ -1,8 +1,8 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use solana::entry::EntrySlice;
|
||||
use solana::entry::{next_entry_mut, Entry};
|
||||
use solana_core::entry::EntrySlice;
|
||||
use solana_core::entry::{next_entry_mut, Entry};
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
|
@ -2,10 +2,10 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use solana::packet::to_packets;
|
||||
use solana::recycler::Recycler;
|
||||
use solana::sigverify;
|
||||
use solana::test_tx::test_tx;
|
||||
use solana_core::packet::to_packets;
|
||||
use solana_core::recycler::Recycler;
|
||||
use solana_core::sigverify;
|
||||
use solana_core::test_tx::test_tx;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
|
@ -1,15 +1,15 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate solana;
|
||||
extern crate solana_core;
|
||||
extern crate test;
|
||||
|
||||
use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana::packet::to_packets_chunked;
|
||||
use solana::service::Service;
|
||||
use solana::sigverify_stage::SigVerifyStage;
|
||||
use solana::test_tx::test_tx;
|
||||
use solana_core::packet::to_packets_chunked;
|
||||
use solana_core::service::Service;
|
||||
use solana_core::sigverify_stage::SigVerifyStage;
|
||||
use solana_core::test_tx::test_tx;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
|
@ -1,25 +1,37 @@
|
||||
//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks
|
||||
|
||||
use bincode::{deserialize_from, serialize_into};
|
||||
use crate::result::Result;
|
||||
use crate::snapshot_package::SnapshotPackageSender;
|
||||
use crate::snapshot_utils;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::{Bank, BankRc, StatusCacheRc};
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::status_cache::MAX_CACHE_ENTRIES;
|
||||
use solana_sdk::timing;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::{BufReader, BufWriter, Error, ErrorKind};
|
||||
use std::ops::Index;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct SnapshotConfig {
|
||||
// Generate a new snapshot every this many slots
|
||||
pub snapshot_interval_slots: usize,
|
||||
|
||||
// Where to store the latest packaged snapshot
|
||||
pub snapshot_package_output_path: PathBuf,
|
||||
|
||||
// Where to place the snapshots for recent slots
|
||||
pub snapshot_path: PathBuf,
|
||||
}
|
||||
|
||||
pub struct BankForks {
|
||||
banks: HashMap<u64, Arc<Bank>>,
|
||||
working_bank: Arc<Bank>,
|
||||
root: u64,
|
||||
slots: HashSet<u64>,
|
||||
snapshot_path: Option<String>,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
slots_since_snapshot: Vec<u64>,
|
||||
confidence: HashMap<u64, Confidence>,
|
||||
}
|
||||
|
||||
@ -71,8 +83,8 @@ impl BankForks {
|
||||
banks,
|
||||
working_bank,
|
||||
root: 0,
|
||||
slots: HashSet::new(),
|
||||
snapshot_path: None,
|
||||
snapshot_config: None,
|
||||
slots_since_snapshot: vec![bank_slot],
|
||||
confidence: HashMap::new(),
|
||||
}
|
||||
}
|
||||
@ -126,18 +138,29 @@ impl BankForks {
|
||||
self.banks.get(&bank_slot)
|
||||
}
|
||||
|
||||
pub fn new_from_banks(initial_banks: &[Arc<Bank>], root: u64) -> Self {
|
||||
pub fn new_from_banks(initial_forks: &[Arc<Bank>], rooted_path: Vec<u64>) -> Self {
|
||||
let mut banks = HashMap::new();
|
||||
let working_bank = initial_banks[0].clone();
|
||||
for bank in initial_banks {
|
||||
let working_bank = initial_forks[0].clone();
|
||||
|
||||
// Iterate through the heads of all the different forks
|
||||
for bank in initial_forks {
|
||||
banks.insert(bank.slot(), bank.clone());
|
||||
let parents = bank.parents();
|
||||
for parent in parents {
|
||||
if banks.contains_key(&parent.slot()) {
|
||||
// All ancestors have already been inserted by another fork
|
||||
break;
|
||||
}
|
||||
banks.insert(parent.slot(), parent.clone());
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
root,
|
||||
root: *rooted_path.last().unwrap(),
|
||||
banks,
|
||||
working_bank,
|
||||
slots: HashSet::new(),
|
||||
snapshot_path: None,
|
||||
snapshot_config: None,
|
||||
slots_since_snapshot: rooted_path,
|
||||
confidence: HashMap::new(),
|
||||
}
|
||||
}
|
||||
@ -156,7 +179,7 @@ impl BankForks {
|
||||
self.working_bank.clone()
|
||||
}
|
||||
|
||||
pub fn set_root(&mut self, root: u64) {
|
||||
pub fn set_root(&mut self, root: u64, snapshot_package_sender: &Option<SnapshotPackageSender>) {
|
||||
self.root = root;
|
||||
let set_root_start = Instant::now();
|
||||
let root_bank = self
|
||||
@ -168,8 +191,51 @@ impl BankForks {
|
||||
.last()
|
||||
.map(|bank| bank.transaction_count())
|
||||
.unwrap_or(0);
|
||||
|
||||
if self.snapshot_config.is_some() && snapshot_package_sender.is_some() {
|
||||
let new_rooted_path = root_bank
|
||||
.parents()
|
||||
.into_iter()
|
||||
.map(|p| p.slot())
|
||||
.rev()
|
||||
.skip(1);
|
||||
self.slots_since_snapshot.extend(new_rooted_path);
|
||||
self.slots_since_snapshot.push(root);
|
||||
if self.slots_since_snapshot.len() > MAX_CACHE_ENTRIES {
|
||||
let num_to_remove = self.slots_since_snapshot.len() - MAX_CACHE_ENTRIES;
|
||||
self.slots_since_snapshot.drain(0..num_to_remove);
|
||||
}
|
||||
}
|
||||
|
||||
root_bank.squash();
|
||||
let new_tx_count = root_bank.transaction_count();
|
||||
|
||||
// Generate a snapshot if snapshots are configured and it's been an appropriate number
|
||||
// of banks since the last snapshot
|
||||
if self.snapshot_config.is_some() && snapshot_package_sender.is_some() {
|
||||
let config = self.snapshot_config.as_ref().unwrap();
|
||||
info!("setting snapshot root: {}", root);
|
||||
if root - self.slots_since_snapshot[0] >= config.snapshot_interval_slots as u64 {
|
||||
let mut snapshot_time = Measure::start("total-snapshot-ms");
|
||||
let r = self.generate_snapshot(
|
||||
root,
|
||||
&self.slots_since_snapshot[1..],
|
||||
snapshot_package_sender.as_ref().unwrap(),
|
||||
snapshot_utils::get_snapshot_tar_path(&config.snapshot_package_output_path),
|
||||
);
|
||||
if r.is_err() {
|
||||
warn!("Error generating snapshot for bank: {}, err: {:?}", root, r);
|
||||
} else {
|
||||
self.slots_since_snapshot = vec![root];
|
||||
}
|
||||
|
||||
// Cleanup outdated snapshots
|
||||
self.purge_old_snapshots();
|
||||
snapshot_time.stop();
|
||||
inc_new_counter_info!("total-snapshot-setup-ms", snapshot_time.as_ms() as usize);
|
||||
}
|
||||
}
|
||||
|
||||
self.prune_non_root(root);
|
||||
|
||||
inc_new_counter_info!(
|
||||
@ -186,30 +252,65 @@ impl BankForks {
|
||||
self.root
|
||||
}
|
||||
|
||||
fn prune_non_root(&mut self, root: u64) {
|
||||
let slots: HashSet<u64> = self
|
||||
.banks
|
||||
.iter()
|
||||
.filter(|(_, b)| b.is_frozen())
|
||||
.map(|(k, _)| *k)
|
||||
.collect();
|
||||
let descendants = self.descendants();
|
||||
self.banks
|
||||
.retain(|slot, _| descendants[&root].contains(slot));
|
||||
self.confidence
|
||||
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
|
||||
if self.snapshot_path.is_some() {
|
||||
let diff: HashSet<_> = slots.symmetric_difference(&self.slots).collect();
|
||||
trace!("prune non root {} - {:?}", root, diff);
|
||||
for slot in diff.iter() {
|
||||
if **slot > root {
|
||||
let _ = self.add_snapshot(**slot, root);
|
||||
} else {
|
||||
BankForks::remove_snapshot(**slot, &self.snapshot_path);
|
||||
}
|
||||
pub fn slots_since_snapshot(&self) -> &[u64] {
|
||||
&self.slots_since_snapshot
|
||||
}
|
||||
|
||||
fn purge_old_snapshots(&self) {
|
||||
// Remove outdated snapshots
|
||||
let config = self.snapshot_config.as_ref().unwrap();
|
||||
let slot_snapshot_paths = snapshot_utils::get_snapshot_paths(&config.snapshot_path);
|
||||
let num_to_remove = slot_snapshot_paths.len().saturating_sub(MAX_CACHE_ENTRIES);
|
||||
for slot_files in &slot_snapshot_paths[..num_to_remove] {
|
||||
let r = snapshot_utils::remove_snapshot(slot_files.slot, &config.snapshot_path);
|
||||
if r.is_err() {
|
||||
warn!("Couldn't remove snapshot at: {:?}", config.snapshot_path);
|
||||
}
|
||||
}
|
||||
self.slots = slots.clone();
|
||||
}
|
||||
|
||||
fn generate_snapshot<P: AsRef<Path>>(
|
||||
&self,
|
||||
root: u64,
|
||||
slots_since_snapshot: &[u64],
|
||||
snapshot_package_sender: &SnapshotPackageSender,
|
||||
tar_output_file: P,
|
||||
) -> Result<()> {
|
||||
let config = self.snapshot_config.as_ref().unwrap();
|
||||
|
||||
// Add a snapshot for the new root
|
||||
let bank = self
|
||||
.get(root)
|
||||
.cloned()
|
||||
.expect("root must exist in BankForks");
|
||||
snapshot_utils::add_snapshot(&config.snapshot_path, &bank, slots_since_snapshot)?;
|
||||
|
||||
// Package the relevant snapshots
|
||||
let slot_snapshot_paths = snapshot_utils::get_snapshot_paths(&config.snapshot_path);
|
||||
|
||||
// We only care about the last MAX_CACHE_ENTRIES snapshots of roots because
|
||||
// the status cache of anything older is thrown away by the bank in
|
||||
// status_cache.prune_roots()
|
||||
let start = slot_snapshot_paths.len().saturating_sub(MAX_CACHE_ENTRIES);
|
||||
let package = snapshot_utils::package_snapshot(
|
||||
&bank,
|
||||
&slot_snapshot_paths[start..],
|
||||
tar_output_file,
|
||||
&config.snapshot_path,
|
||||
)?;
|
||||
|
||||
// Send the package to the packaging thread
|
||||
snapshot_package_sender.send(package)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn prune_non_root(&mut self, root: u64) {
|
||||
let descendants = self.descendants();
|
||||
self.banks
|
||||
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
|
||||
self.confidence
|
||||
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
|
||||
}
|
||||
|
||||
pub fn cache_fork_confidence(
|
||||
@ -247,189 +348,12 @@ impl BankForks {
|
||||
self.confidence.get(&fork)
|
||||
}
|
||||
|
||||
fn get_io_error(error: &str) -> Error {
|
||||
warn!("BankForks error: {:?}", error);
|
||||
Error::new(ErrorKind::Other, error)
|
||||
pub fn set_snapshot_config(&mut self, snapshot_config: SnapshotConfig) {
|
||||
self.snapshot_config = Some(snapshot_config);
|
||||
}
|
||||
|
||||
fn get_snapshot_path(path: &Option<String>) -> PathBuf {
|
||||
Path::new(&path.clone().unwrap()).to_path_buf()
|
||||
}
|
||||
|
||||
pub fn add_snapshot(&self, slot: u64, root: u64) -> Result<(), Error> {
|
||||
let path = BankForks::get_snapshot_path(&self.snapshot_path);
|
||||
fs::create_dir_all(path.clone())?;
|
||||
let bank_file = format!("{}", slot);
|
||||
let bank_file_path = path.join(bank_file);
|
||||
trace!("path: {:?}", bank_file_path);
|
||||
let file = File::create(bank_file_path)?;
|
||||
let mut stream = BufWriter::new(file);
|
||||
let bank_slot = self.get(slot);
|
||||
if bank_slot.is_none() {
|
||||
return Err(BankForks::get_io_error("bank_forks get error"));
|
||||
}
|
||||
let bank = bank_slot.unwrap().clone();
|
||||
serialize_into(&mut stream, &*bank)
|
||||
.map_err(|_| BankForks::get_io_error("serialize bank error"))?;
|
||||
let mut parent_slot: u64 = 0;
|
||||
if let Some(parent_bank) = bank.parent() {
|
||||
parent_slot = parent_bank.slot();
|
||||
}
|
||||
serialize_into(&mut stream, &parent_slot)
|
||||
.map_err(|_| BankForks::get_io_error("serialize bank parent error"))?;
|
||||
serialize_into(&mut stream, &root)
|
||||
.map_err(|_| BankForks::get_io_error("serialize root error"))?;
|
||||
serialize_into(&mut stream, &bank.src)
|
||||
.map_err(|_| BankForks::get_io_error("serialize bank status cache error"))?;
|
||||
serialize_into(&mut stream, &bank.rc)
|
||||
.map_err(|_| BankForks::get_io_error("serialize bank accounts error"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove_snapshot(slot: u64, path: &Option<String>) {
|
||||
let path = BankForks::get_snapshot_path(path);
|
||||
let bank_file = format!("{}", slot);
|
||||
let bank_file_path = path.join(bank_file);
|
||||
let _ = fs::remove_file(bank_file_path);
|
||||
}
|
||||
|
||||
pub fn set_snapshot_config(&mut self, path: Option<String>) {
|
||||
self.snapshot_path = path;
|
||||
}
|
||||
|
||||
fn load_snapshots(
|
||||
names: &[u64],
|
||||
bank0: &mut Bank,
|
||||
bank_maps: &mut Vec<(u64, u64, Bank)>,
|
||||
status_cache_rc: &StatusCacheRc,
|
||||
snapshot_path: &Option<String>,
|
||||
) -> Option<u64> {
|
||||
let path = BankForks::get_snapshot_path(snapshot_path);
|
||||
let mut bank_root: Option<u64> = None;
|
||||
|
||||
for bank_slot in names.iter().rev() {
|
||||
let bank_path = format!("{}", bank_slot);
|
||||
let bank_file_path = path.join(bank_path.clone());
|
||||
info!("Load from {:?}", bank_file_path);
|
||||
let file = File::open(bank_file_path);
|
||||
if file.is_err() {
|
||||
warn!("Snapshot file open failed for {}", bank_slot);
|
||||
continue;
|
||||
}
|
||||
let file = file.unwrap();
|
||||
let mut stream = BufReader::new(file);
|
||||
let bank: Result<Bank, std::io::Error> = deserialize_from(&mut stream)
|
||||
.map_err(|_| BankForks::get_io_error("deserialize bank error"));
|
||||
let slot: Result<u64, std::io::Error> = deserialize_from(&mut stream)
|
||||
.map_err(|_| BankForks::get_io_error("deserialize bank parent error"));
|
||||
let parent_slot = if slot.is_ok() { slot.unwrap() } else { 0 };
|
||||
let root: Result<u64, std::io::Error> = deserialize_from(&mut stream)
|
||||
.map_err(|_| BankForks::get_io_error("deserialize root error"));
|
||||
let status_cache: Result<StatusCacheRc, std::io::Error> = deserialize_from(&mut stream)
|
||||
.map_err(|_| BankForks::get_io_error("deserialize bank status cache error"));
|
||||
if bank_root.is_none() && bank0.rc.update_from_stream(&mut stream).is_ok() {
|
||||
bank_root = Some(root.unwrap());
|
||||
}
|
||||
if bank_root.is_some() {
|
||||
match bank {
|
||||
Ok(v) => {
|
||||
if status_cache.is_ok() {
|
||||
status_cache_rc.append(&status_cache.unwrap());
|
||||
}
|
||||
bank_maps.push((*bank_slot, parent_slot, v));
|
||||
}
|
||||
Err(_) => warn!("Load snapshot failed for {}", bank_slot),
|
||||
}
|
||||
} else {
|
||||
BankForks::remove_snapshot(*bank_slot, snapshot_path);
|
||||
warn!("Load snapshot rc failed for {}", bank_slot);
|
||||
}
|
||||
}
|
||||
|
||||
bank_root
|
||||
}
|
||||
|
||||
fn setup_banks(
|
||||
bank_maps: &mut Vec<(u64, u64, Bank)>,
|
||||
bank_rc: &BankRc,
|
||||
status_cache_rc: &StatusCacheRc,
|
||||
) -> (HashMap<u64, Arc<Bank>>, HashSet<u64>, u64) {
|
||||
let mut banks = HashMap::new();
|
||||
let mut slots = HashSet::new();
|
||||
let (last_slot, last_parent_slot, mut last_bank) = bank_maps.remove(0);
|
||||
last_bank.set_bank_rc(&bank_rc, &status_cache_rc);
|
||||
|
||||
while let Some((slot, parent_slot, mut bank)) = bank_maps.pop() {
|
||||
bank.set_bank_rc(&bank_rc, &status_cache_rc);
|
||||
if parent_slot != 0 {
|
||||
if let Some(parent) = banks.get(&parent_slot) {
|
||||
bank.set_parent(parent);
|
||||
}
|
||||
}
|
||||
if slot > 0 {
|
||||
banks.insert(slot, Arc::new(bank));
|
||||
slots.insert(slot);
|
||||
}
|
||||
}
|
||||
if last_parent_slot != 0 {
|
||||
if let Some(parent) = banks.get(&last_parent_slot) {
|
||||
last_bank.set_parent(parent);
|
||||
}
|
||||
}
|
||||
banks.insert(last_slot, Arc::new(last_bank));
|
||||
slots.insert(last_slot);
|
||||
|
||||
(banks, slots, last_slot)
|
||||
}
|
||||
|
||||
pub fn load_from_snapshot(
|
||||
genesis_block: &GenesisBlock,
|
||||
account_paths: Option<String>,
|
||||
snapshot_path: &Option<String>,
|
||||
) -> Result<Self, Error> {
|
||||
let path = BankForks::get_snapshot_path(snapshot_path);
|
||||
let paths = fs::read_dir(path)?;
|
||||
let mut names = paths
|
||||
.filter_map(|entry| {
|
||||
entry.ok().and_then(|e| {
|
||||
e.path()
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str().map(|s| s.parse::<u64>().unwrap()))
|
||||
})
|
||||
})
|
||||
.collect::<Vec<u64>>();
|
||||
|
||||
names.sort();
|
||||
let mut bank_maps = vec![];
|
||||
let status_cache_rc = StatusCacheRc::default();
|
||||
let id = (names[names.len() - 1] + 1) as usize;
|
||||
let mut bank0 =
|
||||
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
|
||||
bank0.freeze();
|
||||
let bank_root = BankForks::load_snapshots(
|
||||
&names,
|
||||
&mut bank0,
|
||||
&mut bank_maps,
|
||||
&status_cache_rc,
|
||||
snapshot_path,
|
||||
);
|
||||
if bank_maps.is_empty() || bank_root.is_none() {
|
||||
BankForks::remove_snapshot(0, snapshot_path);
|
||||
return Err(Error::new(ErrorKind::Other, "no snapshots loaded"));
|
||||
}
|
||||
|
||||
let root = bank_root.unwrap();
|
||||
let (banks, slots, last_slot) =
|
||||
BankForks::setup_banks(&mut bank_maps, &bank0.rc, &status_cache_rc);
|
||||
let working_bank = banks[&last_slot].clone();
|
||||
Ok(BankForks {
|
||||
banks,
|
||||
working_bank,
|
||||
root,
|
||||
slots,
|
||||
snapshot_path: snapshot_path.clone(),
|
||||
confidence: HashMap::new(),
|
||||
})
|
||||
pub fn snapshot_config(&self) -> &Option<SnapshotConfig> {
|
||||
&self.snapshot_config
|
||||
}
|
||||
}
|
||||
|
||||
@ -437,12 +361,18 @@ impl BankForks {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
|
||||
use solana_sdk::hash::Hash;
|
||||
use crate::service::Service;
|
||||
use crate::snapshot_package::SnapshotPackagerService;
|
||||
use fs_extra::dir::CopyOptions;
|
||||
use itertools::Itertools;
|
||||
use solana_sdk::hash::{hashv, Hash};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
use std::env;
|
||||
use std::fs::remove_dir_all;
|
||||
use std::fs;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks() {
|
||||
@ -552,120 +482,361 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
struct TempPaths {
|
||||
pub paths: String,
|
||||
}
|
||||
fn restore_from_snapshot(old_bank_forks: &BankForks, account_paths: String) {
|
||||
let (snapshot_path, snapshot_package_output_path) = old_bank_forks
|
||||
.snapshot_config
|
||||
.as_ref()
|
||||
.map(|c| (&c.snapshot_path, &c.snapshot_package_output_path))
|
||||
.unwrap();
|
||||
|
||||
impl TempPaths {
|
||||
fn remove_all(&self) {
|
||||
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
|
||||
paths.iter().for_each(|p| {
|
||||
let _ignored = remove_dir_all(p);
|
||||
});
|
||||
let deserialized_bank = snapshot_utils::bank_from_archive(
|
||||
account_paths,
|
||||
old_bank_forks.snapshot_config.as_ref().unwrap(),
|
||||
snapshot_utils::get_snapshot_tar_path(snapshot_package_output_path),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let bank = old_bank_forks
|
||||
.banks
|
||||
.get(&deserialized_bank.slot())
|
||||
.unwrap()
|
||||
.clone();
|
||||
bank.compare_bank(&deserialized_bank);
|
||||
|
||||
let slot_snapshot_paths = snapshot_utils::get_snapshot_paths(&snapshot_path);
|
||||
|
||||
for p in slot_snapshot_paths {
|
||||
snapshot_utils::remove_snapshot(p.slot, &snapshot_path).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! tmp_bank_accounts_name {
|
||||
() => {
|
||||
&format!("{}-{}", file!(), line!())
|
||||
};
|
||||
}
|
||||
// creates banks up to "last_slot" and runs the input function `f` on each bank created
|
||||
// also marks each bank as root and generates snapshots
|
||||
// finally tries to restore from the last bank's snapshot and compares the restored bank to the
|
||||
// `last_slot` bank
|
||||
fn run_bank_forks_snapshot_n<F>(last_slot: u64, f: F, set_root_interval: u64)
|
||||
where
|
||||
F: Fn(&mut Bank, &Keypair),
|
||||
{
|
||||
solana_logger::setup();
|
||||
// Set up snapshotting config
|
||||
let mut snapshot_test_config = setup_snapshot_test(1);
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! get_tmp_bank_accounts_path {
|
||||
() => {
|
||||
get_tmp_bank_accounts_path(tmp_bank_accounts_name!())
|
||||
};
|
||||
}
|
||||
let bank_forks = &mut snapshot_test_config.bank_forks;
|
||||
let accounts_dir = &snapshot_test_config.accounts_dir;
|
||||
let snapshot_config = &snapshot_test_config.snapshot_config;
|
||||
let mint_keypair = &snapshot_test_config.genesis_block_info.mint_keypair;
|
||||
|
||||
impl Drop for TempPaths {
|
||||
fn drop(&mut self) {
|
||||
self.remove_all()
|
||||
}
|
||||
}
|
||||
|
||||
fn get_paths_vec(paths: &str) -> Vec<String> {
|
||||
paths.split(',').map(|s| s.to_string()).collect()
|
||||
}
|
||||
|
||||
fn get_tmp_snapshots_path() -> TempPaths {
|
||||
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
|
||||
let path = format!("{}/snapshots", out_dir);
|
||||
TempPaths {
|
||||
paths: path.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_tmp_bank_accounts_path(paths: &str) -> TempPaths {
|
||||
let vpaths = get_paths_vec(paths);
|
||||
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
|
||||
let vpaths: Vec<_> = vpaths
|
||||
.iter()
|
||||
.map(|path| format!("{}/{}", out_dir, path))
|
||||
.collect();
|
||||
TempPaths {
|
||||
paths: vpaths.join(","),
|
||||
}
|
||||
}
|
||||
|
||||
fn restore_from_snapshot(
|
||||
genesis_block: &GenesisBlock,
|
||||
bank_forks: BankForks,
|
||||
account_paths: Option<String>,
|
||||
last_slot: u64,
|
||||
) {
|
||||
let new =
|
||||
BankForks::load_from_snapshot(&genesis_block, account_paths, &bank_forks.snapshot_path)
|
||||
.unwrap();
|
||||
for (slot, _) in new.banks.iter() {
|
||||
if *slot > 0 {
|
||||
let bank = bank_forks.banks.get(slot).unwrap().clone();
|
||||
let new_bank = new.banks.get(slot).unwrap();
|
||||
bank.compare_bank(&new_bank);
|
||||
let (s, _r) = channel();
|
||||
let sender = Some(s);
|
||||
for slot in 0..last_slot {
|
||||
let mut bank = Bank::new_from_parent(&bank_forks[slot], &Pubkey::default(), slot + 1);
|
||||
f(&mut bank, &mint_keypair);
|
||||
let bank = bank_forks.insert(bank);
|
||||
// Set root to make sure we don't end up with too many account storage entries
|
||||
// and to allow snapshotting of bank and the purging logic on status_cache to
|
||||
// kick in
|
||||
if slot % set_root_interval == 0 || slot == last_slot - 1 {
|
||||
bank_forks.set_root(bank.slot(), &sender);
|
||||
}
|
||||
}
|
||||
assert_eq!(new.working_bank().slot(), last_slot);
|
||||
for (slot, _) in new.banks.iter() {
|
||||
BankForks::remove_snapshot(*slot, &bank_forks.snapshot_path);
|
||||
}
|
||||
// Generate a snapshot package for last bank
|
||||
let last_bank = bank_forks.get(last_slot).unwrap();
|
||||
let slot_snapshot_paths =
|
||||
snapshot_utils::get_snapshot_paths(&snapshot_config.snapshot_path);
|
||||
let snapshot_package = snapshot_utils::package_snapshot(
|
||||
last_bank,
|
||||
&slot_snapshot_paths,
|
||||
snapshot_utils::get_snapshot_tar_path(&snapshot_config.snapshot_package_output_path),
|
||||
&snapshot_config.snapshot_path,
|
||||
)
|
||||
.unwrap();
|
||||
SnapshotPackagerService::package_snapshots(&snapshot_package).unwrap();
|
||||
|
||||
restore_from_snapshot(
|
||||
bank_forks,
|
||||
accounts_dir.path().to_str().unwrap().to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks_snapshot_n() {
|
||||
solana_logger::setup();
|
||||
let path = get_tmp_bank_accounts_path!();
|
||||
let spath = get_tmp_snapshots_path();
|
||||
let GenesisBlockInfo {
|
||||
genesis_block,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_block(10_000);
|
||||
path.remove_all();
|
||||
spath.remove_all();
|
||||
for index in 0..10 {
|
||||
let bank0 = Bank::new_with_paths(&genesis_block, Some(path.paths.clone()));
|
||||
bank0.freeze();
|
||||
let slot = bank0.slot();
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
bank_forks.set_snapshot_config(Some(spath.paths.clone()));
|
||||
bank_forks.add_snapshot(slot, 0).unwrap();
|
||||
for forks in 0..index {
|
||||
let bank = Bank::new_from_parent(&bank_forks[forks], &Pubkey::default(), forks + 1);
|
||||
// create banks upto slot 4 and create 1 new account in each bank. test that bank 4 snapshots
|
||||
// and restores correctly
|
||||
run_bank_forks_snapshot_n(
|
||||
4,
|
||||
|bank, mint_keypair| {
|
||||
let key1 = Keypair::new().pubkey();
|
||||
let tx = system_transaction::create_user_account(
|
||||
&mint_keypair,
|
||||
&key1,
|
||||
1,
|
||||
genesis_block.hash(),
|
||||
bank.last_blockhash(),
|
||||
);
|
||||
assert_eq!(bank.process_transaction(&tx), Ok(()));
|
||||
bank.freeze();
|
||||
let slot = bank.slot();
|
||||
bank_forks.insert(bank);
|
||||
bank_forks.add_snapshot(slot, 0).unwrap();
|
||||
},
|
||||
1,
|
||||
);
|
||||
}
|
||||
|
||||
fn goto_end_of_slot(bank: &mut Bank) {
|
||||
let mut tick_hash = bank.last_blockhash();
|
||||
loop {
|
||||
tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]);
|
||||
bank.register_tick(&tick_hash);
|
||||
if tick_hash == bank.last_blockhash() {
|
||||
bank.freeze();
|
||||
return;
|
||||
}
|
||||
restore_from_snapshot(&genesis_block, bank_forks, Some(path.paths.clone()), index);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks_status_cache_snapshot_n() {
|
||||
// create banks upto slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
|
||||
// this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
|
||||
// ahead. Also tests the status_cache purge and status cache snapshotting.
|
||||
// Makes sure that the last bank is restored correctly
|
||||
let key1 = Keypair::new().pubkey();
|
||||
let key2 = Keypair::new().pubkey();
|
||||
for set_root_interval in &[1, 4] {
|
||||
run_bank_forks_snapshot_n(
|
||||
(MAX_CACHE_ENTRIES * 2 + 1) as u64,
|
||||
|bank, mint_keypair| {
|
||||
let tx = system_transaction::transfer(
|
||||
&mint_keypair,
|
||||
&key1,
|
||||
1,
|
||||
bank.parent().unwrap().last_blockhash(),
|
||||
);
|
||||
assert_eq!(bank.process_transaction(&tx), Ok(()));
|
||||
let tx = system_transaction::transfer(
|
||||
&mint_keypair,
|
||||
&key2,
|
||||
1,
|
||||
bank.parent().unwrap().last_blockhash(),
|
||||
);
|
||||
assert_eq!(bank.process_transaction(&tx), Ok(()));
|
||||
goto_end_of_slot(bank);
|
||||
},
|
||||
*set_root_interval,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_concurrent_snapshot_packaging() {
|
||||
solana_logger::setup();
|
||||
|
||||
// Set up snapshotting config
|
||||
let mut snapshot_test_config = setup_snapshot_test(1);
|
||||
|
||||
let bank_forks = &mut snapshot_test_config.bank_forks;
|
||||
let accounts_dir = &snapshot_test_config.accounts_dir;
|
||||
let snapshots_dir = &snapshot_test_config.snapshot_dir;
|
||||
let snapshot_config = &snapshot_test_config.snapshot_config;
|
||||
let mint_keypair = &snapshot_test_config.genesis_block_info.mint_keypair;
|
||||
let genesis_block = &snapshot_test_config.genesis_block_info.genesis_block;
|
||||
|
||||
// Take snapshot of zeroth bank
|
||||
let bank0 = bank_forks.get(0).unwrap();
|
||||
snapshot_utils::add_snapshot(&snapshot_config.snapshot_path, bank0, &vec![]).unwrap();
|
||||
|
||||
// Set up snapshotting channels
|
||||
let (sender, receiver) = channel();
|
||||
let (fake_sender, _fake_receiver) = channel();
|
||||
|
||||
// Create next MAX_CACHE_ENTRIES + 2 banks and snapshots. Every bank will get snapshotted
|
||||
// and the snapshot purging logic will run on every snapshot taken. This means the three
|
||||
// (including snapshot for bank0 created above) earliest snapshots will get purged by the
|
||||
// time this loop is done.
|
||||
|
||||
// Also, make a saved copy of the state of the snapshot for a bank with
|
||||
// bank.slot == saved_slot, so we can use it for a correctness check later.
|
||||
let saved_snapshots_dir = TempDir::new().unwrap();
|
||||
let saved_accounts_dir = TempDir::new().unwrap();
|
||||
let saved_slot = 4;
|
||||
let saved_tar = snapshot_config
|
||||
.snapshot_package_output_path
|
||||
.join(saved_slot.to_string());
|
||||
for forks in 0..MAX_CACHE_ENTRIES + 2 {
|
||||
let bank = Bank::new_from_parent(
|
||||
&bank_forks[forks as u64],
|
||||
&Pubkey::default(),
|
||||
(forks + 1) as u64,
|
||||
);
|
||||
let slot = bank.slot();
|
||||
let key1 = Keypair::new().pubkey();
|
||||
let tx = system_transaction::create_user_account(
|
||||
&mint_keypair,
|
||||
&key1,
|
||||
1,
|
||||
genesis_block.hash(),
|
||||
);
|
||||
assert_eq!(bank.process_transaction(&tx), Ok(()));
|
||||
bank.freeze();
|
||||
bank_forks.insert(bank);
|
||||
|
||||
let package_sender = {
|
||||
if slot == saved_slot as u64 {
|
||||
// Only send one package on the real sender so that the packaging service
|
||||
// doesn't take forever to run the packaging logic on all MAX_CACHE_ENTRIES
|
||||
// later
|
||||
&sender
|
||||
} else {
|
||||
&fake_sender
|
||||
}
|
||||
};
|
||||
|
||||
bank_forks
|
||||
.generate_snapshot(
|
||||
slot,
|
||||
&vec![],
|
||||
&package_sender,
|
||||
snapshot_config
|
||||
.snapshot_package_output_path
|
||||
.join(slot.to_string()),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
if slot == saved_slot as u64 {
|
||||
let options = CopyOptions::new();
|
||||
fs_extra::dir::copy(accounts_dir, &saved_accounts_dir, &options).unwrap();
|
||||
let snapshot_paths: Vec<_> = fs::read_dir(&snapshot_config.snapshot_path)
|
||||
.unwrap()
|
||||
.filter_map(|entry| {
|
||||
let e = entry.unwrap();
|
||||
let file_path = e.path();
|
||||
let file_name = file_path.file_name().unwrap();
|
||||
file_name
|
||||
.to_str()
|
||||
.map(|s| s.parse::<u64>().ok().map(|_| file_path.clone()))
|
||||
.unwrap_or(None)
|
||||
})
|
||||
.collect();
|
||||
|
||||
for snapshot_path in snapshot_paths {
|
||||
fs_extra::dir::copy(&snapshot_path, &saved_snapshots_dir, &options).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Purge all the outdated snapshots, including the ones needed to generate the package
|
||||
// currently sitting in the channel
|
||||
bank_forks.purge_old_snapshots();
|
||||
let mut snapshot_paths = snapshot_utils::get_snapshot_paths(&snapshots_dir);
|
||||
snapshot_paths.sort();
|
||||
assert_eq!(
|
||||
snapshot_paths.iter().map(|path| path.slot).collect_vec(),
|
||||
(3..=MAX_CACHE_ENTRIES as u64 + 2).collect_vec()
|
||||
);
|
||||
|
||||
// Create a SnapshotPackagerService to create tarballs from all the pending
|
||||
// SnapshotPackage's on the channel. By the time this service starts, we have already
|
||||
// purged the first two snapshots, which are needed by every snapshot other than
|
||||
// the last two snapshots. However, the packaging service should still be able to
|
||||
// correctly construct the earlier snapshots because the SnapshotPackage's on the
|
||||
// channel hold hard links to these deleted snapshots. We verify this is the case below.
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let snapshot_packager_service = SnapshotPackagerService::new(receiver, &exit);
|
||||
|
||||
// Close the channel so that the package service will exit after reading all the
|
||||
// packages off the channel
|
||||
drop(sender);
|
||||
|
||||
// Wait for service to finish
|
||||
snapshot_packager_service
|
||||
.join()
|
||||
.expect("SnapshotPackagerService exited with error");
|
||||
|
||||
// Check the tar we cached the state for earlier was generated correctly
|
||||
snapshot_utils::tests::verify_snapshot_tar(
|
||||
saved_tar,
|
||||
saved_snapshots_dir.path(),
|
||||
saved_accounts_dir
|
||||
.path()
|
||||
.join(accounts_dir.path().file_name().unwrap()),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slots_since_snapshot() {
|
||||
solana_logger::setup();
|
||||
for add_root_interval in 1..10 {
|
||||
let (snapshot_sender, _snapshot_receiver) = channel();
|
||||
let num_set_roots = MAX_CACHE_ENTRIES * 5;
|
||||
// Make sure this test never clears bank.slots_since_snapshot
|
||||
let mut snapshot_test_config =
|
||||
setup_snapshot_test(add_root_interval * num_set_roots * 2);
|
||||
let mut current_bank = snapshot_test_config.bank_forks[0].clone();
|
||||
let snapshot_sender = Some(snapshot_sender);
|
||||
for _ in 0..num_set_roots {
|
||||
for _ in 0..add_root_interval {
|
||||
let new_slot = current_bank.slot() + 1;
|
||||
let new_bank =
|
||||
Bank::new_from_parent(¤t_bank, &Pubkey::default(), new_slot);
|
||||
snapshot_test_config.bank_forks.insert(new_bank);
|
||||
current_bank = snapshot_test_config.bank_forks[new_slot].clone();
|
||||
}
|
||||
snapshot_test_config
|
||||
.bank_forks
|
||||
.set_root(current_bank.slot(), &snapshot_sender);
|
||||
|
||||
let slots_since_snapshot_hashset: HashSet<_> = snapshot_test_config
|
||||
.bank_forks
|
||||
.slots_since_snapshot
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
assert_eq!(slots_since_snapshot_hashset, current_bank.src.roots());
|
||||
}
|
||||
|
||||
let expected_slots_since_snapshot =
|
||||
(0..=num_set_roots as u64 * add_root_interval as u64).collect_vec();
|
||||
let num_old_slots = expected_slots_since_snapshot.len() - MAX_CACHE_ENTRIES;
|
||||
|
||||
assert_eq!(
|
||||
snapshot_test_config.bank_forks.slots_since_snapshot(),
|
||||
&expected_slots_since_snapshot[num_old_slots..],
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
struct SnapshotTestConfig {
|
||||
accounts_dir: TempDir,
|
||||
snapshot_dir: TempDir,
|
||||
_snapshot_output_path: TempDir,
|
||||
snapshot_config: SnapshotConfig,
|
||||
bank_forks: BankForks,
|
||||
genesis_block_info: GenesisBlockInfo,
|
||||
}
|
||||
|
||||
fn setup_snapshot_test(snapshot_interval_slots: usize) -> SnapshotTestConfig {
|
||||
let accounts_dir = TempDir::new().unwrap();
|
||||
let snapshot_dir = TempDir::new().unwrap();
|
||||
let snapshot_output_path = TempDir::new().unwrap();
|
||||
let genesis_block_info = create_genesis_block(10_000);
|
||||
let bank0 = Bank::new_with_paths(
|
||||
&genesis_block_info.genesis_block,
|
||||
Some(accounts_dir.path().to_str().unwrap().to_string()),
|
||||
);
|
||||
bank0.freeze();
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
|
||||
let snapshot_config = SnapshotConfig {
|
||||
snapshot_interval_slots,
|
||||
snapshot_package_output_path: PathBuf::from(snapshot_output_path.path()),
|
||||
snapshot_path: PathBuf::from(snapshot_dir.path()),
|
||||
};
|
||||
bank_forks.set_snapshot_config(snapshot_config.clone());
|
||||
SnapshotTestConfig {
|
||||
accounts_dir,
|
||||
snapshot_dir,
|
||||
_snapshot_output_path: snapshot_output_path,
|
||||
snapshot_config,
|
||||
bank_forks,
|
||||
genesis_block_info,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ use solana_runtime::locked_accounts_results::LockedAccountsResults;
|
||||
use solana_sdk::poh_config::PohConfig;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::{
|
||||
self, DEFAULT_NUM_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
|
||||
self, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
|
||||
MAX_TRANSACTION_FORWARDING_DELAY,
|
||||
};
|
||||
use solana_sdk::transaction::{self, Transaction, TransactionError};
|
||||
@ -134,15 +134,13 @@ impl BankingStage {
|
||||
|
||||
fn forward_buffered_packets(
|
||||
socket: &std::net::UdpSocket,
|
||||
tpu_via_blobs: &std::net::SocketAddr,
|
||||
tpu_forwards: &std::net::SocketAddr,
|
||||
unprocessed_packets: &[PacketsAndOffsets],
|
||||
) -> std::io::Result<()> {
|
||||
let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets);
|
||||
inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
|
||||
let blobs = packet::packets_to_blobs(&packets);
|
||||
|
||||
for blob in blobs {
|
||||
socket.send_to(&blob.data[..blob.meta.size], tpu_via_blobs)?;
|
||||
for p in packets {
|
||||
socket.send_to(&p.data[..p.meta.size], &tpu_forwards)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -200,6 +198,7 @@ impl BankingStage {
|
||||
if processed < verified_txs_len {
|
||||
let next_leader = poh_recorder.lock().unwrap().next_slot_leader();
|
||||
// Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
|
||||
#[allow(clippy::while_let_on_iterator)]
|
||||
while let Some((msgs, unprocessed_indexes)) = buffered_packets_iter.next() {
|
||||
let unprocessed_indexes = Self::filter_unprocessed_packets(
|
||||
&bank,
|
||||
@ -316,7 +315,7 @@ impl BankingStage {
|
||||
.read()
|
||||
.unwrap()
|
||||
.lookup(&leader_pubkey)
|
||||
.map(|leader| leader.tpu_via_blobs)
|
||||
.map(|leader| leader.tpu_forwards)
|
||||
};
|
||||
|
||||
leader_addr.map_or(Ok(()), |leader_addr| {
|
||||
@ -428,7 +427,7 @@ impl BankingStage {
|
||||
txs: &[Transaction],
|
||||
results: &[transaction::Result<()>],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
) -> (Result<()>, Vec<usize>) {
|
||||
) -> (Result<usize>, Vec<usize>) {
|
||||
let mut processed_generation = Measure::start("record::process_generation");
|
||||
let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results
|
||||
.iter()
|
||||
@ -444,13 +443,11 @@ impl BankingStage {
|
||||
.unzip();
|
||||
|
||||
processed_generation.stop();
|
||||
debug!("processed: {} ", processed_transactions.len());
|
||||
let num_to_commit = processed_transactions.len();
|
||||
debug!("num_to_commit: {} ", num_to_commit);
|
||||
// unlock all the accounts with errors which are filtered by the above `filter_map`
|
||||
if !processed_transactions.is_empty() {
|
||||
inc_new_counter_warn!(
|
||||
"banking_stage-record_transactions",
|
||||
processed_transactions.len()
|
||||
);
|
||||
inc_new_counter_warn!("banking_stage-record_transactions", num_to_commit);
|
||||
|
||||
let mut hash_time = Measure::start("record::hash");
|
||||
let hash = hash_transactions(&processed_transactions[..]);
|
||||
@ -468,13 +465,16 @@ impl BankingStage {
|
||||
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)) => {
|
||||
// If record errors, add all the committable transactions (the ones
|
||||
// we just attempted to record) as retryable
|
||||
return (res, processed_transactions_indexes);
|
||||
return (
|
||||
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)),
|
||||
processed_transactions_indexes,
|
||||
);
|
||||
}
|
||||
Err(e) => panic!(format!("Poh recorder returned unexpected error: {:?}", e)),
|
||||
}
|
||||
poh_record.stop();
|
||||
}
|
||||
(Ok(()), vec![])
|
||||
(Ok(num_to_commit), vec![])
|
||||
}
|
||||
|
||||
fn process_and_record_transactions_locked(
|
||||
@ -482,7 +482,7 @@ impl BankingStage {
|
||||
txs: &[Transaction],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
lock_results: &LockedAccountsResults,
|
||||
) -> (Result<()>, Vec<usize>) {
|
||||
) -> (Result<usize>, Vec<usize>) {
|
||||
let mut load_execute_time = Measure::start("load_execute_time");
|
||||
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
|
||||
// the likelihood of any single thread getting starved and processing old ids.
|
||||
@ -494,20 +494,20 @@ impl BankingStage {
|
||||
|
||||
let freeze_lock = bank.freeze_lock();
|
||||
|
||||
let record_time = {
|
||||
let mut record_time = Measure::start("record_time");
|
||||
let (res, retryable_record_txs) =
|
||||
Self::record_transactions(bank.slot(), txs, &results, poh);
|
||||
retryable_txs.extend(retryable_record_txs);
|
||||
if res.is_err() {
|
||||
return (res, retryable_txs);
|
||||
}
|
||||
record_time.stop();
|
||||
record_time
|
||||
};
|
||||
let mut record_time = Measure::start("record_time");
|
||||
let (num_to_commit, retryable_record_txs) =
|
||||
Self::record_transactions(bank.slot(), txs, &results, poh);
|
||||
retryable_txs.extend(retryable_record_txs);
|
||||
if num_to_commit.is_err() {
|
||||
return (num_to_commit, retryable_txs);
|
||||
}
|
||||
record_time.stop();
|
||||
|
||||
let commit_time = {
|
||||
let mut commit_time = Measure::start("commit_time");
|
||||
let mut commit_time = Measure::start("commit_time");
|
||||
|
||||
let num_to_commit = num_to_commit.unwrap();
|
||||
|
||||
if num_to_commit != 0 {
|
||||
bank.commit_transactions(
|
||||
txs,
|
||||
&mut loaded_accounts,
|
||||
@ -515,14 +515,13 @@ impl BankingStage {
|
||||
tx_count,
|
||||
signature_count,
|
||||
);
|
||||
commit_time.stop();
|
||||
commit_time
|
||||
};
|
||||
}
|
||||
commit_time.stop();
|
||||
|
||||
drop(freeze_lock);
|
||||
|
||||
debug!(
|
||||
"bank: {} load_execute: {}us record: {}us commit: {}us txs_len: {}",
|
||||
"bank: {} process_and_record_locked: {}us record: {}us commit: {}us txs_len: {}",
|
||||
bank.slot(),
|
||||
load_execute_time.as_us(),
|
||||
record_time.as_us(),
|
||||
@ -530,7 +529,7 @@ impl BankingStage {
|
||||
txs.len(),
|
||||
);
|
||||
|
||||
(Ok(()), retryable_txs)
|
||||
(Ok(num_to_commit), retryable_txs)
|
||||
}
|
||||
|
||||
pub fn process_and_record_transactions(
|
||||
@ -538,7 +537,7 @@ impl BankingStage {
|
||||
txs: &[Transaction],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
chunk_offset: usize,
|
||||
) -> (Result<()>, Vec<usize>) {
|
||||
) -> (Result<usize>, Vec<usize>) {
|
||||
let mut lock_time = Measure::start("lock_time");
|
||||
// Once accounts are locked, other threads cannot encode transactions that will modify the
|
||||
// same account state
|
||||
@ -702,7 +701,7 @@ impl BankingStage {
|
||||
.saturating_sub(MAX_TRANSACTION_FORWARDING_DELAY)
|
||||
.saturating_sub(
|
||||
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET * bank.ticks_per_slot()
|
||||
/ DEFAULT_NUM_TICKS_PER_SECOND) as usize,
|
||||
/ DEFAULT_TICKS_PER_SECOND) as usize,
|
||||
),
|
||||
&mut error_counters,
|
||||
);
|
||||
@ -851,6 +850,7 @@ impl BankingStage {
|
||||
if processed < verified_txs_len {
|
||||
let next_leader = poh.lock().unwrap().next_slot_leader();
|
||||
// Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
|
||||
#[allow(clippy::while_let_on_iterator)]
|
||||
while let Some((msgs, vers)) = mms_iter.next() {
|
||||
let packet_indexes = Self::generate_packet_indexes(vers);
|
||||
let unprocessed_indexes = Self::filter_unprocessed_packets(
|
||||
@ -1157,6 +1157,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_banking_stage_entryfication() {
|
||||
solana_logger::setup();
|
||||
// In this attack we'll demonstrate that a verifier can interpret the ledger
|
||||
@ -1296,7 +1297,12 @@ mod tests {
|
||||
];
|
||||
|
||||
let mut results = vec![Ok(()), Ok(())];
|
||||
BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder);
|
||||
let _ = BankingStage::record_transactions(
|
||||
bank.slot(),
|
||||
&transactions,
|
||||
&results,
|
||||
&poh_recorder,
|
||||
);
|
||||
let (_, entries) = entry_receiver.recv().unwrap();
|
||||
assert_eq!(entries[0].0.transactions.len(), transactions.len());
|
||||
|
||||
|
@ -1,11 +1,15 @@
|
||||
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
|
||||
|
||||
use crate::recycler::Recycler;
|
||||
use crate::result;
|
||||
use crate::result::Error;
|
||||
use crate::service::Service;
|
||||
use crate::streamer::{self, BlobSender};
|
||||
use crate::streamer::{self, BlobSender, PacketReceiver, PacketSender};
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::{channel, RecvTimeoutError};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{self, JoinHandle};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
|
||||
pub struct BlobFetchStage {
|
||||
thread_hdls: Vec<JoinHandle<()>>,
|
||||
@ -27,6 +31,79 @@ impl BlobFetchStage {
|
||||
|
||||
Self { thread_hdls }
|
||||
}
|
||||
|
||||
fn handle_forwarded_packets(
|
||||
recvr: &PacketReceiver,
|
||||
sendr: &PacketSender,
|
||||
) -> result::Result<()> {
|
||||
let msgs = recvr.recv()?;
|
||||
let mut batch = vec![msgs];
|
||||
while let Ok(more) = recvr.try_recv() {
|
||||
batch.push(more);
|
||||
}
|
||||
|
||||
batch
|
||||
.iter_mut()
|
||||
.for_each(|b| b.packets.iter_mut().for_each(|p| p.meta.forward = true));
|
||||
|
||||
for packets in batch {
|
||||
if sendr.send(packets).is_err() {
|
||||
return Err(Error::SendError);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new_multi_socket_packet(
|
||||
sockets: Vec<Arc<UdpSocket>>,
|
||||
forward_sockets: Vec<Arc<UdpSocket>>,
|
||||
sender: &PacketSender,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let recycler = Recycler::default();
|
||||
let tvu_threads = sockets.into_iter().map(|socket| {
|
||||
streamer::receiver(
|
||||
socket,
|
||||
&exit,
|
||||
sender.clone(),
|
||||
recycler.clone(),
|
||||
"blob_fetch_stage",
|
||||
)
|
||||
});
|
||||
|
||||
let (forward_sender, forward_receiver) = channel();
|
||||
let tvu_forwards_threads = forward_sockets.into_iter().map(|socket| {
|
||||
streamer::receiver(
|
||||
socket,
|
||||
&exit,
|
||||
forward_sender.clone(),
|
||||
recycler.clone(),
|
||||
"blob_fetch_stage",
|
||||
)
|
||||
});
|
||||
|
||||
let sender = sender.clone();
|
||||
let fwd_thread_hdl = Builder::new()
|
||||
.name("solana-tvu-fetch-stage-fwd-rcvr".to_string())
|
||||
.spawn(move || loop {
|
||||
if let Err(e) = Self::handle_forwarded_packets(&forward_receiver, &sender) {
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
Error::RecvError(_) => break,
|
||||
Error::SendError => break,
|
||||
_ => error!("{:?}", e),
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let mut thread_hdls: Vec<_> = tvu_threads.chain(tvu_forwards_threads).collect();
|
||||
thread_hdls.push(fwd_thread_hdl);
|
||||
|
||||
Self { thread_hdls }
|
||||
}
|
||||
}
|
||||
|
||||
impl Service for BlobFetchStage {
|
||||
|
@ -10,6 +10,7 @@ use serde_json::json;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::cell::RefCell;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub trait EntryWriter: std::fmt::Debug {
|
||||
fn write(&self, payload: String) -> Result<()>;
|
||||
@ -41,7 +42,7 @@ impl EntryVec {
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EntrySocket {
|
||||
socket: String,
|
||||
unix_socket: PathBuf,
|
||||
}
|
||||
|
||||
impl EntryWriter for EntrySocket {
|
||||
@ -50,11 +51,10 @@ impl EntryWriter for EntrySocket {
|
||||
use std::io::prelude::*;
|
||||
use std::net::Shutdown;
|
||||
use std::os::unix::net::UnixStream;
|
||||
use std::path::Path;
|
||||
|
||||
const MESSAGE_TERMINATOR: &str = "\n";
|
||||
|
||||
let mut socket = UnixStream::connect(Path::new(&self.socket))?;
|
||||
let mut socket = UnixStream::connect(&self.unix_socket)?;
|
||||
socket.write_all(payload.as_bytes())?;
|
||||
socket.write_all(MESSAGE_TERMINATOR.as_bytes())?;
|
||||
socket.shutdown(Shutdown::Write)?;
|
||||
@ -144,9 +144,11 @@ where
|
||||
pub type SocketBlockstream = Blockstream<EntrySocket>;
|
||||
|
||||
impl SocketBlockstream {
|
||||
pub fn new(socket: String) -> Self {
|
||||
pub fn new(unix_socket: &Path) -> Self {
|
||||
Blockstream {
|
||||
output: EntrySocket { socket },
|
||||
output: EntrySocket {
|
||||
unix_socket: unix_socket.to_path_buf(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -154,7 +156,7 @@ impl SocketBlockstream {
|
||||
pub type MockBlockstream = Blockstream<EntryVec>;
|
||||
|
||||
impl MockBlockstream {
|
||||
pub fn new(_: String) -> Self {
|
||||
pub fn new(_: &Path) -> Self {
|
||||
Blockstream {
|
||||
output: EntryVec::new(),
|
||||
}
|
||||
@ -183,6 +185,7 @@ mod test {
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn test_serialize_transactions() {
|
||||
@ -205,7 +208,7 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_blockstream() -> () {
|
||||
let blockstream = MockBlockstream::new("test_stream".to_string());
|
||||
let blockstream = MockBlockstream::new(&PathBuf::from("test_stream"));
|
||||
let ticks_per_slot = 5;
|
||||
|
||||
let mut blockhash = Hash::default();
|
||||
|
@ -11,6 +11,7 @@ use crate::blocktree::Blocktree;
|
||||
use crate::result::{Error, Result};
|
||||
use crate::service::Service;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
use std::sync::Arc;
|
||||
@ -26,10 +27,10 @@ impl BlockstreamService {
|
||||
pub fn new(
|
||||
slot_full_receiver: Receiver<(u64, Pubkey)>,
|
||||
blocktree: Arc<Blocktree>,
|
||||
blockstream_socket: String,
|
||||
unix_socket: &Path,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let mut blockstream = Blockstream::new(blockstream_socket);
|
||||
let mut blockstream = Blockstream::new(unix_socket);
|
||||
let exit = exit.clone();
|
||||
let t_blockstream = Builder::new()
|
||||
.name("solana-blockstream".to_string())
|
||||
@ -69,7 +70,7 @@ impl BlockstreamService {
|
||||
.iter()
|
||||
.filter(|entry| entry.is_tick())
|
||||
.fold(0, |acc, _| acc + 1);
|
||||
let mut tick_height = if slot > 0 {
|
||||
let mut tick_height = if slot > 0 && ticks_per_slot > 0 {
|
||||
ticks_per_slot * slot - 1
|
||||
} else {
|
||||
0
|
||||
@ -116,6 +117,7 @@ mod test {
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[test]
|
||||
@ -133,7 +135,7 @@ mod test {
|
||||
let blocktree = Blocktree::open(&ledger_path).unwrap();
|
||||
|
||||
// Set up blockstream
|
||||
let mut blockstream = Blockstream::new("test_stream".to_string());
|
||||
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
|
||||
|
||||
// Set up dummy channel to receive a full-slot notification
|
||||
let (slot_full_sender, slot_full_receiver) = channel();
|
||||
@ -159,7 +161,7 @@ mod test {
|
||||
let expected_tick_heights = [5, 6, 7, 8, 8, 9];
|
||||
|
||||
blocktree
|
||||
.write_entries(1, 0, 0, ticks_per_slot, &entries)
|
||||
.write_entries_using_shreds(1, 0, 0, ticks_per_slot, None, true, &entries)
|
||||
.unwrap();
|
||||
|
||||
slot_full_sender.send((1, leader_pubkey)).unwrap();
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -44,6 +44,14 @@ pub mod columns {
|
||||
#[derive(Debug)]
|
||||
/// The index column
|
||||
pub struct Index;
|
||||
|
||||
#[derive(Debug)]
|
||||
/// The shred data column
|
||||
pub struct ShredData;
|
||||
|
||||
#[derive(Debug)]
|
||||
/// The shred erasure code column
|
||||
pub struct ShredCode;
|
||||
}
|
||||
|
||||
pub trait Backend: Sized + Send + Sync {
|
||||
|
@ -1,6 +1,7 @@
|
||||
use crate::erasure::ErasureConfig;
|
||||
use solana_metrics::datapoint;
|
||||
use std::{collections::BTreeSet, ops::RangeBounds};
|
||||
use std::cmp::Ordering;
|
||||
use std::{collections::BTreeSet, ops::Range, ops::RangeBounds};
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
|
||||
// The Meta column family
|
||||
@ -27,6 +28,51 @@ pub struct SlotMeta {
|
||||
pub is_connected: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
|
||||
pub struct ErasureSetRanges {
|
||||
r: Vec<Range<u64>>,
|
||||
}
|
||||
|
||||
impl ErasureSetRanges {
|
||||
pub fn insert(&mut self, start: u64, end: u64) -> Result<usize, Range<u64>> {
|
||||
let range = if start < end {
|
||||
(start..end)
|
||||
} else {
|
||||
(end..start)
|
||||
};
|
||||
|
||||
match self.pos(range.start) {
|
||||
Ok(pos) => Err(self.r[pos].clone()),
|
||||
Err(pos) => {
|
||||
self.r.insert(pos, range);
|
||||
Ok(pos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn pos(&self, seek: u64) -> Result<usize, usize> {
|
||||
self.r.binary_search_by(|probe| {
|
||||
if probe.contains(&seek) {
|
||||
Ordering::Equal
|
||||
} else {
|
||||
probe.start.cmp(&seek)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn lookup(&self, seek: u64) -> Result<Range<u64>, usize> {
|
||||
self.pos(seek)
|
||||
.map(|pos| self.r[pos].clone())
|
||||
.or_else(|epos| {
|
||||
if epos < self.r.len() && self.r[epos].contains(&seek) {
|
||||
Ok(self.r[epos].clone())
|
||||
} else {
|
||||
Err(epos)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
/// Index recording presence/absence of blobs
|
||||
pub struct Index {
|
||||
@ -56,7 +102,7 @@ pub struct ErasureMeta {
|
||||
/// Size of shards in this erasure set
|
||||
pub size: usize,
|
||||
/// Erasure configuration for this erasure set
|
||||
config: ErasureConfig,
|
||||
pub config: ErasureConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@ -300,4 +346,50 @@ mod test {
|
||||
assert_eq!(e_meta.status(&index), DataFull);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_erasure_set_ranges() {
|
||||
let mut ranges = ErasureSetRanges::default();
|
||||
|
||||
// Test empty ranges
|
||||
(0..100 as u64).for_each(|i| {
|
||||
assert_eq!(ranges.lookup(i), Err(0));
|
||||
});
|
||||
|
||||
// Test adding one range and all boundary condition lookups
|
||||
assert_eq!(ranges.insert(5, 13), Ok(0));
|
||||
assert_eq!(ranges.lookup(0), Err(0));
|
||||
assert_eq!(ranges.lookup(4), Err(0));
|
||||
assert_eq!(ranges.lookup(5), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(12), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(13), Err(1));
|
||||
assert_eq!(ranges.lookup(100), Err(1));
|
||||
|
||||
// Test adding second range (with backwards values) and all boundary condition lookups
|
||||
assert_eq!(ranges.insert(55, 33), Ok(1));
|
||||
assert_eq!(ranges.lookup(0), Err(0));
|
||||
assert_eq!(ranges.lookup(4), Err(0));
|
||||
assert_eq!(ranges.lookup(5), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(12), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(13), Err(1));
|
||||
assert_eq!(ranges.lookup(32), Err(1));
|
||||
assert_eq!(ranges.lookup(33), Ok(33..55));
|
||||
assert_eq!(ranges.lookup(54), Ok(33..55));
|
||||
assert_eq!(ranges.lookup(55), Err(2));
|
||||
|
||||
// Add a third range between previous two ranges
|
||||
assert_eq!(ranges.insert(23, 30), Ok(1));
|
||||
assert_eq!(ranges.lookup(0), Err(0));
|
||||
assert_eq!(ranges.lookup(4), Err(0));
|
||||
assert_eq!(ranges.lookup(5), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(12), Ok(5..13));
|
||||
assert_eq!(ranges.lookup(13), Err(1));
|
||||
assert_eq!(ranges.lookup(23), Ok(23..30));
|
||||
assert_eq!(ranges.lookup(29), Ok(23..30));
|
||||
assert_eq!(ranges.lookup(30), Err(2));
|
||||
assert_eq!(ranges.lookup(32), Err(2));
|
||||
assert_eq!(ranges.lookup(33), Ok(33..55));
|
||||
assert_eq!(ranges.lookup(54), Ok(33..55));
|
||||
assert_eq!(ranges.lookup(55), Err(3));
|
||||
}
|
||||
}
|
||||
|
@ -33,7 +33,8 @@ impl Backend for Rocks {
|
||||
|
||||
fn open(path: &Path) -> Result<Rocks> {
|
||||
use crate::blocktree::db::columns::{
|
||||
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
|
||||
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
|
||||
SlotMeta,
|
||||
};
|
||||
|
||||
fs::create_dir_all(&path)?;
|
||||
@ -58,6 +59,10 @@ impl Backend for Rocks {
|
||||
ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
|
||||
let index_cf_descriptor =
|
||||
ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
|
||||
let shred_data_cf_descriptor =
|
||||
ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(ShredData::NAME));
|
||||
let shred_code_cf_descriptor =
|
||||
ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(ShredCode::NAME));
|
||||
|
||||
let cfs = vec![
|
||||
meta_cf_descriptor,
|
||||
@ -68,6 +73,8 @@ impl Backend for Rocks {
|
||||
orphans_cf_descriptor,
|
||||
root_cf_descriptor,
|
||||
index_cf_descriptor,
|
||||
shred_data_cf_descriptor,
|
||||
shred_code_cf_descriptor,
|
||||
];
|
||||
|
||||
// Open the database
|
||||
@ -78,7 +85,8 @@ impl Backend for Rocks {
|
||||
|
||||
fn columns(&self) -> Vec<&'static str> {
|
||||
use crate::blocktree::db::columns::{
|
||||
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
|
||||
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
|
||||
SlotMeta,
|
||||
};
|
||||
|
||||
vec![
|
||||
@ -90,6 +98,8 @@ impl Backend for Rocks {
|
||||
Orphans::NAME,
|
||||
Root::NAME,
|
||||
SlotMeta::NAME,
|
||||
ShredData::NAME,
|
||||
ShredCode::NAME,
|
||||
]
|
||||
}
|
||||
|
||||
@ -196,6 +206,53 @@ impl Column<Rocks> for cf::Data {
|
||||
}
|
||||
}
|
||||
|
||||
impl Column<Rocks> for cf::ShredCode {
|
||||
const NAME: &'static str = super::CODE_SHRED_CF;
|
||||
type Index = (u64, u64);
|
||||
|
||||
fn key(index: (u64, u64)) -> Vec<u8> {
|
||||
cf::ShredData::key(index)
|
||||
}
|
||||
|
||||
fn index(key: &[u8]) -> (u64, u64) {
|
||||
cf::ShredData::index(key)
|
||||
}
|
||||
|
||||
fn slot(index: Self::Index) -> Slot {
|
||||
index.0
|
||||
}
|
||||
|
||||
fn as_index(slot: Slot) -> Self::Index {
|
||||
(slot, 0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Column<Rocks> for cf::ShredData {
|
||||
const NAME: &'static str = super::DATA_SHRED_CF;
|
||||
type Index = (u64, u64);
|
||||
|
||||
fn key((slot, index): (u64, u64)) -> Vec<u8> {
|
||||
let mut key = vec![0; 16];
|
||||
BigEndian::write_u64(&mut key[..8], slot);
|
||||
BigEndian::write_u64(&mut key[8..16], index);
|
||||
key
|
||||
}
|
||||
|
||||
fn index(key: &[u8]) -> (u64, u64) {
|
||||
let slot = BigEndian::read_u64(&key[..8]);
|
||||
let index = BigEndian::read_u64(&key[8..16]);
|
||||
(slot, index)
|
||||
}
|
||||
|
||||
fn slot(index: Self::Index) -> Slot {
|
||||
index.0
|
||||
}
|
||||
|
||||
fn as_index(slot: Slot) -> Self::Index {
|
||||
(slot, 0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Column<Rocks> for cf::Index {
|
||||
const NAME: &'static str = super::INDEX_CF;
|
||||
type Index = u64;
|
||||
@ -407,11 +464,11 @@ impl std::convert::From<rocksdb::Error> for Error {
|
||||
}
|
||||
|
||||
fn get_cf_options(name: &'static str) -> Options {
|
||||
use crate::blocktree::db::columns::{Coding, Data};
|
||||
use crate::blocktree::db::columns::{Coding, Data, ShredCode, ShredData};
|
||||
|
||||
let mut options = Options::default();
|
||||
match name {
|
||||
Coding::NAME | Data::NAME => {
|
||||
Coding::NAME | Data::NAME | ShredCode::NAME | ShredData::NAME => {
|
||||
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
|
||||
options.set_max_write_buffer_number(8);
|
||||
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
|
||||
|
@ -29,16 +29,23 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
|
||||
.find(|x| self.blocktree.is_root(**x))
|
||||
.cloned();
|
||||
|
||||
rooted_slot.map(|rooted_slot| {
|
||||
let slot_meta = self
|
||||
.blocktree
|
||||
.meta(rooted_slot)
|
||||
.expect("Database failure, couldnt fetch SlotMeta")
|
||||
.expect("SlotMeta in iterator didn't exist");
|
||||
rooted_slot
|
||||
.map(|rooted_slot| {
|
||||
let slot_meta = self
|
||||
.blocktree
|
||||
.meta(rooted_slot)
|
||||
.expect("Database failure, couldnt fetch SlotMeta");
|
||||
|
||||
self.next_slots = slot_meta.next_slots.clone();
|
||||
(rooted_slot, slot_meta)
|
||||
})
|
||||
if slot_meta.is_none() {
|
||||
warn!("Rooted SlotMeta was deleted in between checking is_root and fetch");
|
||||
}
|
||||
|
||||
slot_meta.map(|slot_meta| {
|
||||
self.next_slots = slot_meta.next_slots.clone();
|
||||
(rooted_slot, slot_meta)
|
||||
})
|
||||
})
|
||||
.unwrap_or(None)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
use crate::bank_forks::BankForks;
|
||||
use crate::blocktree::Blocktree;
|
||||
use crate::blocktree::{Blocktree, SlotMeta};
|
||||
use crate::entry::{Entry, EntrySlice};
|
||||
use crate::leader_schedule_cache::LeaderScheduleCache;
|
||||
use rayon::prelude::*;
|
||||
@ -8,8 +8,8 @@ use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::locked_accounts_results::LockedAccountsResults;
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::timing::duration_as_ms;
|
||||
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::timing::{duration_as_ms, Slot, MAX_RECENT_BLOCKHASHES};
|
||||
use solana_sdk::transaction::Result;
|
||||
use std::result;
|
||||
use std::sync::Arc;
|
||||
@ -130,7 +130,6 @@ pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct BankForksInfo {
|
||||
pub bank_slot: u64,
|
||||
pub entry_height: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -143,151 +142,67 @@ pub fn process_blocktree(
|
||||
blocktree: &Blocktree,
|
||||
account_paths: Option<String>,
|
||||
verify_ledger: bool,
|
||||
dev_halt_at_slot: Option<Slot>,
|
||||
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
|
||||
let now = Instant::now();
|
||||
info!("processing ledger...");
|
||||
info!("processing ledger from bank 0...");
|
||||
|
||||
// Setup bank for slot 0
|
||||
let mut pending_slots = {
|
||||
let slot = 0;
|
||||
let bank = Arc::new(Bank::new_with_paths(&genesis_block, account_paths));
|
||||
let entry_height = 0;
|
||||
let last_entry_hash = bank.last_blockhash();
|
||||
let bank0 = Arc::new(Bank::new_with_paths(&genesis_block, account_paths));
|
||||
process_bank_0(&bank0, blocktree, verify_ledger)?;
|
||||
process_blocktree_from_root(blocktree, bank0, verify_ledger, dev_halt_at_slot)
|
||||
}
|
||||
|
||||
// Load the metadata for this slot
|
||||
let meta = blocktree
|
||||
.meta(slot)
|
||||
.map_err(|err| {
|
||||
warn!("Failed to load meta for slot {}: {:?}", slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?
|
||||
.unwrap();
|
||||
// Process blocktree from a known root bank
|
||||
pub fn process_blocktree_from_root(
|
||||
blocktree: &Blocktree,
|
||||
bank: Arc<Bank>,
|
||||
verify_ledger: bool,
|
||||
dev_halt_at_slot: Option<Slot>,
|
||||
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
|
||||
info!("processing ledger from root: {}...", bank.slot());
|
||||
// Starting slot must be a root, and thus has no parents
|
||||
assert!(bank.parent().is_none());
|
||||
let start_slot = bank.slot();
|
||||
let now = Instant::now();
|
||||
let mut rooted_path = vec![start_slot];
|
||||
let dev_halt_at_slot = dev_halt_at_slot.unwrap_or(std::u64::MAX);
|
||||
|
||||
vec![(slot, meta, bank, entry_height, last_entry_hash)]
|
||||
blocktree
|
||||
.set_roots(&[start_slot])
|
||||
.expect("Couldn't set root on startup");
|
||||
|
||||
let meta = blocktree.meta(start_slot).unwrap();
|
||||
|
||||
// Iterate and replay slots from blocktree starting from `start_slot`
|
||||
let (bank_forks, bank_forks_info, leader_schedule_cache) = {
|
||||
if let Some(meta) = meta {
|
||||
let epoch_schedule = bank.epoch_schedule();
|
||||
let mut leader_schedule_cache = LeaderScheduleCache::new(*epoch_schedule, &bank);
|
||||
let fork_info = process_pending_slots(
|
||||
&bank,
|
||||
&meta,
|
||||
blocktree,
|
||||
&mut leader_schedule_cache,
|
||||
&mut rooted_path,
|
||||
verify_ledger,
|
||||
dev_halt_at_slot,
|
||||
)?;
|
||||
let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
|
||||
let bank_forks = BankForks::new_from_banks(&banks, rooted_path);
|
||||
(bank_forks, bank_forks_info, leader_schedule_cache)
|
||||
} else {
|
||||
// If there's no meta for the input `start_slot`, then we started from a snapshot
|
||||
// and there's no point in processing the rest of blocktree and implies blocktree
|
||||
// should be empty past this point.
|
||||
let bfi = BankForksInfo {
|
||||
bank_slot: start_slot,
|
||||
};
|
||||
let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
|
||||
let bank_forks = BankForks::new_from_banks(&[bank], rooted_path);
|
||||
(bank_forks, vec![bfi], leader_schedule_cache)
|
||||
}
|
||||
};
|
||||
|
||||
blocktree.set_roots(&[0]).expect("Couldn't set first root");
|
||||
|
||||
let leader_schedule_cache =
|
||||
LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), &pending_slots[0].2);
|
||||
|
||||
let mut fork_info = vec![];
|
||||
let mut last_status_report = Instant::now();
|
||||
let mut root = 0;
|
||||
while !pending_slots.is_empty() {
|
||||
let (slot, meta, bank, mut entry_height, mut last_entry_hash) =
|
||||
pending_slots.pop().unwrap();
|
||||
|
||||
if last_status_report.elapsed() > Duration::from_secs(2) {
|
||||
info!("processing ledger...block {}", slot);
|
||||
last_status_report = Instant::now();
|
||||
}
|
||||
|
||||
// Fetch all entries for this slot
|
||||
let mut entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
|
||||
warn!("Failed to load entries for slot {}: {:?}", slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?;
|
||||
|
||||
if slot == 0 {
|
||||
// The first entry in the ledger is a pseudo-tick used only to ensure the number of ticks
|
||||
// in slot 0 is the same as the number of ticks in all subsequent slots. It is not
|
||||
// processed by the bank, skip over it.
|
||||
if entries.is_empty() {
|
||||
warn!("entry0 not present");
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
let entry0 = entries.remove(0);
|
||||
if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
|
||||
warn!("Ledger proof of history failed at entry0");
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
last_entry_hash = entry0.hash;
|
||||
entry_height += 1;
|
||||
}
|
||||
|
||||
if !entries.is_empty() {
|
||||
if verify_ledger && !entries.verify(&last_entry_hash) {
|
||||
warn!(
|
||||
"Ledger proof of history failed at slot: {}, entry: {}",
|
||||
slot, entry_height
|
||||
);
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
|
||||
process_entries(&bank, &entries).map_err(|err| {
|
||||
warn!("Failed to process entries for slot {}: {:?}", slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?;
|
||||
|
||||
last_entry_hash = entries.last().unwrap().hash;
|
||||
entry_height += entries.len() as u64;
|
||||
}
|
||||
|
||||
bank.freeze(); // all banks handled by this routine are created from complete slots
|
||||
|
||||
if blocktree.is_root(slot) {
|
||||
root = slot;
|
||||
leader_schedule_cache.set_root(&bank);
|
||||
bank.squash();
|
||||
pending_slots.clear();
|
||||
fork_info.clear();
|
||||
}
|
||||
|
||||
if meta.next_slots.is_empty() {
|
||||
// Reached the end of this fork. Record the final entry height and last entry.hash
|
||||
let bfi = BankForksInfo {
|
||||
bank_slot: slot,
|
||||
entry_height,
|
||||
};
|
||||
fork_info.push((bank, bfi));
|
||||
continue;
|
||||
}
|
||||
|
||||
// This is a fork point, create a new child bank for each fork
|
||||
for next_slot in meta.next_slots {
|
||||
let next_meta = blocktree
|
||||
.meta(next_slot)
|
||||
.map_err(|err| {
|
||||
warn!("Failed to load meta for slot {}: {:?}", slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?
|
||||
.unwrap();
|
||||
|
||||
// only process full slots in blocktree_processor, replay_stage
|
||||
// handles any partials
|
||||
if next_meta.is_full() {
|
||||
let next_bank = Arc::new(Bank::new_from_parent(
|
||||
&bank,
|
||||
&leader_schedule_cache
|
||||
.slot_leader_at(next_slot, Some(&bank))
|
||||
.unwrap(),
|
||||
next_slot,
|
||||
));
|
||||
trace!("Add child bank for slot={}", next_slot);
|
||||
// bank_forks.insert(*next_slot, child_bank);
|
||||
pending_slots.push((
|
||||
next_slot,
|
||||
next_meta,
|
||||
next_bank,
|
||||
entry_height,
|
||||
last_entry_hash,
|
||||
));
|
||||
} else {
|
||||
let bfi = BankForksInfo {
|
||||
bank_slot: slot,
|
||||
entry_height,
|
||||
};
|
||||
fork_info.push((bank.clone(), bfi));
|
||||
}
|
||||
}
|
||||
|
||||
// reverse sort by slot, so the next slot to be processed can be pop()ed
|
||||
// TODO: remove me once leader_scheduler can hang with out-of-order slots?
|
||||
pending_slots.sort_by(|a, b| b.0.cmp(&a.0));
|
||||
}
|
||||
|
||||
let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
|
||||
let bank_forks = BankForks::new_from_banks(&banks, root);
|
||||
info!(
|
||||
"processing ledger...complete in {}ms, forks={}...",
|
||||
duration_as_ms(&now.elapsed()),
|
||||
@ -297,6 +212,195 @@ pub fn process_blocktree(
|
||||
Ok((bank_forks, bank_forks_info, leader_schedule_cache))
|
||||
}
|
||||
|
||||
fn verify_and_process_entries(
|
||||
bank: &Bank,
|
||||
entries: &[Entry],
|
||||
verify_ledger: bool,
|
||||
last_entry_hash: Hash,
|
||||
) -> result::Result<Hash, BlocktreeProcessorError> {
|
||||
assert!(!entries.is_empty());
|
||||
|
||||
if verify_ledger && !entries.verify(&last_entry_hash) {
|
||||
warn!("Ledger proof of history failed at slot: {}", bank.slot());
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
|
||||
process_entries(&bank, &entries).map_err(|err| {
|
||||
warn!(
|
||||
"Failed to process entries for slot {}: {:?}",
|
||||
bank.slot(),
|
||||
err
|
||||
);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?;
|
||||
|
||||
Ok(entries.last().unwrap().hash)
|
||||
}
|
||||
|
||||
// Special handling required for processing the entries in slot 0
|
||||
fn process_bank_0(
|
||||
bank0: &Bank,
|
||||
blocktree: &Blocktree,
|
||||
verify_ledger: bool,
|
||||
) -> result::Result<(), BlocktreeProcessorError> {
|
||||
assert_eq!(bank0.slot(), 0);
|
||||
|
||||
// Fetch all entries for this slot
|
||||
let mut entries = blocktree.get_slot_entries(0, 0, None).map_err(|err| {
|
||||
warn!("Failed to load entries for slot 0, err: {:?}", err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?;
|
||||
|
||||
// The first entry in the ledger is a pseudo-tick used only to ensure the number of ticks
|
||||
// in slot 0 is the same as the number of ticks in all subsequent slots. It is not
|
||||
// processed by the bank, skip over it.
|
||||
if entries.is_empty() {
|
||||
warn!("entry0 not present");
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
let entry0 = entries.remove(0);
|
||||
if !(entry0.is_tick() && entry0.verify(&bank0.last_blockhash())) {
|
||||
warn!("Ledger proof of history failed at entry0");
|
||||
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
|
||||
}
|
||||
|
||||
if !entries.is_empty() {
|
||||
verify_and_process_entries(bank0, &entries, verify_ledger, entry0.hash)?;
|
||||
} else {
|
||||
bank0.register_tick(&entry0.hash);
|
||||
}
|
||||
|
||||
bank0.freeze();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Given a slot, add its children to the pending slots queue if those children slots are
|
||||
// complete
|
||||
fn process_next_slots(
|
||||
bank: &Arc<Bank>,
|
||||
meta: &SlotMeta,
|
||||
blocktree: &Blocktree,
|
||||
leader_schedule_cache: &LeaderScheduleCache,
|
||||
pending_slots: &mut Vec<(u64, SlotMeta, Arc<Bank>, Hash)>,
|
||||
fork_info: &mut Vec<(Arc<Bank>, BankForksInfo)>,
|
||||
) -> result::Result<(), BlocktreeProcessorError> {
|
||||
if meta.next_slots.is_empty() {
|
||||
// Reached the end of this fork. Record the final entry height and last entry.hash
|
||||
let bfi = BankForksInfo {
|
||||
bank_slot: bank.slot(),
|
||||
};
|
||||
fork_info.push((bank.clone(), bfi));
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// This is a fork point if there are multiple children, create a new child bank for each fork
|
||||
for next_slot in &meta.next_slots {
|
||||
let next_meta = blocktree
|
||||
.meta(*next_slot)
|
||||
.map_err(|err| {
|
||||
warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?
|
||||
.unwrap();
|
||||
|
||||
// Only process full slots in blocktree_processor, replay_stage
|
||||
// handles any partials
|
||||
if next_meta.is_full() {
|
||||
let next_bank = Arc::new(Bank::new_from_parent(
|
||||
&bank,
|
||||
&leader_schedule_cache
|
||||
.slot_leader_at(*next_slot, Some(&bank))
|
||||
.unwrap(),
|
||||
*next_slot,
|
||||
));
|
||||
trace!("Add child bank {} of slot={}", next_slot, bank.slot());
|
||||
pending_slots.push((*next_slot, next_meta, next_bank, bank.last_blockhash()));
|
||||
} else {
|
||||
let bfi = BankForksInfo {
|
||||
bank_slot: bank.slot(),
|
||||
};
|
||||
fork_info.push((bank.clone(), bfi));
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse sort by slot, so the next slot to be processed can be popped
|
||||
// TODO: remove me once leader_scheduler can hang with out-of-order slots?
|
||||
pending_slots.sort_by(|a, b| b.0.cmp(&a.0));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Iterate through blocktree processing slots starting from the root slot pointed to by the
|
||||
// given `meta`
|
||||
fn process_pending_slots(
|
||||
root_bank: &Arc<Bank>,
|
||||
root_meta: &SlotMeta,
|
||||
blocktree: &Blocktree,
|
||||
leader_schedule_cache: &mut LeaderScheduleCache,
|
||||
rooted_path: &mut Vec<u64>,
|
||||
verify_ledger: bool,
|
||||
dev_halt_at_slot: Slot,
|
||||
) -> result::Result<Vec<(Arc<Bank>, BankForksInfo)>, BlocktreeProcessorError> {
|
||||
let mut fork_info = vec![];
|
||||
let mut last_status_report = Instant::now();
|
||||
let mut pending_slots = vec![];
|
||||
process_next_slots(
|
||||
root_bank,
|
||||
root_meta,
|
||||
blocktree,
|
||||
leader_schedule_cache,
|
||||
&mut pending_slots,
|
||||
&mut fork_info,
|
||||
)?;
|
||||
|
||||
while !pending_slots.is_empty() {
|
||||
let (slot, meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
|
||||
|
||||
if last_status_report.elapsed() > Duration::from_secs(2) {
|
||||
info!("processing ledger...block {}", slot);
|
||||
last_status_report = Instant::now();
|
||||
}
|
||||
|
||||
// Fetch all entries for this slot
|
||||
let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
|
||||
warn!("Failed to load entries for slot {}: {:?}", slot, err);
|
||||
BlocktreeProcessorError::LedgerVerificationFailed
|
||||
})?;
|
||||
|
||||
verify_and_process_entries(&bank, &entries, verify_ledger, last_entry_hash)?;
|
||||
|
||||
bank.freeze(); // all banks handled by this routine are created from complete slots
|
||||
|
||||
if blocktree.is_root(slot) {
|
||||
let parents = bank.parents().into_iter().map(|b| b.slot()).rev().skip(1);
|
||||
let parents: Vec<_> = parents.collect();
|
||||
rooted_path.extend(parents);
|
||||
rooted_path.push(slot);
|
||||
leader_schedule_cache.set_root(&bank);
|
||||
bank.squash();
|
||||
pending_slots.clear();
|
||||
fork_info.clear();
|
||||
}
|
||||
|
||||
if slot >= dev_halt_at_slot {
|
||||
let bfi = BankForksInfo { bank_slot: slot };
|
||||
fork_info.push((bank, bfi));
|
||||
break;
|
||||
}
|
||||
|
||||
process_next_slots(
|
||||
&bank,
|
||||
&meta,
|
||||
blocktree,
|
||||
leader_schedule_cache,
|
||||
&mut pending_slots,
|
||||
&mut fork_info,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(fork_info)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
@ -325,8 +429,17 @@ pub mod tests {
|
||||
let entries = create_ticks(ticks_per_slot, last_entry_hash);
|
||||
let last_entry_hash = entries.last().unwrap().hash;
|
||||
|
||||
let blobs = entries_to_blobs(&entries, slot, parent_slot, true);
|
||||
blocktree.insert_data_blobs(blobs.iter()).unwrap();
|
||||
blocktree
|
||||
.write_entries_using_shreds(
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
ticks_per_slot,
|
||||
Some(parent_slot),
|
||||
true,
|
||||
&entries,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
last_entry_hash
|
||||
}
|
||||
@ -376,14 +489,13 @@ pub mod tests {
|
||||
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
|
||||
|
||||
let (mut _bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 0, // slot 1 isn't "full", we stop at slot zero
|
||||
entry_height: ticks_per_slot,
|
||||
}
|
||||
);
|
||||
}
|
||||
@ -435,7 +547,7 @@ pub mod tests {
|
||||
blocktree.set_roots(&[4, 1, 0]).unwrap();
|
||||
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
|
||||
|
||||
@ -443,7 +555,6 @@ pub mod tests {
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 4, // Fork 2's head is slot 4
|
||||
entry_height: ticks_per_slot * 3,
|
||||
}
|
||||
);
|
||||
assert!(&bank_forks[4]
|
||||
@ -454,10 +565,7 @@ pub mod tests {
|
||||
.is_empty());
|
||||
|
||||
// Ensure bank_forks holds the right banks
|
||||
for info in bank_forks_info {
|
||||
assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
|
||||
assert!(bank_forks[info.bank_slot].is_frozen());
|
||||
}
|
||||
verify_fork_infos(&bank_forks, &bank_forks_info);
|
||||
|
||||
assert_eq!(bank_forks.root(), 4);
|
||||
}
|
||||
@ -509,14 +617,13 @@ pub mod tests {
|
||||
blocktree.set_roots(&[0, 1]).unwrap();
|
||||
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 2); // There are two forks
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 3, // Fork 1's head is slot 3
|
||||
entry_height: ticks_per_slot * 4,
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
@ -531,7 +638,6 @@ pub mod tests {
|
||||
bank_forks_info[1],
|
||||
BankForksInfo {
|
||||
bank_slot: 4, // Fork 2's head is slot 4
|
||||
entry_height: ticks_per_slot * 3,
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
@ -546,10 +652,7 @@ pub mod tests {
|
||||
assert_eq!(bank_forks.root(), 1);
|
||||
|
||||
// Ensure bank_forks holds the right banks
|
||||
for info in bank_forks_info {
|
||||
assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
|
||||
assert!(bank_forks[info.bank_slot].is_frozen());
|
||||
}
|
||||
verify_fork_infos(&bank_forks, &bank_forks_info);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -590,14 +693,13 @@ pub mod tests {
|
||||
|
||||
// Check that we can properly restart the ledger / leader scheduler doesn't fail
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1); // There is one fork
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: last_slot + 1, // Head is last_slot + 1
|
||||
entry_height: ticks_per_slot * (last_slot + 2),
|
||||
}
|
||||
);
|
||||
|
||||
@ -722,21 +824,14 @@ pub mod tests {
|
||||
let blocktree =
|
||||
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
|
||||
blocktree
|
||||
.write_entries(1, 0, 0, genesis_block.ticks_per_slot, &entries)
|
||||
.write_entries_using_shreds(1, 0, 0, genesis_block.ticks_per_slot, None, true, &entries)
|
||||
.unwrap();
|
||||
let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(bank_forks.root(), 0);
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 1,
|
||||
entry_height,
|
||||
}
|
||||
);
|
||||
assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 1 });
|
||||
|
||||
let bank = bank_forks[1].clone();
|
||||
assert_eq!(
|
||||
@ -757,16 +852,10 @@ pub mod tests {
|
||||
|
||||
let blocktree = Blocktree::open(&ledger_path).unwrap();
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
|
||||
process_blocktree(&genesis_block, &blocktree, None, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 0,
|
||||
entry_height: 1,
|
||||
}
|
||||
);
|
||||
assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 });
|
||||
let bank = bank_forks[0].clone();
|
||||
assert_eq!(bank.tick_height(), 0);
|
||||
}
|
||||
@ -1254,6 +1343,83 @@ pub mod tests {
|
||||
assert_eq!(bank.process_transaction(&fail_tx), Ok(()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_blocktree_from_root() {
|
||||
let GenesisBlockInfo {
|
||||
mut genesis_block, ..
|
||||
} = create_genesis_block(123);
|
||||
|
||||
let ticks_per_slot = 1;
|
||||
genesis_block.ticks_per_slot = ticks_per_slot;
|
||||
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
|
||||
let blocktree = Blocktree::open(&ledger_path).unwrap();
|
||||
|
||||
/*
|
||||
Build a blocktree in the ledger with the following fork structure:
|
||||
|
||||
slot 0 (all ticks)
|
||||
|
|
||||
slot 1 (all ticks)
|
||||
|
|
||||
slot 2 (all ticks)
|
||||
|
|
||||
slot 3 (all ticks) -> root
|
||||
|
|
||||
slot 4 (all ticks)
|
||||
|
|
||||
slot 5 (all ticks) -> root
|
||||
|
|
||||
slot 6 (all ticks)
|
||||
*/
|
||||
|
||||
let mut last_hash = blockhash;
|
||||
for i in 0..6 {
|
||||
last_hash =
|
||||
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, i + 1, i, last_hash);
|
||||
}
|
||||
blocktree.set_roots(&[3, 5]).unwrap();
|
||||
|
||||
// Set up bank1
|
||||
let bank0 = Arc::new(Bank::new(&genesis_block));
|
||||
process_bank_0(&bank0, &blocktree, true).unwrap();
|
||||
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
|
||||
bank1.squash();
|
||||
let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap();
|
||||
verify_and_process_entries(&bank1, &slot1_entries, true, bank0.last_blockhash()).unwrap();
|
||||
|
||||
// Test process_blocktree_from_root() from slot 1 onwards
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree_from_root(&blocktree, bank1, true, None).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1); // One fork
|
||||
assert_eq!(
|
||||
bank_forks_info[0],
|
||||
BankForksInfo {
|
||||
bank_slot: 6, // The head of the fork is slot 6
|
||||
}
|
||||
);
|
||||
|
||||
// slots_since_snapshot should contain everything on the rooted path
|
||||
assert_eq!(
|
||||
bank_forks.slots_since_snapshot().to_vec(),
|
||||
vec![1, 2, 3, 4, 5]
|
||||
);
|
||||
assert_eq!(bank_forks.root(), 5);
|
||||
|
||||
// Verify the parents of the head of the fork
|
||||
assert_eq!(
|
||||
&bank_forks[6]
|
||||
.parents()
|
||||
.iter()
|
||||
.map(|bank| bank.slot())
|
||||
.collect::<Vec<_>>(),
|
||||
&[5]
|
||||
);
|
||||
|
||||
// Check that bank forks has the correct banks
|
||||
verify_fork_infos(&bank_forks, &bank_forks_info);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_process_entries_stress() {
|
||||
@ -1347,4 +1513,22 @@ pub mod tests {
|
||||
let bank = Bank::new_with_paths(&genesis_block, account_paths);
|
||||
bank.epoch_schedule().clone()
|
||||
}
|
||||
|
||||
// Check that `bank_forks` contains all the ancestors and banks for each fork identified in
|
||||
// `bank_forks_info`
|
||||
fn verify_fork_infos(bank_forks: &BankForks, bank_forks_info: &[BankForksInfo]) {
|
||||
for fork in bank_forks_info {
|
||||
let head_slot = fork.bank_slot;
|
||||
let head_bank = &bank_forks[head_slot];
|
||||
let mut parents = head_bank.parents();
|
||||
parents.push(head_bank.clone());
|
||||
|
||||
// Ensure the tip of each fork and all its parents are in the given bank_forks
|
||||
for parent in parents {
|
||||
let parent_bank = &bank_forks[parent.slot()];
|
||||
assert_eq!(parent_bank.slot(), parent.slot());
|
||||
assert!(parent_bank.is_frozen());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -9,12 +9,10 @@ use crate::erasure::{CodingGenerator, ErasureConfig};
|
||||
use crate::poh_recorder::WorkingBankEntries;
|
||||
use crate::result::{Error, Result};
|
||||
use crate::service::Service;
|
||||
use crate::shred::Shredder;
|
||||
use crate::staking_utils;
|
||||
use rayon::ThreadPool;
|
||||
use solana_metrics::{
|
||||
datapoint, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info,
|
||||
};
|
||||
use solana_sdk::timing::duration_as_ms;
|
||||
use solana_metrics::{datapoint, inc_new_counter_error, inc_new_counter_info};
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
@ -24,7 +22,7 @@ use std::time::Instant;
|
||||
|
||||
mod broadcast_bad_blob_sizes;
|
||||
mod broadcast_fake_blobs_run;
|
||||
mod broadcast_utils;
|
||||
pub(crate) mod broadcast_utils;
|
||||
mod fail_entry_verification_broadcast_run;
|
||||
mod standard_broadcast_run;
|
||||
|
||||
@ -110,6 +108,7 @@ trait BroadcastRun {
|
||||
|
||||
struct Broadcast {
|
||||
coding_generator: CodingGenerator,
|
||||
parent_slot: Option<u64>,
|
||||
thread_pool: ThreadPool,
|
||||
}
|
||||
|
||||
@ -149,6 +148,7 @@ impl BroadcastStage {
|
||||
|
||||
let mut broadcast = Broadcast {
|
||||
coding_generator,
|
||||
parent_slot: None,
|
||||
thread_pool: rayon::ThreadPoolBuilder::new()
|
||||
.num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
|
||||
.build()
|
||||
@ -241,6 +241,7 @@ mod test {
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
@ -255,7 +256,7 @@ mod test {
|
||||
|
||||
fn setup_dummy_broadcast_service(
|
||||
leader_pubkey: &Pubkey,
|
||||
ledger_path: &str,
|
||||
ledger_path: &Path,
|
||||
entry_receiver: Receiver<WorkingBankEntries>,
|
||||
) -> MockBroadcastStage {
|
||||
// Make the database ledger
|
||||
@ -297,6 +298,7 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_broadcast_ledger() {
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path("test_broadcast_ledger");
|
||||
@ -311,18 +313,22 @@ mod test {
|
||||
&ledger_path,
|
||||
entry_receiver,
|
||||
);
|
||||
let bank = broadcast_service.bank.clone();
|
||||
let start_tick_height = bank.tick_height();
|
||||
let max_tick_height = bank.max_tick_height();
|
||||
let ticks_per_slot = bank.ticks_per_slot();
|
||||
let start_tick_height;
|
||||
let max_tick_height;
|
||||
let ticks_per_slot;
|
||||
{
|
||||
let bank = broadcast_service.bank.clone();
|
||||
start_tick_height = bank.tick_height();
|
||||
max_tick_height = bank.max_tick_height();
|
||||
ticks_per_slot = bank.ticks_per_slot();
|
||||
|
||||
let ticks = create_ticks(max_tick_height - start_tick_height, Hash::default());
|
||||
for (i, tick) in ticks.into_iter().enumerate() {
|
||||
entry_sender
|
||||
.send((bank.clone(), vec![(tick, i as u64 + 1)]))
|
||||
.expect("Expect successful send to broadcast service");
|
||||
let ticks = create_ticks(max_tick_height - start_tick_height, Hash::default());
|
||||
for (i, tick) in ticks.into_iter().enumerate() {
|
||||
entry_sender
|
||||
.send((bank.clone(), vec![(tick, i as u64 + 1)]))
|
||||
.expect("Expect successful send to broadcast service");
|
||||
}
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(2000));
|
||||
|
||||
trace!(
|
||||
@ -337,7 +343,7 @@ mod test {
|
||||
for i in 0..max_tick_height - start_tick_height {
|
||||
let slot = (start_tick_height + i + 1) / ticks_per_slot;
|
||||
|
||||
let result = blocktree.get_data_blob(slot, blob_index).unwrap();
|
||||
let result = blocktree.get_data_shred_as_blob(slot, blob_index).unwrap();
|
||||
|
||||
blob_index += 1;
|
||||
result.expect("expect blob presence");
|
||||
|
@ -4,10 +4,12 @@ use crate::erasure::CodingGenerator;
|
||||
use crate::packet::{self, SharedBlob};
|
||||
use crate::poh_recorder::WorkingBankEntries;
|
||||
use crate::result::Result;
|
||||
use crate::shred::Shredder;
|
||||
use rayon::prelude::*;
|
||||
use rayon::ThreadPool;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
|
||||
use std::io::Write;
|
||||
use std::sync::mpsc::Receiver;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
@ -97,6 +99,34 @@ pub(super) fn entries_to_blobs(
|
||||
(blobs, coding)
|
||||
}
|
||||
|
||||
pub fn entries_to_shreds(
|
||||
ventries: Vec<Vec<Entry>>,
|
||||
last_tick: u64,
|
||||
bank_max_tick: u64,
|
||||
shredder: &mut Shredder,
|
||||
) {
|
||||
ventries.iter().enumerate().for_each(|(i, entries)| {
|
||||
let data = bincode::serialize(entries).unwrap();
|
||||
let mut offset = 0;
|
||||
while offset < data.len() {
|
||||
offset += shredder.write(&data[offset..]).unwrap();
|
||||
}
|
||||
// bincode::serialize_into(&shredder, &entries).unwrap();
|
||||
trace!(
|
||||
"Shredded {:?} entries into {:?} shreds",
|
||||
entries.len(),
|
||||
shredder.shreds.len()
|
||||
);
|
||||
if i + 1 == ventries.len() && last_tick == bank_max_tick {
|
||||
debug!("Finalized slot for the shreds");
|
||||
shredder.finalize_slot();
|
||||
} else {
|
||||
debug!("Finalized fec block for the shreds");
|
||||
shredder.finalize_fec_block();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn generate_data_blobs(
|
||||
ventries: Vec<Vec<(Entry, u64)>>,
|
||||
thread_pool: &ThreadPool,
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user