Compare commits

...

63 Commits

Author SHA1 Message Date
Michael Vines
c853632fc4 Add stub address_labels field for 1.3 compatibility (#10696) 2020-06-18 11:05:48 -07:00
mergify[bot]
e651209f73 Update testnet shred version (#10684) (#10685)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 9c22a6007d)

Co-authored-by: carllin <wumu727@gmail.com>
2020-06-18 07:59:20 +00:00
mergify[bot]
641f439a45 Update testnet shred version (#10681) (#10682)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit dae8bc477b)

Co-authored-by: carllin <wumu727@gmail.com>
2020-06-18 07:44:58 +00:00
Michael Vines
a2486f8094 Remove strict from automerge, add rebase opt in 2020-06-17 20:53:55 -07:00
mergify[bot]
d48bd80619 Plumb --warp-slot through net scripts (bp #10639) (#10642)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-18 01:44:58 +00:00
mergify[bot]
4ff70a05f1 ignore break (#10666) (#10668)
(cherry picked from commit a5f82c995e)

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2020-06-17 22:46:24 +00:00
mergify[bot]
7831cef9a7 Wait until bank is frozen before sending RPC notifications (bp #10654) (#10662)
* Wait until bank is frozen before sending RPC notifications (#10654)

(cherry picked from commit 39984cdcc3)

# Conflicts:
#	core/src/replay_stage.rs

* Update replay_stage.rs

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-17 20:35:16 +00:00
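In plain terms, #10654 defers subscription notifications until a bank is frozen, i.e. its hash is computed and no further transactions can land in it. A hedged sketch of that gating (the function and callback are illustrative stand-ins for the replay_stage plumbing, not the exact code):

use solana_runtime::bank::Bank;

// Illustrative stand-in: the real change gates RPC subscription
// notifications in replay_stage on Bank::is_frozen().
fn maybe_notify(bank: &Bank, notify: impl Fn(u64)) {
    // Once frozen, results reported to RPC subscribers can no longer change.
    if bank.is_frozen() {
        notify(bank.slot());
    }
}
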
mergify[bot]
7dd22d6601 Factor out testnet automation SW version resolution (#10659)
(cherry picked from commit a15f60a291)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-17 17:19:02 +00:00
mergify[bot]
3bb0388299 Add address to non-circulating supply (#10647)
(cherry picked from commit 5673343f49)

Co-authored-by: publish-docs.sh <maintainers@solana.com>
2020-06-17 05:43:23 +00:00
mergify[bot]
a0a2c61856 Allow pre-existing stake accounts in multinode-demo/delegate-stake.sh (#10635)
(cherry picked from commit ae0d5ba201)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-16 22:53:52 +00:00
mergify[bot]
4afa64c20d Plumb --wait-for-supermajority through scripts (#10611) (#10613)
(cherry picked from commit 348bf78cd1)

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-16 17:55:16 +00:00
mergify[bot]
be6edb950c Add generic is_parsable() input validator (bp #10599) (#10620)
* Add generic is_parsable() input validator.
Allow input validators to accept &str, &String and String parameters.

(cherry picked from commit daa2e6363f)

# Conflicts:
#	clap-utils/src/input_validators.rs

* Fix conflict

Co-authored-by: Kristofer Peterson <kris@tranception.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-06-16 11:15:29 +00:00
Michael Vines
62bc83ef39 Add mergify automerge rules 2020-06-15 09:14:39 -07:00
Michael Vines
f26824f2b5 Bump version to v1.1.19 2020-06-14 20:18:45 -07:00
mergify[bot]
bc808d785b Fix udp port check retry and check all udp ports (bp #10385) (#10576)
automerge
2020-06-14 17:36:17 -07:00
mergify[bot]
a5e91f8b14 Fix perf-libs version detection (#10571) (#10573)
automerge
2020-06-14 13:50:10 -07:00
mergify[bot]
79b1d49e42 Fix fannout gossip bench (bp #10509) (#10555)
* Fix fannout gossip bench (#10509)

* Gossip benchmark

* Rayon tweaking

* push pulls

* fanout to max nodes

* fixup! fanout to max nodes

* fixup! fixup! fanout to max nodes

* update

* multi vote test

* fixup prune

* fast propagation

* fixups

* compute up to 95%

* test for specific tx

* stats

* stats

* fixed tests

* rename

* track a lagging view of which nodes have the local node in their active set in the local received_cache

* test fixups

* dups are old now

* dont prune your own origin

* send vote to tpu

* tests

* fixed tests

* fixed test

* update

* ignore scale

* lint

* fixup

* fixup

* fixup

* cleanup

Co-authored-by: Stephen Akridge <sakridge@gmail.com>
(cherry picked from commit ba83e4ca50)

* Merge fixes

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2020-06-14 06:28:43 -07:00
mergify[bot]
5c5207b7c4 Use git diff instead of git show for --check (#10566) (#10567)
automerge
2020-06-14 06:28:33 -07:00
mergify[bot]
6280ea1b6e Check the whole range of commits in the topic branch (bp #10560) (#10563)
automerge
2020-06-14 04:46:09 -07:00
sakridge
f016ccdbb5 Dial down gossip threadpool (#10540) 2020-06-13 22:48:32 -07:00
Dan Albert
a528e966e6 Add Trust Wallet security info (#10516)
automerge

(cherry picked from commit 914f285914)
2020-06-12 22:14:34 -07:00
mergify[bot]
4be9d926c8 Add FdGYQ... to non-circulation withdrawer authority list (#10542) (#10545)
automerge
2020-06-12 18:14:27 -07:00
Michael Vines
94e162b0f0 Refine build condition 2020-06-12 17:03:48 -07:00
mergify[bot]
26ca3c6d6d Update non-circulating pubkeys (#10524) (#10526)
automerge

(cherry picked from commit fb8612be49)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-06-11 16:58:28 -07:00
mergify[bot]
729b997392 Improve BPF SDK dependency caching (#10434) (#10517)
automerge
2020-06-11 14:12:16 -07:00
Michael Vines
37b381f47f Force CI_REPO_SLUG 2020-06-11 13:14:42 -07:00
Michael Vines
0115bfa2ea Bump version to v1.1.18 2020-06-10 22:42:05 -07:00
mergify[bot]
3f60fe62c2 Add StakeInstruction::Merge (#10503) (#10506)
automerge
2020-06-10 20:04:03 -07:00
mergify[bot]
ea44e64d21 Add VoteInstruction::UpdateCommission (#10497)
automerge
2020-06-10 12:54:09 -07:00
mergify[bot]
8e1c2d2df4 Add back missing pull_response success counter (#10491) (#10500)
automerge
2020-06-10 10:45:27 -07:00
sakridge
a79702c62c Optimize process pull responses (#10460) (#10484)
* Batch process pull responses

* Generate pull requests at 1/2 rate

* Do filtering work of process_pull_response in read lock

Only take write lock to insert if needed.
2020-06-09 20:02:46 -07:00
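A minimal sketch of the read-then-write locking pattern this commit describes; the map and its update rule are hypothetical stand-ins for the gossip table:

use std::collections::HashMap;
use std::sync::RwLock;

// Batch the responses, do the filtering under the shared read lock, and take
// the exclusive write lock only if something actually needs inserting.
fn process_pull_responses(table: &RwLock<HashMap<u64, u64>>, responses: Vec<(u64, u64)>) {
    let to_insert: Vec<(u64, u64)> = {
        let r = table.read().unwrap(); // many threads can filter concurrently
        responses
            .into_iter()
            .filter(|(key, value)| r.get(key).map_or(true, |old| old < value))
            .collect()
    }; // read lock released here
    if !to_insert.is_empty() {
        let mut w = table.write().unwrap(); // exclusive, so kept short
        for (key, value) in to_insert {
            w.insert(key, value);
        }
    }
}
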
mergify[bot]
3c94084177 Add SendTransactionService (#10470)
automerge
2020-06-09 18:13:50 -07:00
mergify[bot]
7d448eb1a9 Add --warp-slot argument to |solana-ledger-tool create-snapshot| (#10473)
automerge
2020-06-09 11:09:32 -07:00
sakridge
a705764ca7 v1.1 gossip lock optimizations (#10459)
* Skip gossip requests with different shred version and split lock (#10240)


(cherry picked from commit 3f508b37fd)

* More cluster stats and add epoch stakes cache in retransmit stage (#10345)

* More cluster info metrics for push request/response counts

* Cache staked peers for the epoch

(cherry picked from commit ef37b82ffa)

* Cache tvu peers for broadcast (#10373)


(cherry picked from commit 2cf719ac2c)

* Add pull request count metrics (#10421)


(cherry picked from commit 3d2230f1a9)
2020-06-08 17:05:55 -07:00
mergify[bot]
3110def6c3 Remove lock around JsonRpcRequestProcessor (bp #10417) (#10450)
automerge
2020-06-08 16:45:49 -07:00
Michael Vines
afc89beefa Bump version to v1.1.17 2020-06-08 10:32:26 -07:00
Michael Vines
d5d5e8797b Bump new_system_program_activation_epoch by 2 2020-06-08 09:40:39 -07:00
mergify[bot]
09f0624887 Adjust RPC simulateTransaction endpoint to match v1.2 (#10443)
automerge
2020-06-06 21:08:27 -07:00
mergify[bot]
52c20a5c38 Add Certus One as a trusted validator for testnet (#10433) (#10437)
automerge
2020-06-05 16:49:21 -07:00
mergify[bot]
3c38df9be0 Avoid AccountInUse errors when simulating transactions (#10391) (#10419)
automerge
2020-06-04 20:41:12 -07:00
Michael Vines
da038e626a v1.1: ledger_cleanup_service: compact at a slower rate than purging (#10415)
automerge
2020-06-04 20:30:31 -07:00
mergify[bot]
9cfbf8a94d Deactivate legacy_system_instruction_processor at epoch 58/38 (preview/stable) (#10406) (#10407)
automerge
2020-06-04 01:21:43 -07:00
Michael Vines
fbcbd37650 v1.1: Enable rolling update of "Permit paying oneself" / "No longer allow create-account to add funds to an existing account" (#10394)
automerge
2020-06-03 16:34:59 -07:00
mergify[bot]
dca932fe45 Don't share same snapshot dir for secondary access (bp #10384) (#10386)
automerge
2020-06-03 06:34:19 -07:00
mergify[bot]
8d89eac32f Support opening an in-use rocksdb as secondary (bp #10209) (#10381)
automerge
2020-06-02 23:51:43 -07:00
Michael Vines
862fd63bb4 Update system_instruction_processor.rs 2020-06-02 23:35:31 -07:00
Greg Fitzgerald
578d77495a No longer allow create-account to add funds to an existing account (#10192) 2020-06-02 23:35:31 -07:00
Ryo Onodera
537d135005 Add --max-genesis-archive-unpacked-size to capitalization (#10380)
automerge
2020-06-02 21:39:14 -07:00
Michael Vines
5ade9b9f02 Revert "Reduce UNLOCK_NONCE_SLOT to ensure it is active on all three clusters (#10223)" (#10370)
automerge
2020-06-02 12:42:03 -07:00
mergify[bot]
e023719c58 Add preflight checks to sendTransaction RPC method (bp #10338) (#10362)
automerge
2020-06-01 22:45:51 -07:00
mergify[bot]
a278f745f8 Reduce stable jobs (#10344) (#10346)
automerge
2020-05-31 22:40:51 -07:00
mergify[bot]
640bb9cb95 Permit paying oneself (#10337) (#10341)
automerge
2020-05-31 13:18:34 -07:00
mergify[bot]
c344a878b6 validator: Added --health-check-slot-distance (bp #10324) (#10330)
automerge
2020-05-29 17:49:09 -07:00
mergify[bot]
9b63f7a50f Improve Rpc inflation tooling (bp #10309) (#10321)
automerge
2020-05-29 17:35:10 -07:00
Trent Nelson
b128087445 Backport of #9161 to v1.1 branch (#10327)
automerge
2020-05-29 16:34:36 -07:00
Tyera Eulberg
72755fcd19 Add mechanism to get blockhash's last valid slot (#10239) (#10318)
automerge
2020-05-29 11:27:45 -07:00
mergify[bot]
24937e63d4 verify_reachable_ports: Handle errors without expect() (#10298) (#10304)
automerge
2020-05-28 16:12:08 -07:00
mergify[bot]
995759faf5 Add commitment parameter to getFeeCalculatorForBlockhash (#10255) (#10296) (#10302)
automerge
2020-05-28 15:26:39 -07:00
mergify[bot]
db60bd30dc Feign RPC health while in a --wait-for-supermajority holding pattern (#10295) (#10300)
automerge
2020-05-28 13:53:05 -07:00
carllin
bc86ee8d13 Fix run_orphan DOS (#10290)
Co-authored-by: Carl <carl@solana.com>
2020-05-28 11:29:13 -07:00
mergify[bot]
93506b22e7 Include GenesisConfig inflation in Display (#10282) (#10288)
automerge
2020-05-28 00:54:13 -07:00
mergify[bot]
1e53760a65 Use correct --url (#10284) (#10286)
automerge
2020-05-27 22:15:47 -07:00
Michael Vines
24c796b434 Bump version to 1.1.16 2020-05-27 18:13:17 -07:00
175 changed files with 6701 additions and 1908 deletions

.mergify.yml

@@ -1,9 +1,40 @@
# Validate your changes with:
#
#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
#
# https://doc.mergify.io/
pull_request_rules:
  - name: automatic merge (squash) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author≠@dont-squash-my-commits
    actions:
      merge:
        method: squash
  # Join the dont-squash-my-commits group if you don't want your commits squashed
  - name: automatic merge (rebase) on CI success
    conditions:
      - status-success=buildkite/solana
      #- status-success=Travis CI - Pull Request
      - status-success=ci-gate
      - label=automerge
      - author=@dont-squash-my-commits
    actions:
      merge:
        method: rebase
  - name: remove automerge label on CI failure
    conditions:
      - label=automerge
      - "#status-failure!=0"
    actions:
      label:
        remove:
          - automerge
      comment:
        message: automerge label removed due to a CI failure
  - name: remove outdated reviews
    conditions:
      - base=master

.travis.yml

@@ -18,6 +18,8 @@ branches:
  - master
  - /^v\d+\.\d+/
if: type IN (api, cron) OR tag IS present

notifications:
  slack:
    on_success: change

Cargo.lock (generated, 135 lines changed)

@@ -252,7 +252,7 @@ dependencies = [
[[package]]
name = "btc_spv_bin"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"hex",
@@ -2886,7 +2886,7 @@ dependencies = [
[[package]]
name = "solana-accounts-bench"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"crossbeam-channel",
@@ -2901,7 +2901,7 @@ dependencies = [
[[package]]
name = "solana-archiver"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"console 0.10.3",
@@ -2916,7 +2916,7 @@ dependencies = [
[[package]]
name = "solana-archiver-lib"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"crossbeam-channel",
@@ -2946,7 +2946,7 @@ dependencies = [
[[package]]
name = "solana-archiver-utils"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"hex",
"log 0.4.8",
@@ -2961,7 +2961,7 @@ dependencies = [
[[package]]
name = "solana-banking-bench"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"crossbeam-channel",
@@ -2981,7 +2981,7 @@ dependencies = [
[[package]]
name = "solana-bench-exchange"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"itertools",
@@ -3008,7 +3008,7 @@ dependencies = [
[[package]]
name = "solana-bench-streamer"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"solana-clap-utils",
@@ -3019,7 +3019,7 @@ dependencies = [
[[package]]
name = "solana-bench-tps"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"clap",
@@ -3045,7 +3045,7 @@ dependencies = [
[[package]]
name = "solana-bpf-loader-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"byteorder",
@@ -3062,7 +3062,7 @@ dependencies = [
[[package]]
name = "solana-btc-spv-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"hex",
@@ -3076,7 +3076,7 @@ dependencies = [
[[package]]
name = "solana-budget-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"chrono",
@@ -3092,7 +3092,7 @@ dependencies = [
[[package]]
name = "solana-chacha"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"hex-literal",
"log 0.4.8",
@@ -3107,7 +3107,7 @@ dependencies = [
[[package]]
name = "solana-chacha-cuda"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"hex-literal",
"log 0.4.8",
@@ -3121,14 +3121,14 @@ dependencies = [
[[package]]
name = "solana-chacha-sys"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"cc",
]
[[package]]
name = "solana-clap-utils"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"chrono",
"clap",
@@ -3142,7 +3142,7 @@ dependencies = [
[[package]]
name = "solana-cli"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"Inflector",
"bincode",
@@ -3186,7 +3186,7 @@ dependencies = [
[[package]]
name = "solana-cli-config"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"dirs",
"lazy_static",
@@ -3198,7 +3198,7 @@ dependencies = [
[[package]]
name = "solana-client"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"bincode",
@@ -3224,7 +3224,7 @@ dependencies = [
[[package]]
name = "solana-config-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"chrono",
@@ -3237,7 +3237,7 @@ dependencies = [
[[package]]
name = "solana-core"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"bs58",
@@ -3277,6 +3277,7 @@ dependencies = [
"solana-clap-utils",
"solana-client",
"solana-faucet",
"solana-genesis-programs",
"solana-ledger",
"solana-logger",
"solana-measure",
@@ -3307,7 +3308,7 @@ dependencies = [
[[package]]
name = "solana-crate-features"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"backtrace",
"bytes 0.4.12",
@@ -3330,7 +3331,7 @@ dependencies = [
[[package]]
name = "solana-dos"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"clap",
@@ -3345,7 +3346,7 @@ dependencies = [
[[package]]
name = "solana-download-utils"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bzip2",
"console 0.10.3",
@@ -3359,7 +3360,7 @@ dependencies = [
[[package]]
name = "solana-exchange-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"log 0.4.8",
@@ -3376,7 +3377,7 @@ dependencies = [
[[package]]
name = "solana-failure-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"solana-runtime",
"solana-sdk",
@@ -3384,7 +3385,7 @@ dependencies = [
[[package]]
name = "solana-faucet"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"byteorder",
@@ -3403,7 +3404,7 @@ dependencies = [
[[package]]
name = "solana-genesis"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"base64 0.12.1",
"chrono",
@@ -3414,6 +3415,7 @@ dependencies = [
"solana-clap-utils",
"solana-genesis-programs",
"solana-ledger",
"solana-logger",
"solana-sdk",
"solana-stake-program",
"solana-storage-program",
@@ -3423,7 +3425,7 @@ dependencies = [
[[package]]
name = "solana-genesis-programs"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"log 0.4.8",
"solana-bpf-loader-program",
@@ -3437,7 +3439,7 @@ dependencies = [
[[package]]
name = "solana-gossip"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"solana-clap-utils",
@@ -3450,7 +3452,7 @@ dependencies = [
[[package]]
name = "solana-install"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"atty",
"bincode",
@@ -3482,7 +3484,7 @@ dependencies = [
[[package]]
name = "solana-keygen"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bs58",
"clap",
@@ -3497,7 +3499,7 @@ dependencies = [
[[package]]
name = "solana-ledger"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"bincode",
@@ -3544,12 +3546,13 @@ dependencies = [
[[package]]
name = "solana-ledger-tool"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_cmd",
"bs58",
"clap",
"histogram",
"log 0.4.8",
"serde_json",
"serde_yaml",
"solana-clap-utils",
@@ -3566,7 +3569,7 @@ dependencies = [
[[package]]
name = "solana-local-cluster"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"itertools",
@@ -3596,7 +3599,7 @@ dependencies = [
[[package]]
name = "solana-log-analyzer"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"byte-unit",
"clap",
@@ -3608,7 +3611,7 @@ dependencies = [
[[package]]
name = "solana-logger"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"env_logger",
"lazy_static",
@@ -3617,7 +3620,7 @@ dependencies = [
[[package]]
name = "solana-measure"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"jemalloc-ctl",
"jemallocator",
@@ -3628,7 +3631,7 @@ dependencies = [
[[package]]
name = "solana-merkle-tree"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"fast-math",
"hex",
@@ -3637,7 +3640,7 @@ dependencies = [
[[package]]
name = "solana-metrics"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"env_logger",
"gethostname",
@@ -3652,7 +3655,7 @@ dependencies = [
[[package]]
name = "solana-net-shaper"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"rand 0.7.3",
@@ -3664,7 +3667,7 @@ dependencies = [
[[package]]
name = "solana-net-utils"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"bytes 0.4.12",
@@ -3683,7 +3686,7 @@ dependencies = [
[[package]]
name = "solana-noop-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"log 0.4.8",
"solana-logger",
@@ -3692,7 +3695,7 @@ dependencies = [
[[package]]
name = "solana-ownable"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"num-derive 0.3.0",
@@ -3704,7 +3707,7 @@ dependencies = [
[[package]]
name = "solana-perf"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"dlopen",
@@ -3724,7 +3727,7 @@ dependencies = [
[[package]]
name = "solana-rayon-threadlimit"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"lazy_static",
"num_cpus",
@@ -3743,7 +3746,7 @@ dependencies = [
[[package]]
name = "solana-remote-wallet"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"base32",
"console 0.10.3",
@@ -3761,7 +3764,7 @@ dependencies = [
[[package]]
name = "solana-runtime"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"bincode",
@@ -3799,7 +3802,7 @@ dependencies = [
[[package]]
name = "solana-scripts"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"csv",
"serde",
@@ -3807,7 +3810,7 @@ dependencies = [
[[package]]
name = "solana-sdk"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"bincode",
@@ -3841,7 +3844,7 @@ dependencies = [
[[package]]
name = "solana-sdk-c"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"bs58",
@@ -3855,7 +3858,7 @@ dependencies = [
[[package]]
name = "solana-sdk-macro"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bs58",
"proc-macro2 1.0.12",
@@ -3865,7 +3868,7 @@ dependencies = [
[[package]]
name = "solana-stake-accounts"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"solana-clap-utils",
@@ -3879,7 +3882,7 @@ dependencies = [
[[package]]
name = "solana-stake-monitor"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"console 0.10.3",
@@ -3903,7 +3906,7 @@ dependencies = [
[[package]]
name = "solana-stake-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"log 0.4.8",
@@ -3921,7 +3924,7 @@ dependencies = [
[[package]]
name = "solana-storage-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"assert_matches",
"bincode",
@@ -3937,7 +3940,7 @@ dependencies = [
[[package]]
name = "solana-streamer"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"libc",
"log 0.4.8",
@@ -3952,7 +3955,7 @@ dependencies = [
[[package]]
name = "solana-sys-tuner"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"libc",
@@ -3967,7 +3970,7 @@ dependencies = [
[[package]]
name = "solana-transaction-status"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"bs58",
@@ -3978,7 +3981,7 @@ dependencies = [
[[package]]
name = "solana-upload-perf"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"serde_json",
"solana-metrics",
@@ -3986,7 +3989,7 @@ dependencies = [
[[package]]
name = "solana-validator"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"chrono",
"clap",
@@ -4014,7 +4017,7 @@ dependencies = [
[[package]]
name = "solana-version"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"serde",
"serde_derive",
@@ -4023,7 +4026,7 @@ dependencies = [
[[package]]
name = "solana-vest-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"chrono",
@@ -4039,7 +4042,7 @@ dependencies = [
[[package]]
name = "solana-vote-program"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"bincode",
"log 0.4.8",
@@ -4054,7 +4057,7 @@ dependencies = [
[[package]]
name = "solana-vote-signer"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"jsonrpc-core",
@@ -4069,7 +4072,7 @@ dependencies = [
[[package]]
name = "solana-watchtower"
version = "1.1.15"
version = "1.1.19"
dependencies = [
"clap",
"humantime 2.0.0",

accounts-bench/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-measure = { path = "../measure", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
rand = "0.7.0"
clap = "2.33.0"
crossbeam-channel = "0.4"

archiver-lib/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.15"
version = "1.1.19"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.19" }
solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-chacha = { path = "../chacha", version = "1.1.19" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-streamer = { path = "../streamer", version = "1.1.19" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
[dev-dependencies]
hex = "0.4.2"

archiver-utils/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.15"
version = "1.1.19"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-chacha = { path = "../chacha", version = "1.1.19" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[dev-dependencies]
hex = "0.4.2"

archiver/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[package.metadata.docs.rs]

banking-bench/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,15 +13,15 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-version = { path = "../version", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.19" }
solana-streamer = { path = "../streamer", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-measure = { path = "../measure", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-version = { path = "../version", version = "1.1.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

bench-exchange/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,20 +18,20 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-genesis = { path = "../genesis", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-genesis = { path = "../genesis", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-faucet = { path = "../faucet", version = "1.1.19" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.15" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

bench-streamer/Cargo.toml

@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-streamer = { path = "../streamer", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

bench-tps/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-genesis = { path = "../genesis", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-genesis = { path = "../genesis", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-faucet = { path = "../faucet", version = "1.1.19" }
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
solana-measure = { path = "../measure", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.15" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.19" }
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]

chacha-cuda/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.15"
version = "1.1.19"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.15" }
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.19" }
solana-chacha = { path = "../chacha", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[dev-dependencies]
hex-literal = "0.2.1"

chacha-sys/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.15"
version = "1.1.19"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

chacha/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.15"
version = "1.1.19"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[dev-dependencies]
hex-literal = "0.2.1"

ci/buildkite.yml

@@ -5,6 +5,9 @@
# Release tags use buildkite-release.yml instead
steps:
  - command: "ci/test-sanity.sh"
    name: "sanity"
    timeout_in_minutes: 5
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 20

ci/run-sanity.sh

@@ -19,7 +19,7 @@ while [[ ! -f config/run/init-completed ]]; do
  fi
done

while [[ $($solana_cli slot --commitment recent) -eq 0 ]]; do
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -eq 0 ]]; do
  sleep 1
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899

ci/test-checks.sh

@@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
# Look for failed mergify.io backports
_ git show HEAD --check --oneline
_ cargo +"$rust_stable" fmt --all -- --check
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
@@ -23,10 +20,8 @@ _ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnin
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh
_ ci/check-ssh-keys.sh
{
cd programs/bpf

ci/test-sanity.sh (new executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."
source ci/_

(
  echo --- git diff --check
  set -x
  # Look for failed mergify.io backports by searching leftover conflict markers
  # Also check for any trailing whitespaces!
  if [[ -n $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
    base_branch=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
  else
    base_branch=$BUILDKITE_BRANCH
  fi
  git fetch origin "$base_branch"
  git diff "$(git merge-base HEAD "origin/$base_branch")..HEAD" --check --oneline
)

echo

_ ci/nits.sh
_ ci/check-ssh-keys.sh

echo --- ok

ci/test-stable.sh

@@ -39,9 +39,9 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
# on machines with 2gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))
NPROC=$((NPROC>14 ? 14 : NPROC))
echo "Executing $testName"
case $testName in

ci/upload-github-release-asset.sh

@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
  exit 1
fi

if [[ -z $CI_REPO_SLUG ]]; then
  echo Error: CI_REPO_SLUG not defined
  exit 1
fi

# Force CI_REPO_SLUG since sometimes
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
# artifact upload to fail
CI_REPO_SLUG=solana-labs/solana
#if [[ -z $CI_REPO_SLUG ]]; then
#  echo Error: CI_REPO_SLUG not defined
#  exit 1
#fi

releaseId=$( \
  curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \

clap-utils/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.15"
version = "1.1.19"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"

clap-utils/src/input_validators.rs

@@ -6,50 +6,86 @@ use solana_sdk::{
    pubkey::Pubkey,
    signature::{read_keypair_file, Signature},
};
use std::fmt::Display;
use std::str::FromStr;

fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
    U: FromStr,
    U::Err: Display,
{
    string
        .as_ref()
        .parse::<U>()
        .map(|_| ())
        .map_err(|err| format!("error parsing '{}': {}", string, err))
}

// Return an error if string cannot be parsed as type T.
// Takes a String to avoid second type parameter when used as a clap validator
pub fn is_parsable<T>(string: String) -> Result<(), String>
where
    T: FromStr,
    T::Err: Display,
{
    is_parsable_generic::<T, String>(string)
}

// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey(string: String) -> Result<(), String> {
    match string.parse::<Pubkey>() {
        Ok(_) => Ok(()),
        Err(err) => Err(format!("{}", err)),
    }
pub fn is_pubkey<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Pubkey, _>(string)
}

// Return an error if a hash cannot be parsed.
pub fn is_hash(string: String) -> Result<(), String> {
    match string.parse::<Hash>() {
        Ok(_) => Ok(()),
        Err(err) => Err(format!("{}", err)),
    }
pub fn is_hash<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Hash, _>(string)
}

// Return an error if a keypair file cannot be parsed.
pub fn is_keypair(string: String) -> Result<(), String> {
    read_keypair_file(&string)
pub fn is_keypair<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    read_keypair_file(string.as_ref())
        .map(|_| ())
        .map_err(|err| format!("{}", err))
}

// Return an error if a keypair file cannot be parsed
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
    if string.as_str() == ASK_KEYWORD {
pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if string.as_ref() == ASK_KEYWORD {
        return Ok(());
    }
    read_keypair_file(&string)
    read_keypair_file(string.as_ref())
        .map(|_| ())
        .map_err(|err| format!("{}", err))
}

// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
    is_pubkey(string.clone()).or_else(|_| is_keypair(string))
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
}

// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
// produce a pubkey()
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
    match parse_keypair_path(&string) {
pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match parse_keypair_path(string.as_ref()) {
        KeypairUrl::Filepath(path) => is_keypair(path),
        _ => Ok(()),
    }
@@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
// also provided and correct happens in parsing, not in validation.
pub fn is_valid_signer(string: String) -> Result<(), String> {
pub fn is_valid_signer<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_valid_pubkey(string)
}

// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
    let mut signer = string.split('=');
pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    let mut signer = string.as_ref().split('=');
    match Pubkey::from_str(
        signer
            .next()
@@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
}

// Return an error if a url cannot be parsed.
pub fn is_url(string: String) -> Result<(), String> {
    match url::Url::parse(&string) {
pub fn is_url<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match url::Url::parse(string.as_ref()) {
        Ok(url) => {
            if url.has_host() {
                Ok(())
@@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
    }
}

pub fn is_slot(slot: String) -> Result<(), String> {
    slot.parse::<Slot>()
        .map(|_| ())
        .map_err(|e| format!("{}", e))
pub fn is_slot<T>(slot: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Slot, _>(slot)
}

pub fn is_port(port: String) -> Result<(), String> {
    port.parse::<u16>()
        .map(|_| ())
        .map_err(|e| format!("{}", e))
pub fn is_port<T>(port: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<u16, _>(port)
}

pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    percentage
        .as_ref()
        .parse::<u8>()
        .map_err(|e| {
            format!(
@@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
        })
}

pub fn is_amount(amount: String) -> Result<(), String> {
    if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
pub fn is_amount<T>(amount: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
        Ok(())
    } else {
        Err(format!(
@@ -147,14 +201,20 @@ pub fn is_amount(amount: String) -> Result<(), String> {
    }
}

pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
    DateTime::parse_from_rfc3339(&value)
pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    DateTime::parse_from_rfc3339(value.as_ref())
        .map(|_| ())
        .map_err(|e| format!("{}", e))
}

pub fn is_derivation(value: String) -> Result<(), String> {
    let value = value.replace("'", "");
pub fn is_derivation<T>(value: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    let value = value.as_ref().replace("'", "");
    let mut parts = value.split('/');
    let account = parts.next().unwrap();
    account
@@ -186,14 +246,14 @@ mod tests {
    #[test]
    fn test_is_derivation() {
        assert_eq!(is_derivation("2".to_string()), Ok(()));
        assert_eq!(is_derivation("0".to_string()), Ok(()));
        assert_eq!(is_derivation("65537".to_string()), Ok(()));
        assert_eq!(is_derivation("0/2".to_string()), Ok(()));
        assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
        assert!(is_derivation("a".to_string()).is_err());
        assert!(is_derivation("4294967296".to_string()).is_err());
        assert!(is_derivation("a/b".to_string()).is_err());
        assert!(is_derivation("0/4294967296".to_string()).is_err());
        assert_eq!(is_derivation("2"), Ok(()));
        assert_eq!(is_derivation("0"), Ok(()));
        assert_eq!(is_derivation("65537"), Ok(()));
        assert_eq!(is_derivation("0/2"), Ok(()));
        assert_eq!(is_derivation("0'/2'"), Ok(()));
        assert!(is_derivation("a").is_err());
        assert!(is_derivation("4294967296").is_err());
        assert!(is_derivation("a/b").is_err());
        assert!(is_derivation("0/4294967296").is_err());
    }
}
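
For context, a hedged sketch of how these generic validators are wired into clap 2.x (program and argument names hypothetical). The AsRef<str> + Display bound is what lets one function serve both clap's Fn(String) validator signature and the plain &str arguments in the tests above:

use clap::{App, Arg};
use solana_clap_utils::input_validators::{is_parsable, is_url};

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("port")
                .long("port")
                .takes_value(true)
                // Rejects anything u16::from_str cannot parse
                .validator(is_parsable::<u16>),
        )
        .arg(
            Arg::with_name("url")
                .long("url")
                .takes_value(true)
                // T is inferred as String from clap's validator signature
                .validator(is_url),
        )
        .get_matches();
    let _ = matches;
}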

cli-config/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

cli-config/src/config.rs

@@ -1,6 +1,6 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::io;
use std::{collections::HashMap, io};
use url::Url;

lazy_static! {
@@ -17,6 +17,8 @@ pub struct Config {
    pub json_rpc_url: String,
    pub websocket_url: String,
    pub keypair_path: String,
    #[serde(default)]
    pub address_labels: HashMap<String, String>,
}

impl Default for Config {
@@ -36,6 +38,7 @@ impl Default for Config {
            json_rpc_url,
            websocket_url,
            keypair_path,
            address_labels: HashMap::new(),
        }
    }
}
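
The #[serde(default)] attribute is what makes this stub backward compatible: a config file written before the field existed still deserializes, with the map simply defaulting to empty. A minimal sketch (struct trimmed to two fields, serde_yaml assumed as the serialization crate):

use serde_derive::Deserialize;
use std::collections::HashMap;

#[derive(Deserialize)]
struct Config {
    json_rpc_url: String,
    #[serde(default)] // absent key deserializes to HashMap::default(), i.e. empty
    address_labels: HashMap<String, String>,
}

fn main() {
    // An old config file with no address_labels key still parses cleanly
    let old_file = "json_rpc_url: https://api.mainnet-beta.solana.com\n";
    let config: Config = serde_yaml::from_str(old_file).unwrap();
    assert!(config.address_labels.is_empty());
}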

cli/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-cli-config = { path = "../cli-config", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-config-program = { path = "../programs/config", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.19" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-cli-config = { path = "../cli-config", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-config-program = { path = "../programs/config", version = "1.1.19" }
solana-faucet = { path = "../faucet", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.19" }
solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.19" }
thiserror = "1.0.13"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.19" }
solana-budget-program = { path = "../programs/budget", version = "1.1.19" }
tempfile = "3.1.0"
[[bin]]

cli/src/cli.rs

@@ -1,7 +1,7 @@
use crate::{
    cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
    cluster_query::*,
    display::println_name_value,
    display::{new_spinner_progress_bar, println_name_value, println_transaction},
    nonce::{self, *},
    offline::{blockhash_query::BlockhashQuery, *},
    stake::*,
@@ -26,7 +26,7 @@ use solana_clap_utils::{
use solana_client::{
    client_error::{ClientErrorKind, Result as ClientResult},
    rpc_client::RpcClient,
    rpc_config::RpcLargestAccountsFilter,
    rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
    rpc_response::{RpcAccount, RpcKeyedAccount},
};
#[cfg(not(test))]
@@ -36,7 +36,7 @@ use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
    bpf_loader,
    clock::{Epoch, Slot},
    clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
    commitment_config::CommitmentConfig,
    fee_calculator::FeeCalculator,
    hash::Hash,
@@ -47,6 +47,7 @@ use solana_sdk::{
    program_utils::DecodeError,
    pubkey::{Pubkey, MAX_SEED_LEN},
    signature::{Keypair, Signature, Signer, SignerError},
    signers::Signers,
    system_instruction::{self, SystemError},
    system_program,
    transaction::{Transaction, TransactionError},
@@ -1231,7 +1232,7 @@ fn process_confirm(
        "\nTransaction executed in slot {}:",
        confirmed_transaction.slot
    );
    crate::display::println_transaction(
    println_transaction(
        &confirmed_transaction
            .transaction
            .transaction
@@ -1261,7 +1262,7 @@ fn process_confirm(
}

fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
    crate::display::println_transaction(transaction, &None, "");
    println_transaction(transaction, &None, "");
    Ok("".to_string())
}
@@ -1299,6 +1300,103 @@ fn process_show_account(
    Ok(account_string)
}

fn send_and_confirm_transactions_with_spinner<T: Signers>(
    rpc_client: &RpcClient,
    mut transactions: Vec<Transaction>,
    signer_keys: &T,
) -> Result<(), Box<dyn error::Error>> {
    let progress_bar = new_spinner_progress_bar();
    let mut send_retries = 5;
    loop {
        let mut status_retries = 15;

        // Send all transactions
        let mut transactions_signatures = vec![];
        let num_transactions = transactions.len();
        for transaction in transactions {
            if cfg!(not(test)) {
                // Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
                // when all the write transactions modify the same program account (eg, deploying a
                // new program)
                sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
            }

            let signature = rpc_client
                .send_transaction_with_config(
                    &transaction,
                    RpcSendTransactionConfig {
                        skip_preflight: true,
                    },
                )
                .ok();
            transactions_signatures.push((transaction, signature));

            progress_bar.set_message(&format!(
                "[{}/{}] Transactions sent",
                transactions_signatures.len(),
                num_transactions
            ));
        }

        // Collect statuses for all the transactions, drop those that are confirmed
        while status_retries > 0 {
            status_retries -= 1;

            progress_bar.set_message(&format!(
                "[{}/{}] Transactions confirmed",
                num_transactions - transactions_signatures.len(),
                num_transactions
            ));

            if cfg!(not(test)) {
                // Retry twice a second
                sleep(Duration::from_millis(500));
            }

            transactions_signatures = transactions_signatures
                .into_iter()
                .filter(|(_transaction, signature)| {
                    if let Some(signature) = signature {
                        if let Ok(status) = rpc_client.get_signature_status(&signature) {
                            if rpc_client
                                .get_num_blocks_since_signature_confirmation(&signature)
                                .unwrap_or(0)
                                > 1
                            {
                                return false;
                            } else {
                                return match status {
                                    None => true,
                                    Some(result) => result.is_err(),
                                };
                            }
                        }
                    }
                    true
                })
                .collect();

            if transactions_signatures.is_empty() {
                return Ok(());
            }
        }

        if send_retries == 0 {
            return Err("Transactions failed".into());
        }
        send_retries -= 1;

        // Re-sign any failed transactions with a new blockhash and retry
        let (blockhash, _fee_calculator) = rpc_client
            .get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
        transactions = vec![];
        for (mut transaction, _) in transactions_signatures.into_iter() {
            transaction.try_sign(signer_keys, blockhash)?;
            transactions.push(transaction);
        }
    }
}

fn process_deploy(
    rpc_client: &RpcClient,
    config: &CliConfig,
@@ -1366,11 +1464,18 @@ fn process_deploy(
    })?;
    trace!("Writing program data");
    rpc_client.send_and_confirm_transactions(write_transactions, &signers)?;
    send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
        |_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
    )?;

    trace!("Finalizing program account");
    rpc_client
        .send_and_confirm_transaction_with_spinner(&finalize_tx)
        .send_and_confirm_transaction_with_spinner_and_config(
            &finalize_tx,
            RpcSendTransactionConfig {
                skip_preflight: true,
            },
        )
        .map_err(|e| {
            CliError::DynamicProgramError(format!("Program finalize transaction failed: {}", e))
        })?;
@@ -1584,11 +1689,6 @@ fn process_transfer(
) -> ProcessResult {
    let from = config.signers[from];

    check_unique_pubkeys(
        (&from.pubkey(), "cli keypair".to_string()),
        (to, "to".to_string()),
    )?;
    let (recent_blockhash, fee_calculator) =
        blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;

    let ixs = vec![system_instruction::transfer(&from.pubkey(), to, lamports)];

cli/src/cluster_query.rs

@@ -1,11 +1,10 @@
use crate::{
    cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
    cli_output::*,
    display::println_name_value,
    display::{new_spinner_progress_bar, println_name_value},
};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{
    commitment::{commitment_arg, COMMITMENT_ARG},
    input_parsers::*,
@@ -469,15 +468,6 @@ pub fn parse_transaction_history(
    })
}

/// Creates a new process bar for processing that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
    let progress_bar = ProgressBar::new(42);
    progress_bar
        .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
    progress_bar.enable_steady_tick(100);
    progress_bar
}

pub fn process_catchup(
    rpc_client: &RpcClient,
    node_pubkey: &Pubkey,

View File

@@ -1,5 +1,6 @@
use crate::cli::SettingType;
use console::style;
use indicatif::{ProgressBar, ProgressStyle};
use solana_sdk::{
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
transaction::Transaction,
@@ -200,3 +201,12 @@ pub fn println_transaction(
}
}
}
/// Creates a new progress bar for processing that will take an unknown amount of time
pub fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
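A small usage sketch of the relocated helper (the message text is illustrative): enable_steady_tick(100) keeps the spinner redrawing every 100ms even between set_message calls.

    let progress_bar = new_spinner_progress_bar();
    progress_bar.set_message("Waiting for transaction confirmation...");
    // ... long-running work ...
    progress_bar.finish_and_clear();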

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.1.15"
version = "1.1.19"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
- solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
- solana-net-utils = { path = "../net-utils", version = "1.1.15" }
- solana-sdk = { path = "../sdk", version = "1.1.15" }
- solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
+ solana-transaction-status = { path = "../transaction-status", version = "1.1.19" }
+ solana-net-utils = { path = "../net-utils", version = "1.1.19" }
+ solana-sdk = { path = "../sdk", version = "1.1.19" }
+ solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
- solana-logger = { path = "../logger", version = "1.1.15" }
+ solana-logger = { path = "../logger", version = "1.1.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -3,7 +3,7 @@ use crate::{
generic_rpc_client_request::GenericRpcClientRequest,
mock_rpc_client_request::{MockRpcClientRequest, Mocks},
rpc_client_request::RpcClientRequest,
- rpc_config::RpcLargestAccountsConfig,
+ rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
rpc_request::{RpcError, RpcRequest},
rpc_response::*,
};
@@ -21,7 +21,6 @@ use solana_sdk::{
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
- inflation::Inflation,
pubkey::Pubkey,
signature::Signature,
signers::Signers,
@@ -32,7 +31,6 @@ use solana_transaction_status::{
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
- error,
net::SocketAddr,
thread::sleep,
time::{Duration, Instant},
@@ -96,10 +94,20 @@ impl RpcClient {
}
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
}
pub fn send_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
- let signature_base58_str: String =
- self.send(RpcRequest::SendTransaction, json!([serialized_encoded]))?;
+ let signature_base58_str: String = self.send(
+ RpcRequest::SendTransaction,
+ json!([serialized_encoded, config]),
+ )?;
let signature = signature_base58_str
.parse::<Signature>()
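With the config forwarded, the request now carries two positional params instead of one. A sketch of what the json! call above produces (the base58 string is illustrative):

    let params = json!([serialized_encoded, config]);
    // e.g. ["5KtP...base58-serialized transaction...", {"skipPreflight": true}]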
@@ -123,7 +131,7 @@ impl RpcClient {
&self,
transaction: &Transaction,
sig_verify: bool,
- ) -> RpcResult<TransactionStatus> {
+ ) -> RpcResult<RpcSimulateTransactionResult> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
self.send(
RpcRequest::SimulateTransaction,
@@ -347,8 +355,12 @@ impl RpcClient {
})
}
- pub fn get_inflation(&self) -> ClientResult<Inflation> {
- self.send(RpcRequest::GetInflation, Value::Null)
+ pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
+ self.send(RpcRequest::GetInflationGovernor, Value::Null)
}
pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
self.send(RpcRequest::GetInflationRate, Value::Null)
}
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
@@ -404,74 +416,6 @@ impl RpcClient {
}
}
- pub fn send_and_confirm_transactions<T: Signers>(
- &self,
- mut transactions: Vec<Transaction>,
- signer_keys: &T,
- ) -> Result<(), Box<dyn error::Error>> {
- let mut send_retries = 5;
- loop {
- let mut status_retries = 15;
- // Send all transactions
- let mut transactions_signatures = vec![];
- for transaction in transactions {
- if cfg!(not(test)) {
- // Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
- // when all the write transactions modify the same program account (eg, deploying a
- // new program)
- sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
- }
- let signature = self.send_transaction(&transaction).ok();
- transactions_signatures.push((transaction, signature))
- }
- // Collect statuses for all the transactions, drop those that are confirmed
- while status_retries > 0 {
- status_retries -= 1;
- if cfg!(not(test)) {
- // Retry twice a second
- sleep(Duration::from_millis(500));
- }
- transactions_signatures = transactions_signatures
- .into_iter()
- .filter(|(_transaction, signature)| {
- if let Some(signature) = signature {
- if let Ok(status) = self.get_signature_status(&signature) {
- if status.is_none() {
- return false;
- }
- return status.unwrap().is_err();
- }
- }
- true
- })
- .collect();
- if transactions_signatures.is_empty() {
- return Ok(());
- }
- }
- if send_retries == 0 {
- return Err(RpcError::ForUser("Transactions failed".to_string()).into());
- }
- send_retries -= 1;
- // Re-sign any failed transactions with a new blockhash and retry
- let (blockhash, _fee_calculator) =
- self.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
- transactions = vec![];
- for (mut transaction, _) in transactions_signatures.into_iter() {
- transaction.try_sign(signer_keys, blockhash)?;
- transactions.push(transaction);
- }
- }
- }
pub fn resign_transaction<T: Signers>(
&self,
tx: &mut Transaction,
@@ -483,11 +427,7 @@ impl RpcClient {
Ok(())
}
- pub fn retry_get_balance(
- &self,
- pubkey: &Pubkey,
- _retries: usize,
- ) -> Result<Option<u64>, Box<dyn error::Error>> {
+ pub fn retry_get_balance(&self, pubkey: &Pubkey, _retries: usize) -> ClientResult<Option<u64>> {
let request = RpcRequest::GetBalance;
let balance_json = self
.client
@@ -611,26 +551,46 @@ impl RpcClient {
}
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
- Ok(self
+ let (blockhash, fee_calculator, _last_valid_slot) = self
.get_recent_blockhash_with_commitment(CommitmentConfig::default())?
- .value)
+ .value;
+ Ok((blockhash, fee_calculator))
}
pub fn get_recent_blockhash_with_commitment(
&self,
commitment_config: CommitmentConfig,
- ) -> RpcResult<(Hash, FeeCalculator)> {
- let Response {
+ ) -> RpcResult<(Hash, FeeCalculator, Slot)> {
+ let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
context,
value:
+ RpcFees {
+ blockhash,
+ fee_calculator,
+ last_valid_slot,
+ },
+ }) =
+ self.send::<Response<RpcFees>>(RpcRequest::GetFees, json!([commitment_config]))
+ {
+ (context, blockhash, fee_calculator, last_valid_slot)
+ } else if let Ok(Response {
+ context,
+ value:
RpcBlockhashFeeCalculator {
blockhash,
fee_calculator,
},
- } = self.send::<Response<RpcBlockhashFeeCalculator>>(
+ }) = self.send::<Response<RpcBlockhashFeeCalculator>>(
RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
- )?;
+ ) {
+ (context, blockhash, fee_calculator, 0)
+ } else {
+ return Err(ClientError::new_with_request(
+ RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
+ RpcRequest::GetRecentBlockhash,
+ ));
+ };
let blockhash = blockhash.parse().map_err(|_| {
ClientError::new_with_request(
@@ -640,7 +600,7 @@ impl RpcClient {
})?;
Ok(Response {
context,
- value: (blockhash, fee_calculator),
+ value: (blockhash, fee_calculator, last_valid_slot),
})
}
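A sketch of consuming the widened return value (variable names illustrative); existing callers that only need the pair can stay on get_recent_blockhash():

    let Response { value, .. } =
        rpc_client.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
    let (blockhash, fee_calculator, last_valid_slot) = value;
    // A transaction signed with `blockhash` can be abandoned once the cluster
    // passes `last_valid_slot` (reported as 0 when the node only answered the
    // older getRecentBlockhash request).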
@@ -648,12 +608,28 @@ impl RpcClient {
&self,
blockhash: &Hash,
) -> ClientResult<Option<FeeCalculator>> {
- let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
+ Ok(self
+ .get_fee_calculator_for_blockhash_with_commitment(
+ blockhash,
+ CommitmentConfig::default(),
+ )?
+ .value)
+ }
+ pub fn get_fee_calculator_for_blockhash_with_commitment(
+ &self,
+ blockhash: &Hash,
+ commitment_config: CommitmentConfig,
+ ) -> RpcResult<Option<FeeCalculator>> {
+ let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
RpcRequest::GetFeeCalculatorForBlockhash,
- json!([blockhash.to_string()]),
+ json!([blockhash.to_string(), commitment_config]),
)?;
- Ok(value.map(|rf| rf.fee_calculator))
+ Ok(Response {
+ context,
+ value: value.map(|rf| rf.fee_calculator),
+ })
}
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
@@ -910,6 +886,17 @@ impl RpcClient {
pub fn send_and_confirm_transaction_with_spinner(
&self,
transaction: &Transaction,
) -> ClientResult<Signature> {
self.send_and_confirm_transaction_with_spinner_and_config(
transaction,
RpcSendTransactionConfig::default(),
)
}
pub fn send_and_confirm_transaction_with_spinner_and_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let mut confirmations = 0;
@@ -925,7 +912,7 @@ impl RpcClient {
));
let mut status_retries = 15;
let (signature, status) = loop {
- let signature = self.send_transaction(transaction)?;
+ let signature = self.send_transaction_with_config(transaction, config.clone())?;
// Get recent commitment in order to count confirmations for successful transactions
let status = self

View File

@@ -1,4 +1,4 @@
- use solana_sdk::commitment_config::CommitmentConfig;
+ use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -9,7 +9,13 @@ pub struct RpcSignatureStatusConfig {
pub commitment: Option<CommitmentConfig>,
}
- #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+ #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSendTransactionConfig {
pub skip_preflight: bool,
}
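Because of the rename_all attribute, the new config crosses the wire in camelCase; a minimal sketch of the serialized form:

    let config = RpcSendTransactionConfig { skip_preflight: true };
    assert_eq!(
        serde_json::to_string(&config).unwrap(),
        r#"{"skipPreflight":true}"#
    );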
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
@@ -29,3 +35,11 @@ pub struct RpcLargestAccountsConfig {
pub commitment: Option<CommitmentConfig>,
pub filter: Option<RpcLargestAccountsFilter>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationConfig {
pub epoch: Option<Epoch>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}

View File

@@ -16,15 +16,18 @@ pub enum RpcRequest {
GetConfirmedTransaction,
GetEpochInfo,
GetEpochSchedule,
- GetGenesisHash,
- GetIdentity,
- GetInflation,
- GetLargestAccounts,
- GetLeaderSchedule,
- GetProgramAccounts,
- GetRecentBlockhash,
GetFeeCalculatorForBlockhash,
GetFeeRateGovernor,
+ GetFees,
+ GetGenesisHash,
+ GetIdentity,
+ GetInflationGovernor,
+ GetInflationRate,
+ GetLargestAccounts,
+ GetLeaderSchedule,
+ GetMinimumBalanceForRentExemption,
+ GetProgramAccounts,
+ GetRecentBlockhash,
GetSignatureStatuses,
GetSlot,
GetSlotLeader,
@@ -37,13 +40,12 @@ pub enum RpcRequest {
GetTransactionCount,
GetVersion,
GetVoteAccounts,
+ MinimumLedgerSlot,
RegisterNode,
RequestAirdrop,
SendTransaction,
SimulateTransaction,
SignVote,
- GetMinimumBalanceForRentExemption,
- MinimumLedgerSlot,
}
impl fmt::Display for RpcRequest {
@@ -61,15 +63,18 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
RpcRequest::GetFees => "getFees",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflationGovernor => "getInflationGovernor",
RpcRequest::GetInflationRate => "getInflationRate",
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
@@ -82,13 +87,12 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::GetVersion => "getVersion",
RpcRequest::GetVoteAccounts => "getVoteAccounts",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
RpcRequest::RegisterNode => "registerNode",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SimulateTransaction => "simulateTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
write!(f, "{}", method)
@@ -144,10 +148,6 @@ mod tests {
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getEpochInfo");
- let test_request = RpcRequest::GetInflation;
- let request = test_request.build_request_json(1, Value::Null);
- assert_eq!(request["method"], "getInflation");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getRecentBlockhash");

View File

@@ -3,6 +3,7 @@ use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
fee_calculator::{FeeCalculator, FeeRateGovernor},
inflation::Inflation,
pubkey::Pubkey,
transaction::{Result, TransactionError},
};
@@ -35,6 +36,14 @@ pub struct RpcBlockhashFeeCalculator {
pub fee_calculator: FeeCalculator,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFees {
pub blockhash: String,
pub fee_calculator: FeeCalculator,
pub last_valid_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFeeCalculator {
@@ -47,6 +56,37 @@ pub struct RpcFeeRateGovernor {
pub fee_rate_governor: FeeRateGovernor,
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationGovernor {
pub initial: f64,
pub terminal: f64,
pub taper: f64,
pub foundation: f64,
pub foundation_term: f64,
}
impl From<Inflation> for RpcInflationGovernor {
fn from(inflation: Inflation) -> Self {
Self {
initial: inflation.initial,
terminal: inflation.terminal,
taper: inflation.taper,
foundation: inflation.foundation,
foundation_term: inflation.foundation_term,
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationRate {
pub total: f64,
pub validator: f64,
pub foundation: f64,
pub epoch: Epoch,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcKeyedAccount {
@@ -188,6 +228,13 @@ pub struct RpcSignatureConfirmation {
pub status: Result<()>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionResult {
pub err: Option<TransactionError>,
pub logs: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcStorageTurn {

View File

@@ -440,7 +440,7 @@ impl SyncClient for ThinClient {
match recent_blockhash {
Ok(Response { value, .. }) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
- Ok(value)
+ Ok((value.0, value.1))
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -21,6 +21,7 @@ byteorder = "1.3.4"
chrono = { version = "0.4.11", features = ["serde"] }
core_affinity = "0.5.10"
crossbeam-channel = "0.4"
ed25519-dalek = "=1.0.0-pre.3"
fs_extra = "1.1.0"
flate2 = "1.0"
indexmap = "1.3"
@@ -41,37 +42,37 @@ regex = "1.3.6"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
- solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.15" }
- solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
- solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
- solana-client = { path = "../client", version = "1.1.15" }
- solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
- solana-faucet = { path = "../faucet", version = "1.1.15" }
- ed25519-dalek = "=1.0.0-pre.3"
- solana-ledger = { path = "../ledger", version = "1.1.15" }
- solana-logger = { path = "../logger", version = "1.1.15" }
- solana-merkle-tree = { path = "../merkle-tree", version = "1.1.15" }
- solana-metrics = { path = "../metrics", version = "1.1.15" }
- solana-measure = { path = "../measure", version = "1.1.15" }
- solana-net-utils = { path = "../net-utils", version = "1.1.15" }
- solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.15" }
- solana-perf = { path = "../perf", version = "1.1.15" }
- solana-runtime = { path = "../runtime", version = "1.1.15" }
- solana-sdk = { path = "../sdk", version = "1.1.15" }
- solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
- solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
- solana-streamer = { path = "../streamer", version = "1.1.15" }
- solana-version = { path = "../version", version = "1.1.15" }
- solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
- solana-vote-signer = { path = "../vote-signer", version = "1.1.15" }
- solana-sys-tuner = { path = "../sys-tuner", version = "1.1.15" }
+ solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.19" }
+ solana-budget-program = { path = "../programs/budget", version = "1.1.19" }
+ solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
+ solana-client = { path = "../client", version = "1.1.19" }
+ solana-transaction-status = { path = "../transaction-status", version = "1.1.19" }
+ solana-faucet = { path = "../faucet", version = "1.1.19" }
+ solana-genesis-programs = { path = "../genesis-programs", version = "1.1.19" }
+ solana-ledger = { path = "../ledger", version = "1.1.19" }
+ solana-logger = { path = "../logger", version = "1.1.19" }
+ solana-merkle-tree = { path = "../merkle-tree", version = "1.1.19" }
+ solana-metrics = { path = "../metrics", version = "1.1.19" }
+ solana-measure = { path = "../measure", version = "1.1.19" }
+ solana-net-utils = { path = "../net-utils", version = "1.1.19" }
+ solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.19" }
+ solana-perf = { path = "../perf", version = "1.1.19" }
+ solana-runtime = { path = "../runtime", version = "1.1.19" }
+ solana-sdk = { path = "../sdk", version = "1.1.19" }
+ solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
+ solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
+ solana-streamer = { path = "../streamer", version = "1.1.19" }
+ solana-version = { path = "../version", version = "1.1.19" }
+ solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
+ solana-vote-signer = { path = "../vote-signer", version = "1.1.19" }
+ solana-sys-tuner = { path = "../sys-tuner", version = "1.1.19" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
- solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.15" }
+ solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.19" }
trees = "0.2.1"
[dev-dependencies]

View File

@@ -3,6 +3,7 @@
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
@@ -48,7 +49,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
&peers_and_stakes,
&peers,
&last_datapoint,
- &mut 0,
+ &mut TransmitShredsStats::default(),
)
.unwrap();
});

View File

@@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;
/// Transaction forwarding
- pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
+ pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;
// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 4;

View File

@@ -35,7 +35,7 @@ use std::{
};
mod broadcast_fake_shreds_run;
- pub(crate) mod broadcast_metrics;
+ pub mod broadcast_metrics;
pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
@@ -374,13 +374,14 @@ pub fn broadcast_shreds(
peers_and_stakes: &[(u64, usize)],
peers: &[ContactInfo],
last_datapoint_submit: &Arc<AtomicU64>,
- send_mmsg_total: &mut u64,
+ transmit_stats: &mut TransmitShredsStats,
) -> Result<()> {
let broadcast_len = peers_and_stakes.len();
if broadcast_len == 0 {
update_peer_stats(1, 1, last_datapoint_submit);
return Ok(());
}
let mut shred_select = Measure::start("shred_select");
let packets: Vec<_> = shreds
.iter()
.map(|shred| {
@@ -389,6 +390,8 @@ pub fn broadcast_shreds(
(&shred.payload, &peers[broadcast_index].tvu)
})
.collect();
shred_select.stop();
transmit_stats.shred_select += shred_select.as_us();
let mut sent = 0;
let mut send_mmsg_time = Measure::start("send_mmsg");
@@ -401,7 +404,7 @@ pub fn broadcast_shreds(
}
}
send_mmsg_time.stop();
- *send_mmsg_total += send_mmsg_time.as_us();
+ transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
let num_live_peers = num_live_peers(&peers);
update_peer_stats(

View File

@@ -29,11 +29,12 @@ impl ProcessShredsStats {
}
#[derive(Default, Clone)]
- pub(crate) struct TransmitShredsStats {
- pub(crate) transmit_elapsed: u64,
- pub(crate) send_mmsg_elapsed: u64,
- pub(crate) get_peers_elapsed: u64,
- pub(crate) num_shreds: usize,
+ pub struct TransmitShredsStats {
+ pub transmit_elapsed: u64,
+ pub send_mmsg_elapsed: u64,
+ pub get_peers_elapsed: u64,
+ pub shred_select: u64,
+ pub num_shreds: usize,
}
impl BroadcastStats for TransmitShredsStats {
@@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
self.get_peers_elapsed += new_stats.get_peers_elapsed;
self.num_shreds += new_stats.num_shreds;
self.shred_select += new_stats.shred_select;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
@@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
("num_shreds", self.num_shreds as i64, i64),
("shred_select", self.shred_select as i64, i64),
);
}
}
@@ -176,15 +179,16 @@ mod test {
}
#[test]
- fn test_update() {
+ fn test_update_broadcast() {
let start = Instant::now();
let mut slot_broadcast_stats = SlotBroadcastStats::default();
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
- get_peers_elapsed: 1,
- send_mmsg_elapsed: 1,
- num_shreds: 1,
+ get_peers_elapsed: 2,
+ send_mmsg_elapsed: 3,
+ shred_select: 4,
+ num_shreds: 5,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
@@ -198,16 +202,18 @@ mod test {
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
slot_broadcast_stats.update(
&TransmitShredsStats {
- transmit_elapsed: 1,
- get_peers_elapsed: 1,
- send_mmsg_elapsed: 1,
- num_shreds: 1,
+ transmit_elapsed: 7,
+ get_peers_elapsed: 8,
+ send_mmsg_elapsed: 9,
+ shred_select: 10,
+ num_shreds: 11,
},
&None,
);
@@ -217,9 +223,10 @@ mod test {
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
- assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+ assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
// If another batch is given, then total number of batches == num_expected_batches == 2,
// so the batch should be purged from the HashMap
@@ -228,6 +235,7 @@ mod test {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
shred_select: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {

View File

@@ -81,14 +81,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
// Broadcast data
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
- let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&Arc::new(AtomicU64::new(0)),
- &mut send_mmsg_total,
+ &mut TransmitShredsStats::default(),
)?;
)?;
Ok(())

View File

@@ -9,6 +9,7 @@ use solana_ledger::{
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::Duration;
#[derive(Clone)]
@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
shred_version: u16,
last_datapoint_submit: Arc<AtomicU64>,
num_batches: usize,
broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
last_peer_update: Arc<AtomicU64>,
}
#[derive(Default)]
struct BroadcastPeerCache {
peers: Vec<ContactInfo>,
peers_and_stakes: Vec<(u64, usize)>,
}
impl StandardBroadcastRun {
@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
shred_version,
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
num_batches: 0,
broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
last_peer_update: Arc::new(AtomicU64::new(0)),
}
}
@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
+ const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
trace!("Broadcasting {:?} shreds", shreds.len());
// Get the list of peers to broadcast to
- let get_peers_start = Instant::now();
- let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
- let get_peers_elapsed = get_peers_start.elapsed();
+ let mut get_peers_time = Measure::start("broadcast::get_peers");
+ let now = timestamp();
+ let last = self.last_peer_update.load(Ordering::Relaxed);
+ if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
+ && self
+ .last_peer_update
+ .compare_and_swap(now, last, Ordering::Relaxed)
+ == last
+ {
+ let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
+ let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
+ w_broadcast_peer_cache.peers = peers;
+ w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
+ }
+ get_peers_time.stop();
+ let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();
+ let mut transmit_stats = TransmitShredsStats::default();
// Broadcast the shreds
- let transmit_start = Instant::now();
- let mut send_mmsg_total = 0;
+ let mut transmit_time = Measure::start("broadcast_shreds");
broadcast_shreds(
sock,
&shreds,
- &peers_and_stakes,
- &peers,
+ &r_broadcast_peer_cache.peers_and_stakes,
+ &r_broadcast_peer_cache.peers,
&self.last_datapoint_submit,
- &mut send_mmsg_total,
+ &mut transmit_stats,
)?;
- let transmit_elapsed = transmit_start.elapsed();
- let new_transmit_shreds_stats = TransmitShredsStats {
- transmit_elapsed: duration_as_us(&transmit_elapsed),
- get_peers_elapsed: duration_as_us(&get_peers_elapsed),
- send_mmsg_elapsed: send_mmsg_total,
- num_shreds: shreds.len(),
- };
+ drop(r_broadcast_peer_cache);
+ transmit_time.stop();
+ transmit_stats.transmit_elapsed = transmit_time.as_us();
+ transmit_stats.get_peers_elapsed = get_peers_time.as_us();
+ transmit_stats.num_shreds = shreds.len();
// Process metrics
- self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
+ self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
Ok(())
}
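The peer-cache refresh above is a lock-free throttle: at most one sender per interval rebuilds the peer list while every other thread reads the cached copy. Note that compare_and_swap(current, new, ...) compares against its first argument and stores the second, so the (now, last) argument order shown in the hunk almost never performs the store; a sketch of the apparently intended pattern (the helper name is hypothetical, and the since-deprecated compare_and_swap is replaced with compare_exchange):

    use std::sync::atomic::{AtomicU64, Ordering};

    fn should_refresh(last_update: &AtomicU64, now: u64, interval_ms: u64) -> bool {
        let last = last_update.load(Ordering::Relaxed);
        // Only the thread that successfully swaps `last` for `now` refreshes.
        now.saturating_sub(last) > interval_ms
            && last_update
                .compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
    }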

View File

@@ -16,7 +16,7 @@ use crate::{
contact_info::ContactInfo,
crds_gossip::CrdsGossip,
crds_gossip_error::CrdsGossipError,
- crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
+ crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, SnapshotHash,
Version, Vote, MAX_WALLCLOCK,
@@ -214,11 +214,17 @@ struct GossipStats {
repair_peers: Counter,
new_push_requests: Counter,
new_push_requests2: Counter,
new_push_requests_num: Counter,
filter_pull_response: Counter,
process_pull_response: Counter,
process_pull_response_count: Counter,
process_pull_response_len: Counter,
process_pull_response_timeout: Counter,
process_pull_response_fail_insert: Counter,
process_pull_response_fail_timeout: Counter,
process_pull_response_success: Counter,
process_pull_requests: Counter,
generate_pull_responses: Counter,
process_prune: Counter,
process_push_message: Counter,
prune_received_cache: Counter,
@@ -227,7 +233,14 @@ struct GossipStats {
epoch_slots_push: Counter,
push_message: Counter,
new_pull_requests: Counter,
new_pull_requests_count: Counter,
mark_pull_request: Counter,
skip_pull_response_shred_version: Counter,
skip_pull_shred_version: Counter,
skip_push_message_shred_version: Counter,
push_message_count: Counter,
push_message_value_count: Counter,
push_response_count: Counter,
}
pub struct ClusterInfo {
@@ -241,6 +254,13 @@ pub struct ClusterInfo {
my_contact_info: RwLock<ContactInfo>,
id: Pubkey,
stats: GossipStats,
socket: UdpSocket,
}
impl Default for ClusterInfo {
fn default() -> Self {
Self::new_with_invalid_keypair(ContactInfo::default())
}
}
#[derive(Default, Clone)]
@@ -390,6 +410,7 @@ impl ClusterInfo {
my_contact_info: RwLock::new(contact_info),
id,
stats: GossipStats::default(),
socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
};
{
let mut gossip = me.gossip.write().unwrap();
@@ -415,6 +436,7 @@ impl ClusterInfo {
my_contact_info: RwLock::new(my_contact_info),
id: *new_id,
stats: GossipStats::default(),
socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
}
}
@@ -728,6 +750,13 @@ impl ClusterInfo {
.process_push_message(&self.id(), vec![entry], now);
}
pub fn send_vote(&self, vote: &Transaction) -> Result<()> {
let tpu = self.my_contact_info().tpu;
let buf = serialize(vote)?;
self.socket.send_to(&buf, &tpu)?;
Ok(())
}
/// Get votes in the crds
/// * since - The timestamp of when the vote inserted must be greater than
/// since. This allows the bank to query for new votes only.
@@ -1391,6 +1420,9 @@ impl ClusterInfo {
.collect()
};
self.append_entrypoint_to_pulls(&mut pulls);
self.stats
.new_pull_requests_count
.add_relaxed(pulls.len() as u64);
pulls
.into_iter()
.map(|(peer, filter, gossip, self_info)| {
@@ -1405,7 +1437,7 @@ impl ClusterInfo {
let (_, push_messages) = self
.time_gossip_write_lock("new_push_requests", &self.stats.new_push_requests)
.new_push_messages(timestamp());
push_messages
let messages: Vec<_> = push_messages
.into_iter()
.filter_map(|(peer, messages)| {
let peer_label = CrdsValueLabel::ContactInfo(peer);
@@ -1420,11 +1452,24 @@ impl ClusterInfo {
.into_iter()
.map(move |payload| (peer, Protocol::PushMessage(self_id, payload)))
})
.collect()
.collect();
self.stats
.new_push_requests_num
.add_relaxed(messages.len() as u64);
messages
}
fn gossip_request(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<(SocketAddr, Protocol)> {
let pulls: Vec<_> = self.new_pull_requests(stakes);
// Generate new push and pull requests
fn generate_new_gossip_requests(
&self,
stakes: &HashMap<Pubkey, u64>,
generate_pull_requests: bool,
) -> Vec<(SocketAddr, Protocol)> {
let pulls: Vec<_> = if generate_pull_requests {
self.new_pull_requests(stakes)
} else {
vec![]
};
let pushes: Vec<_> = self.new_push_requests();
vec![pulls, pushes].into_iter().flatten().collect()
}
@@ -1435,8 +1480,9 @@ impl ClusterInfo {
recycler: &PacketsRecycler,
stakes: &HashMap<Pubkey, u64>,
sender: &PacketSender,
generate_pull_requests: bool,
) -> Result<()> {
- let reqs = obj.gossip_request(&stakes);
+ let reqs = obj.generate_new_gossip_requests(&stakes, generate_pull_requests);
if !reqs.is_empty() {
let packets = to_packets_with_destination(recycler.clone(), &reqs);
sender.send(packets)?;
@@ -1462,6 +1508,7 @@ impl ClusterInfo {
let message = CrdsData::Version(Version::new(obj.id()));
obj.push_message(CrdsValue::new_signed(message, &obj.keypair));
let mut generate_pull_requests = true;
loop {
let start = timestamp();
thread_mem_usage::datapoint("solana-gossip");
@@ -1477,7 +1524,9 @@ impl ClusterInfo {
}
None => HashMap::new(),
};
- let _ = Self::run_gossip(&obj, &recycler, &stakes, &sender);
+ let _ =
+ Self::run_gossip(&obj, &recycler, &stakes, &sender, generate_pull_requests);
if exit.load(Ordering::Relaxed) {
return;
}
@@ -1542,6 +1591,7 @@ impl ClusterInfo {
let time_left = GOSSIP_SLEEP_MILLIS - elapsed;
sleep(Duration::from_millis(time_left));
}
generate_pull_requests = !generate_pull_requests;
}
})
.unwrap()
@@ -1560,6 +1610,7 @@ impl ClusterInfo {
let allocated = thread_mem_usage::Allocatedp::default();
let mut gossip_pull_data: Vec<PullData> = vec![];
let timeouts = me.gossip.read().unwrap().make_timeouts(&stakes, epoch_ms);
let mut pull_responses = HashMap::new();
packets.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
@@ -1577,12 +1628,17 @@ impl ClusterInfo {
if contact_info.id == me.id() {
warn!("PullRequest ignored, I'm talking to myself");
inc_new_counter_debug!("cluster_info-window-request-loopback", 1);
- } else {
+ } else if contact_info.shred_version == 0
+ || contact_info.shred_version == me.my_shred_version()
+ || me.my_shred_version() == 0
+ {
gossip_pull_data.push(PullData {
from_addr,
caller,
filter,
});
+ } else {
+ me.stats.skip_pull_shred_version.add_relaxed(1);
}
}
datapoint_debug!(
@@ -1602,7 +1658,8 @@ impl ClusterInfo {
}
ret
});
- Self::handle_pull_response(me, &from, data, &timeouts);
+ let pull_entry = pull_responses.entry(from).or_insert_with(Vec::new);
+ pull_entry.extend(data);
datapoint_debug!(
"solana-gossip-listen-memory",
("pull_response", (allocated.get() - start) as i64, i64),
@@ -1664,6 +1721,11 @@ impl ClusterInfo {
}
})
});
for (from, data) in pull_responses {
Self::handle_pull_response(me, &from, data, &timeouts);
}
// process the collected pulls together
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data, stakes);
if let Some(rsp) = rsp {
@@ -1671,6 +1733,26 @@ impl ClusterInfo {
}
}
fn update_data_budget(&self, stakes: &HashMap<Pubkey, u64>) {
let mut w_outbound_budget = self.outbound_budget.write().unwrap();
let now = timestamp();
const INTERVAL_MS: u64 = 100;
// allow 30kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
const BYTES_PER_INTERVAL: usize = 3000;
const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
if now - w_outbound_budget.last_timestamp_ms > INTERVAL_MS {
let len = std::cmp::max(stakes.len(), 2);
w_outbound_budget.bytes += len * BYTES_PER_INTERVAL;
w_outbound_budget.bytes = std::cmp::min(
w_outbound_budget.bytes,
MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
);
w_outbound_budget.last_timestamp_ms = now;
}
}
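A worked example of the budget arithmetic above, under the constants in this hunk (the validator count is illustrative):

    // With 100 staked validators, each 100ms interval adds
    //   100 * 3000 = 300_000 bytes (~3 MB/s of pull-response budget),
    // i.e. the advertised 30kB/s per staked validator, and the stored
    // budget is capped at 5 * 100 * 3000 = 1_500_000 bytes of burst.
    let len = std::cmp::max(100, 2);
    let per_interval = len * 3000; // BYTES_PER_INTERVAL
    let cap = 5 * per_interval;    // MAX_BUDGET_MULTIPLE
    assert_eq!((per_interval, cap), (300_000, 1_500_000));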
// Pull requests take an incoming bloom filter of contained entries from a node
// and try to send back to them the values it detects are missing.
fn handle_pull_requests(
@@ -1683,33 +1765,19 @@ impl ClusterInfo {
let mut caller_and_filters = vec![];
let mut addrs = vec![];
let mut time = Measure::start("handle_pull_requests");
- {
- let mut w_outbound_budget = me.outbound_budget.write().unwrap();
- let now = timestamp();
- const INTERVAL_MS: u64 = 100;
- // allow 50kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
- const BYTES_PER_INTERVAL: usize = 5000;
- const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
- if now - w_outbound_budget.last_timestamp_ms > INTERVAL_MS {
- let len = std::cmp::max(stakes.len(), 2);
- w_outbound_budget.bytes += len * BYTES_PER_INTERVAL;
- w_outbound_budget.bytes = std::cmp::min(
- w_outbound_budget.bytes,
- MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
- );
- w_outbound_budget.last_timestamp_ms = now;
- }
- }
+ me.update_data_budget(stakes);
for pull_data in requests {
caller_and_filters.push((pull_data.caller, pull_data.filter));
addrs.push(pull_data.from_addr);
}
let now = timestamp();
let self_id = me.id();
let pull_responses = me
.time_gossip_write_lock("process_pull_reqs", &me.stats.process_pull_requests)
.time_gossip_read_lock("generate_pull_responses", &me.stats.generate_pull_responses)
.generate_pull_responses(&caller_and_filters);
me.time_gossip_write_lock("process_pull_reqs", &me.stats.process_pull_requests)
.process_pull_requests(caller_and_filters, now);
// Filter bad to addresses
@@ -1806,37 +1874,115 @@ impl ClusterInfo {
Some(packets)
}
+ // Returns (failed, timeout, success)
fn handle_pull_response(
me: &Self,
from: &Pubkey,
- data: Vec<CrdsValue>,
+ mut crds_values: Vec<CrdsValue>,
timeouts: &HashMap<Pubkey, u64>,
- ) {
- let len = data.len();
+ ) -> (usize, usize, usize) {
+ let len = crds_values.len();
trace!("PullResponse me: {} from: {} len={}", me.id, from, len);
- let (_fail, timeout_count) = me
- .time_gossip_write_lock("process_pull", &me.stats.process_pull_response)
- .process_pull_response(from, timeouts, data, timestamp());
+ if let Some(shred_version) = me.lookup_contact_info(from, |ci| ci.shred_version) {
+ Self::filter_by_shred_version(
+ from,
+ &mut crds_values,
+ shred_version,
+ me.my_shred_version(),
+ );
+ }
+ let filtered_len = crds_values.len();
+ let mut pull_stats = ProcessPullStats::default();
+ let (filtered_pulls, filtered_pulls_expired_timeout) = me
+ .time_gossip_read_lock("filter_pull_resp", &me.stats.filter_pull_response)
+ .filter_pull_responses(timeouts, crds_values, timestamp(), &mut pull_stats);
+ if !filtered_pulls.is_empty() || !filtered_pulls_expired_timeout.is_empty() {
+ me.time_gossip_write_lock("process_pull_resp", &me.stats.process_pull_response)
+ .process_pull_responses(
+ from,
+ filtered_pulls,
+ filtered_pulls_expired_timeout,
+ timestamp(),
+ &mut pull_stats,
+ );
+ }
+ me.stats
+ .skip_pull_response_shred_version
+ .add_relaxed((len - filtered_len) as u64);
me.stats.process_pull_response_count.add_relaxed(1);
- me.stats.process_pull_response_len.add_relaxed(len as u64);
+ me.stats
+ .process_pull_response_len
+ .add_relaxed(filtered_len as u64);
me.stats
.process_pull_response_timeout
- .add_relaxed(timeout_count as u64);
+ .add_relaxed(pull_stats.timeout_count as u64);
+ me.stats
+ .process_pull_response_fail_insert
+ .add_relaxed(pull_stats.failed_insert as u64);
+ me.stats
+ .process_pull_response_fail_timeout
+ .add_relaxed(pull_stats.failed_timeout as u64);
+ me.stats
+ .process_pull_response_success
+ .add_relaxed(pull_stats.success as u64);
+ (
+ pull_stats.failed_insert + pull_stats.failed_timeout,
+ pull_stats.timeout_count,
+ pull_stats.success,
+ )
}
fn filter_by_shred_version(
from: &Pubkey,
crds_values: &mut Vec<CrdsValue>,
shred_version: u16,
my_shred_version: u16,
) {
if my_shred_version != 0 && shred_version != 0 && shred_version != my_shred_version {
// Allow someone to update their own ContactInfo so they
// can change shred versions if needed.
crds_values.retain(|crds_value| match &crds_value.data {
CrdsData::ContactInfo(contact_info) => contact_info.id == *from,
_ => false,
});
}
}
fn handle_push_message(
me: &Self,
recycler: &PacketsRecycler,
from: &Pubkey,
- data: Vec<CrdsValue>,
+ mut crds_values: Vec<CrdsValue>,
stakes: &HashMap<Pubkey, u64>,
) -> Option<Packets> {
let self_id = me.id();
- inc_new_counter_debug!("cluster_info-push_message", 1);
+ me.stats.push_message_count.add_relaxed(1);
+ let len = crds_values.len();
+ if let Some(shred_version) = me.lookup_contact_info(from, |ci| ci.shred_version) {
+ Self::filter_by_shred_version(
+ from,
+ &mut crds_values,
+ shred_version,
+ me.my_shred_version(),
+ );
+ }
+ let filtered_len = crds_values.len();
+ me.stats
+ .push_message_value_count
+ .add_relaxed(filtered_len as u64);
+ me.stats
+ .skip_push_message_shred_version
+ .add_relaxed((len - filtered_len) as u64);
let updated: Vec<_> = me
.time_gossip_write_lock("process_push", &me.stats.process_push_message)
- .process_push_message(from, data, timestamp());
+ .process_push_message(from, crds_values, timestamp());
let updated_labels: Vec<_> = updated.into_iter().map(|u| u.value.label()).collect();
let prunes_map: HashMap<Pubkey, HashSet<Pubkey>> = me
@@ -1866,6 +2012,9 @@ impl ClusterInfo {
return None;
}
let mut packets = to_packets_with_destination(recycler.clone(), &rsp);
me.stats
.push_response_count
.add_relaxed(packets.packets.len() as u64);
if !packets.is_empty() {
let pushes: Vec<_> = me.new_push_requests();
inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());
@@ -1957,6 +2106,11 @@ impl ClusterInfo {
),
("all_tvu_peers", self.stats.all_tvu_peers.clear(), i64),
("tvu_peers", self.stats.tvu_peers.clear(), i64),
(
"new_push_requests_num",
self.stats.new_push_requests_num.clear(),
i64
),
);
datapoint_info!(
"cluster_info_stats2",
@@ -1978,11 +2132,41 @@ impl ClusterInfo {
self.stats.process_pull_response.clear(),
i64
),
(
"filter_pull_resp",
self.stats.filter_pull_response.clear(),
i64
),
(
"process_pull_resp_count",
self.stats.process_pull_response_count.clear(),
i64
),
(
"pull_response_fail_insert",
self.stats.process_pull_response_fail_insert.clear(),
i64
),
(
"pull_response_fail_timeout",
self.stats.process_pull_response_fail_timeout.clear(),
i64
),
(
"pull_response_success",
self.stats.process_pull_response_success.clear(),
i64
),
(
"process_pull_resp_timeout",
self.stats.process_pull_response_timeout.clear(),
i64
),
(
"push_response_count",
self.stats.push_response_count.clear(),
i64
),
);
datapoint_info!(
"cluster_info_stats3",
@@ -1996,6 +2180,11 @@ impl ClusterInfo {
self.stats.process_pull_requests.clear(),
i64
),
(
"generate_pull_responses",
self.stats.generate_pull_responses.clear(),
i64
),
("process_prune", self.stats.process_prune.clear(), i64),
(
"process_push_message",
@@ -2025,6 +2214,39 @@ impl ClusterInfo {
i64
),
);
datapoint_info!(
"cluster_info_stats4",
(
"skip_push_message_shred_version",
self.stats.skip_push_message_shred_version.clear(),
i64
),
(
"skip_pull_response_shred_version",
self.stats.skip_pull_response_shred_version.clear(),
i64
),
(
"skip_pull_shred_version",
self.stats.skip_pull_shred_version.clear(),
i64
),
(
"push_message_count",
self.stats.push_message_count.clear(),
i64
),
(
"push_message_value_count",
self.stats.push_message_value_count.clear(),
i64
),
(
"new_pull_requests_count",
self.stats.new_pull_requests_count.clear(),
i64
),
);
*last_print = Instant::now();
}
@@ -2043,7 +2265,8 @@ impl ClusterInfo {
.name("solana-listen".to_string())
.spawn(move || {
let thread_pool = rayon::ThreadPoolBuilder::new()
- .num_threads(get_thread_count())
+ .num_threads(get_thread_count() / 2)
+ .thread_name(|ix| format!("gos_work_{}", ix))
.build()
.unwrap();
let mut last_print = Instant::now();
@@ -2406,6 +2629,92 @@ mod tests {
assert!(ClusterInfo::is_spy_node(&node));
}
#[test]
fn test_handle_pull() {
solana_logger::setup();
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let entrypoint_pubkey = Pubkey::new_rand();
let data = test_crds_values(entrypoint_pubkey);
let timeouts = HashMap::new();
assert_eq!(
(0, 0, 1),
ClusterInfo::handle_pull_response(
&cluster_info,
&entrypoint_pubkey,
data.clone(),
&timeouts
)
);
let entrypoint_pubkey2 = Pubkey::new_rand();
assert_eq!(
(1, 0, 0),
ClusterInfo::handle_pull_response(&cluster_info, &entrypoint_pubkey2, data, &timeouts)
);
}
fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> {
let entrypoint = ContactInfo::new_localhost(&pubkey, timestamp());
let entrypoint_crdsvalue =
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
vec![entrypoint_crdsvalue]
}
#[test]
fn test_filter_shred_version() {
let from = Pubkey::new_rand();
let my_shred_version = 1;
let other_shred_version = 1;
// Allow same shred_version
let mut values = test_crds_values(from);
ClusterInfo::filter_by_shred_version(
&from,
&mut values,
other_shred_version,
my_shred_version,
);
assert_eq!(values.len(), 1);
// Allow shred_version=0.
let other_shred_version = 0;
ClusterInfo::filter_by_shred_version(
&from,
&mut values,
other_shred_version,
my_shred_version,
);
assert_eq!(values.len(), 1);
// Change to sender's ContactInfo version, allow that.
let other_shred_version = 2;
ClusterInfo::filter_by_shred_version(
&from,
&mut values,
other_shred_version,
my_shred_version,
);
assert_eq!(values.len(), 1);
let snapshot_hash_data = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash {
from: Pubkey::new_rand(),
hashes: vec![],
wallclock: 0,
}));
values.push(snapshot_hash_data);
// Change to sender's ContactInfo version, allow that.
let other_shred_version = 2;
ClusterInfo::filter_by_shred_version(
&from,
&mut values,
other_shred_version,
my_shred_version,
);
assert_eq!(values.len(), 1);
}
#[test]
fn test_cluster_spy_gossip() {
//check that gossip doesn't try to push to invalid addresses
@@ -2418,7 +2727,7 @@ mod tests {
.write()
.unwrap()
.refresh_push_active_set(&HashMap::new());
- let reqs = cluster_info.gossip_request(&HashMap::new());
+ let reqs = cluster_info.generate_new_gossip_requests(&HashMap::new(), true);
//assert none of the addrs are invalid.
reqs.iter().all(|(addr, _)| {
let res = ContactInfo::is_valid_address(addr);

View File

@@ -36,6 +36,7 @@ use std::collections::HashMap;
pub struct Crds {
/// Stores the map of labels and values
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
pub num_inserts: usize,
}
#[derive(PartialEq, Debug)]
@@ -84,6 +85,7 @@ impl Default for Crds {
fn default() -> Self {
Crds {
table: IndexMap::new(),
num_inserts: 0,
}
}
}
@@ -93,6 +95,24 @@ impl Crds {
pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
VersionedCrdsValue::new(local_timestamp, value)
}
pub fn would_insert(
&self,
value: CrdsValue,
local_timestamp: u64,
) -> Option<VersionedCrdsValue> {
let new_value = self.new_versioned(local_timestamp, value);
let label = new_value.value.label();
let would_insert = self
.table
.get(&label)
.map(|current| new_value > *current)
.unwrap_or(true);
if would_insert {
Some(new_value)
} else {
None
}
}
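would_insert is the read-only half of a check-then-apply split: callers can stage winning values under a read lock and apply them later with insert_versioned under a write lock, which mirrors how filter_pull_responses and process_pull_responses divide the work. A sketch, where crds_lock is a hypothetical RwLock<Crds>:

    let staged = {
        let r_crds = crds_lock.read().unwrap(); // cheap read-side check
        r_crds.would_insert(value, now)
    };
    if let Some(versioned) = staged {
        let mut w_crds = crds_lock.write().unwrap(); // short write-side apply
        let _ = w_crds.insert_versioned(versioned);
    }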
/// insert the new value, returns the old value if insert succeeds
pub fn insert_versioned(
&mut self,
@@ -107,6 +127,7 @@ impl Crds {
.unwrap_or(true);
if do_insert {
let old = self.table.insert(label, new_value);
self.num_inserts += 1;
Ok(old)
} else {
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);

View File

@@ -6,7 +6,7 @@
use crate::{
crds::{Crds, VersionedCrdsValue},
crds_gossip_error::CrdsGossipError,
- crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
+ crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
crds_value::{CrdsValue, CrdsValueLabel},
};
@@ -76,17 +76,10 @@ impl CrdsGossip {
stakes: &HashMap<Pubkey, u64>,
) -> HashMap<Pubkey, HashSet<Pubkey>> {
let id = &self.id;
- let crds = &self.crds;
let push = &mut self.push;
- let versioned = labels
- .into_iter()
- .filter_map(|label| crds.lookup_versioned(&label));
let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
- for val in versioned {
- let origin = val.value.pubkey();
- let hash = val.value_hash;
- let peers = push.prune_received_cache(id, &origin, hash, stakes);
+ for origin in labels.iter().map(|k| k.pubkey()) {
+ let peers = push.prune_received_cache(id, &origin, stakes);
for from in peers {
prune_map.entry(from).or_default().insert(origin);
}
@@ -113,7 +106,7 @@ impl CrdsGossip {
return Err(CrdsGossipError::PruneMessageTimeout);
}
if self.id == *destination {
- self.push.process_prune_msg(peer, origin);
+ self.push.process_prune_msg(&self.id, peer, origin);
Ok(())
} else {
Err(CrdsGossipError::BadPruneDestination)
@@ -158,24 +151,47 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now)
}
/// process a pull request and create a response
- pub fn process_pull_requests(
- &mut self,
- filters: Vec<(CrdsValue, CrdsFilter)>,
- now: u64,
- ) -> Vec<Vec<CrdsValue>> {
+ pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
self.pull
- .process_pull_requests(&mut self.crds, filters, now)
+ .process_pull_requests(&mut self.crds, filters, now);
}
- /// process a pull response
- pub fn process_pull_response(
- &mut self,
- from: &Pubkey,
+ pub fn generate_pull_responses(
+ &self,
+ filters: &[(CrdsValue, CrdsFilter)],
+ ) -> Vec<Vec<CrdsValue>> {
+ self.pull.generate_pull_responses(&self.crds, filters)
+ }
+ pub fn filter_pull_responses(
+ &self,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
- ) -> (usize, usize) {
+ process_pull_stats: &mut ProcessPullStats,
+ ) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
self.pull
- .process_pull_response(&mut self.crds, from, timeouts, response, now)
+ .filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
}
/// process a pull response
pub fn process_pull_responses(
&mut self,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
process_pull_stats: &mut ProcessPullStats,
) {
let success = self.pull.process_pull_responses(
&mut self.crds,
from,
responses,
responses_expired_timeout,
now,
process_pull_stats,
);
self.push.push_pull_responses(success, now);
}
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {

View File

@@ -2,7 +2,6 @@
pub enum CrdsGossipError {
NoPeers,
PushMessageTimeout,
PushMessageAlreadyReceived,
PushMessageOldVersion,
BadPruneDestination,
PruneMessageTimeout,

View File

@@ -10,7 +10,7 @@
//! of false positives.
use crate::contact_info::ContactInfo;
- use crate::crds::Crds;
+ use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
@@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cmp;
- use std::collections::HashMap;
use std::collections::VecDeque;
+ use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
@@ -118,6 +118,14 @@ impl CrdsFilter {
}
}
#[derive(Default)]
pub struct ProcessPullStats {
pub success: usize,
pub failed_insert: usize,
pub failed_timeout: usize,
pub timeout_count: usize,
}
#[derive(Clone)]
pub struct CrdsGossipPull {
/// timestamp of last request
@@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
pub num_pulls: usize,
}
impl Default for CrdsGossipPull {
@@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
num_pulls: 0,
}
}
}
@@ -204,14 +214,13 @@ impl CrdsGossipPull {
self.purged_values.push_back((hash, timestamp))
}
- /// process a pull request and create a response
+ /// process a pull request
pub fn process_pull_requests(
&mut self,
crds: &mut Crds,
requests: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
- ) -> Vec<Vec<CrdsValue>> {
- let rv = self.filter_crds_values(crds, &requests);
+ ) {
requests.into_iter().for_each(|(caller, _)| {
let key = caller.label().pubkey();
let old = crds.insert(caller, now);
@@ -221,20 +230,33 @@ impl CrdsGossipPull {
}
crds.update_record_timestamp(&key, now);
});
- rv
}
- /// process a pull response
- pub fn process_pull_response(
- &mut self,
- crds: &mut Crds,
- from: &Pubkey,
+ /// Create gossip responses to pull requests
+ pub fn generate_pull_responses(
+ &self,
+ crds: &Crds,
+ requests: &[(CrdsValue, CrdsFilter)],
+ ) -> Vec<Vec<CrdsValue>> {
+ self.filter_crds_values(crds, requests)
+ }
+ // Checks if responses should be inserted and
+ // returns those responses converted to VersionedCrdsValue
+ // Separated in two vecs as:
+ // .0 => responses that update the owner timestamp
+ // .1 => responses that do not update the owner timestamp
+ pub fn filter_pull_responses(
+ &self,
+ crds: &Crds,
timeouts: &HashMap<Pubkey, u64>,
- response: Vec<CrdsValue>,
+ responses: Vec<CrdsValue>,
now: u64,
- ) -> (usize, usize) {
- let mut failed = 0;
- let mut timeout_count = 0;
- for r in response {
+ stats: &mut ProcessPullStats,
+ ) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
+ let mut versioned = vec![];
+ let mut versioned_expired_timestamp = vec![];
+ for r in responses {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
@@ -253,8 +275,8 @@ impl CrdsGossipPull {
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
- timeout_count += 1;
- failed += 1;
+ stats.timeout_count += 1;
+ stats.failed_timeout += 1;
continue;
}
}
@@ -262,29 +284,69 @@ impl CrdsGossipPull {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
- timeout_count += 1;
- failed += 1;
+ stats.timeout_count += 1;
+ stats.failed_timeout += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
- failed += crds.insert(r, now).is_err() as usize;
+ match crds.would_insert(r, now) {
+ Some(resp) => versioned_expired_timestamp.push(resp),
+ None => stats.failed_insert += 1,
+ }
continue;
}
}
}
}
- let old = crds.insert(r, now);
- failed += old.is_err() as usize;
+ match crds.would_insert(r, now) {
+ Some(resp) => versioned.push(resp),
+ None => stats.failed_insert += 1,
+ }
}
(versioned, versioned_expired_timestamp)
}
/// process a vec of pull responses
pub fn process_pull_responses(
&mut self,
crds: &mut Crds,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
stats: &mut ProcessPullStats,
) -> Vec<(CrdsValueLabel, Hash, u64)> {
let mut success = vec![];
let mut owners = HashSet::new();
for r in responses_expired_timeout {
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
}
for r in responses {
let owner = r.value.label().pubkey();
let label = r.value.label();
let wc = r.value.wallclock();
let hash = r.value_hash;
let old = crds.insert_versioned(r);
if old.is_err() {
stats.failed_insert += 1;
} else {
stats.success += 1;
self.num_pulls += 1;
success.push((label, hash, wc));
}
old.ok().map(|opt| {
- crds.update_record_timestamp(&owner, now);
+ owners.insert(owner);
opt.map(|val| {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
})
});
}
- crds.update_record_timestamp(from, now);
- (failed, timeout_count)
+ owners.insert(*from);
+ for owner in owners {
+ crds.update_record_timestamp(&owner, now);
+ }
+ success
}
// build a set of filters of the current crds table
// num_filters - used to increase the likelihood of a value in crds being added to some filter
@@ -374,6 +436,34 @@ impl CrdsGossipPull {
.count();
self.purged_values.drain(..cnt);
}
/// For legacy tests
#[cfg(test)]
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> (usize, usize, usize) {
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
self.process_pull_responses(
crds,
from,
versioned,
versioned_expired_timeout,
now,
&mut stats,
);
(
stats.failed_timeout + stats.failed_insert,
stats.timeout_count,
stats.success,
)
}
}
#[cfg(test)]
mod test {
@@ -573,8 +663,9 @@ mod test {
let mut dest_crds = Crds::default();
let mut dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
- let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
- let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
+ let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
+ let rsp = dest.generate_pull_responses(&dest_crds, &filters);
+ dest.process_pull_requests(&mut dest_crds, filters, 1);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@@ -643,8 +734,9 @@ mod test {
PACKET_DATA_SIZE,
);
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 0);
// if there is a false positive this is empty
// the probability should be around 0.1 per iteration
if rsp.is_empty() {


@@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
#[derive(Clone)]
pub struct CrdsGossipPush {
@@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
/// Cache that tracks which validators a message was received from
/// bool indicates it has been pruned.
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
pub prune_timeout: u64,
pub num_total: usize,
pub num_old: usize,
pub num_pushes: usize,
}
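The reshaped `received_cache` is keyed by message origin rather than message hash, with a per-relayer prune flag. A sketch of the shape and the receipt path, with u64 standing in for Pubkey:

use std::collections::HashMap;

type Origin = u64; // stand-ins for Pubkey
type Peer = u64;

/// origin -> (relaying peer -> (pruned?, wallclock of last receipt))
type ReceivedCache = HashMap<Origin, HashMap<Peer, (bool, u64)>>;

fn record_receipt(cache: &mut ReceivedCache, origin: Origin, from: Peer, now: u64) {
    // A relayer starts unpruned; its timestamp is refreshed on every
    // receipt while the prune flag is preserved.
    cache
        .entry(origin)
        .or_insert_with(HashMap::new)
        .entry(from)
        .or_insert((false, 0))
        .1 = now;
}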
impl Default for CrdsGossipPush {
@@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
num_total: 0,
num_old: 0,
num_pushes: 0,
}
}
}
@@ -81,18 +91,21 @@ impl CrdsGossipPush {
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
let cache = self.received_cache.get(origin);
if cache.is_none() {
return Vec::new();
}
let peers = cache.unwrap();
let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let peer_stake_total: u64 = peers
.iter()
.filter(|v| !(v.1).0)
.map(|v| stakes.get(v.0).unwrap_or(&0))
.sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
@@ -100,7 +113,8 @@ impl CrdsGossipPush {
let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|v| !(v.1).0)
.filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
.filter(|(_, s)| *s > 0)
.collect();
@@ -117,16 +131,27 @@ impl CrdsGossipPush {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
if peer_stake_sum >= prune_stake_threshold
&& keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
{
break;
}
}
peers
.iter()
let pruned_peers: Vec<Pubkey> = peers
.keys()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
.collect();
pruned_peers.iter().for_each(|p| {
self.received_cache
.get_mut(origin)
.unwrap()
.get_mut(p)
.unwrap()
.0 = true;
});
pruned_peers
}
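The pruning policy above keeps relayers until both the stake threshold and `CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES` are satisfied, then marks the remainder pruned in place rather than evicting them. An illustrative pass under the same policy (simplified types; the real code iterates a stake-weighted shuffle):

use std::collections::{HashMap, HashSet};

const MIN_INGRESS_NODES: usize = 2;

fn prune_origin(
    peers: &mut HashMap<u64, (bool, u64)>, // peer -> (pruned?, wallclock)
    stakes: &HashMap<u64, u64>,
    prune_stake_threshold: u64,
) -> Vec<u64> {
    let mut keep = HashSet::new();
    let mut stake_sum: u64 = 0;
    // In the real code this iteration order is a stake-weighted shuffle.
    for (&peer, _) in peers.iter().filter(|(_, v)| !v.0) {
        let stake = *stakes.get(&peer).unwrap_or(&0);
        if stake == 0 {
            continue;
        }
        keep.insert(peer);
        stake_sum += stake;
        if stake_sum >= prune_stake_threshold && keep.len() >= MIN_INGRESS_NODES {
            break;
        }
    }
    let pruned: Vec<u64> = peers.keys().filter(|p| !keep.contains(*p)).cloned().collect();
    for p in &pruned {
        peers.get_mut(p).unwrap().0 = true; // mark pruned, keep the entry
    }
    pruned
}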
/// process a push message to the network
@@ -137,6 +162,7 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
self.num_total += 1;
if now
> value
.wallclock()
@@ -149,21 +175,32 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();
let origin = label.pubkey();
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let received_set = self
.received_cache
.entry(origin)
.or_insert_with(HashMap::new);
received_set.entry(*from).or_insert((false, 0)).1 = now;
let old = crds.insert_versioned(new_value);
if old.is_err() {
self.num_old += 1;
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))
Ok(old.unwrap())
}
/// push pull responses
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
for (label, value_hash, wc) in values {
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
continue;
}
self.push_messages.insert(label, value_hash);
}
}
/// New push message to broadcast to peers.
@@ -172,18 +209,10 @@ impl CrdsGossipPush {
/// The list of push messages is created such that all the randomly selected peers have not
/// pruned the source addresses.
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
let max = self.active_set.len();
let mut nodes: Vec<_> = (0..max).collect();
nodes.shuffle(&mut rand::thread_rng());
let peers: Vec<Pubkey> = nodes
.into_iter()
.filter_map(|n| self.active_set.get_index(n))
.take(self.push_fanout)
.map(|n| *n.0)
.collect();
let mut total_bytes: usize = 0;
let mut values = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
trace!("new_push_messages {}", self.push_messages.len());
for (label, hash) in &self.push_messages {
let res = crds.lookup_versioned(label);
if res.is_none() {
@@ -203,21 +232,37 @@ impl CrdsGossipPush {
}
values.push(value.clone());
}
trace!(
"new_push_messages {} {}",
values.len(),
self.active_set.len()
);
for v in values {
for p in peers.iter() {
let filter = self.active_set.get_mut(p);
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
push_messages.entry(*p).or_default().push(v.clone());
// Use a consistent index for the same origin so
// the active set learns the MST for that origin
let start = v.label().pubkey().as_ref()[0] as usize;
let max = self.push_fanout.min(self.active_set.len());
for i in start..(start + max) {
let ix = i % self.active_set.len();
if let Some((p, filter)) = self.active_set.get_index(ix) {
if !filter.contains(&v.label().pubkey()) {
trace!("new_push_messages insert {} {:?}", *p, v);
push_messages.entry(*p).or_default().push(v.clone());
self.num_pushes += 1;
}
}
self.push_messages.remove(&v.label());
}
self.push_messages.remove(&v.label());
}
push_messages
}
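The fanout selection above is deterministic per origin: the first byte of the origin's pubkey picks where to start walking the active set, so every node pushes a given origin to the same slice of peers and the network converges on a spanning tree for that origin. A sketch of the index math:

/// Start at the origin's first key byte and walk `fanout` slots of the
/// active set, wrapping as needed. `active_set_len` must be non-zero.
fn fanout_indices(origin_first_byte: u8, fanout: usize, active_set_len: usize) -> Vec<usize> {
    let start = origin_first_byte as usize;
    let max = fanout.min(active_set_len);
    (start..start + max).map(|i| i % active_set_len).collect()
}

// With a 5-node active set and fanout 3, an origin whose key begins with
// 0x07 always targets indices [2, 3, 4]:
// assert_eq!(fanout_indices(0x07, 3, 5), vec![2, 3, 4]);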
/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
}
@@ -339,15 +384,11 @@ impl CrdsGossipPush {
/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.received_cache
.iter()
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.received_cache.remove(&k);
}
self.received_cache
.iter_mut()
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));
self.received_cache.retain(|_, v| !v.is_empty());
}
}
@@ -371,7 +412,6 @@ mod test {
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
@@ -380,11 +420,7 @@ mod test {
stakes.insert(p, 1);
});
let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
@@ -395,7 +431,7 @@ mod test {
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
@@ -409,7 +445,7 @@ mod test {
}
#[test]
fn test_process_push() {
fn test_process_push_one() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -426,8 +462,8 @@ mod test {
// push it again
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}
#[test]
@@ -690,6 +726,7 @@ mod test {
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -707,7 +744,11 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
push.process_prune_msg(
&self_id,
&peer.label().pubkey(),
&[new_msg.label().pubkey()],
);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
#[test]
@@ -749,9 +790,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));
// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);
// purge the old pushed


@@ -3,7 +3,7 @@
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Slot, DEFAULT_TICKS_PER_SLOT, TICKS_PER_DAY};
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
@@ -32,6 +32,10 @@ pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
// Compacting at a slower interval than purging helps keep IOPS down.
// Once a day should be ample
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
@@ -49,6 +53,8 @@ impl LedgerCleanupService {
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@@ -62,6 +68,8 @@ impl LedgerCleanupService {
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
&mut last_compaction_slot,
DEFAULT_COMPACTION_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -116,7 +124,7 @@ impl LedgerCleanupService {
}
}
(true, lowest_cleanup_slot, first_slot, total_shreds)
(true, first_slot, lowest_cleanup_slot, total_shreds)
}
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
@@ -135,6 +143,8 @@ impl LedgerCleanupService {
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
last_compaction_slot: &mut u64,
compaction_interval: u64,
) -> Result<(), RecvTimeoutError> {
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
@@ -143,19 +153,20 @@ impl LedgerCleanupService {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purge: last_root={}, last_purge_slot={}, purge_interval={}, last_compaction_slot={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, last_compaction_slot, disk_utilization_pre
);
*last_purge_slot = root;
let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
if slots_to_clean {
info!(
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
let mut compact_first_slot = std::u64::MAX;
if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
compact_first_slot = *last_compaction_slot;
*last_compaction_slot = lowest_cleanup_slot;
}
let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
@@ -167,14 +178,36 @@ impl LedgerCleanupService {
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();
info!(
"purging data from slots {} to {}",
purge_first_slot, lowest_cleanup_slot
);
let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
purge_first_slot,
lowest_cleanup_slot,
delay_between_purges,
);
purge_time.stop();
info!("{}", purge_time);
if compact_first_slot < lowest_cleanup_slot {
info!(
"compacting data from slots {} to {}",
compact_first_slot, lowest_cleanup_slot
);
if let Err(err) =
blockstore.compact_storage(compact_first_slot, lowest_cleanup_slot)
{
// This error is not fatal, but it may indicate an internal problem
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
err, compact_first_slot, lowest_cleanup_slot
);
}
}
purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
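Purging runs on every `purge_interval`, but the IOPS-heavy RocksDB compaction is only handed a range roughly once a day. A sketch of the gating arithmetic (constants assumed from the values referenced above):

// Values assumed from solana_sdk::clock at the time of this change.
const DEFAULT_TICKS_PER_SECOND: u64 = 160;
const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * 60 * 60 * 24;
const DEFAULT_TICKS_PER_SLOT: u64 = 64;
// ~216_000 slots, i.e. about one day at 400ms slots.
const COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;

/// Returns Some(first_slot_to_compact) when enough slots have passed since
/// the last compaction, advancing the bookmark exactly as the service does.
fn maybe_compact(lowest_cleanup_slot: u64, last_compaction_slot: &mut u64) -> Option<u64> {
    if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > COMPACTION_SLOT_INTERVAL {
        let first = *last_compaction_slot;
        *last_compaction_slot = lowest_cleanup_slot;
        Some(first)
    } else {
        None
    }
}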
@@ -233,6 +266,7 @@ mod tests {
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
@@ -241,6 +275,8 @@ mod tests {
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
@@ -272,6 +308,7 @@ mod tests {
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
@@ -296,6 +333,8 @@ mod tests {
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
time.stop();
@@ -331,14 +370,17 @@ mod tests {
// send signal to cleanup slots
let (sender, receiver) = channel();
sender.send(n).unwrap();
let mut next_purge_batch = 0;
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
max_ledger_shreds,
&mut next_purge_batch,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();


@@ -42,10 +42,12 @@ pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;


@@ -56,8 +56,8 @@ solana_sdk::pubkeys!(
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
@@ -75,6 +75,7 @@ solana_sdk::pubkeys!(
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
]
);
@@ -84,6 +85,7 @@ solana_sdk::pubkeys!(
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
"FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
]
);


@@ -215,6 +215,7 @@ impl ReplayStage {
&mut progress,
transaction_status_sender.clone(),
&verify_recyclers,
&subscriptions,
);
Self::report_memory(&allocated, "replay_active_banks", start);
@@ -758,7 +759,6 @@ impl ReplayStage {
progress.get_fork_stats(bank.slot()).unwrap().total_staked,
lockouts_sender,
);
Self::push_vote(
cluster_info,
bank,
@@ -838,6 +838,7 @@ impl ReplayStage {
let blockhash = bank.last_blockhash();
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash);
let _ = cluster_info.send_vote(&vote_tx);
cluster_info.push_vote(tower_index, vote_tx);
}
@@ -896,6 +897,7 @@ impl ReplayStage {
progress: &mut ProgressMap,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
subscriptions: &Arc<RpcSubscriptions>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -963,6 +965,7 @@ impl ReplayStage {
did_complete_bank = true;
info!("bank frozen: {}", bank.slot());
bank.freeze();
subscriptions.notify_frozen(bank.slot());
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",


@@ -3,6 +3,7 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_service::RepairStrategy,
result::{Error, Result},
window_service::{should_retransmit_and_persist, WindowService},
@@ -17,8 +18,9 @@ use solana_ledger::{
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Epoch, Slot};
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
@@ -43,6 +45,8 @@ struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
epoch_fetch: AtomicU64,
epoch_cache_update: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
@@ -64,6 +68,8 @@ fn update_retransmit_stats(
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
epoch_fetch: u64,
epoch_cache_update: u64,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
@@ -82,6 +88,10 @@ fn update_retransmit_stats(
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
stats
.epoch_cache_update
.fetch_add(epoch_cache_update, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
@@ -106,6 +116,16 @@ fn update_retransmit_stats(
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_fetch",
stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_cache_update",
stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
@@ -147,6 +167,14 @@ fn update_retransmit_stats(
}
}
#[derive(Default)]
struct EpochStakesCache {
epoch: Epoch,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
peers: Vec<ContactInfo>,
stakes_and_index: Vec<(u64, usize)>,
}
fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
@@ -155,6 +183,8 @@ fn retransmit(
sock: &UdpSocket,
id: u32,
stats: &Arc<RetransmitStats>,
epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
last_peer_update: &Arc<AtomicU64>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
@@ -171,12 +201,42 @@ fn retransmit(
}
drop(r_lock);
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let r_bank = bank_forks.read().unwrap().working_bank();
let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
epoch_fetch.stop();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
let mut r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
if r_epoch_stakes_cache.epoch != bank_epoch {
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
if w_epoch_stakes_cache.epoch != bank_epoch {
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
w_epoch_stakes_cache.stakes = stakes;
w_epoch_stakes_cache.epoch = bank_epoch;
}
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}
let now = timestamp();
let last = last_peer_update.load(Ordering::Relaxed);
if now - last > 1000 && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
{
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
let (peers, stakes_and_index) =
cluster_info.sorted_retransmit_peers_and_stakes(w_epoch_stakes_cache.stakes.clone());
w_epoch_stakes_cache.peers = peers;
w_epoch_stakes_cache.stakes_and_index = stakes_and_index;
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}
let mut peers_len = 0;
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
epoch_cache_update.stop();
let my_id = cluster_info.id();
let mut discard_total = 0;
let mut repair_total = 0;
@@ -201,8 +261,8 @@ fn retransmit(
let mut compute_turbine_peers = Measure::start("turbine_start");
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&peers,
&stakes_and_index,
&r_epoch_stakes_cache.peers,
&r_epoch_stakes_cache.stakes_and_index,
packet.meta.seed,
);
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@@ -215,8 +275,14 @@ fn retransmit(
let (neighbors, children) =
compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes);
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
let neighbors: Vec<_> = neighbors
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
let children: Vec<_> = children
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_us();
@@ -257,6 +323,8 @@ fn retransmit(
peers_len,
packets_by_slot,
packets_by_source,
epoch_fetch.as_us(),
epoch_cache_update.as_us(),
);
Ok(())
@@ -286,6 +354,8 @@ pub fn retransmitter(
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
let last_peer_update = Arc::new(AtomicU64::new(0));
Builder::new()
.name("solana-retransmitter".to_string())
@@ -300,6 +370,8 @@ pub fn retransmitter(
&sockets[s],
s as u32,
&stats,
&epoch_stakes_cache,
&last_peer_update,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
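Two concurrency patterns carry this hunk: a double-checked RwLock upgrade for the per-epoch stakes cache, and an atomic compare-and-swap so at most one thread per interval refreshes the sorted peer list. A simplified sketch of both, with stand-in types:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::RwLock;

#[derive(Default)]
struct Cache {
    epoch: u64,
    stakes: Vec<u64>,
}

fn cached_stakes(cache: &RwLock<Cache>, bank_epoch: u64, load: impl Fn() -> Vec<u64>) -> Vec<u64> {
    {
        let r = cache.read().unwrap();
        if r.epoch == bank_epoch {
            return r.stakes.clone();
        }
    } // drop the read lock before upgrading
    let mut w = cache.write().unwrap();
    // Re-check: another thread may have refreshed while we waited.
    if w.epoch != bank_epoch {
        w.stakes = load();
        w.epoch = bank_epoch;
    }
    w.stakes.clone()
}

/// At most one caller per `interval_ms` wins the swap and performs the
/// refresh; compare_and_swap mirrors the code above.
fn should_refresh(last_update: &AtomicU64, now: u64, interval_ms: u64) -> bool {
    let last = last_update.load(Ordering::Relaxed);
    now.saturating_sub(last) > interval_ms
        && last_update.compare_and_swap(last, now, Ordering::Relaxed) == last
}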

(diff of one file not shown: too large to display)


@@ -3,6 +3,7 @@ use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
pub enum RpcCustomError {
NonexistentClusterRoot {
@@ -13,6 +14,9 @@ pub enum RpcCustomError {
slot: Slot,
first_available_block: Slot,
},
SendTransactionPreflightFailure {
message: String,
},
}
impl From<RpcCustomError> for Error {
@@ -40,6 +44,11 @@ impl From<RpcCustomError> for Error {
),
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
message,
data: None,
},
}
}
}

core/src/rpc_health.rs (new file, 118 lines)

@@ -0,0 +1,118 @@
use crate::cluster_info::ClusterInfo;
use solana_sdk::pubkey::Pubkey;
use std::{
collections::HashSet,
sync::atomic::{AtomicBool, Ordering},
sync::Arc,
};
#[derive(PartialEq, Clone, Copy)]
pub enum RpcHealthStatus {
Ok,
Behind, // Validator is behind its trusted validators
}
pub struct RpcHealth {
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
health_check_slot_distance: u64,
override_health_check: Arc<AtomicBool>,
#[cfg(test)]
stub_health_status: std::sync::RwLock<Option<RpcHealthStatus>>,
}
impl RpcHealth {
pub fn new(
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
health_check_slot_distance: u64,
override_health_check: Arc<AtomicBool>,
) -> Self {
Self {
cluster_info,
trusted_validators,
health_check_slot_distance,
override_health_check,
#[cfg(test)]
stub_health_status: std::sync::RwLock::new(None),
}
}
pub fn check(&self) -> RpcHealthStatus {
#[cfg(test)]
{
if let Some(stub_health_status) = *self.stub_health_status.read().unwrap() {
return stub_health_status;
}
}
if self.override_health_check.load(Ordering::Relaxed) {
RpcHealthStatus::Ok
} else if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
(
self.cluster_info
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
self.cluster_info
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `health_check_slot_distance` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(self.health_check_slot_distance)
{
RpcHealthStatus::Ok
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
RpcHealthStatus::Behind
}
} else {
// No trusted validator point of reference available, so this validator is healthy
// because it's running
RpcHealthStatus::Ok
}
}
#[cfg(test)]
pub(crate) fn stub() -> Arc<Self> {
Arc::new(Self::new(
Arc::new(ClusterInfo::default()),
None,
42,
Arc::new(AtomicBool::new(false)),
))
}
#[cfg(test)]
pub(crate) fn stub_set_health_status(&self, stub_health_status: Option<RpcHealthStatus>) {
*self.stub_health_status.write().unwrap() = stub_health_status;
}
}
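The health decision reduces to a slot-distance comparison once the latest account-hash slots are known. A sketch of just that predicate, with a worked example assuming the default distance of 150:

/// Healthy when this node's latest account-hash slot is within `distance`
/// of the best trusted validator's, and both sides have reported a hash.
fn is_healthy(my_slot: u64, best_trusted_slot: u64, distance: u64) -> bool {
    my_slot > 0
        && best_trusted_slot > 0
        && my_slot > best_trusted_slot.saturating_sub(distance)
}

// With distance = 150: a node at slot 900 against a trusted peer at slot
// 1000 reports "ok" (900 > 850), while a node at slot 800 reports "behind".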


@@ -1,8 +1,9 @@
//! The `rpc_service` module implements the Solana JSON RPC service.
use crate::{
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*,
storage_stage::StorageState, validator::ValidatorExit,
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, rpc_health::*,
send_transaction_service::SendTransactionService, storage_stage::StorageState,
validator::ValidatorExit,
};
use jsonrpc_core::MetaIoHandler;
use jsonrpc_http_server::{
@@ -20,20 +21,17 @@ use std::{
collections::HashSet,
net::SocketAddr,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::{mpsc::channel, Arc, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::prelude::Future;
// If trusted validators are specified, consider this validator healthy if its latest account hash
// is no further behind than this distance from the latest trusted validator account hash
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
#[cfg(test)]
pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
close_handle: Option<CloseHandle>,
}
@@ -42,27 +40,24 @@ struct RpcRequestMiddleware {
ledger_path: PathBuf,
snapshot_archive_path_regex: Regex,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
health: Arc<RpcHealth>,
}
impl RpcRequestMiddleware {
pub fn new(
ledger_path: PathBuf,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
health: Arc<RpcHealth>,
) -> Self {
Self {
ledger_path,
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
.unwrap(),
snapshot_config,
cluster_info,
trusted_validators,
bank_forks,
health,
}
}
@@ -133,58 +128,10 @@ impl RpcRequestMiddleware {
}
fn health_check(&self) -> &'static str {
let response = if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
(
self.cluster_info
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
self.cluster_info
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
{
"ok"
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
"behind"
}
} else {
// No trusted validator point of reference available, so this validator is healthy
// because it's running
"ok"
let response = match self.health.check() {
RpcHealthStatus::Ok => "ok",
RpcHealthStatus::Behind => "behind",
};
info!("health check: {}", response);
response
}
@@ -290,17 +237,37 @@ impl JsonRpcService {
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
trusted_validators: Option<HashSet<Pubkey>>,
override_health_check: Arc<AtomicBool>,
) -> Self {
info!("rpc bound to {:?}", rpc_addr);
info!("rpc configuration: {:?}", config);
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
let health = Arc::new(RpcHealth::new(
cluster_info.clone(),
trusted_validators,
config.health_check_slot_distance,
override_health_check,
));
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
let send_transaction_service = Arc::new(SendTransactionService::new(
&cluster_info,
&bank_forks,
&exit_send_transaction_service,
));
let request_processor = JsonRpcRequestProcessor::new(
config,
bank_forks.clone(),
block_commitment_cache,
blockstore,
storage_state,
validator_exit.clone(),
)));
health.clone(),
cluster_info,
genesis_hash,
send_transaction_service,
);
#[cfg(test)]
let test_request_processor = request_processor.clone();
@@ -318,17 +285,12 @@ impl JsonRpcService {
let request_middleware = RpcRequestMiddleware::new(
ledger_path,
snapshot_config,
cluster_info.clone(),
trusted_validators,
bank_forks.clone(),
health.clone(),
);
let server = ServerBuilder::with_meta_extractor(
io,
move |_req: &hyper::Request<hyper::Body>| Meta {
request_processor: request_processor.clone(),
cluster_info: cluster_info.clone(),
genesis_hash,
},
move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
)
.threads(num_cpus::get())
.cors(DomainsValidation::AllowOnly(vec![
@@ -351,6 +313,7 @@ impl JsonRpcService {
let server = server.unwrap();
close_handle_sender.send(server.close_handle()).unwrap();
server.wait();
exit_send_transaction_service.store(true, Ordering::Relaxed);
})
.unwrap();
@@ -384,7 +347,6 @@ impl JsonRpcService {
mod tests {
use super::*;
use crate::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
rpc::tests::create_validator_exit,
};
@@ -394,8 +356,7 @@ mod tests {
};
use solana_runtime::bank::Bank;
use solana_sdk::signature::Signer;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::AtomicBool;
use std::net::{IpAddr, Ipv4Addr};
#[test]
fn test_rpc_new() {
@@ -407,7 +368,7 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank = Bank::new(&genesis_config);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let rpc_addr = SocketAddr::new(
ip_addr,
@@ -432,6 +393,7 @@ mod tests {
StorageState::default(),
validator_exit,
None,
Arc::new(AtomicBool::new(false)),
);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@@ -440,8 +402,6 @@ mod tests {
10_000,
rpc_service
.request_processor
.read()
.unwrap()
.get_balance(Ok(mint_keypair.pubkey()), None)
.unwrap()
.value
@@ -473,15 +433,12 @@ mod tests {
#[test]
fn test_is_file_get_path() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let bank_forks = create_bank_forks();
let rrm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
bank_forks.clone(),
RpcHealth::stub(),
);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
@@ -490,9 +447,8 @@ mod tests {
snapshot_package_output_path: PathBuf::from("/"),
snapshot_path: PathBuf::from("/"),
}),
cluster_info,
None,
bank_forks,
RpcHealth::stub(),
);
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
@@ -518,30 +474,30 @@ mod tests {
#[test]
fn test_health_check_with_no_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
create_bank_forks(),
RpcHealth::stub(),
);
assert_eq!(rm.health_check(), "ok");
}
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let health_check_slot_distance = 123;
let override_health_check = Arc::new(AtomicBool::new(false));
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
let health = Arc::new(RpcHealth::new(
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
create_bank_forks(),
);
health_check_slot_distance,
override_health_check.clone(),
));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health);
// No account hashes for this node or any trusted validators == "behind"
assert_eq!(rm.health_check(), "behind");
@@ -549,6 +505,9 @@ mod tests {
// No account hashes for any trusted validators == "behind"
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
assert_eq!(rm.health_check(), "behind");
override_health_check.store(true, Ordering::Relaxed);
assert_eq!(rm.health_check(), "ok");
override_health_check.store(false, Ordering::Relaxed);
// This node is ahead of the trusted validators == "ok"
cluster_info
@@ -579,7 +538,7 @@ mod tests {
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[1].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
vec![(1000 + health_check_slot_distance - 1, Hash::default())],
))),
1,
)
@@ -595,7 +554,7 @@ mod tests {
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[2].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
vec![(1000 + health_check_slot_distance, Hash::default())],
))),
1,
)


@@ -56,6 +56,7 @@ enum NotificationEntry {
Slot(SlotInfo),
Vote(Vote),
Root(Slot),
Frozen(Slot),
Bank(CacheSlotInfo),
Gossip(Slot),
}
@@ -64,6 +65,7 @@ impl std::fmt::Debug for NotificationEntry {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
NotificationEntry::Root(root) => write!(f, "Root({})", root),
NotificationEntry::Frozen(slot) => write!(f, "Frozen({})", slot),
NotificationEntry::Vote(vote) => write!(f, "Vote({:?})", vote),
NotificationEntry::Slot(slot_info) => write!(f, "Slot({:?})", slot_info),
NotificationEntry::Bank(cache_slot_info) => write!(
@@ -219,6 +221,8 @@ fn filter_account_result(
last_notified_slot: Slot,
) -> (Box<dyn Iterator<Item = RpcAccount>>, Slot) {
if let Some((account, fork)) = result {
// If fork < last_notified_slot, the last notification was for a different
// fork, so notify that the account state has been reverted.
if fork != last_notified_slot {
return (Box::new(iter::once(RpcAccount::encode(account))), fork);
}
@@ -639,6 +643,10 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Vote(vote.clone()));
}
pub fn notify_frozen(&self, frozen_slot: Slot) {
self.enqueue_notification(NotificationEntry::Frozen(frozen_slot));
}
pub fn add_root_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<Slot>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.root_subscriptions.write().unwrap();
@@ -682,6 +690,7 @@ impl RpcSubscriptions {
bank_forks: Arc<RwLock<BankForks>>,
last_checked_slots: Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
) {
let mut pending_gossip_notifications = HashSet::new();
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -712,6 +721,12 @@ impl RpcSubscriptions {
for (_, sink) in subscriptions.iter() {
notifier.notify(root, sink);
}
// Prune old pending notifications
pending_gossip_notifications = pending_gossip_notifications
.into_iter()
.filter(|&s| s > root)
.collect();
}
NotificationEntry::Bank(cache_slot_info) => {
RpcSubscriptions::notify_accounts_programs_signatures(
@@ -723,23 +738,36 @@ impl RpcSubscriptions {
&notifier,
)
}
NotificationEntry::Frozen(slot) => {
if pending_gossip_notifications.remove(&slot) {
Self::process_gossip_notification(
slot,
&notifier,
&subscriptions,
&bank_forks,
&last_checked_slots,
);
}
}
NotificationEntry::Gossip(slot) => {
let _ = last_checked_slots
.write()
let bank_frozen = bank_forks
.read()
.unwrap()
.insert(CommitmentLevel::SingleGossip, slot);
let cache_slot_info = CacheSlotInfo {
highest_confirmed_slot: slot,
..CacheSlotInfo::default()
};
RpcSubscriptions::notify_accounts_programs_signatures(
&subscriptions.gossip_account_subscriptions,
&subscriptions.gossip_program_subscriptions,
&subscriptions.gossip_signature_subscriptions,
&bank_forks,
&cache_slot_info,
&notifier,
)
.get(slot)
.filter(|b| b.is_frozen())
.is_some();
if !bank_frozen {
pending_gossip_notifications.insert(slot);
} else {
Self::process_gossip_notification(
slot,
&notifier,
&subscriptions,
&bank_forks,
&last_checked_slots,
);
}
}
},
Err(RecvTimeoutError::Timeout) => {
@@ -753,6 +781,42 @@ impl RpcSubscriptions {
}
}
fn process_gossip_notification(
slot: Slot,
notifier: &RpcNotifier,
subscriptions: &Subscriptions,
bank_forks: &Arc<RwLock<BankForks>>,
last_checked_slots: &Arc<RwLock<HashMap<CommitmentLevel, Slot>>>,
) {
let mut last_checked_slots_lock = last_checked_slots.write().unwrap();
let last_checked_slot = last_checked_slots_lock
.get(&CommitmentLevel::SingleGossip)
.cloned()
.unwrap_or_default();
if slot > last_checked_slot {
last_checked_slots_lock.insert(CommitmentLevel::SingleGossip, slot);
} else {
// Avoid sending stale or duplicate notifications
return;
}
drop(last_checked_slots_lock);
let cache_slot_info = CacheSlotInfo {
highest_confirmed_slot: slot,
..CacheSlotInfo::default()
};
RpcSubscriptions::notify_accounts_programs_signatures(
&subscriptions.gossip_account_subscriptions,
&subscriptions.gossip_program_subscriptions,
&subscriptions.gossip_signature_subscriptions,
&bank_forks,
&cache_slot_info,
&notifier,
);
}
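The gossip-notification flow above defers notifications for slots whose banks are not yet frozen, replays them on `Frozen(slot)`, and prunes anything at or below a new root. A minimal sketch of that state machine with stand-in types:

use std::collections::HashSet;

struct Pending(HashSet<u64>);

impl Pending {
    fn on_gossip(&mut self, slot: u64, bank_frozen: bool, notify: impl FnOnce(u64)) {
        if bank_frozen {
            notify(slot);
        } else {
            self.0.insert(slot); // replay later, on Frozen(slot)
        }
    }
    fn on_frozen(&mut self, slot: u64, notify: impl FnOnce(u64)) {
        if self.0.remove(&slot) {
            notify(slot);
        }
    }
    fn on_root(&mut self, root: u64) {
        // Slots at or below the new root can never be gossip-confirmed
        // after the fact; drop them.
        self.0.retain(|&s| s > root);
    }
}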
fn notify_accounts_programs_signatures(
account_subscriptions: &Arc<RpcAccountSubscriptions>,
program_subscriptions: &Arc<RpcProgramSubscriptions>,
@@ -1373,6 +1437,8 @@ pub(crate) mod tests {
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let alice = Keypair::new();
let (subscriber0, _id_receiver, transport_receiver0) =
@@ -1398,17 +1464,10 @@ pub(crate) mod tests {
sub_id0.clone(),
subscriber0,
);
let sub_id1 = SubscriptionId::Number(1 as u64);
subscriptions.add_account_subscription(
alice.pubkey(),
Some(CommitmentConfig::recent()),
sub_id1.clone(),
subscriber1,
);
assert!(subscriptions
.subscriptions
.account_subscriptions
.gossip_account_subscriptions
.read()
.unwrap()
.contains_key(&alice.pubkey()));
@@ -1421,37 +1480,27 @@ pub(crate) mod tests {
16,
&solana_budget_program::id(),
);
// Add the transaction to the 1st bank and then freeze the bank
let bank1 = bank_forks.write().unwrap().get(1).cloned().unwrap();
bank1.process_transaction(&tx).unwrap();
bank1.freeze();
// Add the same transaction to the unfrozen 2nd bank
bank_forks
.write()
.unwrap()
.get(1)
.get(2)
.unwrap()
.process_transaction(&tx)
.unwrap();
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = 1;
subscriptions.notify_subscribers(cache_slot_info);
let (response, _) = robust_poll_or_panic(transport_receiver1);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
},
"subscription": 1,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
// First, notify the unfrozen bank first to queue pending notification
subscriptions.notify_gossip_subscribers(2);
// Now, notify the frozen bank and ensure its notifications are processed
subscriptions.notify_gossip_subscribers(1);
let (response, _) = robust_poll_or_panic(transport_receiver0);
let expected = json!({
"jsonrpc": "2.0",
@@ -1471,18 +1520,41 @@ pub(crate) mod tests {
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_account_subscription(&sub_id0);
assert!(subscriptions
.subscriptions
.account_subscriptions
.read()
.unwrap()
.contains_key(&alice.pubkey()));
let sub_id1 = SubscriptionId::Number(1 as u64);
subscriptions.add_account_subscription(
alice.pubkey(),
Some(CommitmentConfig::single_gossip()),
sub_id1.clone(),
subscriber1,
);
subscriptions.notify_frozen(2);
let (response, _) = robust_poll_or_panic(transport_receiver1);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 2 },
"value": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
},
"subscription": 1,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_account_subscription(&sub_id1);
assert!(!subscriptions
.subscriptions
.account_subscriptions
.gossip_account_subscriptions
.read()
.unwrap()
.contains_key(&alice.pubkey()));


@@ -0,0 +1,377 @@
use crate::cluster_info::ClusterInfo;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day
pub struct SendTransactionService {
thread: JoinHandle<()>,
sender: Mutex<Sender<TransactionInfo>>,
send_socket: UdpSocket,
tpu_address: SocketAddr,
}
struct TransactionInfo {
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_slot: Slot,
}
#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
rooted: u64,
expired: u64,
retried: u64,
failed: u64,
retained: u64,
}
impl SendTransactionService {
pub fn new(
cluster_info: &Arc<ClusterInfo>,
bank_forks: &Arc<RwLock<BankForks>>,
exit: &Arc<AtomicBool>,
) -> Self {
let (sender, receiver) = channel::<TransactionInfo>();
let tpu_address = cluster_info.my_contact_info().tpu;
let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address, exit.clone());
Self {
thread,
sender: Mutex::new(sender),
send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
tpu_address,
}
}
fn retry_thread(
receiver: Receiver<TransactionInfo>,
bank_forks: Arc<RwLock<BankForks>>,
tpu_address: SocketAddr,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let mut last_status_check = Instant::now();
let mut transactions = HashMap::new();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
Builder::new()
.name("send-tx-svc".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
transactions.insert(transaction_info.signature, transaction_info);
} else {
datapoint_warn!("send_transaction_service-queue-overflow");
}
}
if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
if !transactions.is_empty() {
datapoint_info!(
"send_transaction_service-queue-size",
("len", transactions.len(), i64)
);
let bank_forks = bank_forks.read().unwrap();
let root_bank = bank_forks.root_bank();
let working_bank = bank_forks.working_bank();
let _result = Self::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
}
last_status_check = Instant::now();
}
})
.unwrap()
}
fn process_transactions(
working_bank: &Arc<Bank>,
root_bank: &Arc<Bank>,
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
transactions: &mut HashMap<Signature, TransactionInfo>,
) -> ProcessTransactionsResult {
let mut result = ProcessTransactionsResult::default();
transactions.retain(|signature, transaction_info| {
if root_bank.has_signature(signature) {
info!("Transaction is rooted: {}", signature);
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
false
} else {
match working_bank.get_signature_status_slot(signature) {
None => {
// Transaction is unknown to the working bank, it might have been
// dropped or landed in another fork. Re-send it
info!("Retrying transaction: {}", signature);
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
&tpu_address,
&transaction_info.wire_transaction,
);
true
}
Some((_slot, status)) => {
if status.is_err() {
info!("Dropping failed transaction: {}", signature);
result.failed += 1;
inc_new_counter_info!("send_transaction_service-failed", 1);
false
} else {
result.retained += 1;
true
}
}
}
}
});
result
}
fn send_transaction(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
pub fn send(&self, signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) {
inc_new_counter_info!("send_transaction_service-enqueue", 1, 1);
Self::send_transaction(&self.send_socket, &self.tpu_address, &wire_transaction);
self.sender
.lock()
.unwrap()
.send(TransactionInfo {
signature,
wire_transaction,
last_valid_slot,
})
.unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
}
pub fn join(self) -> thread::Result<()> {
self.thread.join()
}
}
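Each pass over the retry queue classifies a transaction as rooted, expired, failed, landed-but-unrooted, or unknown, and only the last two stay queued (unknown ones are re-sent first). A sketch of the decision, factored out of the `retain()` above with a simplified `Status` stand-in:

enum Status {
    Rooted,
    Expired,
    Unknown,             // not seen by the working bank: re-send
    Landed { err: bool },
}

/// Returns true when the transaction should stay in the retry queue.
fn keep_in_queue(status: Status, resend: impl FnOnce()) -> bool {
    match status {
        Status::Rooted | Status::Expired => false, // done or unrecoverable
        Status::Landed { err: true } => false,     // failed on-chain: drop
        Status::Landed { err: false } => true,     // wait for rooting
        Status::Unknown => {
            resend(); // may have been dropped or landed on another fork
            true
        }
    }
}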
#[cfg(test)]
mod test {
use super::*;
use crate::rpc::tests::new_bank_forks;
use solana_sdk::{pubkey::Pubkey, signature::Signer};
#[test]
fn service_exit() {
let cluster_info = Arc::new(ClusterInfo::default());
let bank_forks = new_bank_forks().0;
let exit = Arc::new(AtomicBool::new(false));
let send_transaction_service =
SendTransactionService::new(&cluster_info, &bank_forks, &exit);
exit.store(true, Ordering::Relaxed);
send_transaction_service.join().unwrap();
}
#[test]
fn process_transactions() {
solana_logger::setup();
let (bank_forks, mint_keypair, _voting_keypair) = new_bank_forks();
let cluster_info = ClusterInfo::default();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_address = cluster_info.my_contact_info().tpu;
let root_bank = Arc::new(Bank::new_from_parent(
&bank_forks.read().unwrap().working_bank(),
&Pubkey::default(),
1,
));
let rooted_signature = root_bank
.transfer(1, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));
let non_rooted_signature = working_bank
.transfer(2, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let failed_signature = {
let blockhash = working_bank.last_blockhash();
let transaction = solana_sdk::system_transaction::transfer(
&mint_keypair,
&Pubkey::default(),
1,
blockhash,
);
let signature = transaction.signatures[0];
working_bank.process_transaction(&transaction).unwrap_err();
signature
};
let mut transactions = HashMap::new();
info!("Expired transactions are dropped..");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: root_bank.slot() - 1,
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
expired: 1,
..ProcessTransactionsResult::default()
}
);
info!("Rooted transactions are dropped...");
transactions.insert(
rooted_signature,
TransactionInfo {
signature: rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
rooted: 1,
..ProcessTransactionsResult::default()
}
);
info!("Failed transactions are dropped...");
transactions.insert(
failed_signature,
TransactionInfo {
signature: failed_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
failed: 1,
..ProcessTransactionsResult::default()
}
);
info!("Non-rooted transactions are kept...");
transactions.insert(
non_rooted_signature,
TransactionInfo {
signature: non_rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retained: 1,
..ProcessTransactionsResult::default()
}
);
transactions.clear();
info!("Unknown transactions are retried...");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retried: 1,
..ProcessTransactionsResult::default()
}
);
}
}


@@ -590,6 +590,8 @@ impl ServeRepair {
);
if let Some(packet) = packet {
res.packets.push(packet);
} else {
break;
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
@@ -864,6 +866,8 @@ mod tests {
// Should not panic.
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
// Giving no nonce after UNLOCK_NONCE_SLOT should return empty
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, None);
}
fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
@@ -902,40 +906,47 @@ mod tests {
// For an orphan request for `slot + num_slots - 1`, we should return the highest shreds
// from slots in the range [slot, slot + num_slots - 1]
let rv: Vec<_> = ServeRepair::run_orphan(
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots - 1,
5,
nonce,
)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
);
// Verify responses
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
})
.collect();
assert_eq!(rv, expected);
if Shred::is_nonce_unlocked(slot + num_slots - 1) && nonce.is_none() {
// If a nonce is expected but not provided, there should be no
// response
assert!(rv.is_none());
} else {
// Verify responses
let rv: Vec<_> = rv
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
})
.collect();
assert_eq!(rv, expected);
}
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");


@@ -30,7 +30,7 @@ use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{self, BankForksInfo},
create_new_tmp_ledger,
hardened_unpack::open_genesis_config,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
};
@@ -81,6 +81,7 @@ pub struct ValidatorConfig {
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
pub no_rocksdb_compaction: bool,
pub max_genesis_archive_unpacked_size: u64,
}
impl Default for ValidatorConfig {
@@ -107,6 +108,7 @@ impl Default for ValidatorConfig {
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
no_rocksdb_compaction: false,
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
}
}
}
@@ -247,6 +249,7 @@ impl Validator {
block_commitment_cache.clone(),
));
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
@@ -269,6 +272,7 @@ impl Validator {
storage_state.clone(),
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
@@ -388,7 +392,7 @@ impl Validator {
(None, None)
};
wait_for_supermajority(config, &bank, &cluster_info);
wait_for_supermajority(config, &bank, &cluster_info, rpc_override_health_check);
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
@@ -579,7 +583,8 @@ fn new_banks_from_blockstore(
LeaderScheduleCache,
Option<(Slot, Hash)>,
) {
let genesis_config = open_genesis_config(blockstore_path);
let genesis_config =
open_genesis_config(blockstore_path, config.max_genesis_archive_unpacked_size);
// This needs to be limited otherwise the state in the VoteAccount data
// grows too large
@@ -643,7 +648,12 @@ fn new_banks_from_blockstore(
)
}
fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Bank,
cluster_info: &ClusterInfo,
rpc_override_health_check: Arc<AtomicBool>,
) {
if config.wait_for_supermajority != Some(bank.slot()) {
return;
}
@@ -658,8 +668,13 @@ fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &
if gossip_stake_percent >= 80 {
break;
}
// The normal RPC health checks don't apply as the node is waiting, so feign health to
// prevent load balancers from removing the node from their list of candidates during a
// manual restart.
rpc_override_health_check.store(true, Ordering::Relaxed);
sleep(Duration::new(1, 0));
}
rpc_override_health_check.store(false, Ordering::Relaxed);
}
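The override flag is only half of the mechanism; the RPC health endpoint has to consult it, and that handler is outside this diff. A hypothetical sketch of how such a check might honor the flag:

```rust
// Hedged sketch (hypothetical handler, not the actual RPC service code):
// report "ok" whenever the override is set, regardless of how far behind
// the node actually is.
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

fn health_status(override_health: &Arc<AtomicBool>, node_is_behind: bool) -> &'static str {
    if override_health.load(Ordering::Relaxed) {
        "ok" // feigned health during --wait-for-supermajority
    } else if node_is_behind {
        "behind"
    } else {
        "ok"
    }
}

fn main() {
    let flag = Arc::new(AtomicBool::new(true));
    assert_eq!(health_status(&flag, true), "ok");
    flag.store(false, Ordering::Relaxed);
    assert_eq!(health_status(&flag, true), "behind");
}
```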
pub struct TestValidator {


@@ -5,7 +5,7 @@ use solana_core::cluster_info;
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@@ -426,37 +426,35 @@ fn network_run_pull(
.map(|f| f.filter.bits.len() as usize / 8)
.sum::<usize>();
bytes += serialized_size(&caller_info).unwrap() as usize;
let filters = filters
let filters: Vec<_> = filters
.into_iter()
.map(|f| (caller_info.clone(), f))
.collect();
let rsp = network
let rsp: Vec<_> = network
.get(&to)
.map(|node| {
let mut rsp = vec![];
rsp.append(
&mut node
.lock()
.unwrap()
.process_pull_requests(filters, now)
.into_iter()
.flatten()
.collect(),
);
let rsp = node
.lock()
.unwrap()
.generate_pull_responses(&filters)
.into_iter()
.flatten()
.collect();
node.lock().unwrap().process_pull_requests(filters, now);
rsp
})
.unwrap();
bytes += serialized_size(&rsp).unwrap() as usize;
msgs += rsp.len();
network.get(&from).map(|node| {
node.lock()
.unwrap()
.mark_pull_request_creation_time(&from, now);
overhead += node
.lock()
.unwrap()
.process_pull_response(&from, &timeouts, rsp, now)
.0;
let mut node = node.lock().unwrap();
node.mark_pull_request_creation_time(&from, now);
let mut stats = ProcessPullStats::default();
let (vers, vers_expired_timeout) =
node.filter_pull_responses(&timeouts, rsp, now, &mut stats);
node.process_pull_responses(&from, vers, vers_expired_timeout, now, &mut stats);
overhead += stats.failed_insert;
overhead += stats.failed_timeout;
});
(bytes, msgs, overhead)
})


@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.1.15"
version = "1.1.19"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -27,10 +27,12 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
* [getFees](jsonrpc-api.md#getfees)
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getIdentity](jsonrpc-api.md#getidentity)
* [getInflation](jsonrpc-api.md#getinflation)
* [getInflationGovernor](jsonrpc-api.md#getinflationgovernor)
* [getInflationRate](jsonrpc-api.md#getinflationrate)
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
@@ -494,7 +496,8 @@ Returns the fee calculator associated with the query blockhash, or `null` if the
#### Parameters:
* `blockhash: <string>`, query blockhash as a Base58 encoded string
* `<string>` - query blockhash as a Base58 encoded string
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@@ -542,6 +545,34 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
```
### getFees
Returns a recent block hash from the ledger, a fee schedule that can be used to
compute the cost of submitting a transaction using it, and the last slot in
which the blockhash will be valid.
#### Parameters:
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:
* `blockhash: <string>` - a Hash as base-58 encoded string
* `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
* `lastValidSlot: <u64>` - last slot in which a blockhash will be valid
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFees"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{lamportsPerSignature":5000},"lastValidSlot":297}},"id":1}
```
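The same request from Rust, as a minimal sketch assuming the `reqwest` crate (0.10, with the `blocking` and `json` features, as used elsewhere in this release) and `serde_json`; the field names follow the response shown above:

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let request = json!({"jsonrpc": "2.0", "id": 1, "method": "getFees"});
    let response: Value = reqwest::blocking::Client::new()
        .post("http://localhost:8899")
        .json(&request)
        .send()?
        .json()?;
    let value = &response["result"]["value"];
    // lastValidSlot bounds how long the returned blockhash may be reused
    println!(
        "blockhash {} valid through slot {}",
        value["blockhash"], value["lastValidSlot"]
    );
    Ok(())
}
```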
### getFirstAvailableBlock
Returns the slot of the lowest confirmed block that has not been purged from the ledger
@@ -609,33 +640,59 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"identity": "2r1F4iWqVcb8M1DbAjQuFpebkQHY9hcVU4WuW2DJBppN"},"id":1}
```
### getInflation
### getInflationGovernor
Returns the inflation configuration of the cluster
Returns the current inflation governor
#### Parameters:
None
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
The result field will be an Inflation object with the following fields:
The result field will be a JSON object with the following fields:
* `initial: <f64>`, the initial inflation percentage from time 0
* `terminal: <f64>`, terminal inflation percentage
* `taper: <f64>`, rate per year at which inflation is lowered
* `foundation: <f64>`, percentage of total inflation allocated to the foundation
* `foundationTerm: <f64>`, duration of foundation pool inflation in years
* `storage: <f64>`, percentage of total inflation allocated to storage rewards
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflation"}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationGovernor"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"taper":0.15,"terminal":0.015},"id":1}
```
### getInflationRate
Returns the specific inflation values for a particular epoch
#### Parameters:
* `<u64>` - (optional) Epoch, default is the current epoch
#### Results:
The result field will be a JSON object with the following fields:
* `total: <f64>`, total inflation
* `validator: <f64>`, inflation allocated to validators
* `foundation: <f64>`, inflation allocated to the foundation
* `epoch: <f64>`, epoch for which these values are valid
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationRate"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"epoch":100,"foundation":0.001,"total":0.149,"validator":0.148},"id":1}
```
### getLargestAccounts
@@ -769,7 +826,7 @@ An RpcResponse containing a JSON object consisting of a string blockhash and Fee
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"lamportsPerSignature":5000}}},"id":1}
```
### getSignatureStatuses
@@ -1017,7 +1074,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.1.15"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.1.19"},"id":1}
```
### getVoteAccounts
@@ -1099,11 +1156,20 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### sendTransaction
Creates new transaction
Submits a signed transaction to the cluster for processing.
Before submitting, the following preflight checks are performed:
1. The transaction signatures are verified
2. The transaction is simulated against the latest max confirmed bank
and on failure an error will be returned. Preflight checks may be disabled if
desired.
#### Parameters:
* `<string>` - fully-signed Transaction, as base-58 encoded string
* `<object>` - (optional) Configuration object containing the following field:
* `skipPreflight: <bool>` - if true, skip the preflight transaction checks (default: false)
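A hedged Rust sketch of submitting with preflight disabled, again assuming `reqwest` (blocking + json) and `serde_json`; the transaction string is a caller-supplied placeholder, not a real signed transaction:

```rust
use serde_json::{json, Value};

fn send_without_preflight(signed_tx_base58: &str) -> Result<Value, Box<dyn std::error::Error>> {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "sendTransaction",
        // skipPreflight bypasses the signature-verification and simulation steps
        "params": [signed_tx_base58, { "skipPreflight": true }]
    });
    Ok(reqwest::blocking::Client::new()
        .post("http://localhost:8899")
        .json(&request)
        .send()?
        .json()?)
}
```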
#### Results:
@@ -1132,6 +1198,10 @@ Simulate sending a transaction
#### Results:
An RpcResponse containing a TransactionStatus object
The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
* `logs: <array | null>` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)
#### Example:


@@ -6,7 +6,7 @@ Solana takes a very different approach, which it calls _Proof of History_ or _Po
Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms.
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.155.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.195.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
## Relationship to VDFs


@@ -44,6 +44,8 @@ $ solana-validator \
--limit-ledger-size
```
The `--trusted-validator` is operated by Solana
## Testnet
* Testnet is where we stress test recent release features on a live
@@ -72,16 +74,21 @@ $ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--rpc-port 8899 \
--dynamic-port-range 8000-8010 \
--entrypoint 35.203.170.30:8001 \
--expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
--expected-shred-version 56096 \
--expected-shred-version 62235 \
--limit-ledger-size
```
The identities of the `--trusted-validator`s are:
* `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana)
* `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One
## Mainnet Beta
A permissionless, persistent cluster for early token holders and launch partners.
Currently smart contracts, rewards, and inflation are disabled.
@@ -117,3 +124,5 @@ $ solana-validator \
--expected-shred-version 64864 \
--limit-ledger-size
```
All four `--trusted-validator`s are operated by Solana


@@ -6,9 +6,9 @@ Solana is an open source project implementing a new, high-performance, permissio
## Why Solana?
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.155.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.195.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.151.1078)
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.191.1078)
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.


@@ -7,6 +7,25 @@ experience for most people who are new or experienced with using crypto wallets.
currently the easiest and fastest way to get set up with a new wallet on Solana.
The app is free and getting your wallet set up only takes a few minutes.
### Trust Wallet Security
Tokens held in Trust Wallet are only as secure as the device on which the app is
installed. Anyone who is able to unlock your phone or tablet may be able to
use the Trust Wallet app and transfer your tokens. To improve security,
you can add a passcode to the Trust Wallet application.
To add a Trust Wallet passcode, open the app and go to
Settings -> Security -> Passcode.
If someone gains access to your Trust Wallet application, they can access your
recovery seed phrase.
Anyone who has access to your seed phrase will be able to recreate
your Trust Wallet keys on a different device. From there, they could
sign transactions from that device rather than on your own phone or tablet.
The seed phrase is displayed when a new wallet is created and it can also be
viewed at any later time in the app by following these steps:
- Go to Settings -> Wallets
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"
{% page-ref page="trust-wallet.md" %}
## Ledger Live with Ledger Nano S


@@ -59,7 +59,7 @@ some interface for signing transactions.
A hardware wallet, such as the
[Ledger hardware wallet](https://www.ledger.com/), offers a great blend of
security and convenience for cryptocurrencies. It effectively automates the
process of offline signing while retaining nearly all the convenience of an FS
wallet.
process of offline signing while retaining nearly all the convenience of a file
system wallet.
{% page-ref page="../hardware-wallet/README.md" %}


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-dos"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,10 +13,10 @@ clap = "2.33.0"
log = "0.4.8"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.1.15"
version = "1.1.19"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.10.0"
indicatif = "0.14.0"
log = "0.4.8"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
tar = "0.4.26"
[lib]


@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.1.15"
version = "1.1.19"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.105"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
tokio = "0.1"
tokio-codec = "0.1"


@@ -6,7 +6,7 @@ VERSION=$PERF_LIBS_VERSION-1
set -e
cd "$(dirname "$0")"
if [[ ! -f target/perf-libs/.$VERSION ]]; then
if [[ $VERSION != "$(cat target/perf-libs/.version 2> /dev/null)" ]]; then
if [[ $(uname) != Linux ]]; then
echo Note: Performance libraries are only available for Linux
exit 0
@@ -17,6 +17,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
exit 0
fi
rm -rf target/perf-libs
mkdir -p target/perf-libs
(
set -x
@@ -35,7 +36,7 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
mkdir -p ~/.cache
mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
fi
touch .$VERSION
echo "$VERSION" > .version
)
# Setup symlinks so the perf-libs/ can be found from all binaries run out of


@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.1.15"
version = "1.1.19"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,13 +10,13 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-vest-program = { path = "../programs/vest", version = "1.1.15" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.19" }
solana-budget-program = { path = "../programs/budget", version = "1.1.19" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
solana-vest-program = { path = "../programs/vest", version = "1.1.19" }
[lib]
crate-type = ["lib"]


@@ -18,36 +18,35 @@ use log::*;
use solana_runtime::bank::{Bank, EnteredEpochCallback};
pub fn get_inflation(operating_mode: OperatingMode, epoch: Epoch) -> Option<Inflation> {
let past_epoch_inflation = get_inflation_for_epoch(operating_mode, epoch.saturating_sub(1));
let epoch_inflation = get_inflation_for_epoch(operating_mode, epoch);
if epoch_inflation != past_epoch_inflation || epoch == 0 {
Some(epoch_inflation)
} else {
None
}
}
pub fn get_inflation_for_epoch(operating_mode: OperatingMode, epoch: Epoch) -> Inflation {
match operating_mode {
OperatingMode::Development => {
if epoch == 0 {
Some(Inflation::default())
} else {
None
}
}
OperatingMode::Development => Inflation::default(),
OperatingMode::Preview => {
if epoch == 0 {
// No inflation at epoch 0
Some(Inflation::new_disabled())
} else if epoch == 44 {
Some(Inflation::default())
if epoch >= 44 {
Inflation::default()
} else {
None
Inflation::new_disabled()
}
}
OperatingMode::Stable => {
if epoch == 0 {
// No inflation at epoch 0
Some(Inflation::new_disabled())
} else if epoch == std::u64::MAX {
if epoch == std::u64::MAX {
// Inflation starts
//
// The epoch of std::u64::MAX - 1 is a placeholder and is expected to be reduced in
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in
// a future hard fork.
Some(Inflation::default())
Inflation::default()
} else {
None
// No inflation from epoch 0
Inflation::new_disabled()
}
}
}
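The refactor makes `get_inflation_for_epoch` total (it always returns an `Inflation`) and moves change detection into `get_inflation`, which only yields a value on epochs where the setting changes. A self-contained sketch of that pattern, with `Inflation` reduced to a bare number and the Preview-style schedule above assumed:

```rust
// Hedged sketch: simplified stand-in for the schedule in the diff above.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Inflation(f64);

// Total function: every epoch maps to some inflation setting.
fn inflation_for_epoch(epoch: u64) -> Inflation {
    if epoch >= 44 {
        Inflation(0.15) // default inflation kicks in at epoch 44 (Preview)
    } else {
        Inflation(0.0) // disabled before that
    }
}

// Reports Some only when the setting differs from the previous epoch's,
// plus epoch 0 so the initial value is always delivered once.
fn inflation_if_changed(epoch: u64) -> Option<Inflation> {
    let past = inflation_for_epoch(epoch.saturating_sub(1));
    let current = inflation_for_epoch(epoch);
    if current != past || epoch == 0 {
        Some(current)
    } else {
        None
    }
}

fn main() {
    assert_eq!(inflation_if_changed(0), Some(Inflation(0.0)));
    assert_eq!(inflation_if_changed(43), None);
    assert_eq!(inflation_if_changed(44), Some(Inflation(0.15)));
    println!("inflation schedule checks pass");
}
```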


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,13 +15,14 @@ chrono = "0.4"
serde = "1.0.105"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
tempfile = "3.1.0"
[[bin]]


@@ -6,7 +6,10 @@ use solana_clap_utils::{
input_validators::{is_pubkey_or_keypair, is_rfc3339_datetime, is_valid_percentage},
};
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick};
use solana_ledger::{
blockstore::create_new_ledger, blockstore_db::AccessType,
hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, poh::compute_hashes_per_tick,
};
use solana_sdk::{
account::Account,
clock,
@@ -121,6 +124,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
timing::duration_as_us(&PohConfig::default().target_tick_duration);
let default_ticks_per_slot = &clock::DEFAULT_TICKS_PER_SLOT.to_string();
let default_operating_mode = "stable";
let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string();
let matches = App::new(crate_name!())
.about(crate_description!())
@@ -327,6 +331,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
"Selects the features that will be enabled for the cluster"
),
)
.arg(
Arg::with_name("max_genesis_archive_unpacked_size")
.long("max-genesis-archive-unpacked-size")
.value_name("NUMBER")
.takes_value(true)
.default_value(&default_genesis_archive_unpacked_size)
.help(
"maximum total uncompressed file size of created genesis archive",
),
)
.get_matches();
let faucet_lamports = value_t!(matches, "faucet_lamports", u64).unwrap_or(0);
@@ -513,6 +527,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
}
let max_genesis_archive_unpacked_size =
value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64);
let issued_lamports = genesis_config
.accounts
.iter()
@@ -521,7 +538,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
add_genesis_accounts(&mut genesis_config, issued_lamports - faucet_lamports);
create_new_ledger(&ledger_path, &genesis_config)?;
solana_logger::setup();
create_new_ledger(
&ledger_path,
&genesis_config,
max_genesis_archive_unpacked_size,
AccessType::PrimaryOnly,
)?;
println!("{}", genesis_config);
Ok(())


@@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-net-utils = { path = "../net-utils", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -24,11 +24,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-config-program = { path = "../programs/config", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-config-program = { path = "../programs/config", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
semver = "0.9.0"
tar = "0.4.26"
tempdir = "0.3.7"


@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "1.1.15"
version = "1.1.19"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ bs58 = "0.3.0"
clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.12.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-cli-config = { path = "../cli-config", version = "1.1.15" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-cli-config = { path = "../cli-config", version = "1.1.19" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
tiny-bip39 = "0.7.0"
[[bin]]


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,17 +12,18 @@ homepage = "https://solana.com/"
bs58 = "0.3.0"
clap = "2.33.0"
histogram = "*"
log = { version = "0.4.8" }
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-cli = { path = "../cli", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-cli = { path = "../cli", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.19" }
solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
tempfile = "3.1.0"
[dev-dependencies]


@@ -8,12 +8,13 @@ use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils,
blockstore::Blockstore,
blockstore_db::{self, Column, Database},
blockstore_db::{self, AccessType, Column, Database},
blockstore_processor::{BankForksInfo, ProcessOptions},
hardened_unpack::open_genesis_config,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
rooted_slot_iterator::RootedSlotIterator,
snapshot_utils,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::Slot, genesis_config::GenesisConfig, native_token::lamports_to_sol, pubkey::Pubkey,
shred_version::compute_shred_version,
@@ -28,8 +29,11 @@ use std::{
path::{Path, PathBuf},
process::{exit, Command, Stdio},
str::FromStr,
sync::Arc,
};
use log::*;
#[derive(PartialEq)]
enum LedgerOutputMethod {
Print,
@@ -494,8 +498,8 @@ fn analyze_storage(database: &Database) -> Result<(), String> {
Ok(())
}
fn open_blockstore(ledger_path: &Path) -> Blockstore {
match Blockstore::open(ledger_path) {
fn open_blockstore(ledger_path: &Path, access_type: AccessType) -> Blockstore {
match Blockstore::open_with_access_type(ledger_path, access_type) {
Ok(blockstore) => blockstore,
Err(err) => {
eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
@@ -504,8 +508,8 @@ fn open_blockstore(ledger_path: &Path) -> Blockstore {
}
}
fn open_database(ledger_path: &Path) -> Database {
match Database::open(&ledger_path.join("rocksdb")) {
fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
match Database::open(&ledger_path.join("rocksdb"), access_type) {
Ok(database) => database,
Err(err) => {
eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
@@ -528,31 +532,57 @@ fn load_bank_forks(
ledger_path: &PathBuf,
genesis_config: &GenesisConfig,
process_options: ProcessOptions,
access_type: AccessType,
) -> bank_forks_utils::LoadResult {
let blockstore = open_blockstore(&ledger_path, access_type);
let snapshot_path = ledger_path.clone().join(if blockstore.is_primary_access() {
"snapshot"
} else {
"snapshot.ledger-tool"
});
let snapshot_config = if arg_matches.is_present("no_snapshot") {
None
} else {
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path: ledger_path.clone(),
snapshot_path: ledger_path.clone().join("snapshot"),
snapshot_path,
})
};
let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") {
if !blockstore.is_primary_access() {
// Be defensive: even when an accounts dir is explicitly specified, it's still possible
// to wipe a dir shared by the running validator!
eprintln!("Error: custom accounts path is not supported under secondary access");
exit(1);
}
account_paths.split(',').map(PathBuf::from).collect()
} else {
} else if blockstore.is_primary_access() {
vec![ledger_path.join("accounts")]
} else {
let non_primary_accounts_path = ledger_path.join("accounts.ledger-tool");
warn!(
"Default accounts path is switched aligning with Blockstore's secondary access: {:?}",
non_primary_accounts_path
);
vec![non_primary_accounts_path]
};
bank_forks_utils::load(
&genesis_config,
&open_blockstore(&ledger_path),
&blockstore,
account_paths,
snapshot_config.as_ref(),
process_options,
)
}
fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> GenesisConfig {
let max_genesis_archive_unpacked_size =
value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64);
open_genesis_config(ledger_path, max_genesis_archive_unpacked_size)
}
#[allow(clippy::cognitive_complexity)]
fn main() {
const DEFAULT_ROOT_COUNT: &str = "1";
@@ -586,6 +616,13 @@ fn main() {
.multiple(true)
.takes_value(true)
.help("Add a hard fork at this slot");
let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string();
let max_genesis_archive_unpacked_size_arg = Arg::with_name("max_genesis_archive_unpacked_size")
.long("max-genesis-archive-unpacked-size")
.value_name("NUMBER")
.takes_value(true)
.default_value(&default_genesis_archive_unpacked_size)
.help("maximum total uncompressed size of unpacked genesis archive");
let matches = App::new(crate_name!())
.about(crate_description!())
@@ -635,15 +672,18 @@ fn main() {
.subcommand(
SubCommand::with_name("genesis")
.about("Prints the ledger's genesis config")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("genesis-hash")
.about("Prints the ledger's genesis hash")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("shred-version")
.about("Prints the ledger's shred hash")
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("bounds")
@@ -667,6 +707,7 @@ fn main() {
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("skip_poh_verify")
.long("skip-poh-verify")
@@ -680,6 +721,7 @@ fn main() {
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("include_all_votes")
.long("include-all-votes")
@@ -698,6 +740,7 @@ fn main() {
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("snapshot_slot")
.index(1)
@@ -713,6 +756,17 @@ fn main() {
.takes_value(true)
.help("Output directory for the snapshot"),
)
.arg(
Arg::with_name("warp_slot")
.required(false)
.long("warp-slot")
.takes_value(true)
.value_name("WARP_SLOT")
.validator(is_slot)
.help("After loading the snapshot slot warp the ledger to WARP_SLOT, \
which could be a slot in a galaxy far far away"),
)
).subcommand(
SubCommand::with_name("accounts")
.about("Print account contents after processing in the ledger")
@@ -726,6 +780,7 @@ fn main() {
.takes_value(false)
.help("Include sysvars too"),
)
.arg(&max_genesis_archive_unpacked_size_arg)
).subcommand(
SubCommand::with_name("capitalization")
.about("Print capitalization (aka, total suppy)")
@@ -733,6 +788,7 @@ fn main() {
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
).subcommand(
SubCommand::with_name("purge")
.about("Purge the ledger at the block height")
@@ -799,16 +855,19 @@ fn main() {
("print", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
output_ledger(
open_blockstore(&ledger_path),
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
starting_slot,
LedgerOutputMethod::Print,
);
}
("genesis", Some(_arg_matches)) => {
println!("{}", open_genesis_config(&ledger_path));
("genesis", Some(arg_matches)) => {
println!("{}", open_genesis_config_by(&ledger_path, arg_matches));
}
("genesis-hash", Some(_arg_matches)) => {
println!("{}", open_genesis_config(&ledger_path).hash());
("genesis-hash", Some(arg_matches)) => {
println!(
"{}",
open_genesis_config_by(&ledger_path, arg_matches).hash()
);
}
("shred-version", Some(arg_matches)) => {
let process_options = ProcessOptions {
@@ -817,8 +876,14 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let bank_info = &bank_forks_info[0];
let bank = bank_forks[bank_info.bank_slot].clone();
@@ -839,7 +904,7 @@ fn main() {
}
("slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let blockstore = open_blockstore(&ledger_path);
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
for slot in slots {
println!("Slot {}", slot);
if let Err(err) = output_slot(&blockstore, slot, &LedgerOutputMethod::Print) {
@@ -850,14 +915,14 @@ fn main() {
("json", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
output_ledger(
open_blockstore(&ledger_path),
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
starting_slot,
LedgerOutputMethod::Json,
);
}
("set-dead-slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let blockstore = open_blockstore(&ledger_path);
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
for slot in slots {
match blockstore.set_dead_slot(slot) {
Ok(_) => println!("Slot {} dead", slot),
@@ -872,13 +937,17 @@ fn main() {
poh_verify: !arg_matches.is_present("skip_poh_verify"),
..ProcessOptions::default()
};
println!("{}", open_genesis_config(&ledger_path).hash());
println!(
"genesis hash: {}",
open_genesis_config_by(&ledger_path, arg_matches).hash()
);
load_bank_forks(
arg_matches,
&ledger_path,
&open_genesis_config(&ledger_path),
&open_genesis_config_by(&ledger_path, arg_matches),
process_options,
AccessType::TryPrimaryThenSecondary,
)
.unwrap_or_else(|err| {
eprintln!("Ledger verification failed: {:?}", err);
@@ -899,8 +968,9 @@ fn main() {
match load_bank_forks(
arg_matches,
&ledger_path,
&open_genesis_config(&ledger_path),
&open_genesis_config_by(&ledger_path, arg_matches),
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let dot = graph_forks(
@@ -933,6 +1003,7 @@ fn main() {
("create-snapshot", Some(arg_matches)) => {
let snapshot_slot = value_t_or_exit!(arg_matches, "snapshot_slot", Slot);
let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
let warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok();
let process_options = ProcessOptions {
dev_halt_at_slot: Some(snapshot_slot),
@@ -940,15 +1011,35 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, _bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
});
let bank = bank_forks
.get(snapshot_slot)
.unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
})
.clone();
let bank = if let Some(warp_slot) = warp_slot {
Arc::new(Bank::warp_from_parent(
&bank,
bank.collector_id(),
warp_slot,
))
} else {
bank
};
println!("Creating a snapshot of slot {}", bank.slot());
assert!(bank.is_complete());
bank.squash();
let temp_dir = tempfile::TempDir::new().unwrap_or_else(|err| {
@@ -972,7 +1063,8 @@ fn main() {
snapshot_utils::archive_snapshot_package(&package).map(|ok| {
println!(
"Successfully created snapshot for slot {}: {:?}",
snapshot_slot, package.tar_output_file
bank.slot(),
package.tar_output_file
);
println!(
"Shred version: {}",
@@ -1003,9 +1095,15 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let include_sysvars = arg_matches.is_present("include_sysvars");
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let slot = dev_halt_at_slot.unwrap_or_else(|| {
if bank_forks_info.len() > 1 {
@@ -1053,8 +1151,14 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
match load_bank_forks(
arg_matches,
&ledger_path,
&genesis_config,
process_options,
AccessType::TryPrimaryThenSecondary,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache, _snapshot_hash)) => {
let slot = dev_halt_at_slot.unwrap_or_else(|| {
if bank_forks_info.len() > 1 {
@@ -1125,12 +1229,12 @@ fn main() {
("purge", Some(arg_matches)) => {
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
let end_slot = value_t_or_exit!(arg_matches, "end_slot", Slot);
let blockstore = open_blockstore(&ledger_path);
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
blockstore.purge_slots(start_slot, end_slot);
blockstore.purge_from_next_slots(start_slot, end_slot);
}
("list-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(&ledger_path);
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
@@ -1183,7 +1287,9 @@ fn main() {
});
}
("bounds", Some(arg_matches)) => {
match open_blockstore(&ledger_path).slot_meta_iterator(0) {
match open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary)
.slot_meta_iterator(0)
{
Ok(metas) => {
let all = arg_matches.is_present("all");
@@ -1209,15 +1315,20 @@ fn main() {
}
}
}
("analyze-storage", _) => match analyze_storage(&open_database(&ledger_path)) {
Ok(()) => {
println!("Ok.");
("analyze-storage", _) => {
match analyze_storage(&open_database(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
)) {
Ok(()) => {
println!("Ok.");
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
},
}
("", _) => {
eprintln!("{}", matches.usage());
exit(1);


@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.1.15"
version = "1.1.19"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -28,19 +28,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
regex = "1.3.6"
serde = "1.0.105"
serde_bytes = "0.11.3"
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.19" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-measure = { path = "../measure", version = "1.1.19" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
solana-perf = { path = "../perf", version = "1.1.19" }
ed25519-dalek = "1.0.0-pre.3"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
symlink = "0.1.0"
tar = "0.4.26"
thiserror = "1.0"
@@ -57,7 +57,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.19" }
[lib]
crate-type = ["lib"]


@@ -4,12 +4,13 @@
pub use crate::{blockstore_db::BlockstoreError, blockstore_meta::SlotMeta};
use crate::{
blockstore_db::{
columns as cf, Column, Database, IteratorDirection, IteratorMode, LedgerColumn, Result,
WriteBatch,
columns as cf, AccessType, Column, Database, IteratorDirection, IteratorMode, LedgerColumn,
Result, WriteBatch,
},
blockstore_meta::*,
entry::{create_ticks, Entry},
erasure::ErasureConfig,
hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
leader_schedule_cache::LeaderScheduleCache,
next_slots_iterator::NextSlotsIterator,
shred::{Result as ShredResult, Shred, Shredder},
@@ -45,6 +46,7 @@ use std::{
cmp,
collections::HashMap,
fs,
io::{Error as IOError, ErrorKind},
path::{Path, PathBuf},
rc::Rc,
sync::{
@@ -175,6 +177,17 @@ impl Blockstore {
/// Opens a Ledger in directory, provides "infinite" window of shreds
pub fn open(ledger_path: &Path) -> Result<Blockstore> {
Self::do_open(ledger_path, AccessType::PrimaryOnly)
}
pub fn open_with_access_type(
ledger_path: &Path,
access_type: AccessType,
) -> Result<Blockstore> {
Self::do_open(ledger_path, access_type)
}
fn do_open(ledger_path: &Path, access_type: AccessType) -> Result<Blockstore> {
fs::create_dir_all(&ledger_path)?;
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
@@ -183,7 +196,7 @@ impl Blockstore {
// Open the database
let mut measure = Measure::start("open");
info!("Opening database at {:?}", blockstore_path);
let db = Database::open(&blockstore_path)?;
let db = Database::open(&blockstore_path, access_type)?;
// Create the metadata column family
let meta_cf = db.column();
@@ -265,7 +278,7 @@ impl Blockstore {
pub fn open_with_signal(
ledger_path: &Path,
) -> Result<(Self, Receiver<bool>, CompletedSlotsReceiver)> {
let mut blockstore = Self::open(ledger_path)?;
let mut blockstore = Self::open_with_access_type(ledger_path, AccessType::PrimaryOnly)?;
let (signal_sender, signal_receiver) = sync_channel(1);
let (completed_slots_sender, completed_slots_receiver) =
sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
@@ -332,20 +345,18 @@ impl Blockstore {
}
}
}
if !self.no_compaction {
if let Err(e) = self.compact_storage(from_slot, to_slot) {
// This error is not fatal and indicates an internal error
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
e, from_slot, to_slot
);
}
}
}
// TODO: rename purge_slots() to purge_and_compact_slots()
pub fn purge_slots(&self, from_slot: Slot, to_slot: Slot) {
self.purge_slots_with_delay(from_slot, to_slot, None)
self.purge_slots_with_delay(from_slot, to_slot, None);
if let Err(e) = self.compact_storage(from_slot, to_slot) {
// This error is not fatal and indicates an internal error?
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
e, from_slot, to_slot
);
}
}
/// Ensures that the SlotMeta::next_slots vector for all slots contain no references in the
@@ -462,6 +473,10 @@ impl Blockstore {
}
pub fn compact_storage(&self, from_slot: Slot, to_slot: Slot) -> Result<bool> {
if self.no_compaction {
info!("compact_storage: compaction disabled");
return Ok(false);
}
info!("compact_storage: from {} to {}", from_slot, to_slot);
let mut compact_timer = Measure::start("compact_range");
let result = self
@@ -2230,6 +2245,10 @@ impl Blockstore {
pub fn storage_size(&self) -> Result<u64> {
self.db.storage_size()
}
pub fn is_primary_access(&self) -> bool {
self.db.is_primary_access()
}
}
fn update_slot_meta(
@@ -2661,12 +2680,17 @@ fn calculate_stake_weighted_timestamp(
// Creates a new ledger with slot 0 full of ticks (and only ticks).
//
// Returns the blockhash that can be used to append entries with.
pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) -> Result<Hash> {
pub fn create_new_ledger(
ledger_path: &Path,
genesis_config: &GenesisConfig,
max_genesis_archive_unpacked_size: u64,
access_type: AccessType,
) -> Result<Hash> {
Blockstore::destroy(ledger_path)?;
genesis_config.write(&ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
let blockstore = Blockstore::open(ledger_path)?;
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type)?;
let ticks_per_slot = genesis_config.ticks_per_slot;
let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
@@ -2697,7 +2721,6 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) ->
.output()
.unwrap();
if !output.status.success() {
use std::io::{Error as IOError, ErrorKind};
use std::str::from_utf8;
error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));
@@ -2711,6 +2734,54 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) ->
)));
}
// ensure the genesis archive can be unpacked and it is under
// max_genesis_archive_unpacked_size, immediately after creating it above.
{
let temp_dir = tempfile::TempDir::new().unwrap();
// unpack into a temp dir, while completely discarding the unpacked files
let unpack_check = unpack_genesis_archive(
&archive_path,
&temp_dir.into_path(),
max_genesis_archive_unpacked_size,
);
if let Err(unpack_err) = unpack_check {
// stash the problematic original genesis-related files for later
// examination, and to prevent the validator and ledger-tool from
// naively consuming them
let mut error_messages = String::new();
fs::rename(
&ledger_path.join("genesis.tar.bz2"),
ledger_path.join("genesis.tar.bz2.failed"),
)
.unwrap_or_else(|e| {
error_messages += &format!("/failed to stash problematic genesis.tar.bz2: {}", e)
});
fs::rename(
&ledger_path.join("genesis.bin"),
ledger_path.join("genesis.bin.failed"),
)
.unwrap_or_else(|e| {
error_messages += &format!("/failed to stash problematic genesis.bin: {}", e)
});
fs::rename(
&ledger_path.join("rocksdb"),
ledger_path.join("rocksdb.failed"),
)
.unwrap_or_else(|e| {
error_messages += &format!("/failed to stash problematic rocksdb: {}", e)
});
return Err(BlockstoreError::IO(IOError::new(
ErrorKind::Other,
format!(
"Error checking to unpack genesis archive: {}{}",
unpack_err, error_messages
),
)));
}
}
Ok(last_hash)
}
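
The block above is a create-then-verify round trip: immediately after writing genesis.tar.bz2, the archive is unpacked into a throwaway temp dir under the same size limit that validators will enforce later, so an oversized or corrupt archive fails at creation time rather than at validator startup. On failure the genesis artifacts are renamed with a .failed suffix instead of deleted, preserving them for inspection. A sketch of the caller's side, using the crate's default limit:

// Create a ledger whose genesis archive is verified against the 10 MiB
// default unpacked-size limit before this call returns.
let last_hash = create_new_ledger(
    &ledger_path,
    &genesis_config,
    MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
    AccessType::PrimaryOnly,
)?;
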
@@ -2750,7 +2821,11 @@ pub fn get_ledger_path_from_name(name: &str) -> PathBuf {
#[macro_export]
macro_rules! create_new_tmp_ledger {
($genesis_config:expr) => {
$crate::blockstore::create_new_ledger_from_name($crate::tmp_ledger_name!(), $genesis_config)
$crate::blockstore::create_new_ledger_from_name(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::AccessType::PrimaryOnly,
)
};
}
@@ -2776,9 +2851,19 @@ pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: Slot) -> boo
//
// Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only
// ticks)
pub fn create_new_ledger_from_name(name: &str, genesis_config: &GenesisConfig) -> (PathBuf, Hash) {
pub fn create_new_ledger_from_name(
name: &str,
genesis_config: &GenesisConfig,
access_type: AccessType,
) -> (PathBuf, Hash) {
let ledger_path = get_ledger_path_from_name(name);
let blockhash = create_new_ledger(&ledger_path, genesis_config).unwrap();
let blockhash = create_new_ledger(
&ledger_path,
genesis_config,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
access_type,
)
.unwrap();
(ledger_path, blockhash)
}
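
The test-facing macro keeps its one-argument form, with the access type filled in as PrimaryOnly. Typical usage in a test (a sketch, assuming a test-local GenesisConfig):

// Expands to create_new_ledger_from_name(tmp_ledger_name!(), ..., PrimaryOnly).
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
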


@@ -1,4 +1,4 @@
use crate::blockstore_meta;
use crate::{blockstore_meta, hardened_unpack::UnpackError};
use bincode::{deserialize, serialize};
use byteorder::{BigEndian, ByteOrder};
use log::*;
@@ -56,6 +56,7 @@ pub enum BlockstoreError {
FsExtraError(#[from] fs_extra::error::Error),
SlotCleanedUp,
UnableToSetOpenFileDescriptorLimit,
UnpackError(#[from] UnpackError),
}
pub type Result<T> = std::result::Result<T, BlockstoreError>;
@@ -125,11 +126,22 @@ pub mod columns {
pub struct Rewards;
}
pub enum AccessType {
PrimaryOnly,
TryPrimaryThenSecondary,
}
#[derive(Debug, PartialEq)]
pub enum ActualAccessType {
Primary,
Secondary,
}
#[derive(Debug)]
struct Rocks(rocksdb::DB);
struct Rocks(rocksdb::DB, ActualAccessType);
impl Rocks {
fn open(path: &Path) -> Result<Rocks> {
fn open(path: &Path, access_type: AccessType) -> Result<Rocks> {
use columns::{
AddressSignatures, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards,
Root, ShredCode, ShredData, SlotMeta, TransactionStatus, TransactionStatusIndex,
@@ -138,7 +150,7 @@ impl Rocks {
fs::create_dir_all(&path)?;
// Use default database options
let db_options = get_db_options();
let mut db_options = get_db_options();
// Column family names
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
@@ -164,23 +176,53 @@ impl Rocks {
let rewards_cf_descriptor = ColumnFamilyDescriptor::new(Rewards::NAME, get_cf_options());
let cfs = vec![
meta_cf_descriptor,
dead_slots_cf_descriptor,
duplicate_slots_cf_descriptor,
erasure_meta_cf_descriptor,
orphans_cf_descriptor,
root_cf_descriptor,
index_cf_descriptor,
shred_data_cf_descriptor,
shred_code_cf_descriptor,
transaction_status_cf_descriptor,
address_signatures_cf_descriptor,
transaction_status_index_cf_descriptor,
rewards_cf_descriptor,
(SlotMeta::NAME, meta_cf_descriptor),
(DeadSlots::NAME, dead_slots_cf_descriptor),
(DuplicateSlots::NAME, duplicate_slots_cf_descriptor),
(ErasureMeta::NAME, erasure_meta_cf_descriptor),
(Orphans::NAME, orphans_cf_descriptor),
(Root::NAME, root_cf_descriptor),
(Index::NAME, index_cf_descriptor),
(ShredData::NAME, shred_data_cf_descriptor),
(ShredCode::NAME, shred_code_cf_descriptor),
(TransactionStatus::NAME, transaction_status_cf_descriptor),
(AddressSignatures::NAME, address_signatures_cf_descriptor),
(
TransactionStatusIndex::NAME,
transaction_status_index_cf_descriptor,
),
(Rewards::NAME, rewards_cf_descriptor),
];
// Open the database
let db = Rocks(DB::open_cf_descriptors(&db_options, path, cfs)?);
let db = match access_type {
AccessType::PrimaryOnly => Rocks(
DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1))?,
ActualAccessType::Primary,
),
AccessType::TryPrimaryThenSecondary => {
let names: Vec<_> = cfs.iter().map(|c| c.0).collect();
match DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1)) {
Ok(db) => Rocks(db, ActualAccessType::Primary),
Err(err) => {
let secondary_path = path.join("solana-secondary");
warn!("Error when opening as primary: {}", err);
warn!("Trying as secondary at : {:?}", secondary_path);
warn!("This active secondary db use may temporarily cause the performance of another db use (like by validator) to degrade");
// This is needed according to https://github.com/facebook/rocksdb/wiki/Secondary-instance
db_options.set_max_open_files(-1);
Rocks(
DB::open_cf_as_secondary(&db_options, path, &secondary_path, names)?,
ActualAccessType::Secondary,
)
}
}
}
};
Ok(db)
}
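
The fallback above follows the pattern from the linked RocksDB secondary-instance wiki page: attempt a normal (primary) open, and on failure reopen as a secondary with its own catch-up directory and max_open_files set to -1. A standalone sketch of the same strategy without column families (function name and paths are illustrative, and exact rocksdb crate signatures may vary by version):

use rocksdb::{Options, DB};
use std::path::Path;

fn open_primary_then_secondary(path: &Path) -> Result<DB, rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    match DB::open(&opts, path) {
        // We won the race: this process is the primary instance.
        Ok(db) => Ok(db),
        Err(err) => {
            eprintln!("primary open failed ({}); retrying as secondary", err);
            // Secondary instances require unlimited open files, per the
            // RocksDB wiki.
            opts.set_max_open_files(-1);
            let secondary = path.join("solana-secondary");
            DB::open_as_secondary(&opts, path.to_path_buf(), secondary)
        }
    }
}
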
@@ -265,6 +307,10 @@ impl Rocks {
self.0.write(batch)?;
Ok(())
}
fn is_primary_access(&self) -> bool {
self.1 == ActualAccessType::Primary
}
}
pub trait Column {
@@ -576,8 +622,8 @@ pub struct WriteBatch<'a> {
}
impl Database {
pub fn open(path: &Path) -> Result<Self> {
let backend = Arc::new(Rocks::open(path)?);
pub fn open(path: &Path, access_type: AccessType) -> Result<Self> {
let backend = Arc::new(Rocks::open(path, access_type)?);
Ok(Database {
backend,
@@ -677,6 +723,10 @@ impl Database {
let end = max_slot <= to;
result.map(|_| end)
}
pub fn is_primary_access(&self) -> bool {
self.backend.is_primary_access()
}
}
impl<C> LedgerColumn<C>


@@ -341,9 +341,14 @@ pub fn process_blockstore_from_root(
}
}
blockstore
.set_roots(&[start_slot])
.expect("Couldn't set root slot on startup");
// ensure start_slot is rooted for correct replay
if blockstore.is_primary_access() {
blockstore
.set_roots(&[start_slot])
.expect("Couldn't set root slot on startup");
} else if !blockstore.is_root(start_slot) {
panic!("starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
}
if let Ok(metas) = blockstore.slot_meta_iterator(start_slot) {
if let Some((slot, _meta)) = metas.last() {
@@ -801,10 +806,14 @@ fn process_single_slot(
// see DuplicateSignature errors later in ReplayStage
confirm_full_slot(blockstore, bank, opts, recyclers, progress).map_err(|err| {
let slot = bank.slot();
blockstore
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blockstore");
warn!("slot {} failed to verify: {}", slot, err);
if blockstore.is_primary_access() {
blockstore
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blockstore");
} else if !blockstore.is_dead(slot) {
panic!("Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
}
err
})?;
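
Both call sites follow the same gate: writes (set_roots, set_dead_slot) happen only with primary access, while a secondary reader merely verifies that the state it would have written is already present and panics otherwise, since it has no way to repair the ledger. Condensed as a sketch (the helper name is hypothetical; Blockstore and Slot are the crate's types):

// Set `slot` as a root, or verify it already is one when read-only.
fn root_or_verify(blockstore: &Blockstore, slot: Slot) {
    if blockstore.is_primary_access() {
        blockstore
            .set_roots(&[slot])
            .expect("Couldn't set root slot on startup");
    } else if !blockstore.is_root(slot) {
        panic!("slot {} isn't a root and the blockstore is read-only", slot);
    }
}
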


@@ -19,9 +19,9 @@ use thiserror::Error;
#[derive(Error, Debug)]
pub enum UnpackError {
#[error("IO error")]
#[error("IO error: {0}")]
IO(#[from] std::io::Error),
#[error("Archive error")]
#[error("Archive error: {0}")]
Archive(String),
}
@@ -29,15 +29,15 @@ pub type Result<T> = std::result::Result<T, UnpackError>;
const MAX_SNAPSHOT_ARCHIVE_UNPACKED_SIZE: u64 = 500 * 1024 * 1024 * 1024; // 500 GiB
const MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT: u64 = 500_000;
const MAX_GENESIS_ARCHIVE_UNPACKED_SIZE: u64 = 1024 * 1024 * 1024; // 1024 MiB
pub const MAX_GENESIS_ARCHIVE_UNPACKED_SIZE: u64 = 10 * 1024 * 1024; // 10 MiB
const MAX_GENESIS_ARCHIVE_UNPACKED_COUNT: u64 = 100;
fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result<u64> {
let total_size = total_size.saturating_add(entry_size);
if total_size > limit_size {
return Err(UnpackError::Archive(format!(
"too large snapshot: {:?}",
total_size
"too large archive: {} than limit: {}",
total_size, limit_size,
)));
}
Ok(total_size)
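
The saturating_add is what keeps this check sound near u64::MAX: a huge attacker-supplied entry size cannot wrap the running total back under the limit, it can only pin the total at u64::MAX, which then fails the comparison (exactly what the overflow test below asserts). A worked sketch of the arithmetic:

// With wrapping arithmetic, (u64::MAX - 2) + 4 would wrap around to 1 and
// slip under any limit; saturating_add pins it at u64::MAX instead.
let total: u64 = u64::max_value() - 2;
assert_eq!(total.wrapping_add(4), 1);
assert_eq!(total.saturating_add(4), u64::max_value());
assert!(total.saturating_add(4) > MAX_SNAPSHOT_ARCHIVE_UNPACKED_SIZE);
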
@@ -151,10 +151,18 @@ fn is_valid_snapshot_archive_entry(parts: &[&str], kind: tar::EntryType) -> bool
}
}
pub fn open_genesis_config(ledger_path: &Path) -> GenesisConfig {
pub fn open_genesis_config(
ledger_path: &Path,
max_genesis_archive_unpacked_size: u64,
) -> GenesisConfig {
GenesisConfig::load(&ledger_path).unwrap_or_else(|load_err| {
let genesis_package = ledger_path.join("genesis.tar.bz2");
unpack_genesis_archive(&genesis_package, ledger_path).unwrap_or_else(|unpack_err| {
unpack_genesis_archive(
&genesis_package,
ledger_path,
max_genesis_archive_unpacked_size,
)
.unwrap_or_else(|unpack_err| {
warn!(
"Failed to open ledger genesis_config at {:?}: {}, {}",
ledger_path, load_err, unpack_err,
@@ -170,17 +178,20 @@ pub fn open_genesis_config(ledger_path: &Path) -> GenesisConfig {
pub fn unpack_genesis_archive(
archive_filename: &Path,
destination_dir: &Path,
) -> std::result::Result<(), String> {
max_genesis_archive_unpacked_size: u64,
) -> std::result::Result<(), UnpackError> {
info!("Extracting {:?}...", archive_filename);
let extract_start = Instant::now();
fs::create_dir_all(destination_dir).map_err(|err| err.to_string())?;
let tar_bz2 = File::open(&archive_filename)
.map_err(|err| format!("Unable to open {:?}: {:?}", archive_filename, err))?;
fs::create_dir_all(destination_dir)?;
let tar_bz2 = File::open(&archive_filename)?;
let tar = BzDecoder::new(BufReader::new(tar_bz2));
let mut archive = Archive::new(tar);
unpack_genesis(&mut archive, destination_dir)
.map_err(|err| format!("Unable to unpack {:?}: {:?}", archive_filename, err))?;
unpack_genesis(
&mut archive,
destination_dir,
max_genesis_archive_unpacked_size,
)?;
info!(
"Extracted {:?} in {:?}",
archive_filename,
@@ -189,11 +200,15 @@ pub fn unpack_genesis_archive(
Ok(())
}
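
With the hard-coded constant gone, every caller states its limit explicitly; the validator and ledger-tool plumb a configurable value through (see --max-genesis-archive-unpacked-size in the scripts further down). A sketch of the common call, assuming the crate's now-public default constant:

// Load the genesis config, unpacking genesis.tar.bz2 if needed and refusing
// archives that inflate beyond the 10 MiB default.
let genesis_config = open_genesis_config(&ledger_path, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE);
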
fn unpack_genesis<A: Read, P: AsRef<Path>>(archive: &mut Archive<A>, unpack_dir: P) -> Result<()> {
fn unpack_genesis<A: Read, P: AsRef<Path>>(
archive: &mut Archive<A>,
unpack_dir: P,
max_genesis_archive_unpacked_size: u64,
) -> Result<()> {
unpack_archive(
archive,
unpack_dir,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
max_genesis_archive_unpacked_size,
MAX_GENESIS_ARCHIVE_UNPACKED_COUNT,
is_valid_genesis_archive_entry,
)
@@ -311,7 +326,9 @@ mod tests {
}
fn finalize_and_unpack_genesis(archive: tar::Builder<Vec<u8>>) -> Result<()> {
with_finalize_and_unpack(archive, |a, b| unpack_genesis(a, b))
with_finalize_and_unpack(archive, |a, b| {
unpack_genesis(a, b, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE)
})
}
#[test]
@@ -440,7 +457,7 @@ mod tests {
let mut archive = Builder::new(Vec::new());
archive.append(&header, data).unwrap();
let result = finalize_and_unpack_snapshot(archive);
assert_matches!(result, Err(UnpackError::Archive(ref message)) if message.to_string() == *"too large snapshot: 1125899906842624");
assert_matches!(result, Err(UnpackError::Archive(ref message)) if message.to_string() == format!("archive too large: 1125899906842624 exceeds limit: {}", MAX_SNAPSHOT_ARCHIVE_UNPACKED_SIZE));
}
#[test]
@@ -456,7 +473,7 @@ mod tests {
let result =
checked_total_size_sum(u64::max_value() - 2, 2, MAX_SNAPSHOT_ARCHIVE_UNPACKED_SIZE);
assert_matches!(result, Err(UnpackError::Archive(ref message)) if message.to_string() == *"too large snapshot: 18446744073709551615");
assert_matches!(result, Err(UnpackError::Archive(ref message)) if message.to_string() == format!("archive too large: 18446744073709551615 exceeds limit: {}", MAX_SNAPSHOT_ARCHIVE_UNPACKED_SIZE));
}
#[test]


@@ -50,7 +50,7 @@ pub const SIZE_OF_NONCE_DATA_SHRED_PAYLOAD: usize =
pub const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_TYPE;
pub const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT;
pub const NONCE_SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
pub const UNLOCK_NONCE_SLOT: Slot = 5_000_000;
pub const UNLOCK_NONCE_SLOT: Slot = 13_115_515;
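
The OFFSET_OF_* constants above exist so hot paths can read a shred's slot and index straight out of a packet buffer without deserializing the whole shred. A hedged sketch of that trick (not the crate's public API; assumes the slot is serialized as a bincode little-endian u64 at the stated offset):

use std::convert::TryInto;

// Peek at the slot field of a serialized shred without full deserialization.
fn peek_shred_slot(packet: &[u8]) -> Option<Slot> {
    let bytes = packet.get(OFFSET_OF_SHRED_SLOT..OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT)?;
    Some(Slot::from_le_bytes(bytes.try_into().ok()?))
}
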
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,24 +12,24 @@ homepage = "https://solana.com/"
itertools = "0.9.0"
log = "0.4.8"
rand = "0.7.0"
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.15" }
solana-config-program = { path = "../programs/config", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-download-utils = { path = "../download-utils", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.15" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-vest-program = { path = "../programs/vest", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.19" }
solana-config-program = { path = "../programs/config", version = "1.1.19" }
solana-core = { path = "../core", version = "1.1.19" }
solana-client = { path = "../client", version = "1.1.19" }
solana-download-utils = { path = "../download-utils", version = "1.1.19" }
solana-faucet = { path = "../faucet", version = "1.1.19" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.19" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.19" }
solana-ledger = { path = "../ledger", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
solana-runtime = { path = "../runtime", version = "1.1.19" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-stake-program = { path = "../programs/stake", version = "1.1.19" }
solana-storage-program = { path = "../programs/storage", version = "1.1.19" }
solana-vest-program = { path = "../programs/vest", version = "1.1.19" }
solana-vote-program = { path = "../programs/vote", version = "1.1.19" }
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.15" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.19" }
[dev-dependencies]
assert_matches = "1.3.0"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,8 +14,8 @@ byte-unit = "3.0.3"
clap = "2.33.0"
serde = "1.0.105"
serde_json = "1.0.48"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
[[bin]]
name = "solana-log-analyzer"


@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "1.1.15"
version = "1.1.19"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.15"
version = "1.1.19"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
solana-metrics = { path = "../metrics", version = "1.1.19" }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"


@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "1.1.15"
version = "1.1.19"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
fast-math = "0.1"
[dev-dependencies]


@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "1.1.15"
version = "1.1.19"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ gethostname = "0.2.1"
lazy_static = "1.4.0"
log = "0.4.8"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.19" }
[dev-dependencies]
rand = "0.7.0"


@@ -33,6 +33,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --gossip-port ]]; then
args+=("$1" "$2")
shift 2
elif [[ $1 = --dev-halt-at-slot ]]; then
args+=("$1" "$2")
shift 2
elif [[ $1 = --dynamic-port-range ]]; then
args+=("$1" "$2")
shift 2
@@ -54,6 +57,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --no-restart ]]; then
no_restart=1
shift
elif [[ $1 == --wait-for-supermajority ]]; then
args+=("$1" "$2")
shift 2
else
echo "Unknown argument: $1"
$program --help


@@ -88,16 +88,15 @@ if [[ ! -f $vote_account ]]; then
exit 1
fi
if [[ -f $stake_account ]]; then
echo "Error: $stake_account already exists"
exit 1
fi
if ((airdrops_enabled)); then
$solana_cli "${common_args[@]}" airdrop "$stake_sol"
fi
$solana_keygen new --no-passphrase -so "$stake_account"
if ! [[ -f "$stake_account" ]]; then
$solana_keygen new --no-passphrase -so "$stake_account"
else
echo "$stake_account already exists! Using it"
fi
set -x
$solana_cli "${common_args[@]}" \


@@ -27,6 +27,7 @@ $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/bootstrap-validator/
args=(
"$@"
--max-genesis-archive-unpacked-size 1073741824
--enable-warmup-epochs
--bootstrap-validator "$SOLANA_CONFIG_DIR"/bootstrap-validator/identity.json
"$SOLANA_CONFIG_DIR"/bootstrap-validator/vote-account.json


@@ -6,7 +6,9 @@ here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
args=()
args=(
--max-genesis-archive-unpacked-size 1073741824
)
airdrops_enabled=1
node_sol=500 # 500 SOL: number of SOL to airdrop the node for transaction fees and vote account rent exemption (ignored if airdrops_enabled=0)
label=
@@ -147,6 +149,12 @@ while [[ -n $1 ]]; do
elif [[ $1 = --halt-on-trusted-validators-accounts-hash-mismatch ]]; then
args+=("$1")
shift
elif [[ $1 = --max-genesis-archive-unpacked-size ]]; then
args+=("$1" "$2")
shift 2
elif [[ $1 == --wait-for-supermajority ]]; then
args+=("$1" "$2")
shift 2
elif [[ $1 = -h ]]; then
usage "$@"
else


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "1.1.15"
version = "1.1.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,8 +13,8 @@ publish = false
clap = "2.33.0"
serde = "1.0.105"
serde_json = "1.0.48"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
rand = "0.7.0"
[[bin]]


@@ -1,6 +1,6 @@
[package]
name = "solana-net-utils"
version = "1.1.15"
version = "1.1.19"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,8 +18,8 @@ rand = "0.7.0"
serde = "1.0.105"
serde_derive = "1.0.103"
socket2 = "0.3.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.19" }
solana-logger = { path = "../logger", version = "1.1.19" }
tokio = "0.1"
tokio-codec = "0.1"


@@ -7,10 +7,12 @@ use tokio_codec::{BytesCodec, Decoder};
pub type IpEchoServer = Runtime;
pub const MAX_PORT_COUNT_PER_MESSAGE: usize = 4;
#[derive(Serialize, Deserialize, Default)]
pub(crate) struct IpEchoServerMessage {
tcp_ports: [u16; 4], // Fixed size list of ports to avoid vec serde
udp_ports: [u16; 4], // Fixed size list of ports to avoid vec serde
tcp_ports: [u16; MAX_PORT_COUNT_PER_MESSAGE], // Fixed size list of ports to avoid vec serde
udp_ports: [u16; MAX_PORT_COUNT_PER_MESSAGE], // Fixed size list of ports to avoid vec serde
}
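
The "avoid vec serde" comments are about wire-format stability: with bincode's default configuration, a [u16; N] serializes to exactly N * 2 bytes with no length prefix, while a Vec<u16> carries a u64 length header plus a variable-length body. A standalone sketch illustrating the difference (struct names are illustrative):

use serde::Serialize;

#[derive(Serialize)]
struct Fixed {
    ports: [u16; 4],
}

#[derive(Serialize)]
struct Dynamic {
    ports: Vec<u16>,
}

fn main() {
    let fixed = bincode::serialize(&Fixed { ports: [1, 2, 3, 4] }).unwrap();
    let dynamic = bincode::serialize(&Dynamic { ports: vec![1, 2, 3, 4] }).unwrap();
    assert_eq!(fixed.len(), 8); // 4 * u16, no length prefix
    assert_eq!(dynamic.len(), 16); // 8-byte length prefix + 4 * u16
}
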
impl IpEchoServerMessage {

Some files were not shown because too many files have changed in this diff.