Compare commits


185 Commits
v1.8 ... v1.6.6

Author SHA1 Message Date
mergify[bot]
fe775a9716 CLI BIP32 prep: KeypairUrl refactor (backport #16592) (#16605)
* clap-utils: Rename KeypairUrl to SignerSource

(cherry picked from commit 09dcc9ea04)

* clap-utils: Reduce SignerSource's visibility

(cherry picked from commit c5ab3ba6f1)

* clap-utils: Use `uriparse` crate to parse `SignerSource`

(cherry picked from commit 5d1ef5d01d)

* clap-utils: Add explicit schemes for `ask` and `file` `SignerSource`s

(cherry picked from commit 6444f0e57b)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-16 21:14:31 +00:00
mergify[bot]
ac76a75937 Feature-gate hash-based duplicate transaction check (#16601)
(cherry picked from commit 285f3c9d56)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-16 19:59:55 +00:00
mergify[bot]
6c1678244f docs: Fix typo in program deploy instructions (#16572) (#16575)
(cherry picked from commit c8ed14c647)

Co-authored-by: Justin Starry <justin@solana.com>
2021-04-16 05:58:31 +00:00
mergify[bot]
63a9f33be1 Don't parse uninitialized system/nonce accounts (#16584) (#16587)
(cherry picked from commit ba77e48c12)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-15 23:39:16 +00:00
mergify[bot]
c9da91cb1c Rotate CODECOV_TOKEN (#16579)
(cherry picked from commit a535c0e129)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-04-15 16:15:20 +00:00
mergify[bot]
b3488e0139 Cli: move airdrop to rpc requests (#16557) (#16564)
* Add recent_blockhash to requestAirdrop

* Move tx confirmation to separate method

* Add RpcClient airdrop methods

* Request cli airdrop via RpcClient

* Pass optional faucet_addr into TestValidator and fix tests

* Update client/src/rpc_client.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 7dfb51c0b4)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-15 07:35:19 +00:00
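A hedged usage sketch of the RpcClient airdrop flow the commit above introduces, assuming the solana_client crate of this era; the helper function and its error handling are illustrative, not the actual CLI code:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey};

// Request an airdrop over RPC and poll for confirmation, instead of
// talking to the faucet directly. Illustrative helper, not the CLI code.
fn airdrop_one_sol(client: &RpcClient, to: &Pubkey) -> Result<(), Box<dyn std::error::Error>> {
    let signature = client.request_airdrop(to, LAMPORTS_PER_SOL)?;
    while !client.confirm_transaction(&signature)? {
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    Ok(())
}
```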
mergify[bot]
f3814a0478 docs: freshen and clarify rent-exempt dev description (#16562)
(cherry picked from commit 76ce28c723)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-15 04:35:08 +00:00
mergify[bot]
5e8d8cfb49 fix transaction spelling (#16558) (#16559)
(cherry picked from commit 1f29031b9d)

Co-authored-by: strykerin <dacosta.pereirafabio@gmail.com>
2021-04-15 02:24:26 +00:00
mergify[bot]
ad37276d83 dl-utils: use wide_msg everywhere for truncation on narrow terminals (#16555)
(cherry picked from commit e61b4b7d70)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-15 01:01:55 +00:00
mergify[bot]
719db7eed0 uses timeouts based on stake for filtering pull responses (#16549) (#16551)
filter_pull_responses uses the default timeout when discarding pull
responses (except for ContactInfo):
https://github.com/solana-labs/solana/blob/f804ce63c/core/src/crds_gossip_pull.rs#L349-L350

But purging code uses timeouts based on stake:
https://github.com/solana-labs/solana/blob/f804ce63c/core/src/cluster_info.rs#L1867-L1870

So the crds value will not be purged from the sender's table and will be
sent again over the next pull request.

(cherry picked from commit d92721aab9)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-04-14 21:43:48 +00:00
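A standalone sketch of the stake-based timeout lookup described above; the types, numbers, and helper names are placeholders, not the real crds_gossip_pull API:

```rust
use std::collections::HashMap;

type Pubkey = [u8; 32];

// Staked nodes get a longer horizon than the default, so their values
// survive response filtering as long as they survive purging.
// (Hypothetical simplification of the real logic.)
fn make_timeouts(
    stakes: &HashMap<Pubkey, u64>,
    default_timeout_ms: u64,
    epoch_duration_ms: u64,
) -> HashMap<Pubkey, u64> {
    stakes
        .iter()
        .map(|(node, stake)| {
            let timeout = if *stake > 0 { epoch_duration_ms } else { default_timeout_ms };
            (*node, timeout)
        })
        .collect()
}

// Filtering then uses the per-origin timeout instead of the default one,
// matching the horizon used by the purge code.
fn is_expired(
    timeouts: &HashMap<Pubkey, u64>,
    default_timeout_ms: u64,
    origin: &Pubkey,
    age_ms: u64,
) -> bool {
    age_ms > *timeouts.get(origin).unwrap_or(&default_timeout_ms)
}

fn main() {
    let staked: Pubkey = [1; 32];
    let mut stakes = HashMap::new();
    stakes.insert(staked, 42);
    let timeouts = make_timeouts(&stakes, 15_000, 48_000_000);
    // A staked node's value is retained well past the default timeout.
    assert!(!is_expired(&timeouts, 15_000, &staked, 100_000));
}
```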
mergify[bot]
4ddb72a32d prioritizes contact-infos in pull responses (#16541) (#16550)
Expired crds values for which the contact-info does not exist are wasted:
https://github.com/solana-labs/solana/blob/f804ce63c/core/src/crds_gossip_pull.rs#L353-L378
and then are sent again over the next pull-request.

Also, the stake of the first response (which can be anything) is used to
weight all pull-responses to a node, while the rest of the responses can
have a different stake.
https://github.com/solana-labs/solana/blob/f804ce63c/core/src/cluster_info.rs#L2231

(cherry picked from commit f35a6a8be0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-04-14 20:14:22 +00:00
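A minimal sketch of the prioritization idea: sort pull responses so ContactInfo values are processed first. The enum is illustrative, not the real CrdsValue:

```rust
#[derive(Debug)]
enum CrdsData {
    ContactInfo,
    Vote,
    EpochSlots,
}

// Order pull responses so a node's contact-info is inserted before its
// other values, ensuring those values are never wasted for lack of it.
fn prioritize(mut values: Vec<CrdsData>) -> Vec<CrdsData> {
    values.sort_by_key(|v| match v {
        CrdsData::ContactInfo => 0, // process contact-infos first
        _ => 1,
    });
    values
}

fn main() {
    let out = prioritize(vec![CrdsData::Vote, CrdsData::ContactInfo, CrdsData::EpochSlots]);
    assert!(matches!(out[0], CrdsData::ContactInfo));
}
```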
mergify[bot]
ff1171338f Fix channel panic in tests (#16503) (#16543)
* Fix channel panic

* Add exit signal to PohRecorder because Crossbeam doesn't drop objects inside a dropped channel

(cherry picked from commit f0c150cfb9)

Co-authored-by: carllin <carl@solana.com>
2021-04-14 19:04:31 +00:00
mergify[bot]
28683b0ad8 Fix sanity test flakiness by prebuilding binaries (#16530) (#16547)
* Fix sanity test flakiness by prebuilding binaries

* ignore shellcheck

* bump

* nudge

* simplify

(cherry picked from commit 328e7690f3)

Co-authored-by: Justin Starry <justin@solana.com>
2021-04-14 18:52:15 +00:00
Michael Vines
4ef3a679a4 Bump version to v1.6.6 2021-04-14 10:27:02 -07:00
Connor McFarlane
e02bcbdae2 Other hostname changes
(cherry picked from commit eddfe06a00)
2021-04-14 10:08:29 -07:00
Connor McFarlane
7b0187a148 Correct gossip hostname
(cherry picked from commit d684ec00aa)
2021-04-14 10:08:29 -07:00
Michael Vines
e92283c8d2 Add --faucet-port option
(cherry picked from commit f804ce63c2)
2021-04-14 09:39:27 -07:00
Jack May
ef3781d4ee fix cross-merge (#16535) 2021-04-14 10:16:24 +00:00
mergify[bot]
6da4bec41d Return sysvars via syscalls (bp #16422) (#16497)
* Return sysvars via syscalls (#16422)

(cherry picked from commit fa83f3bd73)

* bad merge

* Fix branch diffs

* nudge

Co-authored-by: Jack May <jack@solana.com>
2021-04-14 05:33:27 +00:00
mergify[bot]
31ed985fd0 RpcClient no longer panics in a tokio multi-threaded runtime (#16393)
(cherry picked from commit a4f0d8636a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-04-14 03:17:33 +00:00
mergify[bot]
cdc10712b1 Bump scripts to current commitment variants (#16526) (#16527)
(cherry picked from commit 3bfae8e829)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-14 01:51:56 +00:00
mergify[bot]
935a836a7d bump solana_rbpf from 0.2.5 to 0.2.7 (backport #16515) (#16525)
* bump solana_rbpf from 0.2.5 to 0.2.7 (#16515)

(cherry picked from commit f7eadd9d70)

# Conflicts:
#	cli/Cargo.toml
#	programs/bpf/Cargo.toml
#	programs/bpf_loader/Cargo.toml
#	programs/bpf_loader/src/syscalls.rs

* Fix conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
Co-authored-by: Jack May <jack@solana.com>
2021-04-13 23:53:48 +00:00
mergify[bot]
97ba3cbeb0 Cleanup unsupported sysvars (backport #16390) (#16517)
* Cleanup unsupported sysvars (#16390)

* Cleanup unsupported sysvars

* fix ser description

(cherry picked from commit 92f4018b07)

# Conflicts:
#	runtime/src/bank.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2021-04-13 23:28:08 +00:00
mergify[bot]
e8ca35f9ec Deprecate RpcClient methods, RpcRequest variants (#16516) (#16519)
* Deprecate RpcClient methods, RpcRequest variants

* Update cli to getSupply

(cherry picked from commit ccb11a939f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-13 22:23:39 +00:00
Michael Vines
d5aae9a8af Derive PartialEq for RpcStakeActivation 2021-04-13 12:33:46 -07:00
mergify[bot]
3bb8016a40 Remove blake3 from bpf program dependencies (#16506) (#16509)
(cherry picked from commit f641429056)

Co-authored-by: Justin Starry <justin@solana.com>
2021-04-13 11:18:26 +00:00
Justin Starry
579065443a v1.6: Use blake3 message hash in status cache (#16507) 2021-04-13 16:57:20 +08:00
Trent Nelson
81d636c2bf Merge pull request from GHSA-fmvj-vqp5-qqh9
* Sanitize permissions

* Forbid creating directories under ledger/rocksdb/

* hardened_unpack: Disallow dirs under rocksdb/ in genesis

* hardened_unpack: expand valid genesis entry test coverage

* hardened_unpack: rework old-style bsd directory entry rejection

Co-authored-by: Ivan Mironov <mironov.ivan@gmail.com>
2021-04-12 23:56:37 -06:00
Michael Vines
6a7ce8500b canonicalize authorized voter filepath
(cherry picked from commit 05ad979a2d)
2021-04-12 20:01:56 -07:00
mergify[bot]
8ee294639a validator: Add authorized-voter add/remove-all commands (bp #16492) (#16496)
* Clean up build warning

(cherry picked from commit 17a173ebb5)

* Add authorized-voter add/remove-all commands

(cherry picked from commit 2229b70c4e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-04-13 00:07:06 +00:00
Tyera Eulberg
b275f65ef1 Add address_cache and exclude loopback from ip limit (#16491) 2021-04-12 20:31:30 +00:00
mergify[bot]
37c2b68677 poll checking for new record in poh service after every batch of hashes instead of busy waiting (#16167) (#16486)
* poll waiting in poh service after every batch of hashes

* clippy

(cherry picked from commit 414c7070cb)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-04-12 19:07:23 +00:00
mergify[bot]
d9944c8ae3 TransactionRecorder uses unique channel so we can use Recv instead of RecvTimeout (#16195) (#16485)
* time

* new channel each call

* new channel every time

(cherry picked from commit 5eff23db0c)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-04-12 18:56:22 +00:00
mergify[bot]
6c8bbdca0a Allow fork choice to support multiple versions of a slot (#16266) (#16480)
(cherry picked from commit dc7030ffaa)

Co-authored-by: carllin <carl@solana.com>
2021-04-12 09:14:02 +00:00
Michael Vines
10e8f3ab32 Fix up App formatting
(cherry picked from commit ef30943c5c)
2021-04-11 23:36:31 -07:00
mergify[bot]
8c0b0f235e docker: Expose all ports in Dockerfile, add back localnet.sh (#16401) (#16474)
* docker: Expose all ports in Dockerfile, add back localnet.sh

* Add documentation for where to find containers

* Obliterate script

(cherry picked from commit 448d5be79f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-04-11 20:17:08 +00:00
mergify[bot]
ec8ba76e4d Fix account copy step in program test message processor (#16469) (#16472)
(cherry picked from commit 278c125d99)

Co-authored-by: Justin Starry <justin@solana.com>
2021-04-11 20:31:22 +08:00
mergify[bot]
60fba7be75 Track gossip vote updates per hash for replay stage (#16421) (#16468)
* Track gossip vote updates per hash for replay stage

(cherry picked from commit 99b3aab703)

Co-authored-by: carllin <carl@solana.com>
2021-04-11 09:33:28 +00:00
mergify[bot]
24075ceeff Fill in not-yet-finalized block-time if possible (#16460) (#16463)
(cherry picked from commit 8bc0bdd40b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-09 21:48:16 +00:00
mergify[bot]
f7ef1e68b0 patches bug in banking stage where buffered packets are never retained (#16276) (#16458)
banking_stage::handle_forwarding is retaining buffered packets with an
empty index list, so nothing is held:
https://github.com/solana-labs/solana/blob/6f3926b64/core/src/banking_stage.rs#L520

(cherry picked from commit 701fc93343)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-04-09 18:44:32 +00:00
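A toy reproduction of the retention bug and its fix, using a plain Vec in place of the real packet buffer:

```rust
// Filtering a buffer against an index list that was (incorrectly) emptied
// drops everything. Illustrative types only.
fn retain_buffered(buffer: &mut Vec<(usize, &'static str)>, unprocessed: &[usize]) {
    buffer.retain(|(i, _)| unprocessed.contains(i));
}

fn main() {
    // Bug: handle_forwarding passed an empty index list, so nothing was held.
    let mut buffer = vec![(0, "tx-a"), (1, "tx-b")];
    retain_buffered(&mut buffer, &[]);
    assert!(buffer.is_empty());

    // Fix: pass through the original indexes so forwarded packets are retained.
    let mut buffer = vec![(0, "tx-a"), (1, "tx-b")];
    retain_buffered(&mut buffer, &[0, 1]);
    assert_eq!(buffer.len(), 2);
}
```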
mergify[bot]
723e7f11b9 Simplify some pattern-matches (#16402) (#16446)
When those match an exact combinator on Option / Result.

Tool-aided by [comby-rust](https://github.com/huitseeker/comby-rust).

(cherry picked from commit b08cff9e77)

Co-authored-by: François Garillot <4142+huitseeker@users.noreply.github.com>
2021-04-08 20:45:01 +00:00
mergify[bot]
f7211d3c07 Cli: use get_inflation_rewards and limit epochs queried (#16408) (#16444)
* Fix block-with-limit when not finalized blocks found

* Enable confirmed commitment in getInflationReward

* Use get_inflation_rewards in cli

* Line up rewards output

* Add range validator

* Change cli epoch arg -> num epochs

* Add solana inflation rewards subcommand

* Consolidate epoch rewards meta

(cherry picked from commit bb9d2fd07a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-08 18:16:04 +00:00
mergify[bot]
6234090361 Fix cargo-build/test-bpf --workspace (bp #16431) (#16432)
* Fix cargo-build/test-bpf --workspace (#16431)

(cherry picked from commit 878e52f0b9)

# Conflicts:
#	ci/test-stable.sh

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2021-04-08 16:55:21 +00:00
mergify[bot]
a001c1c8f6 CI: Let cargo-install-all.sh resolve stable (#16430)
(cherry picked from commit 388ce12207)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-07 21:30:08 +00:00
mergify[bot]
7f62f4f621 CLI: Fix rent panic (#16417) (#16426)
* CLI: Fix `rent` panic on non-numeric input (+monikers)

* Update cli/src/cluster_query.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update cli/src/cluster_query.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update cli/src/cluster_query.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit c5c3ae0203)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-07 18:04:20 +00:00
mergify[bot]
8334a76e5b docs: Validator SOL reqs followup (#16424)
(cherry picked from commit 117860218f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-07 16:15:20 +00:00
mergify[bot]
eadab5e2f0 No wallclock throttle tests (#16396) (#16399)
(cherry picked from commit 1219842a96)

Co-authored-by: carllin <carl@solana.com>
2021-04-07 11:05:51 +00:00
mergify[bot]
8bb7b53f3b Speed up net.sh builds (bp #16360) (#16420)
* Speed up net.sh builds (#16360)

* Speed up net.sh builds

* feedback

* Update net/net.sh

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* feedback

* fix

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 6cd4bc5e60)

# Conflicts:
#	scripts/cargo-install-all.sh

* fix

Co-authored-by: Justin Starry <justin@solana.com>
2021-04-07 09:03:53 +00:00
Trent Nelson
8c7b8e8c5d docs: Add validator SOL reqs
(cherry picked from commit 0e42a35e4f)
2021-04-06 22:47:48 -06:00
mergify[bot]
a2857928a4 Rpc: introduce get_inflation_reward rpc call (#16278) (#16410)
* feat: introduce get_inflation_reward rpc call

* fix: style suggestions

* fix: more style changes and match how other rpc functions are defined

* feat: get reward for a single epoch

* feat: default to the most recent epoch

* fix: don't factor out get_confirmed_block

* style: introduce from impl for RpcEncodingConfigWrapper

* style: bring commitment into variable

* feat: support multiple pubkeys for get_inflation_reward

* feat: add get_inflation_reward to rpc client

* feat: return rewards in order

* fix: rename pubkeys to addresses

* docs: introduce jsonrpc docs for get_inflation_reward

* style: early return in map (not sure which is more idiomatic)

* fix: call the rpc client function args addresses as well

* fix: style

* fix: filter out only addresses we care about

* style: make this more idiomatic

* fix: change rpc client epoch to optional and include some docs edits

* feat: filter out rent rewards in get_inflation_reward

* feat: add option epoch config param to get_inflation_reward

* feat: rpc client get_inflation_reward takes epoch instead of config and some filter staking and voting rewards

(cherry picked from commit e501fa5f0b)

Co-authored-by: Josh <josh.hundley@gmail.com>
2021-04-07 02:26:45 +00:00
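A hedged usage sketch of the new RpcClient method added by the commit above; the printing helper is illustrative, and the field names follow RpcInflationReward:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

// Query inflation rewards for several stake/vote addresses; `None` selects
// the most recent epoch. Results come back in the same order as `addresses`.
fn show_rewards(client: &RpcClient, addresses: &[Pubkey]) {
    match client.get_inflation_reward(addresses, None) {
        Ok(rewards) => {
            for (address, reward) in addresses.iter().zip(rewards) {
                match reward {
                    Some(r) => println!("{}: {} lamports in epoch {}", address, r.amount, r.epoch),
                    None => println!("{}: no reward", address),
                }
            }
        }
        Err(err) => eprintln!("getInflationReward failed: {}", err),
    }
}
```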
mergify[bot]
f6780d72b1 Faucet: repurpose cap and slice args to apply to single IPs (bp #16381) (#16400)
* Faucet: repurpose cap and slice args to apply to single IPs (#16381)

* Single use stmt

* Log request IP

* Switch cap and slice to apply per IP

* Use SOL in logs, error msgs

* Use thiserror instead of overloading io::Error

* Return memo transaction for requests that exceed per-request-cap

* Handle faucet memos in cli

* Add some docs, esp about memo transaction

* Use SOL symbol & standardize memo

Co-authored-by: Michael Vines <mvines@gmail.com>

* Differentiate faucet tx-length errors

* Populate signature in cli airdrop memo case

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 03d3ae1cb9)

# Conflicts:
#	Cargo.lock
#	client/Cargo.toml
#	faucet/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-04-06 08:48:10 +00:00
mergify[bot]
5d003c6dab Use spl-memo v3.0.1 (#16384) (#16397)
* Use memo v3.0.1, which simplifies id imports

* tree

(cherry picked from commit ae7bc8299d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-06 05:12:04 +00:00
mergify[bot]
79ee0e06b2 Cluster info shred spies (bp #16389) (#16395)
* cluster-info: Don't subtract non-shred spies from node count

(cherry picked from commit b6b08706b9)

* cluster-info: Get rid of some integer math while we're here

(cherry picked from commit b71875df61)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-06 01:37:16 +00:00
mergify[bot]
443f132de5 Add cluster state verifier logging (#16330) (#16336)
* Add cluster state verifier logging

* Add duplicate-slots iterator to ledger tool

(cherry picked from commit 4e5ef6bce2)

Co-authored-by: carllin <carl@solana.com>
2021-04-06 01:25:12 +00:00
mergify[bot]
95299e43a2 validator: Use a const for wait for supermajority threshold (#16392)
(cherry picked from commit 7a2a39093d)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-06 00:53:05 +00:00
mergify[bot]
0cf1894ede issue #10831: added --with-memo option to all cli commands that submit (bp #16291) (#16387)
* issue #10831: added --with-memo option to all cli commands that submit (#16291)

* issue #10831: added --with-memo option to all cli commands that submit
transactions.  Also, improve the block command to show UTF-8 string instead
of integer values for memo program data.

* Fixed tests and changed some syntax according to feedback.

* Use spl_memo id (all versions where applicable) instead of hardcoding id.

* Update Cargo.toml in programs/bpf.

* Update formatting via cargo fmt.

* Update to use spl_memo version 3.0.1, which simplifies package imports

(cherry picked from commit 364af3a3e0)

# Conflicts:
#	cli-output/Cargo.toml
#	cli/Cargo.toml

* Fix conflicts

Co-authored-by: bji <bryan@ischo.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-04-05 23:14:16 +00:00
mergify[bot]
f2f4f28c0b merkle-tree: fix build when targeting bpf (bp #16335) (#16342)
* merkle-tree: Add Xargo.toml

(cherry picked from commit a1d9b53cd7)

* merkle-tree: Get `Hash` et. al from program instead of sdk

(cherry picked from commit ddc0a16cec)

* merkle-tree: Use `matches` crate when targeting eBPF

(cherry picked from commit a44c32694f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-04-05 21:57:26 +00:00
Michael Vines
57da68d563 Update Cargo.toml 2021-04-05 14:02:34 -07:00
Michael Vines
8d2337ccf8 Update Cargo.toml 2021-04-05 14:02:34 -07:00
Michael Vines
270749185c Adjust tokio version to just "1"
(cherry picked from commit 43feef7362)

# Conflicts:
#	faucet/Cargo.toml
#	net-utils/Cargo.toml
2021-04-05 14:02:34 -07:00
Michael Vines
6184254416 Reduce test-validator ledger size
(cherry picked from commit b242f82696)
2021-04-05 09:24:49 -07:00
mergify[bot]
c8bb13b3f7 Fixup AncestorIterator method (bp #16357) (#16359)
* Fixup iterator method (#16357)

(cherry picked from commit 1a13d22984)

* Only get Blockstore::last_root once (#16362)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-05 05:27:58 +00:00
Tyera Eulberg
5da83c1491 Bump version to v1.6.5 (#16361) 2021-04-04 22:00:40 -06:00
Michael Vines
b04ce80255 Add channel version check
(cherry picked from commit 527adbed34)
2021-04-04 13:53:50 -07:00
sakridge
a788021181 Bump version to v1.6.4 (#16345) 2021-04-04 13:31:35 -07:00
mergify[bot]
553e9fb8cd Set ticks_per_slot higher for banking stage tests (#16094) (#16356)
Tests are timing out because the bank hit the MaxTickHeight and
will not process the transactions.

(cherry picked from commit 96ccc40f0a)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-04-04 20:29:53 +00:00
Michael Vines
f1bc7ec4fa wait-for-restart-window works again for unstaked nodes
(cherry picked from commit a679aebc82)
2021-04-04 12:59:13 -07:00
mergify[bot]
581181e87f Fix test_replay_commitment_cache (#16131) (#16355)
(cherry picked from commit 9b94741290)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-04-04 19:41:45 +00:00
mergify[bot]
36e1f9fae8 Bump bpf-tools to version v1.5 (#16331) (#16350)
The new version of bpf-tools eliminates the separate
rust-bpf-sysroot. The Rust standard libraries for the BPF target are
built in tree when the compiler is built.  The standard libraries code
is slightly more optimized and some reduction of compute budget can be
expected with this version of bpf-tools.

(cherry picked from commit 1359bceb5d)

Co-authored-by: Dmitri Makarov <dmakarov@users.noreply.github.com>
2021-04-04 16:58:52 +00:00
Justin Starry
f10ae394c8 Remove unprocessed transactions from log notifications (#16349)
(cherry picked from commit 0596cf5405)
2021-04-04 09:38:23 -07:00
mergify[bot]
f7905d369a Throttle PoH ticks by cumulative slot time (#16139) (#16315)
* Throttle PoH ticks by cumulative slot time

    * respond to pr feedback

    * saturating sub

    * updated comment

    (cherry picked from commit 4f4cffbd03)

    # Conflicts:
    #       core/src/poh_recorder.rs

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-04-03 23:40:46 +00:00
mergify[bot]
ef079d202b Wait for 90 percent of stake before starting (#16340) (#16344)
(cherry picked from commit 3429785d9b)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-04-03 22:50:58 +00:00
Michael Vines
b7efc2373c wait-for-restart-window now indicates how far away the next restart window is
(cherry picked from commit c8c89dd5f7)
2021-04-02 23:23:16 -07:00
Michael Vines
d3b50bc55b Remove UNSTABLE warning from logsSubscribe 2021-04-02 12:54:20 -07:00
mergify[bot]
8fd3465f8a Cleanup use (bp #16327) (#16328)
* Cleanup use (#16327)

(cherry picked from commit dee655df35)

# Conflicts:
#	Cargo.lock
#	program-test/Cargo.toml

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2021-04-02 19:54:00 +00:00
mergify[bot]
23b9e6eae3 add metric for ticks from poh_recorder.record (#16047) (#16312)
(cherry picked from commit 2fc609a294)

    # Conflicts:
    #       core/src/poh_recorder.rs

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-04-02 18:50:50 +00:00
mergify[bot]
fe1a977f9e Parse SPL associated-token-account instructions (bp #16318) (#16321)
* Parse SPL associated-token-account instructions (#16318)

(cherry picked from commit a902505810)

# Conflicts:
#	transaction-status/Cargo.toml

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-04-02 02:38:28 +00:00
mergify[bot]
5e538eff7c metrics for poh_recorder.record (#15998) (#16317)
(cherry picked from commit ddc758439e)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-04-02 02:35:09 +00:00
mergify[bot]
3efe4b5478 increase timeout in TransactionRecorder.record (#16133) (#16314)
(cherry picked from commit 06ac0fe9a3)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-04-02 00:58:52 +00:00
mergify[bot]
90e0d4fefe poh record metrics (#16092) (#16313)
(cherry picked from commit f68860a643)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-04-01 21:27:16 +00:00
behzad nouri
2e983fb39f pushes addresses instead of insert 2021-04-01 11:14:05 -06:00
mergify[bot]
527b20fbbd nit: fix variable names (#16283) (#16295)
(cherry picked from commit aa45e81b3e)

Co-authored-by: Jack May <jack@solana.com>
2021-04-01 09:24:52 +00:00
mergify[bot]
a0c4b4e5fc Rpc: enable getConfirmedSignaturesForAddress2 to return confirmed (not yet finalized) data (#16281) (#16293)
* Update blockstore method to allow return of unfinalized signature

* Support confirmed sigs in getConfirmedSignaturesForAddress2

* Add deprecated comments

* Update docs

* Enable confirmed transaction-history in cli

* Return real confirmation_status; fill in not-yet-finalized block time if possible

(cherry picked from commit da27acabcc)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-01 06:30:36 +00:00
mergify[bot]
282315a721 Rpc: fix getConfirmedTransaction slot (#16288) (#16290)
* Fix transaction blockstore apis

* Update blockstore apis in rpc

(cherry picked from commit 18bd47dbe1)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-04-01 04:58:53 +00:00
mergify[bot]
b8198f8cc5 removes OrderedIterator and transaction batch iteration order (#16153) (#16285)
In TransactionBatch,
https://github.com/solana-labs/solana/blob/e50f59844/runtime/src/transaction_batch.rs#L4-L11
lock_results[i] is aligned with transactions[iteration_order[i]]:
https://github.com/solana-labs/solana/blob/e50f59844/runtime/src/bank.rs#L2414-L2424
https://github.com/solana-labs/solana/blob/e50f59844/runtime/src/accounts.rs#L788-L817

However, load_and_execute_transactions iterates over
  lock_results[iteration_order[i]]
https://github.com/solana-labs/solana/blob/e50f59844/runtime/src/bank.rs#L2878-L2889
and then returns i as the index of the retryable transaction.

If iteration_order is [1, 2, 0] and i is 0, then:
  lock_results[iteration_order[i]] = lock_results[1]
which corresponds to
  transactions[iteration_order[1]] = transactions[2]
so neither i = 0 nor iteration_order[i] = 1 gives the correct index for the
corresponding transaction (which is 2).

This commit removes OrderedIterator and transaction batch iteration order
entirely. There is only one place in the blockstore processor where the
iteration order is not ordinal:
https://github.com/solana-labs/solana/blob/e50f59844/ledger/src/blockstore_processor.rs#L269-L271
It seems that, instead of using an iteration order, it can shuffle entry
transactions in place.

(cherry picked from commit 3f63ed9a72)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-04-01 01:28:01 +00:00
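The index mismatch above, replayed with plain arrays standing in for the TransactionBatch fields:

```rust
fn main() {
    let transactions = ["tx0", "tx1", "tx2"];
    let iteration_order: [usize; 3] = [1, 2, 0];
    // lock_results[i] is aligned with transactions[iteration_order[i]]:
    let lock_results = ["lock-for-tx1", "lock-for-tx2", "lock-for-tx0"];

    let i: usize = 0;
    // Buggy double indirection: lock_results[iteration_order[i]]
    let lock = lock_results[iteration_order[i]]; // = lock_results[1] = "lock-for-tx2"
    // ...which belongs to transactions[iteration_order[1]] = transactions[2],
    // so neither i = 0 nor iteration_order[i] = 1 names the right transaction (2).
    assert_eq!(lock, "lock-for-tx2");
    assert_eq!(transactions[iteration_order[iteration_order[i]]], "tx2");
}
```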
mergify[bot]
68ad2dcce1 Use more performant copy (#16282) (#16284)
(cherry picked from commit ad7f8e7f23)

Co-authored-by: Jack May <jack@solana.com>
2021-04-01 01:08:01 +00:00
mergify[bot]
e87c3421bc Update overview.md (#16280)
fix link which was broken/wrong

(cherry picked from commit c723251575)

Co-authored-by: Huge <mr.huge@seznam.cz>
2021-03-31 22:05:24 +00:00
mergify[bot]
20754a7115 Drop write lock on sysvars (#15497) (#16233)
* Drop write lock on sysvars

* adds env var for demoting sysvar write lock demotion

* moves demote logic to is_writable

* feature gates sysvar write lock demotion

* adds builtins to write lock demotion

* adds system program id to builtins

* adds Feature111...

* adds an abi-freeze test

* mvines set of builtin program keys

Co-authored-by: Michael Vines <mvines@gmail.com>

* update tests

* adds bpf loader keys

* Add test sysvar

* Plumb demote_sysvar to is_writable

* more plumbing of demote_sysvar_write_locks to is_writable

* patches test_program_bpf_instruction_introspection

* hard codes demote_sysvar_write_locks to false for serialization/encoding methods

* Revert "hard codes demote_sysvar_write_locks to false for serialization/encoding methods"

This reverts commit ae3e2d2e777437bddd753933097a210dcbc1b1fc.

* change the hardcoded ones to demote_sysvar_write_locks=true

* Use data_as_mut_slice

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 54c68ea83f)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-31 20:23:20 +00:00
mergify[bot]
8a57ee181e Cleanup nits (bp #16211) (#16237)
* Cleanup nits (#16211)

(cherry picked from commit f84e88f0a2)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/rust/sysvar/Cargo.toml

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2021-03-31 10:01:18 +00:00
mergify[bot]
4e6b5a9808 Fix BPF ELF layout (#16256) (#16261)
* Fix BPF ELF layout

* whitespace

(cherry picked from commit bcd89dd34c)

Co-authored-by: Jack May <jack@solana.com>
2021-03-31 09:56:57 +00:00
mergify[bot]
f24fbde43b Helpful const and Arg doc (#16248) (#16252)
(cherry picked from commit 67b747938f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-31 06:32:18 +00:00
Michael Vines
47f60c7607 Validator monitor now displays the max retransmit slot
(cherry picked from commit aac18d7564)
2021-03-30 21:57:23 -07:00
mergify[bot]
8b307ed409 security policy: Add out-of-scope section (bp #16249) (#16251)
* security policy: Add out-of-scope section

(cherry picked from commit e9e46ff521)

* Update SECURITY.md

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 700ebde474)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-31 04:49:17 +00:00
mergify[bot]
cf21719a07 Add get_max_retransmit_slot/get_max_shred_insert_slot to RpcClient (#16243)
(cherry picked from commit 2a1639836a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-31 01:09:11 +00:00
mergify[bot]
3157b464c4 Align ProcessInstruction error handling (#16232) (#16238)
(cherry picked from commit ce7f7c2b6c)

Co-authored-by: Jack May <jack@solana.com>
2021-03-30 21:55:08 +00:00
mergify[bot]
2581db5748 docs: Reduce airdrop examples to 1 SOL (#16241)
(cherry picked from commit 2bcfbad653)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-30 21:52:42 +00:00
Trent Nelson
634959b3ab Bump version to v1.6.3 2021-03-30 16:17:47 +00:00
Trent Nelson
03b21f2e9d Bump version to v1.6.2 2021-03-30 00:06:01 -06:00
carllin
cc5565b17e Setup ReplayStage confirmation scaffolding for duplicate slots (#9698)
(cherry picked from commit 52703badfa)
2021-03-29 22:07:14 -06:00
mergify[bot]
50beef0b15 Allow incomplete features in frozen-abi (#16205)
(cherry picked from commit 9ba9d2a8ae)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-30 03:46:10 +00:00
mergify[bot]
06a54e1423 remove old code (#15988) (#15993)
(cherry picked from commit 9760fded2d)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-03-30 00:50:27 +00:00
mergify[bot]
4d731ecd08 eliminate lock on record (#15929) (#16073)
* eliminate lock on record

* use same error as MaxHeightReached

* clippy

* review feedback

* refactor should_tick code

* pr feedback

(cherry picked from commit 57ba86c821)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-03-30 00:46:13 +00:00
mergify[bot]
ee06789a66 sdk: Add try_from_slice_unchecked for Borsh (#16098) (#16158)
* sdk: Add try_from_slice_unchecked for Borsh

* Add tests

* Rename + clarify comment

* Rename back to unchecked

(cherry picked from commit cffa851e0f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-03-29 23:15:34 +00:00
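A sketch of what the helper does, assuming a borsh 0.9-style API: unlike try_from_slice, it does not require the whole slice to be consumed, which suits accounts allocated larger than their current serialized state:

```rust
use borsh::BorshDeserialize;

// Deserialize without checking that the slice was fully consumed, so
// trailing zero padding in an oversized account buffer is tolerated.
// (Standalone restatement of the idea; assumes borsh 0.9's signatures.)
fn try_from_slice_unchecked<T: BorshDeserialize>(data: &[u8]) -> std::io::Result<T> {
    let mut data_mut = data;
    T::deserialize(&mut data_mut)
}

fn main() -> std::io::Result<()> {
    let buf = [7u8, 0, 0, 0, /* trailing padding: */ 0, 0, 0];
    // try_from_slice would fail on the 3 padding bytes; unchecked succeeds.
    let v: u32 = try_from_slice_unchecked(&buf)?;
    assert_eq!(v, 7);
    Ok(())
}
```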
mergify[bot]
2dabe1d706 Add handling to close accounts to many-accounts bench (#16199) (#16201)
* gitignore farf

* Improve cli args

* Use derived addresses for accounts

* Add parameter to close every nth account created

(cherry picked from commit 1d145e1fc2)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-29 22:54:09 +00:00
Tyera Eulberg
3b1279a005 Future-aware enum name 2021-03-29 14:58:35 -06:00
mergify[bot]
5c9f85f28d Rpc: enable getConfirmedBlocks and getConfirmedBlocksWithLimit to return confirmed (not yet finalized) data (#16161) (#16198)
* Add commitment config capabilities

* Use rpc limit if no end_slot provided

* Limit to actually finalized blocks

* Support confirmed blocks in getConfirmedBlocks and getConfirmedBlocksWithLimit

* Update docs

* Add client plumbing

* Rename config enum

(cherry picked from commit 60ed8e2892)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-29 19:53:17 +00:00
mergify[bot]
e12dd46ef3 Derive PartialEq for StakeActivationState (#16196)
(cherry picked from commit 4e7bd45d4c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-29 18:16:44 +00:00
mergify[bot]
c4fa03b478 Status cache improvements (#16174) (#16178)
(cherry picked from commit 5e5b63712b)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-29 10:11:16 -07:00
mergify[bot]
9fb749deb7 Print the rust version when building bpf programs (#16181) (#16183)
(cherry picked from commit abada56ba1)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-29 07:18:55 +00:00
mergify[bot]
bd48344de2 Fix handling of invoked ix accounts in program-test (#16170) (#16176)
(cherry picked from commit 27ab415ecc)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-29 01:55:11 +00:00
mergify[bot]
78e54f1d2c Implement mnemonic support for solana-keygen grind (solana-labs#9325) (#16108) (#16173)
* Implement mnemonic support for solana-keygen grind (solana-labs#9325)

* Updated to include feedback from review.

* Renaming as per review feedback

* Fixed an incorrectly transcribed underscore

* Properly re-use string constants.

(cherry picked from commit e50f598449)

Co-authored-by: bji <bryan@ischo.com>
2021-03-28 07:05:17 +00:00
mergify[bot]
76a6576976 sdk: Use u32::MAX from std to unbreak BPF builds (#16171) (#16172)
(cherry picked from commit aabe186e3f)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-27 17:05:53 +00:00
mergify[bot]
92ec1ae255 Switch to a single use (#16169)
(cherry picked from commit 16e4ccca13)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-27 06:58:31 +00:00
Michael Vines
0d203728cc Add RpcClient::get_stake_activation() 2021-03-26 22:33:06 -07:00
mergify[bot]
625773e5b8 Rpc: enable getConfirmedBlock and getConfirmedTransaction to return confirmed (not yet finalized) data (bp #16142) (#16160)
* Rpc: enable getConfirmedBlock and getConfirmedTransaction to return confirmed (not yet finalized) data (#16142)

* Add Blockstore block and tx apis that allow unrooted responses

* Add TransactionStatusMessage, and send on bank freeze; also refactor TransactionStatusSender

* Track highest slot with tx-status writes complete

* Rename and unpub fn

* Add commitment to GetConfirmed input configs

* Support confirmed blocks in getConfirmedBlock

* Support confirmed txs in getConfirmedTransaction

* Update sigs-for-addr2 comment

* Enable confirmed block in cli

* Enable confirmed transaction in cli

* Review comments

* Rename blockstore method

(cherry picked from commit 433f1ead1c)

# Conflicts:
#	core/src/replay_stage.rs

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-03-27 04:51:53 +00:00
mergify[bot]
a4cb1e45ae Only print skipped leader slot message when the node is actually leader (#16156) (#16164)
Also, check vote signature after the vote is signed

(cherry picked from commit 60b4771fc6)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-27 02:03:10 +00:00
mergify[bot]
8aded2778e Bump bpf-tools to version v1.4 (#16152) (#16154)
(cherry picked from commit 658ddd1c9c)

Co-authored-by: Dmitri Makarov <dmakarov@users.noreply.github.com>
2021-03-26 20:51:25 +00:00
mergify[bot]
d940c5b1a3 Skip leader slots until a vote lands (#15607) (#16147)
(cherry picked from commit b99ae8f334)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-26 19:07:24 +00:00
Trent Nelson
1be045df94 sq: optimize
(cherry picked from commit 482c027d3b)
2021-03-25 21:31:52 -06:00
Trent Nelson
86191911c7 perf: use saturating/checked integer arithmetic
(cherry picked from commit 834fae684b)
2021-03-25 21:31:52 -06:00
mergify[bot]
8f852d8a6b makes test_pull_request_time_pruning smaller (#16128) (#16144)
(cherry picked from commit b041b55028)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-26 01:20:26 +00:00
Kristofer Peterson
68a439f8da Refactored ShortU16Visitor::visit_seq() to reject overflows, extra leading zeros and ensure one-to-one encoding. 2021-03-26 01:20:22 +00:00
Trent Nelson
e021832708 sdk: ShortU16 - rename variables for clarity
ShortU16's implementation embeds its usage as the length of a
ShortVec, confusingly referring to both a 'len' and a 'size'
at the same time.
2021-03-26 01:20:22 +00:00
Trent Nelson
87b11aa187 sdk: Add ShortU16 deser test 2021-03-26 01:20:22 +00:00
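A standalone sketch of the canonical "ShortU16" (compact-u16) decoding rules these commits enforce: at most three bytes, no values above u16::MAX, and no padded encodings, so each value has exactly one byte representation. This is an illustrative reimplementation, not the sdk's serde visitor:

```rust
fn decode_short_u16(bytes: &[u8]) -> Result<(u16, usize), &'static str> {
    let mut value: u32 = 0;
    for (i, &b) in bytes.iter().enumerate().take(3) {
        value |= u32::from(b & 0x7f) << (7 * i);
        if b & 0x80 == 0 {
            // A terminal byte of zero after a continuation byte is an
            // extra leading zero: [0x80, 0x00] would alias [0x00].
            if i > 0 && b == 0 {
                return Err("non-canonical encoding");
            }
            if value > u32::from(u16::MAX) {
                return Err("overflow");
            }
            return Ok((value as u16, i + 1));
        }
    }
    Err("too long or unterminated")
}

fn main() {
    assert_eq!(decode_short_u16(&[0x05]), Ok((5, 1)));
    assert_eq!(decode_short_u16(&[0x80, 0x01]), Ok((128, 2)));
    assert!(decode_short_u16(&[0x80, 0x00]).is_err()); // padded zero rejected
}
```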
mergify[bot]
7475a6f444 makes turbine peer computation consistent between broadcast and retransmit (#14910) (#16143)
get_broadcast_peers is using tvu_peers:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/broadcast_stage.rs#L362-L370
which is potentially inconsistent with retransmit_peers:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/cluster_info.rs#L1332-L1345

Also, the leader does not include its own contact-info when broadcasting
shreds:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/cluster_info.rs#L1324
but on the retransmit side, the slot leader is removed only _after_ neighbors
and children are computed:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/retransmit_stage.rs#L383-L384
So the turbine broadcast tree is different between the two stages.

This commit:
* Removes retransmit_peers. Broadcast and retransmit stages will use tvu_peers
  consistently.
* Retransmit stage removes slot leader _before_ computing children and
  neighbors.

(cherry picked from commit 570fd3f810)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-26 00:16:48 +00:00
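A sketch of the invariant the commit establishes, with integers standing in for nodes and a sort standing in for the deterministic weighted shuffle:

```rust
// Both stages derive the tree from the same peer list, and the slot
// leader is removed *before* neighbors/children are computed.
// (Illustrative stand-in for tvu_peers and the real shuffle.)
fn turbine_peers(mut tvu_peers: Vec<u64>, leader: u64) -> Vec<u64> {
    tvu_peers.retain(|&node| node != leader); // drop the leader first
    tvu_peers.sort_unstable(); // placeholder for the weighted shuffle
    tvu_peers
}

fn main() {
    let peers = vec![5, 3, 9, 7];
    let leader = 9;
    // Broadcast and retransmit now compute an identical tree.
    assert_eq!(turbine_peers(peers.clone(), leader), turbine_peers(peers, leader));
}
```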
mergify[bot]
86ce650661 Add timeout for local cluster partition tests (bp #16123) (#16137)
* Add timeout for local cluster partition tests (#16123)

* Add timeout for local cluster partition tests

* fix optimistic conf test logs

* Bump instruction count assertions

(cherry picked from commit e817a6db00)

# Conflicts:
#	local-cluster/Cargo.toml

* Fix conflict

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-03-25 22:56:05 +00:00
mergify[bot]
4dc5a53014 Show bpf-tools download progress (#16135)
(cherry picked from commit 07273bfa9e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-25 20:55:11 +00:00
mergify[bot]
5e35cf3536 program: Correct clamp in Message::signer_keys() (#16114)
(cherry picked from commit 8b3de72e2a)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-25 17:53:34 +00:00
Trent Nelson
e8a8d1efb3 clap-utils: Allow NullSigners outside sign-only mode
(cherry picked from commit 7f0ac6a67c)
2021-03-25 11:10:53 -06:00
mergify[bot]
defd9238fa Simplify account.rent_epoch handling for sysvar rent (bp #16049) (#16118)
* Simplify account.rent_epoch handling for sysvar rent (#16049)

* Add some code for special local testing

* Add comment to store_account_and_update_capitalization

* Simplify account.rent_epoch handling for sysvar rent

* Introduce *_for_test functions

* Add deprecation messages to existing api

(cherry picked from commit 6d5c6c17c5)

# Conflicts:
#	sdk/src/native_loader.rs

* Fix conflicts

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-03-25 17:17:43 +09:00
mergify[bot]
5f061dcea1 Support getBlockTime for unfinalized blocks (#16103) (#16110)
(cherry picked from commit a8ef29df27)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-25 04:18:00 +00:00
mergify[bot]
e6ee27a738 Add Exodus as Solana Mobile app option (#16100) (#16101)
* Add Exodus as Solana Mobile app option

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit ad47c63f27)

Co-authored-by: Davey <35187388+davidzelaya@users.noreply.github.com>
2021-03-24 21:34:58 +00:00
mergify[bot]
dd2d25d698 limits CrdsGossipPull::pull_request_time size (#15793) (#16097)
There is no pruning logic on CrdsGossipPull::pull_request_time
https://github.com/solana-labs/solana/blob/79ac1997d/core/src/crds_gossip_pull.rs#L172-L174
potentially allowing this to take too much memory.

Additionally, CrdsGossipPush::last_pushed_to is pruning recent push
timestamps:
https://github.com/solana-labs/solana/blob/79ac1997d/core/src/crds_gossip_push.rs#L275-L279
instead of the older ones.

Co-authored-by: Nathan Hawkins <utsl@utsl.org>
(cherry picked from commit a6c23648cb)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-24 20:05:04 +00:00
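A minimal sketch of bounding such a timestamp map by evicting the oldest entries first, the opposite of pruning recent ones; the helper is hypothetical and node keys are reduced to integers:

```rust
use std::collections::HashMap;

// Evict entries with the smallest (oldest) timestamps until the map
// fits under `cap`. Not the real CrdsGossipPull code.
fn prune_oldest(times: &mut HashMap<u64, u64>, cap: usize) {
    while times.len() > cap {
        let oldest = *times.iter().min_by_key(|(_, &t)| t).map(|(k, _)| k).unwrap();
        times.remove(&oldest);
    }
}

fn main() {
    let mut times: HashMap<u64, u64> = (0..10).map(|k| (k, k * 100)).collect();
    prune_oldest(&mut times, 4);
    assert_eq!(times.len(), 4);
    assert!(times.contains_key(&9)); // the newest entry survives
}
```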
Dmitri Makarov
9096c3df02 Adjust BPF test programs instruction counts 2021-03-24 11:59:59 +01:00
Dmitri Makarov
9f94c2a9a0 Bump bpf-tools to version v1.3
This brings in the fix for increased compute budget that wasn't caught
when bpf-tools v1.2 were released.
2021-03-24 11:59:59 +01:00
Dmitri Makarov
34213da9f4 Bump bpf-tools to v1.2 and get rid of xargo 2021-03-24 11:59:59 +01:00
mergify[bot]
c3c4991c44 rpc: add getSlotLeaders method (#16057) (#16079)
(cherry picked from commit e7fd7d46cf)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-23 19:27:18 +00:00
mergify[bot]
9d37a33dcd buffers data shreds to make larger erasure coded sets (bp #15849) (#16074)
* buffers data shreds to make larger erasure coded sets (#15849)

Broadcast stage batches up to 8 entries:
https://github.com/solana-labs/solana/blob/79280b304/core/src/broadcast_stage/broadcast_utils.rs#L26-L29
which will be serialized into some number of shreds and chunked into FEC
sets of at most 32 shreds each:
https://github.com/solana-labs/solana/blob/79280b304/ledger/src/shred.rs#L576-L597
So depending on the size of entries, FEC sets can be small, which may
aggravate the loss rate.
For example, 16 FEC sets of 2:2 data/code shreds each have a higher loss
rate than one 32:32 set.

This commit broadcasts data shreds immediately, but also buffers them
until it has a batch of 32 data shreds, at which point 32 coding shreds
are generated and broadcast.

(cherry picked from commit 4f82b897bc)

# Conflicts:
#	ledger/src/shred.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-23 18:23:09 +00:00
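A sketch of the batching policy described above; shreds are byte vectors and the erasure-coding step is a placeholder:

```rust
const FEC_SET_SIZE: usize = 32;

struct Broadcaster {
    buffer: Vec<Vec<u8>>, // pending data shreds
}

impl Broadcaster {
    // The data shred itself is broadcast right away by the caller; coding
    // shreds are only emitted once a full 32-shred batch has accumulated.
    fn on_data_shred(&mut self, shred: Vec<u8>) -> Option<Vec<Vec<u8>>> {
        self.buffer.push(shred);
        if self.buffer.len() == FEC_SET_SIZE {
            // Placeholder for Reed-Solomon encoding over the whole batch.
            let coding = self.buffer.iter().cloned().collect();
            self.buffer.clear();
            return Some(coding); // 32 coding shreds for a 32:32 set
        }
        None
    }
}

fn main() {
    let mut b = Broadcaster { buffer: Vec::new() };
    for i in 0..FEC_SET_SIZE as u8 {
        if let Some(coding) = b.on_data_shred(vec![i]) {
            assert_eq!(coding.len(), FEC_SET_SIZE);
        }
    }
}
```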
mergify[bot]
a04ca03fee renames is_last_in_fec_set back to is_last_data (#15848) (#16075)
https://github.com/solana-labs/solana/pull/10095
renamed is_last_data to is_last_in_fec_set. However, the code shows that
this is actually meant to indicate where the serialized data is
complete:
https://github.com/solana-labs/solana/blob/420174d3d/ledger/src/shred.rs#L599-L600
https://github.com/solana-labs/solana/blob/420174d3d/ledger/src/shred.rs#L229-L231

There are multiple FEC sets for each `&[Entry]` serialized, and this flag
does not mark shreds that are last in their FEC set (only the very last one,
by overlap). So the name is wrong and confusing.

(cherry picked from commit 3b85cbc504)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-23 16:59:47 +00:00
mergify[bot]
64ce4a6203 solana transfer now requires --allow-unfunded-recipient if the recipient doesn't exist (bp #16060) (#16067)
* transfer now requires --allow-unfunded-recipient if the recipient doesn't exist

(cherry picked from commit 3dff5c9dee)

* Avoid RPC in `--sign-only` mode

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 6271665ba6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-23 03:54:42 +00:00
mergify[bot]
7ac3c9ec76 Handle blockstore insert dup checks (#16051) (#16066)
(cherry picked from commit d76ad33597)

Co-authored-by: carllin <carl@solana.com>
2021-03-23 00:49:10 +00:00
mergify[bot]
7d91515e8d Make getStakeActivation response consistent for undelegated accounts (#16038) (#16040)
(cherry picked from commit 2ec24d438f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-19 22:07:30 +00:00
mergify[bot]
4e3f2c3d2d program-test: Fix warp and staking issue (#16002) (#16031)
Since program-test creates a test genesis and then adds fees and rent,
some of the genesis accounts get rent-collected after warping.  Most
notably, `StakeConfig` gets rent-collected, causing any stake operations
to fail after warp.  This fix creates genesis with the `Rent` and
`FeeRateGovernor` actually used by the bank.

(cherry picked from commit 6cc22e62d4)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-03-19 14:54:58 +00:00
mergify[bot]
8b67ba6d3d docs: SIGUSR1 killing wrapper shell scripts (#16009)
(cherry picked from commit 07dc522981)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-19 07:45:34 +00:00
mergify[bot]
c2ce68ab90 Sanitize instruction index when loading instruction from sysvar (#15942) (#16004)
(cherry picked from commit 4c5660ba7a)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-19 02:48:41 +00:00
mergify[bot]
fe87cb1cd1 Update to reqwest 0.11.2 (#16000)
(cherry picked from commit 02b81dd05d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-18 22:12:01 +00:00
mergify[bot]
1c8f6a836a cli cleanup (#15990) (#15997)
(cherry picked from commit 067b390194)

Co-authored-by: Jack May <jack@solana.com>
2021-03-18 20:03:04 +00:00
mergify[bot]
3d5ff7968e rpc: Add config options limiting getConfirmedBlock response data (#15970) (#15995)
* Add new confirmed block struct

* Add RpcConfirmedBlockConfig options

* Configure block response based on new options

* Add client api, use in cli fetch_epoch_rewards

* Update docs

* Apply review suggestions

(cherry picked from commit aa54c468ea)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-18 19:33:01 +00:00
Tyera Eulberg
d6160f7744 Avoid panic when validator doesn't have performance samples (#15976)
(cherry picked from commit ba33c9e18e)
2021-03-18 08:28:31 -07:00
mergify[bot]
5e9ce99abf remote-wallet: Expose Ledger app settings (#15978)
(cherry picked from commit 2dabcac0da)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-18 09:13:24 +00:00
mergify[bot]
ebd6fe7acb Avoid a panic when --slots-per-epoch is less than MINIMUM_SLOTS_PER_EPOCH (#15975)
(cherry picked from commit 4ab98fff02)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-18 07:17:50 +00:00
mergify[bot]
9e91a2c2fd Add Close instruction and tooling to upgradeable loader (#15887) (#15972)
(cherry picked from commit 7f500d610c)

Co-authored-by: Jack May <jack@solana.com>
2021-03-18 06:02:57 +00:00
Michael Vines
899f57962a Add --slots-per-epoch argument
(cherry picked from commit 04c99cf7ea)
2021-03-17 17:25:51 -07:00
Michael Vines
3176b00e57 Add --slots-per-epoch validator
(cherry picked from commit c06ff47a90)
2021-03-17 17:25:51 -07:00
Jeff Washington (jwash)
08b9da8397 drop poh lock after record (#15930)
(cherry picked from commit 5460fb10a2)
2021-03-17 17:24:53 -07:00
mergify[bot]
2bc21ecba2 Allow unbounded wallclock processing time in tests (#15961) (#15966)
(cherry picked from commit f548a04fae)

Co-authored-by: carllin <carl@solana.com>
2021-03-18 00:22:06 +00:00
Jeff Washington (jwash)
5b2a65fab3 add metrics for tick producer and poh_recorder (#15931)
(cherry picked from commit 40997d0aef)
2021-03-17 16:36:50 -07:00
mergify[bot]
f5d56eabf3 Build full SPL in CI (bp #15886) (#15964)
* Build full SPL in CI

(cherry picked from commit 82269f1351)

* Avoid changing signature of ProgramTest::add_account

(cherry picked from commit 03180b502d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 22:46:55 +00:00
Michael Vines
af45efb62c Notice the user when the --mint, --bpf-program, or --clone arguments are ignored
(cherry picked from commit 59c19d9fbf)
2021-03-17 14:10:14 -07:00
mergify[bot]
f528cda832 Ignore flaky test_banking_stage_entries_only and test_banking_stage_entryfication (#15959)
(cherry picked from commit 8a9b51952e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 20:34:30 +00:00
mergify[bot]
eeef9f4e59 Separate snapshot location (bp #15840) (#15956)
* Add option for separate snapshot location

(cherry picked from commit 6126878f509c69e23480a5ec22b3271e2b16e072)
(cherry picked from commit 0209d334bd)

* Apply suggestions from code review

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit cfb01e26dd)

* add missed suggestion

(cherry picked from commit a43b3674c7)

* Revert to snapshots

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 0b42379ed7)

* Revert to snapshots 2

(cherry picked from commit 20b53eb4b4)

* Revert to removing only tmp-

(cherry picked from commit a5d144b00f)

Co-authored-by: DimAn <diman@diman.io>
Co-authored-by: DimAn <andiman7000@gmail.com>
2021-03-17 20:25:18 +00:00
Michael Vines
32124b59e9 Download snapshot files with a tmp- prefix so they'll automatically be cleaned up if interrupted
(cherry picked from commit 58b980f9cd)
2021-03-17 10:18:18 -07:00
Michael Vines
aa9772f9c0 Replace solana-program-test when building example-helloworld 2021-03-17 09:08:41 -07:00
mergify[bot]
5f183bd773 Add helper for paring down signers to those required by a tx message (bp #15899) (#15938)
* sdk: Add accessor for signer pubkeys of a tx message

(cherry picked from commit bf33ce8906)

* clap-utils: Add helper to `CliSignerInfo` for getting signers for a message

(cherry picked from commit 4e99f1e634)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-17 07:48:47 +00:00
mergify[bot]
2238e5001b solana-install init can now select a pre-release from Github (#15936)
(cherry picked from commit d9176c1903)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 04:31:55 +00:00
mergify[bot]
79fa7ef55c CLI: Support dumping the TX message in sign-only mode (#15933)
(cherry picked from commit 672e9c640f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-17 04:13:21 +00:00
mergify[bot]
07df827411 Bump tokio to 1.1 (#15926) (#15928)
(cherry picked from commit 654449ce91)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-16 23:29:55 +00:00
mergify[bot]
a259ff0e72 Wallclock BankingStage Throttle (#15731) (#15890)
(cherry picked from commit c1ba265dd9)

Co-authored-by: carllin <carl@solana.com>
2021-03-16 21:12:59 +00:00
mergify[bot]
d7d3e767e7 fix: compute pre/post token balances on all accounts if token program present (#15900) (#15923)
* fix: compute pre/post token balances on all accounts if token program present

* fix: skip token program in balance query

* fix: prevent program ids from being collected

(cherry picked from commit 61112d4826)

Co-authored-by: Josh <josh.hundley@gmail.com>
2021-03-16 18:23:29 +00:00
mergify[bot]
6e8aa9af17 nit: fix spelling (#15908) (#15911)
(cherry picked from commit 5760cf0f41)

# Conflicts:
#	sdk/src/feature_set.rs

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 10:58:39 -07:00
mergify[bot]
0236de7bc8 Encourage use of the default --ledger location (#15921)
(cherry picked from commit 1c261d293f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 16:58:14 +00:00
mergify[bot]
899bd1572a Show flags for accounts in tx by solana confirm (#15804) (#15906)
* Show flags for accounts in tx by solana confirm

* Address review comments

* Improve comment a bit

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Further apply review suggestions

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 74aa32175b)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-03-16 10:43:06 +00:00
mergify[bot]
97ec4cd44e Cli: better estimate of epoch time elapsed/remaining (#15893) (#15918)
* Add rpc_client api for getRecentPerformanceSamples

* Prep fn for variable avg slot time

* Use recent-perf-samples to more-accurately estimate epoch completed times

* Spell out average

(cherry picked from commit 3726358f51)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-16 09:58:51 +00:00
mergify[bot]
5500970a7e Add cargo-bpf-test --no-run flag, matching cargo-test (#15916)
(cherry picked from commit eb19e11688)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 09:45:35 +00:00
Michael Vines
caea04d8d5 Pin solana crate versions to prevent downstream users from accidentally mixing crate versions 2021-03-16 08:41:28 +00:00
Michael Vines
b1a90c3580 =1.6.1 2021-03-16 08:41:28 +00:00
mergify[bot]
5bd4e38345 Charge compute budget for bytes passed via cpi (#15874) (#15905)
(cherry picked from commit ad9901d7c6)

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 07:57:32 +00:00
mergify[bot]
fddba08571 Improve Instruction::new deprecation warning (#15896)
(cherry picked from commit 8567b41d5f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 05:18:31 +00:00
Michael Vines
87963764fa Export tokio for program-test clients
(cherry picked from commit 430ed6d774)
2021-03-15 22:14:17 -07:00
mergify[bot]
b691a159dd increment_cargo_version.sh tune ups (bp #15880) (#15892)
* Disallow version bump with dirty working tree

(cherry picked from commit 853e735edf)

* Ignore `not_paths` for `*.md` files when bumping version

(cherry picked from commit 510760d81b)

* Also ignore `*/node_modules/*` paths when bumping version

(cherry picked from commit 2bf46b789f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-16 02:07:46 +00:00
mergify[bot]
5af1d48be8 Display actual account length (#15875) (#15884)
(cherry picked from commit 60e5fd11c9)

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 01:01:25 +00:00
mergify[bot]
3b3ec3313f Fix real_number_string_trimmed zero-decimal behavior (#15873) (#15877)
* Add failing test

* Don't strip zeroes from zero-decimal amounts

* Add zero-case test

(cherry picked from commit c40bd5f394)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-15 21:33:01 +00:00
Michael Vines
be00246fb5 Bump version to v1.6.1 2021-03-15 14:47:58 -06:00
Michael Vines
1d80ba9edf Update cargo lock files on version bump 2021-03-15 14:47:58 -06:00
mergify[bot]
4bcf976ecd Fix delinquent stake display (#15839)
(cherry picked from commit eab182188a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-13 20:27:25 +00:00
1340 changed files with 76572 additions and 174964 deletions


@@ -12,8 +12,7 @@ export PS4="++"
 # Restore target/ from the previous CI build on this machine
 #
 eval "$(ci/channel-info.sh)"
-eval "$(ci/sbf-tools-info.sh)"
-export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
+export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
 (
   set -x
   MAX_CACHE_SIZE=18 # gigabytes
@@ -37,7 +36,4 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
   # `std:
   # "found possibly newer version of crate `std` which `xyz` depends on
   rm -rf target/bpfel-unknown-unknown
-  if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then
-    rm -rf target/release
-  fi
 )


@@ -65,11 +65,20 @@ pull_request_rules:
       backport:
         ignore_conflicts: true
         branches:
-          - v1.9
-
-commands_restrictions:
-  # The author of copied PRs is the Mergify user.
-  # Restrict `copy` access to Core Contributors
-  copy:
+          - v1.5
+  - name: v1.6 backport
     conditions:
-      - author=@core-contributors
+      - label=v1.6
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.6
+  - name: v1.7 backport
+    conditions:
+      - label=v1.7
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.7


@@ -29,7 +29,6 @@ jobs:
     if: type IN (api, cron) OR tag IS present
     name: "macOS release artifacts"
     os: osx
-    osx_image: xcode12
     language: rust
     rust:
       - stable
@@ -37,12 +36,8 @@
       - source ci/rust-version.sh
       - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
       - readlink -f .
-      - brew install gnu-tar
-      - PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
-      - tar --version
     script:
       - source ci/env.sh
-      - rustup set profile default
       - ci/publish-tarball.sh
     deploy:
       - provider: s3
@@ -65,12 +60,6 @@
   - <<: *release-artifacts
     name: "Windows release artifacts"
     os: windows
-    install:
-      - choco install openssl
-      - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
-      - source ci/rust-version.sh
-      - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
-      - readlink -f .
   # Linux release artifacts are still built by ci/buildkite-secondary.yml
   #- <<: *release-artifacts
   #  name: "Linux release artifacts"
@@ -84,7 +73,7 @@
     language: node_js
     node_js:
-      - "lts/*"
+      - "node"

     cache:
       directories:
@@ -127,7 +116,7 @@
     if: type IN (push, pull_request) OR tag IS present
     language: node_js
     node_js:
-      - "lts/*"
+      - "node"
     services:
       - docker

Cargo.lock (generated): 3006 changed lines; file diff suppressed because it is too large.


@@ -1,8 +1,5 @@
 [workspace]
 members = [
-    "accountsdb-plugin-interface",
-    "accountsdb-plugin-manager",
-    "accountsdb-plugin-postgres",
     "accounts-cluster-bench",
     "bench-exchange",
     "bench-streamer",
@@ -12,7 +9,6 @@ members = [
     "banks-client",
     "banks-interface",
     "banks-server",
-    "bloom",
     "clap-utils",
     "cli-config",
     "cli-output",
@@ -25,7 +21,6 @@ members = [
     "perf",
     "validator",
     "genesis",
     "genesis-utils",
-    "gossip",
     "install",
     "keygen",
@@ -36,6 +31,7 @@ members = [
     "log-analyzer",
     "merkle-root-bench",
     "merkle-tree",
+    "stake-o-matic",
     "storage-bigtable",
     "storage-proto",
     "streamer",
@@ -43,19 +39,21 @@ members = [
"metrics",
"net-shaper",
"notifier",
"poh",
"poh-bench",
"program-test",
"programs/secp256k1",
"programs/bpf_loader",
"programs/compute-budget",
"programs/budget",
"programs/config",
"programs/exchange",
"programs/ed25519",
"programs/secp256k1",
"programs/failure",
"programs/noop",
"programs/ownable",
"programs/stake",
"programs/vest",
"programs/vote",
"remote-wallet",
"rpc",
"ramp-tps",
"runtime",
"runtime/store-tool",
"sdk",
@@ -63,6 +61,7 @@
     "sdk/cargo-test-bpf",
     "scripts",
     "stake-accounts",
+    "stake-monitor",
     "sys-tuner",
     "tokens",
     "transaction-status",
@@ -78,11 +77,3 @@ members = [
 exclude = [
     "programs/bpf",
 ]
-
-# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
-# dependency is supported on Apple M1. v2 of the feature resolver is needed to
-# specify arch-specific features.
-resolver = "2"
-
-[profile.dev]
-split-debuginfo = "unpacked"


@@ -1,6 +1,6 @@
 <p align="center">
   <a href="https://solana.com">
-    <img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
+    <img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
   </a>
 </p>
@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt
 ```
-Please make sure you are always using the latest stable rust version by running:
+Please sure you are always using the latest stable rust version by running:
 
 ```bash
 $ rustup update
@@ -32,12 +32,6 @@ $ sudo apt-get update
 $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
 ```
-
-On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
-
-```bash
-$ softwareupdate --install-rosetta
-```
 
 ## **2. Download the source code.**
 
 ```bash
@@ -51,6 +45,11 @@ $ cd solana
 $ cargo build
 ```
 
+## **4. Run a minimal local cluster.**
+```bash
+$ ./run.sh
+```
+
 # Testing
 
 **Run the test suite:**

View File

@@ -51,27 +51,13 @@ The following components are out of scope for the bounty program
* Attacks that require social engineering
Eligibility:
* The participant submitting the bug report shall follow the process outlined within this document
* The participant submitting the bug bounty shall follow the process outlined within this document
* Valid exploits can be eligible even if they are not successfully executed on the cluster
* Multiple submissions for the same class of exploit are still eligible for compensation, though they may be compensated at a lower rate; these will be assessed on a case-by-case basis
* Participants must complete KYC and sign the participation agreement at https://solana.com/validator-registration when registrations are open. Security exploits will still be assessed and open for submission at all times. This need only be done prior to the distribution of tokens.
Payment of Bug Bounties:
* Payments for eligible bug reports are distributed monthly.
* Bounties for all bug reports submitted in a given month are paid out in the middle of the
following month.
* The SOL/USD conversion rate used for payments is the market price at the end of
the last day of the month for the month in which the bug was submitted.
* The reference for this price is the closing price given by Coingecko.com for that date, listed here:
https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
payouts is the close price on 2021-03-31 of $19.49, to be paid in mid-April
2021 (see the conversion sketch after the notes below).
* Bug bounties are paid out in
[stake accounts](https://solana.com/staking) with a
[lockup](https://docs.solana.com/staking/stake-accounts#lockups)
expiring 12 months from the last day of the month in which the bug was submitted.
Notes:
* All locked tokens can be staked during the lockup period
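To make the conversion concrete, here is a minimal sketch of the payout rule above; the $2,000 bounty amount is hypothetical, while the $19.49 close is the policy's own March 2021 example.
```rust
// Hedged sketch of the stated rule: payout_sol = bounty_usd / sol_usd_close.
fn main() {
    let bounty_usd = 2_000.0; // hypothetical award amount
    let sol_usd_close = 19.49; // Coingecko close for 2021-03-31, per the example above
    let payout_sol = bounty_usd / sol_usd_close;
    // ~102.62 SOL, delivered in a stake account with a 12-month lockup
    println!("{:.2} SOL", payout_sol);
}
```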
<a name="process"></a>
## Incident Response Process

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.8.17"
version = "1.6.6"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,11 @@ lazy_static = "1.4.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.8.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
solana-config-program = { path = "../programs/config", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.6" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"

View File

@@ -17,10 +17,8 @@ pub mod validator_info;
use {
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
solana_sdk::{
account::{ReadableAccount, WritableAccount},
clock::Epoch,
fee_calculator::FeeCalculator,
pubkey::Pubkey,
account::ReadableAccount, account::WritableAccount, clock::Epoch,
fee_calculator::FeeCalculator, pubkey::Pubkey,
},
std::{
io::{Read, Write},
@@ -30,7 +28,6 @@ use {
pub type StringAmount = String;
pub type StringDecimals = String;
pub const MAX_BASE58_BYTES: usize = 128;
/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
@@ -51,7 +48,7 @@ pub enum UiAccountData {
Binary(String, UiAccountEncoding),
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary, // Legacy. Retained for RPC backwards compatibility
@@ -63,53 +60,41 @@ pub enum UiAccountEncoding {
}
impl UiAccount {
fn encode_bs58<T: ReadableAccount>(
account: &T,
data_slice_config: Option<UiDataSliceConfig>,
) -> String {
if account.data().len() <= MAX_BASE58_BYTES {
bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
} else {
"error: data too large for bs58 encoding".to_string()
}
}
pub fn encode<T: ReadableAccount>(
pubkey: &Pubkey,
account: &T,
account: T,
encoding: UiAccountEncoding,
additional_data: Option<AccountAdditionalData>,
data_slice_config: Option<UiDataSliceConfig>,
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::LegacyBinary(data)
}
UiAccountEncoding::Base58 => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::Binary(data, encoding)
}
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(account.data(), data_slice_config)),
base64::encode(slice_data(&account.data(), data_slice_config)),
encoding,
),
UiAccountEncoding::Base64Zstd => {
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
match encoder
.write_all(slice_data(account.data(), data_slice_config))
.write_all(slice_data(&account.data(), data_slice_config))
.and_then(|()| encoder.finish())
{
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
Err(_) => UiAccountData::Binary(
base64::encode(slice_data(account.data(), data_slice_config)),
base64::encode(slice_data(&account.data(), data_slice_config)),
UiAccountEncoding::Base64,
),
}
}
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) =
parse_account_data(pubkey, account.owner(), account.data(), additional_data)
parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
{
UiAccountData::Json(parsed_data)
} else {
@@ -181,7 +166,7 @@ impl Default for UiFeeCalculator {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
pub offset: usize,
@@ -204,10 +189,8 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::account::{Account, AccountSharedData},
};
use super::*;
use solana_sdk::account::{Account, AccountSharedData};
#[test]
fn test_slice_data() {
@@ -241,7 +224,7 @@ mod test {
fn test_base64_zstd() {
let encoded_account = UiAccount::encode(
&Pubkey::default(),
&AccountSharedData::from(Account {
AccountSharedData::from(Account {
data: vec![0; 1024],
..Account::default()
}),
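As a usage sketch of the v1.8-side `encode` shown above (note the `&T` parameter and the 128-byte `MAX_BASE58_BYTES` guard on Base58 encoding); the account contents below are placeholders.
```rust
use solana_account_decoder::{UiAccount, UiAccountEncoding, UiDataSliceConfig};
use solana_sdk::{account::Account, pubkey::Pubkey};

fn main() {
    // 1024 bytes exceeds MAX_BASE58_BYTES, so Base58 would return the
    // "data too large" error string; Base64 plus a data slice works instead.
    let account = Account {
        lamports: 1,
        data: vec![0; 1024],
        ..Account::default()
    };
    let ui_account = UiAccount::encode(
        &Pubkey::default(),
        &account,
        UiAccountEncoding::Base64,
        None, // AccountAdditionalData is only consulted for JsonParsed
        Some(UiDataSliceConfig { offset: 0, length: 64 }),
    );
    println!("{:?}", ui_account.data);
}
```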

View File

@@ -1,27 +1,25 @@
use {
crate::{
parse_bpf_loader::parse_bpf_upgradeable_loader,
parse_config::parse_config,
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id},
parse_vote::parse_vote,
},
inflector::Inflector,
serde_json::Value,
solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
std::collections::HashMap,
thiserror::Error,
use crate::{
parse_bpf_loader::parse_bpf_upgradeable_loader,
parse_config::parse_config,
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id_v2_0},
parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;
lazy_static! {
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
@@ -114,14 +112,12 @@ pub fn parse_account_data(
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::nonce::{
state::{Data, Versions},
State,
},
solana_vote_program::vote_state::{VoteState, VoteStateVersions},
use super::*;
use solana_sdk::nonce::{
state::{Data, Versions},
State,
};
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
#[test]
fn test_parse_account_data() {
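For callers, a small sketch of the dispatcher above: `parse_account_data` looks the owner program up in `PARSABLE_PROGRAM_IDS` and returns an error for programs it does not recognize. The nonce-account bytes are assumed to come from a real account.
```rust
use solana_account_decoder::parse_account_data::parse_account_data;
use solana_sdk::{pubkey::Pubkey, system_program};

// Assuming `data` holds the bytes of an initialized nonce account
// (owned by the system program), dispatch it to the nonce parser.
fn describe_account(pubkey: &Pubkey, data: &[u8]) {
    match parse_account_data(pubkey, &system_program::id(), data, None) {
        Ok(parsed) => println!("parsed by `{}`: {}", parsed.program, parsed.parsed),
        Err(err) => eprintln!("not parsable: {}", err),
    }
}
```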

View File

@@ -1,11 +1,9 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
UiAccountData, UiAccountEncoding,
},
bincode::{deserialize, serialized_size},
solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
UiAccountData, UiAccountEncoding,
};
use bincode::{deserialize, serialized_size};
use solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey};
pub fn parse_bpf_upgradeable_loader(
data: &[u8],
@@ -92,7 +90,9 @@ pub struct UiProgramData {
#[cfg(test)]
mod test {
use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
use super::*;
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
#[test]
fn test_parse_bpf_upgradeable_loader_accounts() {

View File

@@ -1,19 +1,15 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
validator_info,
},
bincode::deserialize,
serde_json::Value,
solana_config_program::{get_config_data, ConfigKeys},
solana_sdk::{
pubkey::Pubkey,
stake::config::{self as stake_config, Config as StakeConfig},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
validator_info,
};
use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::config::Config as StakeConfig;
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
let parsed_account = if pubkey == &stake_config::id() {
let parsed_account = if pubkey == &solana_stake_program::config::id() {
get_config_data(data)
.ok()
.and_then(|data| deserialize::<StakeConfig>(data).ok())
@@ -41,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
where
T: serde::de::DeserializeOwned,
{
let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
let keys = keys
.iter()
.map(|key| UiConfigKey {
@@ -91,10 +87,11 @@ pub struct UiConfig<T> {
#[cfg(test)]
mod test {
use {
super::*, crate::validator_info::ValidatorInfo, serde_json::json,
solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
};
use super::*;
use crate::validator_info::ValidatorInfo;
use serde_json::json;
use solana_config_program::create_config_account;
use solana_sdk::account::ReadableAccount;
#[test]
fn test_parse_config() {
@@ -104,7 +101,11 @@ mod test {
};
let stake_config_account = create_config_account(vec![], &stake_config, 10);
assert_eq!(
parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
parse_config(
&stake_config_account.data(),
&solana_stake_program::config::id()
)
.unwrap(),
ConfigAccountType::StakeConfig(UiStakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,
@@ -124,7 +125,7 @@ mod test {
10,
);
assert_eq!(
parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
ConfigAccountType::ValidatorInfo(UiConfig {
keys: vec![
UiConfigKey {

View File

@@ -1,9 +1,7 @@
use {
crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
solana_sdk::{
instruction::InstructionError,
nonce::{state::Versions, State},
},
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
instruction::InstructionError,
nonce::{state::Versions, State},
};
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
@@ -44,16 +42,14 @@ pub struct UiNonceData {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
hash::Hash,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
use super::*;
use solana_sdk::{
hash::Hash,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
};
#[test]

View File

@@ -1,14 +1,10 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
},
bincode::deserialize,
solana_sdk::{
clock::{Epoch, UnixTimestamp},
stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
};
use bincode::deserialize;
use solana_sdk::clock::{Epoch, UnixTimestamp};
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
let stake_state: StakeState = deserialize(data)
@@ -136,7 +132,8 @@ impl From<Delegation> for UiDelegation {
#[cfg(test)]
mod test {
use {super::*, bincode::serialize};
use super::*;
use bincode::serialize;
#[test]
fn test_parse_stake() {

View File

@@ -1,20 +1,18 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, UiFeeCalculator,
},
bincode::deserialize,
bv::BitVec,
solana_sdk::{
clock::{Clock, Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
pubkey::Pubkey,
rent::Rent,
slot_hashes::SlotHashes,
slot_history::{self, SlotHistory},
stake_history::{StakeHistory, StakeHistoryEntry},
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, UiFeeCalculator,
};
use bincode::deserialize;
use bv::BitVec;
use solana_sdk::{
clock::{Clock, Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
pubkey::Pubkey,
rent::Rent,
slot_hashes::SlotHashes,
slot_history::{self, SlotHistory},
stake_history::{StakeHistory, StakeHistoryEntry},
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
};
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
@@ -214,12 +212,10 @@ pub struct UiStakeHistoryEntry {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
sysvar::recent_blockhashes::IterItem,
},
use super::*;
use solana_sdk::{
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
sysvar::recent_blockhashes::IterItem,
};
#[test]

View File

@@ -1,38 +1,36 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, StringDecimals,
},
solana_sdk::pubkey::Pubkey,
spl_token::{
solana_program::{
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
},
state::{Account, AccountState, Mint, Multisig},
},
std::str::FromStr,
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, StringDecimals,
};
use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{
solana_program::{
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
},
state::{Account, AccountState, Mint, Multisig},
};
use std::str::FromStr;
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id() -> Pubkey {
Pubkey::new_from_array(spl_token::id().to_bytes())
pub fn spl_token_id_v2_0() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
}
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
pub fn spl_token_v2_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
}
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::new_from_array(pubkey.to_bytes())
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
}
// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
Pubkey::new_from_array(pubkey.to_bytes())
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
Pubkey::from_str(&pubkey.to_string()).unwrap()
}
pub fn parse_token(
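As a round-trip sketch of the v1.8-side helpers above (byte-level conversion avoids the string round-trip used on the v1.6 side):
```rust
use solana_account_decoder::parse_token::{pubkey_from_spl_token, spl_token_pubkey};
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Both Pubkey types share the same 32-byte representation, so
    // converting through raw bytes is lossless in either direction.
    let pubkey = Pubkey::new_unique();
    let spl_pubkey = spl_token_pubkey(&pubkey);
    assert_eq!(pubkey_from_spl_token(&spl_pubkey), pubkey);
}
```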

View File

@@ -1,11 +1,9 @@
use {
crate::{parse_account_data::ParseAccountError, StringAmount},
solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
},
solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
@@ -123,7 +121,8 @@ struct UiEpochCredits {
#[cfg(test)]
mod test {
use {super::*, solana_vote_program::vote_state::VoteStateVersions};
use super::*;
use solana_vote_program::vote_state::VoteStateVersions;
#[test]
fn test_parse_vote() {

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.11"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-measure = { path = "../measure", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

View File

@@ -1,19 +1,15 @@
#![allow(clippy::integer_arithmetic)]
#[macro_use]
extern crate log;
use {
clap::{crate_description, crate_name, value_t, App, Arg},
rayon::prelude::*,
solana_measure::measure::Measure,
solana_runtime::{
accounts::{create_test_accounts, update_accounts_bench, Accounts},
accounts_db::AccountShrinkThreshold,
accounts_index::AccountSecondaryIndexes,
ancestors::Ancestors,
},
solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
std::{env, fs, path::PathBuf},
use clap::{crate_description, crate_name, value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
accounts::{create_test_accounts, update_accounts_bench, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
use std::{collections::HashSet, env, fs, path::PathBuf};
fn main() {
solana_logger::setup();
@@ -62,14 +58,8 @@ fn main() {
if fs::remove_dir_all(path.clone()).is_err() {
println!("Warning: Couldn't remove {:?}", path);
}
let accounts = Accounts::new_with_config(
vec![path],
&ClusterType::Testnet,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
None,
);
let accounts =
Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
let pubkeys: Vec<_> = (0..num_slots)
@@ -93,19 +83,17 @@ fn main() {
num_slots,
create_time
);
let mut ancestors = Vec::with_capacity(num_slots);
ancestors.push(0);
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.push(i as u64);
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
}
let ancestors = Ancestors::from(ancestors);
let mut elapsed = vec![0; iterations];
let mut elapsed_store = vec![0; iterations];
for x in 0..iterations {
if clean {
let mut time = Measure::start("clean");
accounts.accounts_db.clean_accounts(None, false);
accounts.accounts_db.clean_accounts(None);
time.stop();
println!("{}", time);
for slot in 0..num_slots {
@@ -124,8 +112,6 @@ fn main() {
solana_sdk::clock::Slot::default(),
&ancestors,
None,
false,
None,
);
time_store.stop();
if results != results_store {
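A minimal sketch of the v1.8-side `Ancestors` construction this hunk moves to, building from a `Vec` of slots rather than inserting slot-to-index pairs; the slot values are arbitrary, and `contains_key` is assumed from solana-runtime's `Ancestors` API.
```rust
use solana_runtime::ancestors::Ancestors;

fn main() {
    // v1.8 side: collect the slot numbers first, then convert in one step.
    let slots: Vec<u64> = (0..4).collect();
    let ancestors = Ancestors::from(slots);
    assert!(ancestors.contains_key(&3));
}
```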

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-cluster-bench"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,24 +13,22 @@ clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.4.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.6" }
solana-client = { path = "../client", version = "=1.6.6" }
solana-core = { path = "../core", version = "=1.6.6" }
solana-measure = { path = "../measure", version = "=1.6.6" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-net-utils = { path = "../net-utils", version = "=1.6.6" }
solana-faucet = { path = "../faucet", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,38 +1,35 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_account_decoder::parse_token::spl_token_pubkey,
solana_clap_utils::input_parsers::pubkey_of,
solana_client::rpc_client::RpcClient,
solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
solana_gossip::gossip_service::discover,
solana_measure::measure::Measure,
solana_runtime::inline_spl_token,
solana_sdk::{
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
rpc_port::DEFAULT_RPC_PORT,
signature::{read_keypair_file, Keypair, Signature, Signer},
system_instruction, system_program,
timing::timestamp,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::parse_token::spl_token_instruction,
std::{
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg};
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
use solana_clap_utils::input_parsers::pubkey_of;
use solana_client::rpc_client::RpcClient;
use solana_core::gossip_service::discover;
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
use solana_measure::measure::Measure;
use solana_runtime::inline_spl_token_v2_0;
use solana_sdk::{
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
rpc_port::DEFAULT_RPC_PORT,
signature::{read_keypair_file, Keypair, Signature, Signer},
system_instruction, system_program,
timing::timestamp,
transaction::Transaction,
};
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
use std::{
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};
// Create and close messages both require 2 signatures; if transaction construction changes, update
@@ -58,7 +55,7 @@ pub fn airdrop_lamports(
);
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
@@ -276,7 +273,7 @@ fn make_create_message(
.into_iter()
.map(|_| {
let program_id = if mint.is_some() {
inline_spl_token::id()
inline_spl_token_v2_0::id()
} else {
system_program::id()
};
@@ -293,12 +290,12 @@ fn make_create_message(
&program_id,
)];
if let Some(mint_address) = mint {
instructions.push(spl_token_instruction(
spl_token::instruction::initialize_account(
&spl_token::id(),
&spl_token_pubkey(&to_pubkey),
&spl_token_pubkey(&mint_address),
&spl_token_pubkey(&base_keypair.pubkey()),
instructions.push(spl_token_v2_0_instruction(
spl_token_v2_0::instruction::initialize_account(
&spl_token_v2_0::id(),
&spl_token_v2_0_pubkey(&to_pubkey),
&spl_token_v2_0_pubkey(&mint_address),
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
)
.unwrap(),
));
@@ -324,7 +321,7 @@ fn make_close_message(
.into_iter()
.map(|_| {
let program_id = if spl_token {
inline_spl_token::id()
inline_spl_token_v2_0::id()
} else {
system_program::id()
};
@@ -332,12 +329,12 @@ fn make_close_message(
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_instruction(
spl_token::instruction::close_account(
&spl_token::id(),
&spl_token_pubkey(&address),
&spl_token_pubkey(&keypair.pubkey()),
&spl_token_pubkey(&base_keypair.pubkey()),
spl_token_v2_0_instruction(
spl_token_v2_0::instruction::close_account(
&spl_token_v2_0::id(),
&spl_token_v2_0_pubkey(&address),
&spl_token_v2_0_pubkey(&keypair.pubkey()),
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
&[],
)
.unwrap(),
@@ -362,11 +359,11 @@ fn make_close_message(
fn run_accounts_bench(
entrypoint_addr: SocketAddr,
faucet_addr: SocketAddr,
payer_keypairs: &[&Keypair],
keypair: &Keypair,
iterations: usize,
maybe_space: Option<u64>,
batch_size: usize,
close_nth_batch: u64,
close_nth: u64,
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
@@ -375,7 +372,7 @@ fn run_accounts_bench(
let client =
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
info!("Targeting {}", entrypoint_addr);
info!("Targetting {}", entrypoint_addr);
let mut last_blockhash = Instant::now();
let mut last_log = Instant::now();
@@ -384,10 +381,7 @@ fn run_accounts_bench(
let mut tx_sent_count = 0;
let mut total_accounts_created = 0;
let mut total_accounts_closed = 0;
let mut balances: Vec<_> = payer_keypairs
.iter()
.map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0))
.collect();
let mut balance = client.get_balance(&keypair.pubkey()).unwrap_or(0);
let mut last_balance = Instant::now();
let default_max_lamports = 1000;
@@ -404,7 +398,7 @@ fn run_accounts_bench(
max_closed: Arc::new(AtomicU64::default()),
};
info!("Starting balance(s): {:?}", balances);
info!("Starting balance: {}", balance);
let executor = TransactionExecutor::new(entrypoint_addr);
@@ -420,88 +414,69 @@ fn run_accounts_bench(
.saturating_mul(NUM_SIGNATURES);
let lamports = min_balance + fee;
for (i, balance) in balances.iter_mut().enumerate() {
if *balance < lamports || last_balance.elapsed().as_millis() > 2000 {
if let Ok(b) = client.get_balance(&payer_keypairs[i].pubkey()) {
*balance = b;
}
last_balance = Instant::now();
if *balance < lamports * 2 {
info!(
"Balance {} is less than needed: {}, doing aidrop...",
balance, lamports
);
if !airdrop_lamports(
&client,
&faucet_addr,
payer_keypairs[i],
lamports * 100_000,
) {
warn!("failed airdrop, exiting");
return;
}
if balance < lamports || last_balance.elapsed().as_millis() > 2000 {
if let Ok(b) = client.get_balance(&keypair.pubkey()) {
balance = b;
}
last_balance = Instant::now();
if balance < lamports {
info!(
"Balance {} is less than needed: {}, doing aidrop...",
balance, lamports
);
if !airdrop_lamports(&client, &faucet_addr, keypair, lamports * 100_000) {
warn!("failed airdrop, exiting");
return;
}
}
}
// Create accounts
let sigs_len = executor.num_outstanding();
if sigs_len < batch_size {
let num_to_create = batch_size - sigs_len;
if num_to_create >= payer_keypairs.len() {
info!("creating {} new", num_to_create);
let chunk_size = num_to_create / payer_keypairs.len();
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
.into_par_iter()
.map(|_| {
let message = make_create_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
num_instructions,
min_balance,
maybe_space,
mint,
);
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
info!("txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_created += num_instructions * new_ids.len();
}
}
}
info!("creating {} new", num_to_create);
let txs: Vec<_> = (0..num_to_create)
.into_par_iter()
.map(|_| {
let message = make_create_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
num_instructions,
min_balance,
maybe_space,
mint,
);
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balance = balance.saturating_sub(lamports * txs.len() as u64);
info!("txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_created += num_instructions * new_ids.len();
if close_nth_batch > 0 {
let num_batches_to_close =
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
let expected_closed = num_batches_to_close * batch_size as u64;
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
// Close every account we've created with seed between max_closed_seed..expected_closed
if max_closed_seed < expected_closed {
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
if close_nth > 0 {
let expected_closed = total_accounts_created as u64 / close_nth;
if expected_closed > total_accounts_closed {
let txs: Vec<_> = (0..expected_closed - total_accounts_closed)
.into_par_iter()
.map(|_| {
let message = make_close_message(
payer_keypairs[0],
keypair,
&base_keypair,
seed_tracker.max_closed.clone(),
1,
min_balance,
mint.is_some(),
);
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
balance = balance.saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
@@ -516,8 +491,8 @@ fn run_accounts_bench(
count += 1;
if last_log.elapsed().as_millis() > 3000 {
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance: {}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balance
);
last_log = Instant::now();
}
@@ -568,7 +543,6 @@ fn main() {
Arg::with_name("identity")
.long("identity")
.takes_value(true)
.multiple(true)
.value_name("FILE")
.help("keypair file"),
)
@@ -580,14 +554,14 @@ fn main() {
.help("Number of transactions to send per batch"),
)
.arg(
Arg::with_name("close_nth_batch")
Arg::with_name("close_nth")
.long("close-frequency")
.takes_value(true)
.value_name("BYTES")
.help(
"Every `n` batches, create a batch of close transactions for
the earliest remaining batch of accounts created.
Note: Should be > 1 to avoid situations where the close \
"Send close transactions after this many accounts created. \
Note: a `close-frequency` value near or below `batch-size` \
may result in transaction-simulation errors, as the close \
transactions will be submitted before the corresponding \
create transactions have been confirmed",
),
@@ -640,7 +614,7 @@ fn main() {
let space = value_t!(matches, "space", u64).ok();
let lamports = value_t!(matches, "lamports", u64).ok();
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
@@ -650,30 +624,20 @@ fn main() {
let mint = pubkey_of(&matches, "mint");
let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String)
.iter()
.map(|keypair_string| {
read_keypair_file(keypair_string)
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string))
})
.collect();
let mut payer_keypair_refs: Vec<&Keypair> = vec![];
for keypair in payer_keypairs.iter() {
payer_keypair_refs.push(keypair);
}
let keypair =
read_keypair_file(&value_t_or_exit!(matches, "identity", String)).expect("bad keypair");
let rpc_addr = if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let (gossip_nodes, _validators) = discover(
None, // keypair
None,
Some(&entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
SocketAddrSpace::Unspecified,
None,
Some(60),
None,
Some(&entrypoint_addr),
None,
0,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
@@ -690,11 +654,11 @@ fn main() {
run_accounts_bench(
rpc_addr,
faucet_addr,
&payer_keypair_refs,
&keypair,
iterations,
space,
batch_size,
close_nth_batch,
close_nth,
lamports,
num_instructions,
mint,
@@ -703,20 +667,18 @@ fn main() {
#[cfg(test)]
pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::poh_config::PohConfig,
use super::*;
use solana_core::validator::ValidatorConfig;
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
};
use solana_sdk::poh_config::PohConfig;
#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default_for_test();
let validator_config = ValidatorConfig::default();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
@@ -727,22 +689,22 @@ pub mod test {
};
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster = LocalCluster::new(&mut config);
let iterations = 10;
let maybe_space = None;
let batch_size = 100;
let close_nth_batch = 100;
let close_nth = 100;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
run_accounts_bench(
cluster.entry_point_info.rpc,
faucet_addr,
&[&cluster.funding_keypair],
&cluster.funding_keypair,
iterations,
maybe_space,
batch_size,
close_nth_batch,
close_nth,
maybe_lamports,
num_instructions,
None,

View File

@@ -1,17 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
[dependencies]
log = "0.4.11"
thiserror = "1.0.29"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana AccountsDb Plugin Interface
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see `accountsdb_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
external PostgreSQL database.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)

View File

@@ -1,143 +0,0 @@
/// The interface for AccountsDb plugins. A plugin must implement
/// the AccountsDbPlugin trait to work with the runtime.
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
std::{any::Any, error, io},
thiserror::Error,
};
impl Eq for ReplicaAccountInfo<'_> {}
#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
/// The Pubkey for the account
pub pubkey: &'a [u8],
/// The lamports for the account
pub lamports: u64,
/// The Pubkey of the owner program account
pub owner: &'a [u8],
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: u64,
/// The data held in this account.
pub data: &'a [u8],
/// A global monotonically increasing atomic number, which can be used
/// to tell the order of the account update. For example, when an
/// account is updated in the same slot multiple times, the update
/// with higher write_version should supersede the one with lower
/// write_version.
pub write_version: u64,
}
/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
/// there would be new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
ConfigFileOpenError(#[from] io::Error),
/// Error in reading the content of the config file or the content
/// is not in the expected format.
#[error("Error reading config file. Error message: ({msg})")]
ConfigFileReadError { msg: String },
/// Error when updating the account.
#[error("Error updating account. Error message: ({msg})")]
AccountsUpdateError { msg: String },
/// Error when updating the slot status
#[error("Error updating slot status. Error message: ({msg})")]
SlotStatusUpdateError { msg: String },
/// Any custom error defined by the plugin.
#[error("Plugin-defined custom error. Error message: ({0})")]
Custom(Box<dyn error::Error + Send + Sync>),
}
/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
/// the fork the validator believes is most likely to finalize.
Processed,
/// The highest slot having reached max vote lockout.
Rooted,
/// The highest slot that has been voted on by a supermajority of the cluster, i.e. is confirmed.
Confirmed,
}
impl SlotStatus {
pub fn as_str(&self) -> &'static str {
match self {
SlotStatus::Confirmed => "confirmed",
SlotStatus::Processed => "processed",
SlotStatus::Rooted => "rooted",
}
}
}
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
/// The callback called when a plugin is loaded by the system,
/// used for doing whatever initialization is required by the plugin.
/// The _config_file parameter contains the name of the config file.
/// The config must be in JSON format and
/// include a field "libpath" indicating the full path
/// name of the shared library implementing this interface.
fn on_load(&mut self, _config_file: &str) -> Result<()> {
Ok(())
}
/// The callback called right before a plugin is unloaded by the system
/// Used for doing cleanup before unload.
fn on_unload(&mut self) {}
/// Called when an account is updated at a slot.
/// When `is_startup` is true, it indicates the account is loaded from
/// snapshots when the validator starts up. When `is_startup` is false,
/// the account is updated during transaction processing.
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()>;
/// Called once all accounts have been notified during startup.
fn notify_end_of_startup(&mut self) -> Result<()>;
/// Called when a slot status is updated
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()>;
}
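To make the trait concrete, here is a minimal no-op plugin sketch against the interface above; it would be compiled as a `cdylib` and exports the `_create_plugin` constructor the loader expects. The plugin name is arbitrary.
```rust
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaAccountInfoVersions, Result, SlotStatus,
};

#[derive(Debug)]
struct NoopPlugin;

impl AccountsDbPlugin for NoopPlugin {
    fn name(&self) -> &'static str {
        "noop-plugin" // arbitrary example name
    }

    // A real plugin would persist the account update here.
    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        _slot: u64,
        _is_startup: bool,
    ) -> Result<()> {
        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        Ok(())
    }

    fn update_slot_status(
        &mut self,
        _slot: u64,
        _parent: Option<u64>,
        _status: SlotStatus,
    ) -> Result<()> {
        Ok(())
    }
}

/// Constructor looked up by the validator's plugin loader.
#[no_mangle]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    Box::into_raw(Box::new(NoopPlugin))
}
```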

View File

@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;

View File

@@ -1,30 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.4"
libloading = "0.7.0"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-rpc = { path = "../rpc", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,227 +0,0 @@
/// Module responsible for notifying plugins of account updates
use {
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::{
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
append_vec::{StoredAccountMeta, StoredMeta},
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,
},
std::sync::{Arc, RwLock},
};
#[derive(Debug)]
pub(crate) struct AccountsUpdateNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
self.notify_plugins_of_account_update(account_info, slot, false);
}
}
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
let account = self.accountinfo_from_stored_account_meta(account);
measure_copy.stop();
inc_new_counter_debug!(
"accountsdb-plugin-copy-stored-account-info-us",
measure_copy.as_us() as usize,
100000,
100000
);
if let Some(account_info) = account {
self.notify_plugins_of_account_update(account_info, slot, true);
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify-account-restore-all-us",
measure_all.as_us() as usize,
100000,
100000
);
}
fn notify_end_of_restore_from_snapshot(&self) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
match plugin.notify_end_of_startup() {
Err(err) => {
error!(
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully notified the end of restore from snapshot to plugin {}",
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-end-of-restore-from-snapshot",
measure.as_us() as usize
);
}
}
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
}
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Processed);
}
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
}
}
impl AccountsUpdateNotifierImpl {
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
AccountsUpdateNotifierImpl { plugin_manager }
}
fn accountinfo_from_shared_account_data<'a>(
&self,
meta: &'a StoredMeta,
account: &'a AccountSharedData,
) -> Option<ReplicaAccountInfo<'a>> {
Some(ReplicaAccountInfo {
pubkey: meta.pubkey.as_ref(),
lamports: account.lamports(),
owner: account.owner().as_ref(),
executable: account.executable(),
rent_epoch: account.rent_epoch(),
data: account.data(),
write_version: meta.write_version,
})
}
fn accountinfo_from_stored_account_meta<'a>(
&self,
stored_account_meta: &'a StoredAccountMeta,
) -> Option<ReplicaAccountInfo<'a>> {
Some(ReplicaAccountInfo {
pubkey: stored_account_meta.meta.pubkey.as_ref(),
lamports: stored_account_meta.account_meta.lamports,
owner: stored_account_meta.account_meta.owner.as_ref(),
executable: stored_account_meta.account_meta.executable,
rent_epoch: stored_account_meta.account_meta.rent_epoch,
data: stored_account_meta.data,
write_version: stored_account_meta.meta.write_version,
})
}
fn notify_plugins_of_account_update(
&self,
account: ReplicaAccountInfo,
slot: Slot,
is_startup: bool,
) {
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-account");
match plugin.update_account(
ReplicaAccountInfoVersions::V0_0_1(&account),
slot,
is_startup,
) {
Err(err) => {
error!(
"Failed to update account {} at slot {}, error: {} to plugin {}",
bs58::encode(account.pubkey).into_string(),
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated account {} at slot {} to plugin {}",
bs58::encode(account.pubkey).into_string(),
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-account-us",
measure.as_us() as usize,
100000,
100000
);
}
measure2.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify_plugins_of_account_update-us",
measure2.as_us() as usize,
100000,
100000
);
}
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-slot");
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
Err(err) => {
error!(
"Failed to update slot status at slot {}, error: {} to plugin {}",
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated slot status at slot {} to plugin {}",
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-slot-us",
measure.as_us() as usize,
1000,
1000
);
}
}
}

View File

@@ -1,55 +0,0 @@
/// Managing the AccountsDb plugins
use {
libloading::{Library, Symbol},
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
std::error::Error,
};
#[derive(Default, Debug)]
pub struct AccountsDbPluginManager {
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
libs: Vec<Library>,
}
impl AccountsDbPluginManager {
pub fn new() -> Self {
AccountsDbPluginManager {
plugins: Vec::default(),
libs: Vec::default(),
}
}
/// # Safety
///
/// This function loads the dynamically linked library specified in the path. The library
/// must perform any necessary initialization.
pub unsafe fn load_plugin(
&mut self,
libpath: &str,
config_file: &str,
) -> Result<(), Box<dyn Error>> {
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
let lib = Library::new(libpath)?;
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
let plugin_raw = constructor();
let mut plugin = Box::from_raw(plugin_raw);
plugin.on_load(config_file)?;
self.plugins.push(plugin);
self.libs.push(lib);
Ok(())
}
/// Unload all plugins and loaded plugin libraries, making sure to fire
/// their `on_unload()` methods so they can do any necessary cleanup.
pub fn unload(&mut self) {
for mut plugin in self.plugins.drain(..) {
info!("Unloading plugin for {:?}", plugin.name());
plugin.on_unload();
}
for lib in self.libs.drain(..) {
drop(lib);
}
}
}

View File

@@ -1,157 +0,0 @@
use {
crate::{
accounts_update_notifier::AccountsUpdateNotifierImpl,
accountsdb_plugin_manager::AccountsDbPluginManager,
slot_status_observer::SlotStatusObserver,
},
crossbeam_channel::Receiver,
log::*,
serde_json,
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
std::{
fs::File,
io::Read,
path::{Path, PathBuf},
sync::{Arc, RwLock},
thread,
},
thiserror::Error,
};
#[derive(Error, Debug)]
pub enum AccountsdbPluginServiceError {
#[error("Cannot open the the plugin config file")]
CannotOpenConfigFile(String),
#[error("Cannot read the the plugin config file")]
CannotReadConfigFile(String),
#[error("The config file is not in a valid Json format")]
InvalidConfigFileFormat(String),
#[error("Plugin library path is not specified in the config file")]
LibPathNotSet,
#[error("Invalid plugin path")]
InvalidPluginPath,
#[error("Cannot load plugin shared library")]
PluginLoadError(String),
}
/// The service managing the AccountsDb plugin workflow.
pub struct AccountsDbPluginService {
slot_status_observer: SlotStatusObserver,
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
accounts_update_notifier: AccountsUpdateNotifier,
}
impl AccountsDbPluginService {
/// Creates and returns the AccountsDbPluginService.
/// # Arguments
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notification
/// * `accountsdb_plugin_config_file` - The config file path for the plugin. The
/// config file controls the plugin responsible
/// for transporting the data to external data stores. It is defined in JSON format.
/// The `libpath` field should be pointed to the full path of the dynamic shared library
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
/// trait. The shared library must also export a `C` function `_create_plugin` that
/// creates the implementation of `AccountsDbPlugin` and returns it to the caller.
/// The definition of the remaining JSON fields is up to the concrete plugin implementation;
/// they are usually used to configure the connection information for the external data store.
pub fn new(
confirmed_bank_receiver: Receiver<BankNotification>,
accountsdb_plugin_config_files: &[PathBuf],
) -> Result<Self, AccountsdbPluginServiceError> {
info!(
"Starting AccountsDbPluginService from config files: {:?}",
accountsdb_plugin_config_files
);
let mut plugin_manager = AccountsDbPluginManager::new();
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
}
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
plugin_manager.clone(),
)));
let slot_status_observer =
SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());
info!("Started AccountsDbPluginService");
Ok(AccountsDbPluginService {
slot_status_observer,
plugin_manager,
accounts_update_notifier,
})
}
fn load_plugin(
plugin_manager: &mut AccountsDbPluginManager,
accountsdb_plugin_config_file: &Path,
) -> Result<(), AccountsdbPluginServiceError> {
let mut file = match File::open(accountsdb_plugin_config_file) {
Ok(file) => file,
Err(err) => {
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
"Failed to open the plugin config file {:?}, error: {:?}",
accountsdb_plugin_config_file, err
)));
}
};
let mut contents = String::new();
if let Err(err) = file.read_to_string(&mut contents) {
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
"Failed to read the plugin config file {:?}, error: {:?}",
accountsdb_plugin_config_file, err
)));
}
let result: serde_json::Value = match serde_json::from_str(&contents) {
Ok(value) => value,
Err(err) => {
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
format!(
"The config file {:?} is not in a valid Json format, error: {:?}",
accountsdb_plugin_config_file, err
),
));
}
};
let libpath = result["libpath"]
.as_str()
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
let config_file = accountsdb_plugin_config_file
.as_os_str()
.to_str()
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
unsafe {
let result = plugin_manager.load_plugin(libpath, config_file);
if let Err(err) = result {
let msg = format!(
"Failed to load the plugin library: {:?}, error: {:?}",
libpath, err
);
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
}
}
Ok(())
}
pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
self.accounts_update_notifier.clone()
}
pub fn join(mut self) -> thread::Result<()> {
self.slot_status_observer.join()?;
self.plugin_manager.write().unwrap().unload();
Ok(())
}
}
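
For orientation, a minimal sketch of how a host (e.g. the validator) could wire the
service up, assuming the items above are in scope; the channel construction and config
path here are illustrative, not the validator's actual wiring:

use {crossbeam_channel::unbounded, std::path::PathBuf};

fn start_plugin_service() -> Result<(), AccountsdbPluginServiceError> {
    // The sender side would be handed to whatever component produces bank notifications.
    let (_bank_notification_sender, bank_notification_receiver) = unbounded();
    let config_files = vec![PathBuf::from("/path/to/accountsdb_plugin_config.json")];
    let service = AccountsDbPluginService::new(bank_notification_receiver, &config_files)?;
    // The notifier is handed to AccountsDb so that account updates reach the plugins.
    let _notifier = service.get_accounts_update_notifier();
    // On shutdown, join() drains the observer thread and unloads all plugins.
    service.join().expect("slot status observer thread panicked");
    Ok(())
}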


@@ -1,4 +0,0 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod slot_status_observer;


@@ -1,80 +0,0 @@
use {
crossbeam_channel::Receiver,
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
},
};
#[derive(Debug)]
pub(crate) struct SlotStatusObserver {
bank_notification_receiver_service: Option<JoinHandle<()>>,
exit_updated_slot_server: Arc<AtomicBool>,
}
impl SlotStatusObserver {
pub fn new(
bank_notification_receiver: Receiver<BankNotification>,
accounts_update_notifier: AccountsUpdateNotifier,
) -> Self {
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
Self {
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
bank_notification_receiver,
exit_updated_slot_server.clone(),
accounts_update_notifier,
)),
exit_updated_slot_server,
}
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
self.bank_notification_receiver_service
.take()
.map(JoinHandle::join)
.unwrap()
}
fn run_bank_notification_receiver(
bank_notification_receiver: Receiver<BankNotification>,
exit: Arc<AtomicBool>,
accounts_update_notifier: AccountsUpdateNotifier,
) -> JoinHandle<()> {
Builder::new()
.name("bank_notification_receiver".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
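// Note: recv() blocks until a notification arrives, so the exit flag is
// only re-checked after each received message.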
if let Ok(notification) = bank_notification_receiver.recv() {
match notification {
BankNotification::OptimisticallyConfirmed(slot) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_confirmed(slot, None);
}
BankNotification::Frozen(bank) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
}
BankNotification::Root(bank) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
}
}
}
}
})
.unwrap()
}
}


@@ -1,33 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
tokio-postgres = "0.7.3"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,5 +0,0 @@
This is an example implementation of the AccountsDb plugin for the PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.
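
For example, the schema can be created and dropped with the psql CLI (the database and
user names below are illustrative):

psql -U solana -d solana -f scripts/create_schema.sql
psql -U solana -d solana -f scripts/drop_schema.sql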


@@ -1,54 +0,0 @@
/**
* This plugin implementation for PostgreSQL requires the following tables
*/
-- The table storing accounts
CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status varchar(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);
/**
* The following is for keeping historical data for accounts and is not required for the plugin to work.
*/
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
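
-- Example query against the schema (illustrative; the hex literal stands in for a
-- 32-byte account public key, and is kept commented out so this script only creates
-- the schema objects):
-- SELECT owner, lamports, slot, write_version, updated_on
-- FROM account
-- WHERE pubkey = decode('0123abcd', 'hex');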


@@ -1,9 +0,0 @@
/**
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
*/
DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;


@@ -1,802 +0,0 @@
# This is a reference configuration file for PostgreSQL version 14.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here


@@ -1,69 +0,0 @@
use {log::*, std::collections::HashSet};
#[derive(Debug)]
pub(crate) struct AccountsSelector {
pub accounts: HashSet<Vec<u8>>,
pub owners: HashSet<Vec<u8>>,
pub select_all_accounts: bool,
}
impl AccountsSelector {
pub fn default() -> Self {
AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts: true,
}
}
pub fn new(accounts: &[String], owners: &[String]) -> Self {
info!(
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
accounts, owners
);
let select_all_accounts = accounts.iter().any(|key| key == "*");
if select_all_accounts {
return AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts,
};
}
let accounts = accounts
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
let owners = owners
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
AccountsSelector {
accounts,
owners,
select_all_accounts,
}
}
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_create_accounts_selector() {
AccountsSelector::new(
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
&[],
);
AccountsSelector::new(
&[],
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
);
}
}
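
A quick illustration of the selection semantics (a sketch; the key below is the same
base58 test key used above):

fn selector_semantics() {
    // A "*" entry in accounts short-circuits both sets and selects everything.
    let select_all = AccountsSelector::new(&["*".to_string()], &[]);
    assert!(select_all.select_all_accounts);

    // Otherwise an account is selected if its pubkey OR its owner matches.
    let by_owner = AccountsSelector::new(
        &[],
        &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
    );
    let owner = bs58::decode("9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin")
        .into_vec()
        .unwrap();
    assert!(by_owner.is_account_selected(b"any-pubkey", &owner));
}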


@@ -1,345 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
crate::{
accounts_selector::AccountsSelector,
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
},
bs58,
log::*,
serde_derive::{Deserialize, Serialize},
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
thiserror::Error,
};
#[derive(Default)]
pub struct AccountsDbPluginPostgres {
client: Option<ParallelPostgresClient>,
accounts_selector: Option<AccountsSelector>,
}
impl std::fmt::Debug for AccountsDbPluginPostgres {
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
pub host: Option<String>,
pub user: Option<String>,
pub port: Option<u16>,
pub connection_str: Option<String>,
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
}
#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
#[error("Error connecting to the backend data store. Error message: ({msg})")]
DataStoreConnectionError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
DataSchemaError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
ConfigurationError { msg: String },
}
impl AccountsDbPlugin for AccountsDbPluginPostgres {
fn name(&self) -> &'static str {
"AccountsDbPluginPostgres"
}
/// Do initialization for the PostgreSQL plugin.
///
/// # Format of the config file:
/// * The `accounts_selector` section allows the user to control account selection.
/// "accounts_selector" : {
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
/// or:
/// "accounts_selector" : {
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
/// }
/// Accounts satisfying either the accounts condition or the owners condition will be selected.
/// When only owners is specified, all accounts belonging to those owners will be streamed.
/// The accounts field supports a wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
/// * "host", optional, specifies the PostgreSQL server.
/// * "user", optional, specifies the PostgreSQL user.
/// * "port", optional, specifies the PostgreSQL server's port.
/// * "connection_str", optional, the custom PostgreSQL connection string.
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
/// from restoring a snapshot. The default is '10'.
/// * "panic_on_db_errors", optional, contols if to panic when there are errors replicating data to the
/// PostgreSQL database. The default is 'false'.
/// # Examples
///
/// {
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
/// "host": "host_foo",
/// "user": "solana",
/// "threads": 10,
/// "accounts_selector" : {
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
/// }
/// }
fn on_load(&mut self, config_file: &str) -> Result<()> {
solana_logger::setup_with_default("info");
info!(
"Loading plugin {:?} from config_file {:?}",
self.name(),
config_file
);
let mut file = File::open(config_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
serde_json::from_str(&contents);
match result {
Err(err) => {
return Err(AccountsDbPluginError::ConfigFileReadError {
msg: format!(
"The config file is not in the JSON format expected: {:?}",
err
),
})
}
Ok(config) => {
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
self.client = Some(client);
}
}
Ok(())
}
fn on_unload(&mut self) {
info!("Unloading plugin: {:?}", self.name());
match &mut self.client {
None => {}
Some(client) => {
client.join().unwrap();
}
}
}
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()> {
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
match account {
ReplicaAccountInfoVersions::V0_0_1(account) => {
let mut measure_select =
Measure::start("accountsdb-plugin-postgres-update-account-select");
if let Some(accounts_selector) = &self.accounts_selector {
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
return Ok(());
}
} else {
return Ok(());
}
measure_select.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-select-us",
measure_select.as_us() as usize,
100000,
100000
);
debug!(
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
bs58::encode(account.pubkey).into_string(),
bs58::encode(account.owner).into_string(),
slot,
self.accounts_selector.as_ref().unwrap()
);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database."
.to_string(),
},
)));
}
Some(client) => {
let mut measure_update =
Measure::start("accountsdb-plugin-postgres-update-account-client");
let result = { client.update_account(account, slot, is_startup) };
measure_update.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-client-us",
measure_update.as_us() as usize,
100000,
100000
);
if let Err(err) = result {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
}
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-main-us",
measure_all.as_us() as usize,
100000,
100000
);
Ok(())
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()> {
info!("Updating slot {:?} at with status {:?}", slot, status);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.update_slot_status(slot, parent, status);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<()> {
info!("Notifying the end of startup for accounts notifications");
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.notify_end_of_startup();
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
});
}
}
}
Ok(())
}
}
impl AccountsDbPluginPostgres {
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
let accounts_selector = &config["accounts_selector"];
if accounts_selector.is_null() {
AccountsSelector::default()
} else {
let accounts = &accounts_selector["accounts"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
let owners = &accounts_selector["owners"];
let owners: Vec<String> = if owners.is_array() {
owners
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
AccountsSelector::new(&accounts, &owners)
}
}
pub fn new() -> Self {
AccountsDbPluginPostgres {
client: None,
accounts_selector: None,
}
}
}
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
#[cfg(test)]
pub(crate) mod tests {
use {super::*, serde_json};
#[test]
fn test_accounts_selector_from_config() {
let config = "{\"accounts_selector\" : { \
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
}}";
let config: serde_json::Value = serde_json::from_str(config).unwrap();
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
}
}
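
For orientation, a sketch of how a host could load this exported constructor via the
`libloading` crate (illustrative only; it is not necessarily how the plugin manager
itself performs the load):

use libloading::{Library, Symbol};

unsafe fn load_plugin_from(libpath: &str) -> Box<dyn AccountsDbPlugin> {
    let lib = Library::new(libpath).expect("failed to load the plugin library");
    let constructor: Symbol<unsafe extern "C" fn() -> *mut dyn AccountsDbPlugin> =
        lib.get(b"_create_plugin").expect("symbol `_create_plugin` not found");
    let plugin = Box::from_raw(constructor());
    // In real code the Library must be kept alive for the plugin's entire lifetime;
    // it is leaked here to keep the sketch short.
    std::mem::forget(lib);
    plugin
}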


@@ -1,3 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;


@@ -1,879 +0,0 @@
#![allow(clippy::integer_arithmetic)]
/// A concurrent implementation for writing accounts into PostgreSQL in parallel.
use {
crate::accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
chrono::Utc,
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_sdk::timing::AtomicInterval,
std::{
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc, Mutex,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
},
tokio_postgres::types::ToSql,
};
/// The maximum number of asynchronous requests allowed in the channel, to avoid excessive
/// memory usage. The downside is that calls made after this threshold is reached can block.
const MAX_ASYNC_REQUESTS: usize = 40960;
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
struct PostgresSqlClientWrapper {
client: Client,
update_account_stmt: Statement,
bulk_account_insert_stmt: Statement,
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
}
pub struct SimplePostgresClient {
batch_size: usize,
pending_account_updates: Vec<DbAccountInfo>,
client: Mutex<PostgresSqlClientWrapper>,
}
struct PostgresClientWorker {
client: SimplePostgresClient,
/// Indicates whether the accounts notifications during startup are done.
is_startup_done: bool,
}
impl Eq for DbAccountInfo {}
#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
pub pubkey: Vec<u8>,
pub lamports: i64,
pub owner: Vec<u8>,
pub executable: bool,
pub rent_epoch: i64,
pub data: Vec<u8>,
pub slot: i64,
pub write_version: i64,
}
pub(crate) fn abort() -> ! {
#[cfg(not(test))]
{
// standard error is usually redirected to a log file, cry for help on standard output as
// well
eprintln!("Validator process aborted. The validator log may contain further details");
std::process::exit(1);
}
#[cfg(test)]
panic!("process::exit(1) is intercepted for friendly test failure...");
}
impl DbAccountInfo {
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
let data = account.data().to_vec();
Self {
pubkey: account.pubkey().to_vec(),
lamports: account.lamports() as i64,
owner: account.owner().to_vec(),
executable: account.executable(),
rent_epoch: account.rent_epoch() as i64,
data,
slot: slot as i64,
write_version: account.write_version(),
}
}
}
pub trait ReadableAccountInfo: Sized {
fn pubkey(&self) -> &[u8];
fn owner(&self) -> &[u8];
fn lamports(&self) -> i64;
fn executable(&self) -> bool;
fn rent_epoch(&self) -> i64;
fn data(&self) -> &[u8];
fn write_version(&self) -> i64;
}
impl ReadableAccountInfo for DbAccountInfo {
fn pubkey(&self) -> &[u8] {
&self.pubkey
}
fn owner(&self) -> &[u8] {
&self.owner
}
fn lamports(&self) -> i64 {
self.lamports
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch
}
fn data(&self) -> &[u8] {
&self.data
}
fn write_version(&self) -> i64 {
self.write_version
}
}
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
fn pubkey(&self) -> &[u8] {
self.pubkey
}
fn owner(&self) -> &[u8] {
self.owner
}
fn lamports(&self) -> i64 {
self.lamports as i64
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch as i64
}
fn data(&self) -> &[u8] {
self.data
}
fn write_version(&self) -> i64 {
self.write_version as i64
}
}
pub trait PostgresClient {
fn join(&mut self) -> thread::Result<()> {
Ok(())
}
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError>;
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError>;
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
}
impl SimplePostgresClient {
fn connect_to_db(
config: &AccountsDbPluginPostgresConfig,
) -> Result<Client, AccountsDbPluginError> {
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
let connection_str = if let Some(connection_str) = &config.connection_str {
connection_str.clone()
} else {
if config.host.is_none() || config.user.is_none() {
let msg = format!(
"\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
config.connection_str, config.host, config.user
);
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::ConfigurationError { msg },
)));
}
format!(
"host={} user={} port={}",
config.host.as_ref().unwrap(),
config.user.as_ref().unwrap(),
port
)
};
match Client::connect(&connection_str, NoTls) {
Err(err) => {
let msg = format!(
"Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
err, connection_str
);
error!("{}", msg);
Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
)))
}
Ok(client) => Ok(client),
}
}
fn build_bulk_account_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
for j in 0..batch_size {
let row = j * ACCOUNT_COLUMN_COUNT;
let val_str = format!(
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
row + 1,
row + 2,
row + 3,
row + 4,
row + 5,
row + 6,
row + 7,
row + 8,
row + 9,
);
if j == 0 {
stmt = format!("{} {}", &stmt, val_str);
} else {
stmt = format!("{}, {}", &stmt, val_str);
}
}
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
stmt = format!("{} {}", stmt, handle_conflict);
info!("{}", stmt);
let bulk_stmt = client.prepare(&stmt);
match bulk_stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_single_account_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
VALUES ($1, $2, $3, $4) \
ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
fn build_slot_upsert_statement_without_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, status, updated_on) \
VALUES ($1, $2, $3) \
ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
}
/// Insert accounts in batch to reduce network overhead
fn insert_accounts_in_batch(
&mut self,
account: DbAccountInfo,
) -> Result<(), AccountsDbPluginError> {
self.pending_account_updates.push(account);
if self.pending_account_updates.len() == self.batch_size {
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
let mut values: Vec<&(dyn ToSql + Sync)> =
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
let updated_on = Utc::now().naive_utc();
for j in 0..self.batch_size {
let account = &self.pending_account_updates[j];
values.push(&account.pubkey);
values.push(&account.slot);
values.push(&account.owner);
values.push(&account.lamports);
values.push(&account.executable);
values.push(&account.rent_epoch);
values.push(&account.data);
values.push(&account.write_version);
values.push(&updated_on);
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-prepare-values-us",
measure.as_us() as usize,
10000,
10000
);
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
let client = self.client.get_mut().unwrap();
let result = client
.client
.query(&client.bulk_account_insert_stmt, &values);
self.pending_account_updates.clear();
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-us",
measure.as_us() as usize,
10000,
10000
);
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-count",
self.batch_size,
10000,
10000
);
}
Ok(())
}
/// Flush any leftover accounts in the buffer that were not processed in the last batch
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
if self.pending_account_updates.is_empty() {
return Ok(());
}
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
}
Ok(())
}
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating SimplePostgresClient...");
let mut client = Self::connect_to_db(config)?;
let bulk_account_insert_stmt =
Self::build_bulk_account_insert_statement(&mut client, config)?;
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
let update_slot_with_parent_stmt =
Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
let update_slot_without_parent_stmt =
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
pending_account_updates: Vec::with_capacity(batch_size),
client: Mutex::new(PostgresSqlClientWrapper {
client,
update_account_stmt,
bulk_account_insert_stmt,
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
}),
})
}
}
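// A minimal construction sketch. The field names below are taken from their usage
// in this file (`config.host`, `config.user`, `config.batch_size`, ...); the exact
// field types and any remaining fields are assumptions, so treat this as
// illustrative rather than the crate's verbatim API:
//
// let config = AccountsDbPluginPostgresConfig {
// host: Some("localhost".to_string()),
// user: Some("solana".to_string()),
// threads: Some(10),
// batch_size: Some(10),
// panic_on_db_errors: Some(true),
// ..Default::default() // assumes the config type derives Default
// };
// let mut client = SimplePostgresClient::new(&config)?;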
impl PostgresClient for SimplePostgresClient {
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
trace!(
"Updating account {} with owner {} at slot {}",
bs58::encode(account.pubkey()).into_string(),
bs58::encode(account.owner()).into_string(),
account.slot,
);
if !is_startup {
return self.upsert_account(&account);
}
self.insert_accounts_in_batch(account)
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
info!("Updating slot {:?} at with status {:?}", slot, status);
let slot = slot as i64; // postgres only supports i64
let parent = parent.map(|parent| parent as i64);
let updated_on = Utc::now().naive_utc();
let status_str = status.as_str();
let client = self.client.get_mut().unwrap();
let result = match parent {
Some(parent) => client.client.execute(
&client.update_slot_with_parent_stmt,
&[&slot, &parent, &status_str, &updated_on],
),
None => client.client.execute(
&client.update_slot_without_parent_stmt,
&[&slot, &status_str, &updated_on],
),
};
match result {
Err(err) => {
let msg = format!(
"Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
err
);
error!("{:?}", msg);
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
}
Ok(rows) => {
assert_eq!(1, rows, "Expected one row to be updated at a time");
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
self.flush_buffered_writes()
}
}
struct UpdateAccountRequest {
account: DbAccountInfo,
is_startup: bool,
}
struct UpdateSlotRequest {
slot: u64,
parent: Option<u64>,
slot_status: SlotStatus,
}
enum DbWorkItem {
UpdateAccount(UpdateAccountRequest),
UpdateSlot(UpdateSlotRequest),
}
impl PostgresClientWorker {
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
let result = SimplePostgresClient::new(&config);
match result {
Ok(client) => Ok(PostgresClientWorker {
client,
is_startup_done: false,
}),
Err(err) => {
error!("Error in creating SimplePostgresClient: {}", err);
Err(err)
}
}
}
fn do_work(
&mut self,
receiver: Receiver<DbWorkItem>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
panic_on_db_errors: bool,
) -> Result<(), AccountsDbPluginError> {
while !exit_worker.load(Ordering::Relaxed) {
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
let work = receiver.recv_timeout(Duration::from_millis(500));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-worker-recv-us",
measure.as_us() as usize,
100000,
100000
);
match work {
Ok(work) => match work {
DbWorkItem::UpdateAccount(request) => {
if let Err(err) = self
.client
.update_account(request.account, request.is_startup)
{
error!("Failed to update account: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::UpdateSlot(request) => {
if let Err(err) = self.client.update_slot_status(
request.slot,
request.parent,
request.slot_status,
) {
error!("Failed to update slot: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
},
Err(err) => match err {
RecvTimeoutError::Timeout => {
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
if let Err(err) = self.client.notify_end_of_startup() {
error!("Error in notifying end of startup: ({})", err);
if panic_on_db_errors {
abort();
}
}
self.is_startup_done = true;
startup_done_count.fetch_add(1, Ordering::Relaxed);
}
continue;
}
_ => {
error!("Error in receiving the item {:?}", err);
if panic_on_db_errors {
abort();
}
break;
}
},
}
}
Ok(())
}
}
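// The worker loop above follows a common shutdown-aware channel pattern: block on
// `recv_timeout` so the exit flag is re-checked at least every 500ms even when no
// work arrives, and treat a disconnected channel as terminal. A minimal standalone
// sketch of that pattern, reusing the same types as `do_work`:
#[allow(dead_code)]
fn example_worker_loop(receiver: Receiver<DbWorkItem>, exit: Arc<AtomicBool>) {
while !exit.load(Ordering::Relaxed) {
match receiver.recv_timeout(Duration::from_millis(500)) {
Ok(_work_item) => { /* process the item */ }
Err(RecvTimeoutError::Timeout) => continue, // no work yet; re-check the exit flag
Err(RecvTimeoutError::Disconnected) => break, // all senders dropped
}
}
}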
pub struct ParallelPostgresClient {
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
initialized_worker_count: Arc<AtomicUsize>,
sender: Sender<DbWorkItem>,
last_report: AtomicInterval,
}
impl ParallelPostgresClient {
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating ParallelPostgresClient...");
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
let exit_worker = Arc::new(AtomicBool::new(false));
let mut workers = Vec::default();
let is_startup_done = Arc::new(AtomicBool::new(false));
let startup_done_count = Arc::new(AtomicUsize::new(0));
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
for i in 0..worker_count {
let cloned_receiver = receiver.clone();
let exit_clone = exit_worker.clone();
let is_startup_done_clone = is_startup_done.clone();
let startup_done_count_clone = startup_done_count.clone();
let initialized_worker_count_clone = initialized_worker_count.clone();
let config = config.clone();
let worker = Builder::new()
.name(format!("worker-{}", i))
.spawn(move || -> Result<(), AccountsDbPluginError> {
let panic_on_db_errors = *config
.panic_on_db_errors
.as_ref()
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
let result = PostgresClientWorker::new(config);
match result {
Ok(mut worker) => {
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
worker.do_work(
cloned_receiver,
exit_clone,
is_startup_done_clone,
startup_done_count_clone,
panic_on_db_errors,
)?;
Ok(())
}
Err(err) => {
error!("Error when making connection to database: ({})", err);
if panic_on_db_errors {
abort();
}
Err(err)
}
}
})
.unwrap();
workers.push(worker);
}
info!("Created ParallelPostgresClient.");
Ok(Self {
last_report: AtomicInterval::default(),
workers,
exit_worker,
is_startup_done,
startup_done_count,
initialized_worker_count,
sender,
})
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_worker.store(true, Ordering::Relaxed);
while let Some(worker) = self.workers.pop() {
let result = worker.join().unwrap();
if result.is_err() {
error!("The worker thread has failed: {:?}", result);
}
}
Ok(())
}
pub fn update_account(
&mut self,
account: &ReplicaAccountInfo,
slot: u64,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
if self.last_report.should_update(30000) {
datapoint_debug!(
"postgres-plugin-stats",
("message-queue-length", self.sender.len() as i64, i64),
);
}
let mut measure = Measure::start("accountsdb-plugin-postgres-create-work-item");
let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
account: DbAccountInfo::new(account, slot),
is_startup,
});
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-create-work-item-us",
measure.as_us() as usize,
100000,
100000
);
let mut measure = Measure::start("accountsdb-plugin-postgres-send-msg");
if let Err(err) = self.sender.send(wrk_item) {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!(
"Failed to update the account {:?}, error: {:?}",
bs58::encode(account.pubkey()).into_string(),
err
),
});
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-send-msg-us",
measure.as_us() as usize,
100000,
100000
);
Ok(())
}
pub fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
slot,
parent,
slot_status: status,
})) {
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
});
}
Ok(())
}
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue have been received by the workers
while !self.sender.is_empty() {
sleep(Duration::from_millis(100));
}
self.is_startup_done.store(true, Ordering::Relaxed);
// Wait for all worker threads to be done with flushing
while self.startup_done_count.load(Ordering::Relaxed)
!= self.initialized_worker_count.load(Ordering::Relaxed)
{
info!(
"Startup done count: {}, good worker thread count: {}",
self.startup_done_count.load(Ordering::Relaxed),
self.initialized_worker_count.load(Ordering::Relaxed)
);
sleep(Duration::from_millis(100));
}
info!("Done with notifying the end of startup");
Ok(())
}
}
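// Lifecycle sketch for the parallel client: create it, enqueue updates, flush the
// startup backlog, then signal the workers to exit and join them. The `SlotStatus`
// variant name is assumed here for illustration; only `as_str()` appears above.
#[allow(dead_code)]
fn example_parallel_client_lifecycle(
config: &AccountsDbPluginPostgresConfig,
) -> Result<(), AccountsDbPluginError> {
let mut client = ParallelPostgresClient::new(config)?;
client.update_slot_status(42, Some(41), SlotStatus::Processed)?; // variant assumed
client.notify_end_of_startup()?; // blocks until every worker has flushed
client.join().expect("worker threads should exit cleanly");
Ok(())
}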
pub struct PostgresClientBuilder {}
impl PostgresClientBuilder {
pub fn build_parallel_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
ParallelPostgresClient::new(config)
}
pub fn build_simple_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
SimplePostgresClient::new(config)
}
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,18 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-core = { path = "../core", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-ledger = { path = "../ledger", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-poh = { path = "../poh", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.6.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.6" }
solana-streamer = { path = "../streamer", version = "=1.6.6" }
solana-perf = { path = "../perf", version = "=1.6.6" }
solana-ledger = { path = "../ledger", version = "=1.6.6" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-measure = { path = "../measure", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,37 +1,38 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
crossbeam_channel::unbounded,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_core::banking_stage::BankingStage,
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
cost_model::CostModel,
},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature},
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
thread::sleep,
time::{Duration, Instant},
},
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::{
banking_stage::{create_test_recorder, BankingStage},
cluster_info::ClusterInfo,
cluster_info::Node,
poh_recorder::PohRecorder,
poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_sdk::{
hash::Hash,
signature::Keypair,
signature::Signature,
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
fn check_txs(
@@ -77,7 +78,7 @@ fn make_accounts_txs(
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
if !same_payer {
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
}
@@ -168,7 +169,6 @@ fn main() {
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
@@ -189,7 +189,7 @@ fn main() {
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
@@ -199,7 +199,7 @@ fn main() {
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(tx);
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();
@@ -211,7 +211,7 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -219,21 +219,15 @@ fn main() {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
vote_receiver,
None,
replay_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
poh_recorder.lock().unwrap().set_bank(&bank);
@@ -361,10 +355,10 @@ fn main() {
if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() {
tx.message.recent_blockhash = bank.last_blockhash();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;
@@ -386,7 +380,6 @@ fn main() {
);
drop(verified_sender);
drop(tpu_vote_sender);
drop(vote_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.8.17"
version = "1.6.6"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,20 +11,20 @@ edition = "2018"
[dependencies]
bincode = "1.3.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
borsh = "0.8.1"
borsh-derive = "0.8.1"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-program = { path = "../sdk/program", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.6" }
solana-program = { path = "../sdk/program", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-banks-server = { path = "../banks-server", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-banks-server = { path = "../banks-server", version = "=1.6.6" }
[lib]
crate-type = ["lib"]

View File

@@ -5,38 +5,31 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
use borsh::BorshDeserialize;
use futures::{future::join_all, Future, FutureExt};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
solana_program::{
clock::{Clock, Slot},
fee_calculator::FeeCalculator,
hash::Hash,
program_pack::Pack,
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
},
solana_sdk::{
account::{from_account, Account},
commitment_config::CommitmentLevel,
signature::Signature,
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
},
tokio::{net::ToSocketAddrs, time::Duration},
tokio_serde::formats::Bincode,
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
rent::Rent, sysvar,
};
use solana_sdk::{
account::{from_account, Account},
commitment_config::CommitmentLevel,
signature::Signature,
transaction::{self, Transaction},
transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -70,7 +63,7 @@ impl BanksClient {
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
}
@@ -92,14 +85,6 @@ impl BanksClient {
self.inner.get_slot_with_context(ctx, commitment)
}
pub fn get_block_height_with_context(
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
}
pub fn process_transaction_with_commitment_and_context(
&mut self,
ctx: Context,
@@ -130,39 +115,24 @@ impl BanksClient {
self.send_transaction_with_context(context::current(), transaction)
}
/// Return the cluster clock
pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
self.get_account(sysvar::clock::id()).map(|result| {
let clock_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
})
})
}
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
/// will use the transaction's blockhash to look up these same fee parameters and
/// use them to calculate the transaction fee.
pub fn get_fees(
&mut self,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
}
/// Return the cluster Sysvar
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
})
}
/// Return the cluster rent
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
self.get_sysvar::<Rent>()
self.get_account(sysvar::rent::id()).map(|result| {
let rent_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
})
})
}
/// Return a recent, rooted blockhash from the server. The cluster will only accept
@@ -222,18 +192,12 @@ impl BanksClient {
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
}
/// Return the most recent rooted slot. All transactions at or below this slot
/// are said to be finalized. The cluster will not fork to a higher slot.
/// Return the most recent rooted slot height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher slot height.
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_slot_with_context(context::current(), CommitmentLevel::default())
}
/// Return the most recent rooted block height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher block height.
pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
}
/// Return the account at the given address at the slot corresponding to the given
/// commitment level. If the account is not found, None is returned.
pub fn get_account_with_commitment(
@@ -349,18 +313,16 @@ pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClie
#[cfg(test)]
mod tests {
use {
super::*,
solana_banks_server::banks_server::start_local_server,
solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
genesis_utils::create_genesis_config,
},
solana_sdk::{message::Message, signature::Signer, system_instruction},
std::sync::{Arc, RwLock},
tarpc::transport,
tokio::{runtime::Runtime, time::sleep},
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
genesis_utils::create_genesis_config,
};
use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::sleep};
#[test]
fn test_banks_client_new() {
@@ -388,9 +350,7 @@ mod tests {
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
let mut banks_client = start_client(client_transport).await?;
let recent_blockhash = banks_client.get_recent_blockhash().await?;
@@ -417,15 +377,13 @@ mod tests {
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(mint_pubkey));
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
let mut banks_client = start_client(client_transport).await?;
let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
let signature = transaction.signatures[0];
banks_client.send_transaction(transaction).await?;
@@ -433,8 +391,8 @@ mod tests {
let mut status = banks_client.get_transaction_status(signature).await?;
while status.is_none() {
let root_block_height = banks_client.get_root_block_height().await?;
if root_block_height > last_valid_block_height {
let root_slot = banks_client.get_root_slot().await?;
if root_slot > last_valid_slot {
break;
}
sleep(Duration::from_millis(100)).await;

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.8.17"
version = "1.6.6"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
tarpc = { version = "0.24.1", features = ["full"] }
[dev-dependencies]

View File

@@ -1,15 +1,13 @@
use {
serde::{Deserialize, Serialize},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
},
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -36,7 +34,6 @@ pub trait Banks {
async fn get_transaction_status_with_context(signature: Signature)
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
@@ -49,10 +46,8 @@ pub trait Banks {
#[cfg(test)]
mod tests {
use {
super::*,
tarpc::{client, transport},
};
use super::*;
use tarpc::{client, transport};
#[test]
fn test_banks_client_new() {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.8.17"
version = "1.6.6"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,10 +14,10 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-metrics = { path = "../metrics", version = "=1.6.6" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

View File

@@ -1,52 +1,48 @@
use {
crate::send_transaction_service::{SendTransactionService, TransactionInfo},
bincode::{deserialize, serialize},
futures::{
future,
prelude::stream::{self, StreamExt},
},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
feature_set::FeatureSet,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
},
std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
},
tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
},
tokio::time::sleep,
tokio_serde::formats::Bincode,
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
future,
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
};
use std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
};
use tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
};
use tokio::time::sleep;
use tokio_serde::formats::Bincode;
#[derive(Clone)]
struct BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
}
impl BanksServer {
@@ -58,13 +54,11 @@ impl BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
Self {
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
}
}
@@ -87,7 +81,6 @@ impl BanksServer {
fn new_loopback(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
@@ -102,12 +95,7 @@ impl BanksServer {
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
.unwrap();
Self::new(
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
)
Self::new(bank_forks, block_commitment_cache, transaction_sender)
}
fn slot(&self, commitment: CommitmentLevel) -> Slot {
@@ -125,16 +113,16 @@ impl BanksServer {
self,
signature: &Signature,
blockhash: &Hash,
last_valid_block_height: u64,
last_valid_slot: Slot,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self
.bank(commitment)
.get_signature_status_with_blockhash(signature, blockhash);
while status.is_none() {
sleep(self.poll_signature_status_sleep_duration).await;
sleep(Duration::from_millis(200)).await;
let bank = self.bank(commitment);
if bank.block_height() > last_valid_block_height {
if bank.slot() > last_valid_slot {
break;
}
status = bank.get_signature_status_with_blockhash(signature, blockhash);
@@ -143,13 +131,10 @@ impl BanksServer {
}
}
fn verify_transaction(
transaction: &Transaction,
feature_set: &Arc<FeatureSet>,
) -> transaction::Result<()> {
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
if let Err(err) = transaction.verify() {
Err(err)
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
} else if let Err(err) = transaction.verify_precompiles() {
Err(err)
} else {
Ok(())
@@ -160,19 +145,16 @@ fn verify_transaction(
impl Banks for BanksServer {
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_block_height = self
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_block_height(blockhash)
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
}
@@ -180,13 +162,11 @@ impl Banks for BanksServer {
self,
_: Context,
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, u64) {
) -> (FeeCalculator, Hash, Slot) {
let bank = self.bank(commitment);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let last_valid_block_height = bank
.get_blockhash_last_valid_block_height(&blockhash)
.unwrap();
(fee_calculator, blockhash, last_valid_block_height)
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
(fee_calculator, blockhash, last_valid_slot)
}
async fn get_transaction_status_with_context(
@@ -229,33 +209,29 @@ impl Banks for BanksServer {
self.slot(commitment)
}
async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
self.bank(commitment).block_height()
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
if let Err(err) = verify_transaction(&transaction) {
return Some(Err(err));
}
let blockhash = &transaction.message.recent_blockhash;
let last_valid_block_height = self
.bank(commitment)
.get_blockhash_last_valid_block_height(blockhash)
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
.await
}
@@ -273,13 +249,8 @@ impl Banks for BanksServer {
pub async fn start_local_server(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
let banks_server = BanksServer::new_loopback(
bank_forks,
block_commitment_cache,
poll_signature_status_sleep_duration,
);
let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
let (client_transport, server_transport) = transport::channel::unbounded();
let server = server::new(server::Config::default())
.incoming(stream::once(future::ready(server_transport)))
@@ -314,12 +285,8 @@ pub async fn start_tcp_server(
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
let server = BanksServer::new(
bank_forks.clone(),
block_commitment_cache.clone(),
sender,
Duration::from_millis(200),
);
let server =
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
chan.respond_with(server.serve()).execute()
})
// Max 10 channels.

View File

@@ -1,23 +1,21 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
use {
crate::banks_server::start_tcp_server,
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
tokio::{
runtime::Runtime,
time::{self, Duration},
},
tokio_stream::wrappers::IntervalStream,
thread::{self, Builder, JoinHandle},
};
use tokio::{
runtime::Runtime,
time::{self, Duration},
};
use tokio_stream::wrappers::IntervalStream;
pub struct RpcBanksService {
thread_hdl: JoinHandle<()>,
@@ -103,7 +101,8 @@ impl RpcBanksService {
#[cfg(test)]
mod tests {
use {super::*, solana_runtime::bank::Bank};
use super::*;
use solana_runtime::bank::Bank;
#[test]
fn test_rpc_banks_server_exit() {

View File

@@ -1,19 +1,17 @@
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
use {
log::*,
solana_metrics::{datapoint_warn, inc_new_counter_info},
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::signature::Signature,
std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
/// Maximum size of the transaction queue
@@ -26,19 +24,15 @@ pub struct SendTransactionService {
pub struct TransactionInfo {
pub signature: Signature,
pub wire_transaction: Vec<u8>,
pub last_valid_block_height: u64,
pub last_valid_slot: Slot,
}
impl TransactionInfo {
pub fn new(
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_block_height: u64,
) -> Self {
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
Self {
signature,
wire_transaction,
last_valid_block_height,
last_valid_slot,
}
}
}
@@ -130,7 +124,7 @@ impl SendTransactionService {
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_block_height < root_bank.block_height() {
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
@@ -144,8 +138,8 @@ impl SendTransactionService {
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
send_socket,
tpu_address,
&send_socket,
&tpu_address,
&transaction_info.wire_transaction,
);
true
@@ -185,14 +179,12 @@ impl SendTransactionService {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
},
std::sync::mpsc::channel,
use super::*;
use solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
};
use std::sync::mpsc::channel;
#[test]
fn service_exit() {

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,23 +18,21 @@ rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.6" }
solana-core = { path = "../core", version = "=1.6.6" }
solana-genesis = { path = "../genesis", version = "=1.6.6" }
solana-client = { path = "../client", version = "=1.6.6" }
solana-faucet = { path = "../faucet", version = "=1.6.6" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.6.6" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-metrics = { path = "../metrics", version = "=1.6.6" }
solana-net-utils = { path = "../net-utils", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,45 +1,43 @@
#![allow(clippy::useless_attribute)]
#![allow(clippy::integer_arithmetic)]
use {
crate::order_book::*,
itertools::izip,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_exchange_program::{exchange_instruction, exchange_state::*, id},
solana_faucet::faucet::request_airdrop_transaction,
solana_genesis::Base64Account,
solana_metrics::datapoint_info,
solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_program,
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
},
std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
use crate::order_book::*;
use itertools::izip;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_genesis::Base64Account;
use solana_metrics::datapoint_info;
use solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
{system_instruction, system_program},
};
use std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
};
// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
@@ -453,13 +451,13 @@ fn swapper<T>(
let to_swap_txs: Vec<_> = to_swap
.par_iter()
.map(|(signer, swap, profit)| {
let s: &Keypair = signer;
let s: &Keypair = &signer;
let owner = &signer.pubkey();
let instruction = exchange_instruction::swap_request(
owner,
&swap.0.pubkey,
&swap.1.pubkey,
profit,
&profit,
);
let message = Message::new(&[instruction], Some(&s.pubkey()));
Transaction::new(&[s], message, blockhash)
@@ -602,7 +600,7 @@ fn trader<T>(
src,
),
];
let message = Message::new(&instructions, Some(owner_pubkey));
let message = Message::new(&instructions, Some(&owner_pubkey));
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
})
.collect();
@@ -741,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), m);
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
let message = Message::new(&instructions, Some(&k.pubkey()));
(k.clone(), Transaction::new_unsigned(message))
})
@@ -779,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut waits = 0;
loop {
sleep(Duration::from_millis(200));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
if to_fund_txs.is_empty() {
break;
}
@@ -838,7 +836,7 @@ pub fn create_token_accounts<T: Client>(
);
let request_ix =
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
(
(from_keypair, new_keypair),
Transaction::new_unsigned(message),
@@ -874,7 +872,7 @@ pub fn create_token_accounts<T: Client>(
let mut waits = 0;
while !to_create_txs.is_empty() {
sleep(Duration::from_millis(200));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
if to_create_txs.is_empty() {
break;
}
@@ -960,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot
fn generate_keypairs(num: u64) -> Vec<Keypair> {
let mut seed = [0_u8; 32];
seed.copy_from_slice(Keypair::new().pubkey().as_ref());
seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
let mut rnd = GenKeys::new(seed);
rnd.gen_n_keypairs(num)
}
@@ -991,7 +989,7 @@ pub fn airdrop_lamports<T: Client>(
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.expect("Failed to get blockhash");
match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();

View File

@@ -1,10 +1,10 @@
use {
clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::FAUCET_PORT,
solana_sdk::signature::{read_keypair_file, Keypair},
std::{net::SocketAddr, process::exit, time::Duration},
};
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;
pub struct Config {
pub entrypoint_addr: SocketAddr,

View File

@@ -3,13 +3,10 @@ pub mod bench;
mod cli;
pub mod order_book;
use {
crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config},
log::*,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_sdk::signature::Signer,
solana_streamer::socket::SocketAddrSpace,
};
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;
fn main() {
solana_logger::setup();
@@ -58,12 +55,11 @@ fn main() {
);
} else {
info!("Connecting to the cluster");
let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
info!("{} nodes found", num_clients);
if num_clients < num_nodes {

View File

@@ -1,13 +1,11 @@
use {
itertools::{
EitherOrBoth::{Both, Left, Right},
Itertools,
},
log::*,
solana_exchange_program::exchange_state::*,
solana_sdk::pubkey::Pubkey,
std::{cmp::Ordering, collections::BinaryHeap, error, fmt},
};
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {

View File

@@ -1,24 +1,23 @@
use {
log::*,
solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config},
solana_core::validator::ValidatorConfig,
solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
},
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{process::exit, sync::mpsc::channel, time::Duration},
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::{
gossip_service::{discover_cluster, get_multi_client},
validator::ValidatorConfig,
};
use solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
};
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
};
use std::{process::exit, sync::mpsc::channel, time::Duration};
#[test]
#[ignore]
@@ -46,19 +45,13 @@ fn test_exchange_local_cluster() {
} = config;
let accounts_in_groups = batch_size * account_groups;
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
});
let faucet_keypair = Keypair::new();
cluster.transfer(
@@ -75,17 +68,13 @@ fn test_exchange_local_cluster() {
.expect("faucet_addr");
info!("Connecting to the cluster");
let nodes = discover_cluster(
&cluster.entry_point_info.gossip,
NUM_NODES,
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let nodes =
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
info!("clients: {}", num_clients);
assert!(num_clients >= NUM_NODES);

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.6" }
solana-streamer = { path = "../streamer", version = "=1.6.6" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-net-utils = { path = "../net-utils", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,38 +1,32 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc,
},
thread::{sleep, spawn, JoinHandle, Result},
time::{Duration, SystemTime},
},
};
use clap::{crate_description, crate_name, App, Arg};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle, Result};
use std::time::Duration;
use std::time::SystemTime;
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
w.meta.set_addr(&addr);
}
let packet_batch = Arc::new(packet_batch);
let msgs = Arc::new(msgs);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &packet_batch.packets {
for p in &msgs.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +36,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
}
})
}
@@ -81,7 +75,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketBatchRecycler::default();
let recycler = PacketsRecycler::new_without_limit("bench-streamer-recycler-shrink-stats");
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -98,7 +92,6 @@ fn main() -> Result<()> {
recycler.clone(),
"bench-streamer-test",
1,
true,
));
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,24 +15,22 @@ log = "0.4.11"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.6" }
solana-core = { path = "../core", version = "=1.6.6" }
solana-genesis = { path = "../genesis", version = "=1.6.6" }
solana-client = { path = "../client", version = "=1.6.6" }
solana-faucet = { path = "../faucet", version = "=1.6.6" }
solana-logger = { path = "../logger", version = "=1.6.6" }
solana-metrics = { path = "../metrics", version = "=1.6.6" }
solana-measure = { path = "../measure", version = "=1.6.6" }
solana-net-utils = { path = "../net-utils", version = "=1.6.6" }
solana-runtime = { path = "../runtime", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
solana-version = { path = "../version", version = "=1.6.6" }
[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,36 +1,34 @@
use {
crate::cli::Config,
log::*,
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::request_airdrop_transaction,
solana_measure::measure::Measure,
solana_metrics::{self, datapoint_info},
solana_sdk::{
client::Client,
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
},
std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
use crate::cli::Config;
use log::*;
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
client::Client,
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};
// The point at which transactions become "too old", in seconds.
@@ -546,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(client);
self.send(&client);
// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));
self.verify(client, to_lamports);
self.verify(&client, to_lamports);
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
@@ -566,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
let message = Message::new(&instructions, Some(&k.pubkey()));
(*k, Transaction::new_unsigned(message))
})
@@ -619,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
return None;
}
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
@@ -735,7 +733,7 @@ pub fn airdrop_lamports<T: Client>(
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
@@ -926,14 +924,12 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
#[cfg(test)]
mod tests {
use {
super::*,
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
client::SyncClient, fee_calculator::FeeRateGovernor,
genesis_config::create_genesis_config,
},
};
use super::*;
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::genesis_config::create_genesis_config;
#[test]
fn test_bench_tps_bank_client() {

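The funding path above follows a sign → send → sleep → verify → retry cycle: because the transfers are all-or-nothing, re-sending an already-landed transaction is harmless. A minimal sketch of that shape, with a hypothetical Client trait standing in for the bench-tps client (none of these names are the real API):

use std::{thread::sleep, time::Duration};

trait Client {
    fn send(&self, tx_id: u64);
    fn confirmed(&self, tx_id: u64) -> bool;
}

fn fund_with_retries<C: Client>(client: &C, mut pending: Vec<u64>, max_rounds: usize) {
    for _ in 0..max_rounds {
        if pending.is_empty() {
            break;
        }
        for &tx in &pending {
            client.send(tx); // duplicates of landed transactions are rejected, not re-executed
        }
        sleep(Duration::from_secs(1)); // give the cluster a few slots to process
        pending.retain(|&tx| !client.confirmed(tx)); // keep and retry only the stragglers
    }
}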

@@ -1,13 +1,11 @@
use {
clap::{crate_description, crate_name, App, Arg, ArgMatches},
solana_faucet::faucet::FAUCET_PORT,
solana_sdk::{
fee_calculator::FeeRateGovernor,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
},
std::{net::SocketAddr, process::exit, time::Duration},
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::{
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
};
use std::{net::SocketAddr, process::exit, time::Duration};
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;


@@ -1,20 +1,13 @@
#![allow(clippy::integer_arithmetic)]
use {
log::*,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
cli,
},
solana_genesis::Base64Account,
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
solana_sdk::{
fee_calculator::FeeRateGovernor,
signature::{Keypair, Signer},
system_program,
},
solana_streamer::socket::SocketAddrSpace,
std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
};
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
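Worked out, that constant is 100,000 signatures/second × 604,800 seconds (one week) = 60,480,000,000 signatures, comfortably within u64 range.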
@@ -46,7 +39,7 @@ fn main() {
let keypair_count = *tx_count * keypair_multiplier;
if *write_to_client_file {
info!("Generating {} keypairs", keypair_count);
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
let num_accounts = keypairs.len() as u64;
let max_fee =
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@@ -75,14 +68,13 @@ fn main() {
}
info!("Connecting to the cluster");
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let client = if *multi_client {
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
@@ -96,7 +88,7 @@ fn main() {
let mut target_client = None;
for node in nodes {
if node.id == *target_node {
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
target_client = Some(Arc::new(get_client(&[node])));
break;
}
}
@@ -105,7 +97,7 @@ fn main() {
exit(1);
})
} else {
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
Arc::new(get_client(&nodes))
};
let keypairs = if *read_from_client_file {
@@ -143,7 +135,7 @@ fn main() {
generate_and_fund_keypairs(
client.clone(),
Some(*faucet_addr),
id,
&id,
keypair_count,
*num_lamports_per_account,
)


@@ -1,24 +1,20 @@
#![allow(clippy::integer_arithmetic)]
use {
serial_test::serial,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
cli::Config,
},
solana_client::thin_client::create_client,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{mpsc::channel, Arc},
time::Duration,
},
use serial_test::serial;
use solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
cli::Config,
};
use solana_client::thin_client::create_client;
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
sync::{mpsc::channel, Arc},
time::Duration,
};
fn test_bench_tps_local_cluster(config: Config) {
@@ -26,19 +22,13 @@ fn test_bench_tps_local_cluster(config: Config) {
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors,
..ClusterConfig::default()
});
let faucet_keypair = Keypair::new();
cluster.transfer(


@@ -1,32 +0,0 @@
[package]
name = "solana-bloom"
version = "1.8.17"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2018"
[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
log = "0.4.14"
[lib]
crate-type = ["lib"]
name = "solana_bloom"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"


@@ -1 +0,0 @@
../frozen-abi/build.rs


@@ -1,5 +0,0 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;
#[macro_use]
extern crate solana_frozen_abi_macro;
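For orientation, the data structure this crate provides can be sketched in a few lines of std-only Rust. This is illustrative only: the real solana_bloom Bloom packs its bits into a `bv` bit-vector and hashes with per-filter random keys, and is generic over the hashed type.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Bloom {
    bits: Vec<bool>, // the real crate packs these into a `bv` bit-vector
    keys: Vec<u64>,  // one hash seed per probe
}

impl Bloom {
    fn new(num_bits: usize, keys: Vec<u64>) -> Self {
        Self { bits: vec![false; num_bits], keys }
    }
    fn pos<T: Hash>(&self, item: &T, key: u64) -> usize {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        item.hash(&mut hasher);
        (hasher.finish() as usize) % self.bits.len()
    }
    fn add<T: Hash>(&mut self, item: &T) {
        for i in 0..self.keys.len() {
            let p = self.pos(item, self.keys[i]);
            self.bits[p] = true;
        }
    }
    // false positives are possible; false negatives are not
    fn contains<T: Hash>(&self, item: &T) -> bool {
        self.keys.iter().all(|&key| self.bits[self.pos(item, key)])
    }
}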


@@ -102,8 +102,6 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}
@@ -139,7 +137,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
wait_step
else
annotate --style info --context test-coverage \
@@ -150,33 +148,6 @@ all_test_steps() {
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
wait_step
# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=solana"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi
# Perf test suite
if affects \
.rs$ \
@@ -194,7 +165,7 @@ EOF
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
@@ -223,8 +194,6 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
@@ -247,15 +216,7 @@ EOF
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
45
}
pull_or_push_steps() {


@@ -3,24 +3,16 @@
# Pull requests do not run these steps.
steps:
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- wait
- command: "sdk/docker-solana/build.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish docker"
- command: "ci/publish-crate.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build-aarch64-apple-darwin"
timeout_in_minutes: 60
name: "publish tarball (aarch64-apple-darwin)"


@@ -28,29 +28,16 @@ cargo_audit_ignores=(
# Blocked on multiple crates updating `time` to >= 0.2.23
--ignore RUSTSEC-2020-0071
# difference is unmaintained
#
# Blocked on predicates v1.0.6 removing its dependency on `difference`
--ignore RUSTSEC-2020-0095
# generic-array: arr! macro erases lifetimes
#
# Blocked on libsecp256k1 releasing with upgraded dependencies
# https://github.com/paritytech/libsecp256k1/issues/66
--ignore RUSTSEC-2020-0146
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0078
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0079
# chrono: Potential segfault in `localtime_r` invocations
#
# Blocked due to no safe upgrade
# https://github.com/chronotope/chrono/issues/499
--ignore RUSTSEC-2020-0159
)
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"


@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.52.1
FROM solanalabs/rust:1.50.0
ARG date
RUN set -x \


@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.52.1
FROM rust:1.50.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0


@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
fi
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
export CI_PULL_REQUEST=
fi
case "$(uname -s)" in
Linux)
export CI_OS_NAME=linux
;;
Darwin)
export CI_OS_NAME=osx
;;
*)
;;
esac
export CI_OS_NAME=linux
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
@@ -88,13 +74,10 @@ else
export CI_BUILD_ID=
export CI_COMMIT=
export CI_JOB_ID=
export CI_OS_NAME=
export CI_PULL_REQUEST=
export CI_REPO_SLUG=
export CI_TAG=
# Don't override ci/run-local.sh
if [[ -z $CI_LOCAL_RUN ]]; then
export CI_OS_NAME=
fi
fi
cat <<EOF


@@ -127,7 +127,7 @@ startNode() {
waitForNodeToInit() {
declare initCompleteFile=$1
while [[ ! -r $initCompleteFile ]]; do
if [[ $SECONDS -ge 300 ]]; then
if [[ $SECONDS -ge 240 ]]; then
echo "^^^ +++"
echo "Error: $initCompleteFile not found in $SECONDS seconds"
exit 1


@@ -12,14 +12,10 @@ import json
import subprocess
import sys;
real_file = os.path.realpath(__file__)
ci_path = os.path.dirname(real_file)
src_root = os.path.dirname(ci_path)
def load_metadata():
cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
return json.loads(subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
'cargo metadata --no-deps --format-version=1',
shell=True, stdout=subprocess.PIPE).communicate()[0])
def get_packages():
metadata = load_metadata()

ci/publish-bpf-sdk.sh Executable file

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"
if [[ -n "$CI_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
source ci/upload-ci-artifact.sh
echo --- AWS S3 Store
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Skipped
else
upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
fi
exit 0


@@ -39,11 +39,7 @@ fi
case "$CI_OS_NAME" in
osx)
_cputype="$(uname -m)"
if [[ $_cputype = arm64 ]]; then
_cputype=aarch64
fi
TARGET=${_cputype}-apple-darwin
TARGET=x86_64-apple-darwin
;;
linux)
TARGET=x86_64-unknown-linux-gnu
@@ -150,7 +146,7 @@ elif [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install


@@ -1,57 +0,0 @@
#!/usr/bin/env bash
cd "$(dirname "$0")/.."
export CI_LOCAL_RUN=true
set -e
case $(uname -o) in
*/Linux)
export CI_OS_NAME=linux
;;
*)
echo "local CI runs are only supported on Linux" 1>&2
exit 1
;;
esac
steps=()
steps+=(test-sanity)
steps+=(shellcheck)
steps+=(test-checks)
steps+=(test-coverage)
steps+=(test-stable)
steps+=(test-stable-bpf)
steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)
step_index=0
if [[ -n "$1" ]]; then
start_step="$1"
while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
if [[ "$step" = "$start_step" ]]; then
break
fi
step_index=$((step_index + 1))
done
if [[ $step_index -eq ${#steps[@]} ]]; then
echo "unexpected start step: \"$start_step\"" 1>&2
exit 1
else
echo "** starting at step: \"$start_step\" **"
echo
fi
fi
while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
cmd="ci/${step}.sh"
$cmd
step_index=$((step_index + 1))
done


@@ -7,7 +7,7 @@ source multinode-demo/common.sh
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
timeout 120 ./run.sh &
pid=$!
attempts=20
@@ -16,8 +16,6 @@ while [[ ! -f config/run/init-completed ]]; do
if ((--attempts == 0)); then
echo "Error: validator failed to boot"
exit 1
else
echo "Checking init"
fi
done
@@ -26,7 +24,6 @@ snapshot_slot=1
# wait a bit longer than snapshot_slot
while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do
sleep 1
echo "Checking slot"
done
$solana_validator --ledger config/ledger exit --force || true


@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.52.1
stable_version=1.50.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-05-18
nightly_version=2021-02-18
fi


@@ -1,24 +0,0 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#
here="$(dirname "$0")"
SBF_TOOLS_VERSION=unknown
cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
if [[ ${version} != '' ]]; then
SBF_TOOLS_VERSION="${version}"
else
echo '--- unable to parse SBF_TOOLS_VERSION'
fi
else
echo "--- '${cargo_build_bpf_main}' not present"
fi
echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"


@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=1000000
LimitNOFILE=700000
[Install]
WantedBy=multi-user.target


@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
ensure_env || exit 1
# Allow more files to be opened by a user
echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf
echo "* - nofile 700000" > /etc/security/limits.d/90-solana-nofiles.conf


@@ -27,7 +27,7 @@ BENCH_ARTIFACT=current_bench_results.log
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH
# Clear the C dependency files, if dependency moves these files are not regenerated
# Clear the C dependency files, if dependeny moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
@@ -45,14 +45,6 @@ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run gossip benches
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run poh benches
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run core benches
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"


@@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check
# Disallow uncommitted Cargo.lock changes
(
_ scripts/cargo-for-all-lock-files.sh tree >/dev/null
_ scripts/cargo-for-all-lock-files.sh tree
set +e
if ! _ git diff --exit-code; then
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
@@ -35,10 +35,8 @@ echo --- build environment
"$cargo" stable clippy --version --verbose
"$cargo" nightly clippy --version --verbose
# audit is done only with "$cargo stable"
# audit is done only with stable
"$cargo" stable audit --version
grcov --version
)
export RUST_BACKTRACE=1
@@ -67,8 +65,7 @@ _ ci/order-crates-for-publishing.py
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
_ "$cargo" stable fmt --all -- --check


@@ -1,9 +0,0 @@
#!/usr/bin/env bash
cd "$(dirname "$0")/.."
export CI_LOCAL_RUN=true
set -ex
scripts/build-downstream-projects.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -21,6 +21,10 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh
# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
@@ -31,25 +35,17 @@ case $testName in
test-stable)
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-bpf)
# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# rustfilt required for dumping BPF assembly listings
"$cargo" install rustfilt
test-stable-perf)
# solana-keygen required when building C programs
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH
cargo_build_bpf="$(realpath ./cargo-build-bpf)"
# BPF solana-sdk legacy compile test
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml
./cargo-build-bpf --manifest-path sdk/Cargo.toml
# BPF Program unit tests
"$cargo" test --manifest-path programs/bpf/Cargo.toml
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
"$cargo" stable test --manifest-path programs/bpf/Cargo.toml
cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
# BPF program system tests
_ make -C programs/bpf/c tests
@@ -57,32 +53,11 @@ test-stable-bpf)
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
# Dump BPF program assembly listings
for bpf_test in programs/bpf/rust/*; do
if pushd "$bpf_test"; then
"$cargo_build_bpf" --dump
popd
fi
done
# BPF program instruction count assertion
bpf_target_path=programs/bpf/target
_ "$cargo" stable test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt
bpf_dump_archive="bpf-dumps.tar.bz2"
rm -f "$bpf_dump_archive"
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
exit 0
;;
test-stable-perf)
if [[ $(uname) = Linux ]]; then
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
# lengthy and unexpected delay the first time CUDA is involved when the driver
# is not yet loaded.
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
rm -rf target/perf-libs
./fetch-perf-libs.sh
@@ -100,17 +75,7 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
*)


@@ -19,24 +19,13 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"
)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
}


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.8.17"
version = "1.6.6"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,18 +12,14 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.6" }
solana-sdk = { path = "../sdk", version = "=1.6.6" }
thiserror = "1.0.21"
tiny-bip39 = "0.8.1"
tiny-bip39 = "0.8.0"
uriparse = "0.6.3"
url = "2.1.0"
chrono = "0.4"
[dev-dependencies]
tempfile = "3.1.0"
[lib]
name = "solana_clap_utils"


@@ -1,7 +1,5 @@
use {
crate::{input_validators, ArgConstant},
clap::Arg,
};
use crate::{input_validators, ArgConstant};
use clap::Arg;
pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant {
name: "fee_payer",


@@ -1,24 +1,19 @@
use {
crate::keypair::{
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
},
chrono::DateTime,
clap::ArgMatches,
solana_remote_wallet::remote_wallet::RemoteWalletManager,
solana_sdk::{
clock::UnixTimestamp,
commitment_config::CommitmentConfig,
genesis_config::ClusterType,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
},
std::{str::FromStr, sync::Arc},
use crate::keypair::{
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
};
// Sentinel value used to indicate to write to screen instead of file
pub const STDOUT_OUTFILE_TOKEN: &str = "-";
use chrono::DateTime;
use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
clock::UnixTimestamp,
commitment_config::CommitmentConfig,
genesis_config::ClusterType,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
};
use std::{str::FromStr, sync::Arc};
// Return parsed values from matches at `name`
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
@@ -60,7 +55,7 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
if let Some(value) = matches.value_of(name) {
if value == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
keypair_from_seed_phrase(name, skip_validation, true).ok()
} else {
read_keypair_file(value).ok()
}
@@ -75,7 +70,7 @@ pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>>
.filter_map(|value| {
if value == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(name, skip_validation, true, None, true).ok()
keypair_from_seed_phrase(name, skip_validation, true).ok()
} else {
read_keypair_file(value).ok()
}
@@ -196,12 +191,10 @@ pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentC
#[cfg(test)]
mod tests {
use {
super::*,
clap::{App, Arg},
solana_sdk::signature::write_keypair_file,
std::fs,
};
use super::*;
use clap::{App, Arg};
use solana_sdk::signature::write_keypair_file;
use std::fs;
fn app<'ab, 'v>() -> App<'ab, 'v> {
App::new("test")


@@ -1,14 +1,13 @@
use {
crate::keypair::{parse_signer_source, SignerSourceKind, ASK_KEYWORD},
chrono::DateTime,
solana_sdk::{
clock::{Epoch, Slot},
hash::Hash,
pubkey::{Pubkey, MAX_SEED_LEN},
signature::{read_keypair_file, Signature},
},
std::{fmt::Display, str::FromStr},
use crate::keypair::{parse_signer_source, SignerSource, ASK_KEYWORD};
use chrono::DateTime;
use solana_sdk::{
clock::{Epoch, Slot},
hash::Hash,
pubkey::{Pubkey, MAX_SEED_LEN},
signature::{read_keypair_file, Signature},
};
use std::fmt::Display;
use std::str::FromStr;
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
where
@@ -95,26 +94,6 @@ where
.map_err(|err| format!("{}", err))
}
// Return an error if a `SignerSourceKind::Prompt` cannot be parsed
pub fn is_prompt_signer_source<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
if string.as_ref() == ASK_KEYWORD {
return Ok(());
}
match parse_signer_source(string.as_ref())
.map_err(|err| format!("{}", err))?
.kind
{
SignerSourceKind::Prompt => Ok(()),
_ => Err(format!(
"Unable to parse input as `prompt:` URI scheme or `ASK` keyword: {}",
string
)),
}
}
// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
where
@@ -129,11 +108,8 @@ pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
match parse_signer_source(string.as_ref())
.map_err(|err| format!("{}", err))?
.kind
{
SignerSourceKind::Filepath(path) => is_keypair(path),
match parse_signer_source(string.as_ref()) {
SignerSource::Filepath(path) => is_keypair(path),
_ => Ok(()),
}
}
@@ -214,8 +190,8 @@ where
pub fn normalize_to_url_if_moniker<T: AsRef<str>>(url_or_moniker: T) -> String {
match url_or_moniker.as_ref() {
"m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
"t" | "testnet" => "https://api.testnet.solana.com",
"d" | "devnet" => "https://api.devnet.solana.com",
"t" | "testnet" => "https://testnet.solana.com",
"d" | "devnet" => "https://devnet.solana.com",
"l" | "localhost" => "http://localhost:8899",
url => url,
}
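Usage-wise, the moniker helper is a pass-through for anything it does not recognize. A quick check against the values on the v1.6.6 side of this hunk ("http://my.node:8899" is a made-up example input):

#[test]
fn moniker_examples() {
    assert_eq!(normalize_to_url_if_moniker("m"), "https://api.mainnet-beta.solana.com");
    assert_eq!(normalize_to_url_if_moniker("t"), "https://testnet.solana.com");
    // unknown strings pass through untouched
    assert_eq!(normalize_to_url_if_moniker("http://my.node:8899"), "http://my.node:8899");
}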
@@ -353,27 +329,6 @@ where
}
}
pub fn is_niceness_adjustment_valid<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let adjustment = value.as_ref().parse::<i8>().map_err(|err| {
format!(
"error parsing niceness adjustment value '{}': {}",
value, err
)
})?;
if solana_perf::thread::is_renice_allowed(adjustment) {
Ok(())
} else {
Err(String::from(
"niceness adjustment supported only on Linux; negative adjustment \
(priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
for details)",
))
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -390,11 +345,4 @@ mod tests {
assert!(is_derivation("a/b").is_err());
assert!(is_derivation("0/4294967296").is_err());
}
#[test]
fn test_is_niceness_adjustment_valid() {
assert_eq!(is_niceness_adjustment_valid("0"), Ok(()));
assert!(is_niceness_adjustment_valid("128").is_err());
assert!(is_niceness_adjustment_valid("-129").is_err());
}
}

File diff suppressed because it is too large.


@@ -1,4 +1,5 @@
use {crate::ArgConstant, clap::Arg};
use crate::ArgConstant;
use clap::Arg;
pub const MEMO_ARG: ArgConstant<'static> = ArgConstant {
name: "memo",


@@ -1,7 +1,5 @@
use {
crate::{input_validators::*, offline::BLOCKHASH_ARG, ArgConstant},
clap::{App, Arg},
};
use crate::{input_validators::*, offline::BLOCKHASH_ARG, ArgConstant};
use clap::{App, Arg};
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
name: "nonce",


@@ -1,7 +1,5 @@
use {
crate::{input_validators::*, ArgConstant},
clap::{App, Arg},
};
use crate::{input_validators::*, ArgConstant};
use clap::{App, Arg};
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.8.17"
version = "1.6.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"


@@ -1,9 +1,7 @@
// Wallet settings that can be configured for long-term use
use {
serde_derive::{Deserialize, Serialize},
std::{collections::HashMap, io, path::Path},
url::Url,
};
use serde_derive::{Deserialize, Serialize};
use std::{collections::HashMap, io, path::Path};
use url::Url;
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
@@ -109,24 +107,24 @@ mod test {
#[test]
fn compute_websocket_url() {
assert_eq!(
Config::compute_websocket_url("http://api.devnet.solana.com"),
"ws://api.devnet.solana.com/".to_string()
Config::compute_websocket_url(&"http://devnet.solana.com"),
"ws://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url("https://api.devnet.solana.com"),
"wss://api.devnet.solana.com/".to_string()
Config::compute_websocket_url(&"https://devnet.solana.com"),
"wss://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url("http://example.com:8899"),
Config::compute_websocket_url(&"http://example.com:8899"),
"ws://example.com:8900/".to_string()
);
assert_eq!(
Config::compute_websocket_url("https://example.com:1234"),
Config::compute_websocket_url(&"https://example.com:1234"),
"wss://example.com:1235/".to_string()
);
assert_eq!(Config::compute_websocket_url("garbage"), String::new());
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
}
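These tests pin down the derivation rule: swap http→ws and https→wss, bump any explicit port by one, and map unparseable input to an empty string. A sketch consistent with the tests, using the `url` crate this package already depends on (not the actual implementation):

fn compute_websocket_url(rpc_url: &str) -> String {
    let mut url = match url::Url::parse(rpc_url) {
        Ok(url) => url,
        Err(_) => return String::new(), // e.g. "garbage"
    };
    let ws_scheme = if url.scheme() == "https" { "wss" } else { "ws" };
    if url.set_scheme(ws_scheme).is_err() {
        return String::new();
    }
    if let Some(port) = url.port() {
        let _ = url.set_port(Some(port + 1)); // 8899 -> 8900, 1234 -> 1235
    }
    url.to_string()
}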

Some files were not shown because too many files have changed in this diff.