Compare commits


111 Commits
v1.8 ... v1.5.2

Author SHA1 Message Date
mergify[bot]
4b5a05bf38 limits number of crds values associated with a pubkey (bp #14467) (#14490)
* limits number of crds values associated with a pubkey (#14467)

(cherry picked from commit 766195dded)

* updates smallvec

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-01-08 21:52:40 +00:00
mergify[bot]
7dd7141307 Suppress cargo audit failure for difference crate (bp #14488) (#14493)
* Suppress cargo audit failure for `difference` crate; there's no newer crate to upgrade to yet

(cherry picked from commit 3eaa826ad9)

* Bump smallvec version

(cherry picked from commit 21a0a83543)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-01-08 21:52:28 +00:00
mergify[bot]
e5175c843d Add buffer authority to upgradeable loader (#14482) (#14485)
(cherry picked from commit 58487c6360)

Co-authored-by: Jack May <jack@solana.com>
2021-01-08 18:54:11 +00:00
mergify[bot]
d5ff64b0d7 docs: Validator tuning improvements (bp #14478) (#14480)
* docs: wrap lines

(cherry picked from commit 140642ea21)

* docs: Prefer `dd` to `fallocate` when creating swap file

(cherry picked from commit c035f2a745)

* docs: Add RUST_LOG explainer

(cherry picked from commit 30038a8849)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-01-07 19:41:45 +00:00
mergify[bot]
0fbdc7e152 Enable program upgrades via CPI (#14449) (#14469)
(cherry picked from commit 5eacc5d08d)

Co-authored-by: Jack May <jack@solana.com>
2021-01-06 23:45:10 +00:00
Tyera Eulberg
49aca9ecd8 Add fixed tick rate adjustment (#14447) (#14464)
Co-authored-by: sakridge <sakridge@gmail.com>
2021-01-06 21:44:06 +00:00
mergify[bot]
fcc147b4f2 Gate cpi program account passing (#14443) (#14446)
(cherry picked from commit a8b5a32b50)

Co-authored-by: Jack May <jack@solana.com>
2021-01-06 19:20:49 +00:00
mergify[bot]
c455d1b1c5 Enable program-id account index for supply calculations (#14444) (#14456)
* Enable program-id account index for supply calculations

* Fixup comments

(cherry picked from commit ce1766d798)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-01-06 04:04:44 +00:00
mergify[bot]
e9b29fc697 Bump serum-dex pegged commit (#14448) (#14454)
(cherry picked from commit d2b0fd973f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-01-05 19:17:03 -07:00
Ryo Onodera
fdea6fad26 Save 7G mem on mainnet fixing AccIndex overalloc. (#14435)
(cherry picked from commit c9df6134fa)
2021-01-05 17:55:44 -08:00
mergify[bot]
a4bc31341a Lower recycle store count (#14429) (#14442)
Too many stores can cause swap usage, which
is detrimental to account store times.

(cherry picked from commit 53d65009a0)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-01-05 21:39:31 +00:00
mergify[bot]
4af797c0a2 Introduce rpc url monikers for cli (#14409) (#14433)
* Introduce rpc url monikers for cli

* Use https:// and support initials as well

(cherry picked from commit 54a5876c48)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-01-05 12:16:11 +00:00
mergify[bot]
a1e06df4a8 Add validator --account-index docs (#14418) (#14428)
(cherry picked from commit efd9b769fc)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-01-05 03:06:15 +00:00
mergify[bot]
1f2480fd9f Fix pre-merge old name in the docs (#14425) (#14427)
(cherry picked from commit 974eb6e1ef)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-01-05 02:55:02 +00:00
mergify[bot]
8587bd0d69 Improve solana catchup (#14313) (#14424)
* Improve solana catchup

* Overridable port, retry, args error cleanup

* print cleanup

* Reduce diff

* Tweak warns a bit

(cherry picked from commit aa4da339ff)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-01-05 02:34:53 +00:00
mergify[bot]
0063a58e95 Upgradeable programs need the program account's address as program id (#14417) (#14420)
(cherry picked from commit 0619805806)

Co-authored-by: Jack May <jack@solana.com>
2021-01-04 23:00:36 +00:00
mergify[bot]
9aeb3bc5d6 docs: Use "msg!" instead of "info!" (#14411) (#14416)
* docs: Use "msg!" instead of "info!"

* Update docs/src/developing/deployed-programs/developing-rust.md

Co-authored-by: Michael Vines <mvines@gmail.com>

* Fix typo / format

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit a41b5137f6)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-01-04 20:02:09 +00:00
Michael Vines
97665b977e Bump version to v1.5.2 2021-01-04 06:44:52 +00:00
Michael Vines
c45ed29cf4 Use max commitment when fetching epoch info for block production
(cherry picked from commit 2724f37d0e)
2021-01-03 21:05:13 -08:00
mergify[bot]
635afbabff snapshot_utils: Don't bother restoring snapshots, they're never used (bp #14392) (#14396)
* Remove dead code

(cherry picked from commit b6dcdb90e8)

* Don't bother restoring snapshots, they're never used

(cherry picked from commit db6ee289c9)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-01-03 05:20:26 +00:00
mergify[bot]
98afdad1dd Tune rewards output (#14395)
(cherry picked from commit 560ed90168)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-01-03 02:39:35 +00:00
mergify[bot]
c085b94b43 docs: Update tmpfs partition guidance to include swap (bp #14387) (#14397)
* Update tmpfs partition guidance to include swap

(cherry picked from commit 68a84cf581)

* Update docs/src/running-validator/validator-start.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 9bb08ce75e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-01-03 01:45:07 +00:00
Michael Vines
a53946c485 Use singleGossip for program deployment
(cherry picked from commit c63e14dd0e)
2021-01-02 09:21:36 -08:00
mergify[bot]
f6de92c346 Add secondary indexes (#14212) (#14382)
(cherry picked from commit 5affd8aa72)

Co-authored-by: carllin <wumu727@gmail.com>
2021-01-01 07:42:47 +00:00
mergify[bot]
46f9822d62 Only initialize BigTable upload service when requested (#14380)
(cherry picked from commit 4a3d217839)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-01-01 03:06:34 +00:00
mergify[bot]
6dad84d228 Add --ignore-http-bad-gateway flag (#14377)
(cherry picked from commit 6c167615ad)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-31 22:00:29 +00:00
mergify[bot]
3582607aa0 solana-test-validator: bind RPC and faucet to 0.0.0.0 (bp #14369) (#14370)
* Minor help improvements

(cherry picked from commit 04bf5ce830)

* Bind RPC and faucet to 0.0.0.0

(cherry picked from commit 0b23abd479)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-31 09:10:07 +00:00
Michael Vines
f051077350 Require tokio 0.3.5 2020-12-30 22:25:23 -08:00
Michael Vines
ffd6f3e6bf Revert "Upgrade in-tree tokio 0.2 usage to tokio 0.3 (#14326)"
This reverts commit 6c5be574c8.
2020-12-30 22:25:23 -08:00
mergify[bot]
c6b2eb07ee Gate CPI authorized programs (#14361) (#14365)
(cherry picked from commit 2d8dacb72b)

Co-authored-by: Jack May <jack@solana.com>
2020-12-31 03:29:46 +00:00
mergify[bot]
7a3e1f9826 Remove assert (#14356) (#14360)
(cherry picked from commit 1c5427ff17)

Co-authored-by: Jack May <jack@solana.com>
2020-12-30 22:39:55 +00:00
mergify[bot]
8a690b6cf7 nit: clarify loader id (#14355) (#14358)
(cherry picked from commit 6c6095abe7)

Co-authored-by: Jack May <jack@solana.com>
2020-12-30 21:25:41 +00:00
mergify[bot]
8688efa89b Speed up UDP reachable port checks (#14351)
(cherry picked from commit 71b88da48e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-30 19:00:28 +00:00
mergify[bot]
3b047e5b99 Port ip-echo-server to tokio 0.3 (bp #14345) (#14350)
* Port ip-echo-server to tokio 0.3

(cherry picked from commit fb6c660cfd)

# Conflicts:
#	net-utils/Cargo.toml

* Update Cargo.toml

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-30 18:55:24 +00:00
mergify[bot]
3cddc731b2 Add --test arg to cargo-test-bpf (#14342) (#14344)
(cherry picked from commit 3d0cd2cdb0)

Co-authored-by: Justin Starry <justin@solana.com>
2020-12-30 07:55:38 +00:00
mergify[bot]
1d29a583c6 Rewrite faucet with tokio v0.3 (bp #14336) (#14343)
* Rewrite faucet with tokio v0.3 (#14336)

* Rewrite faucet for contemporary tokio

* Move away from framed decoder

(cherry picked from commit d63dd95806)

# Conflicts:
#	faucet/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-12-30 05:09:00 +00:00
mergify[bot]
b5335edb35 Add experimental knob for tuning PoH pinned CPU core (bp #14330) (#14341)
* core: Update stale error message

(cherry picked from commit 82f61c0c4a)

* validator: Add experimental flag to select PoH pinned core

(cherry picked from commit fe667db910)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-12-30 03:33:39 +00:00
mergify[bot]
abee1e83eb Add poh speed check and tick speed calibration (#14292) (#14328)
(cherry picked from commit 2074e407cd)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-12-29 19:40:49 +00:00
mergify[bot]
6c5be574c8 Upgrade in-tree tokio 0.2 usage to tokio 0.3 (#14326)
(cherry picked from commit 444ed768dc)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-29 19:03:18 +00:00
mergify[bot]
c1f993d2fc Retry durable-nonce transactions (#14308) (#14325)
* Retry durable-nonce transactions

* Add metric to track durable-nonce txs in queue

* Populate send-tx-service initial addresses with tpu_address if empty (primarily for testing)

* Reinstate last_valid_slot check for durable-nonce txs; use arbitrary future slot

(cherry picked from commit 3f10fb993b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-12-29 18:03:04 +00:00
mergify[bot]
e2ddb2f0ea Limit CPI instruction size (#14317) (#14321)
(cherry picked from commit 5524938a50)

Co-authored-by: Jack May <jack@solana.com>
2020-12-29 02:38:22 +00:00
mergify[bot]
f3faba5ca9 Remove Testnet-specific old code (#14305) (#14315)
(cherry picked from commit 7893e2e307)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-12-28 22:18:33 +00:00
mergify[bot]
3a6fd91739 Log error from AppendVec removal & a panic clean (#14302) (#14310)
(cherry picked from commit addffd7694)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-12-28 22:08:22 +00:00
mergify[bot]
2d2b3d8287 CLI: Support retrieving past leader schedules (bp #14304) (#14312)
* clap-utils: Add epoch validator

(cherry picked from commit a709850ee4)

* CLI: Support displaying past leader schedules

(cherry picked from commit bd761e2a52)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-12-28 21:41:55 +00:00
mergify[bot]
6e47b88399 run.sh: add env knob for solana-validator (#14303) (#14307)
(cherry picked from commit 4af33674a7)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-12-28 20:48:03 +00:00
Michael Vines
941e56c6c7 Avoid creating "..tmp" files 2020-12-28 08:58:41 -08:00
Michael Vines
d1adc2a446 Persist gossip contact info
(cherry picked from commit 9ddd6f08e8)
2020-12-27 22:09:00 -08:00
Michael Vines
02da7dfedf Bump version to v1.5.1 2020-12-27 21:57:43 -08:00
mergify[bot]
eb0fd3625a Fix subtraction overflow in metrics (#14290) (#14296)
(cherry picked from commit c693ffaa08)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-12-28 02:34:58 +00:00
mergify[bot]
b87e606626 Fix download speed (#14291) (#14295)
(cherry picked from commit 7b49c85aa7)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-12-28 02:21:40 +00:00
mergify[bot]
1c91376f78 obtains staked-nodes from the root-bank (#14257) (#14293)
... as opposed to the working bank

(cherry picked from commit 49019c6613)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-12-27 14:49:29 +00:00
mergify[bot]
10067ad07b indexes votes in crds table (#14272) (#14294)
(cherry picked from commit 2fd38d9912)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-12-27 14:49:23 +00:00
Michael Vines
eb76289107 Fix windows build 2020-12-24 17:49:41 -08:00
Michael Vines
8926736e1c Remove stray dbg 2020-12-24 10:45:34 -08:00
mergify[bot]
bf4c169703 Prevent bpf loader impersonators (#14278) (#14279)
(cherry picked from commit ee0a80a092)

Co-authored-by: Jack May <jack@solana.com>
2020-12-24 04:24:30 +00:00
mergify[bot]
0020e43476 Don't use caller passed executable account (#14276) (#14277)
(cherry picked from commit b1d702a618)

Co-authored-by: Jack May <jack@solana.com>
2020-12-23 23:52:04 +00:00
mergify[bot]
a9a2c76221 Limit CPI from calling loader or native programs (#14252) (#14275)
(cherry picked from commit 0b479ab180)

Co-authored-by: Jack May <jack@solana.com>
2020-12-23 20:01:56 +00:00
mergify[bot]
4754b4e871 Save cloning program account data (#14251) (#14274)
(cherry picked from commit 5945305b1d)

Co-authored-by: Jack May <jack@solana.com>
2020-12-23 19:35:09 +00:00
mergify[bot]
52ffb9a64a Add accounts shrink paths (bp #14238) (#14270)
* Add shrink paths (#14238)


(cherry picked from commit baa9602411)

* Ignore long/hanging test (#14261)

Co-authored-by: sakridge <sakridge@gmail.com>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-12-23 08:03:33 +00:00
Trent Nelson
bd0b1503c6 Deinitialize stake data upon zero balance 2020-12-23 06:17:59 +00:00
Trent Nelson
10e7fa40ac Deinitialize vote data upon zero balance 2020-12-23 06:17:59 +00:00
Trent Nelson
198ed407b7 vote: Add helper for creating current-versioned states 2020-12-23 06:17:59 +00:00
Trent Nelson
d96af2dd23 Deinitialize nonce data upon zero balance 2020-12-23 06:17:59 +00:00
mergify[bot]
192cca8f98 validator: Multiple --entrypoint support (bp #14256) (#14264)
* Update entrypoint contact info even when shred version adoption is not requested

(cherry picked from commit 3373082ffa)

* Multiple entrypoint support

(cherry picked from commit ace360ade2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-23 04:15:44 +00:00
Michael Vines
ee716e1c55 Add log message for when a local snapshot is too old
(cherry picked from commit 65dcb3dc81)
2020-12-22 19:58:29 -08:00
mergify[bot]
6dd3c7c2dd removes &Arc<Self> receivers (#14234) (#14262)
(cherry picked from commit a14cfd660a)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-12-23 02:11:08 +00:00
mergify[bot]
582b4c9edf Upgradeable programs called same as non-upgradeable (#14239) (#14254)
* Upgradeable programs called same as non-upgradeable

* nudge

(cherry picked from commit ab205b682a)

Co-authored-by: Jack May <jack@solana.com>
2020-12-22 21:17:18 +00:00
mergify[bot]
f15add2a74 Feature-gate stake-program-v3 (#14232) (#14250)
* Remove deprecated legacy stake program

* Add legacy stake program

* Strip out duplicative legacy code

* Feature-deploy stake-program-v3

* Add ownership check in stake processor

(cherry picked from commit 7042f11791)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-12-22 19:42:30 +00:00
mergify[bot]
74d48910e2 Rework upgradeable loader cli (#14209) (#14236)
(cherry picked from commit 3316e7166c)

Co-authored-by: Jack May <jack@solana.com>
2020-12-21 22:26:11 +00:00
mergify[bot]
c53e8ee3ad improves performance in replay-stage (#14217) (#14233)
bank::vote_accounts returns a hash-map which is slow to iterate, but all uses
only require an iterator:
https://github.com/solana-labs/solana/blob/b3dc98856/runtime/src/bank.rs#L4300-L4306
Similarly, calculate_stake_weighted_timestamp takes a hash-map whereas it only
requires an iterator:
https://github.com/solana-labs/solana/blob/b3dc98856/sdk/src/stake_weighted_timestamp.rs#L21-L28

(cherry picked from commit 7b08cb1f0d)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-12-21 21:23:35 +00:00
Michael Vines
c5e5fedc47 Allow multiple --accounts arguments
(cherry picked from commit 8082a2454c)
2020-12-21 11:43:04 -08:00
Tyera Eulberg
b9929dcd67 Warp-timestamp pr# 2020-12-21 10:53:43 -07:00
sakridge
554a158443 Fix test_max_hashes (#14189)
(cherry picked from commit a5db6399ad)
2020-12-21 09:05:26 -08:00
behzad nouri
b7fa4b7ee1 caches staked nodes computed from vote-accounts (#13929)
(cherry picked from commit d6d76219b6)
2020-12-21 09:05:17 -08:00
behzad nouri
fd44cee8cc limits number of crds values returned when responding to pull requests (#13739)
Crds values buffered when responding to pull-requests can be very large, taking a lot of memory.
Added a limit on the number of buffered crds values based on the outbound data budget.

(cherry picked from commit 691031fefd)
2020-12-21 09:04:50 -08:00
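
A rough sketch of the budget-based cap this commit describes, assuming buffered values are plain byte buffers (the helper name and budget figure are illustrative, not the actual gossip code):

```rust
// Keep buffered values only while they fit in the outbound byte budget.
fn take_within_budget(values: Vec<Vec<u8>>, byte_budget: usize) -> Vec<Vec<u8>> {
    let mut used = 0;
    values
        .into_iter()
        .take_while(|value| {
            used += value.len();
            used <= byte_budget
        })
        .collect()
}

fn main() {
    // Three 400-byte values against a 1000-byte budget: only two fit.
    let values = vec![vec![0u8; 400], vec![0u8; 400], vec![0u8; 400]];
    let kept = take_within_budget(values, 1000);
    assert_eq!(kept.len(), 2);
    println!("kept {} values", kept.len());
}
```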
mergify[bot]
c6a362cce2 Do not delete ALL other snapshots before downloading a new snapshot (#14227)
(cherry picked from commit 93ae177503)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-21 10:27:25 +00:00
mergify[bot]
252180c244 Restore Content-Length header for streaming snapshot download (#14222)
(cherry picked from commit 57b03c5bc1)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-21 08:41:02 +00:00
mergify[bot]
e02b4e698e Fix timestamp handling on ledger warp (#14210) (#14218)
* Reset timestamp for slot and epoch-start on warp

* Fix genesis timestamp metric source

* Remove check that timestamp > unix_timestamp_from_genesis

Default to previous timestamp, not genesis timestamp

* Move timestamp metrics to report even on warp

* Initialize slot 0 timestamps correctly

* Add feature gate to warp testnet timestamp

* Review suggestion: simplify warp-timestamp slot check

(cherry picked from commit e15f95a36f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-12-20 22:52:23 +00:00
mergify[bot]
4811afe8eb Stream RPC snapshot downloads (bp #14213) (#14215)
* Stream RPC snapshot downloads

(cherry picked from commit b3dc988564)

# Conflicts:
#	core/Cargo.toml

* Update Cargo.toml

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-20 01:28:41 +00:00
Michael Vines
bc4568b10f Update Cargo.toml 2020-12-18 20:16:48 -08:00
Michael Vines
d59c131e90 Create a random -keypair.json file alongside the program deploy artifact for easy upgrades
(cherry picked from commit 636a455790)
2020-12-18 20:16:48 -08:00
Michael Vines
825027f9f7 Use AsRef
(cherry picked from commit 9993d2c623)
2020-12-18 20:16:48 -08:00
mergify[bot]
9b8f0bee99 adds crds-value for broadcasting duplicate shreds through gossip (bp #14133) (#14203)
* adds crds-value for broadcasting duplicate shreds through gossip (#14133)

In gossip, the header overhead we get from:
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/cluster_info.rs#L434-L435
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/crds_value.rs#L31-L36
https://github.com/solana-labs/solana/blob/de9ac43eb/core/src/crds_value.rs#L73
already exceeds SIZE_OF_NONCE in shreds. We also need additional
metadata (wallclock, source pubkey, ...), which means that, given
SHRED_PAYLOAD_SIZE, we cannot fit all of these in PACKET_DATA_SIZE:
https://github.com/solana-labs/solana/blob/de9ac43eb/ledger/src/shred.rs#L80

On top of that, we need 2 shred payloads as the proof of duplicate. So
each DuplicateShred crds value includes only a chunk of the payload,
along with the meta-data to reconstruct the full payload from the chunks
on the receiving end.

(cherry picked from commit 6a3797e164)

# Conflicts:
#	Cargo.lock
#	ledger/Cargo.toml

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-12-18 22:54:50 +00:00
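
As a rough illustration of the chunking scheme described above, the following hypothetical sketch splits a duplicate proof into packet-sized chunks (the constants, the overhead figure, and the `Chunk` struct are assumptions for illustration; the real types live in the gossip and ledger crates):

```rust
// Illustrative sizes only; the real constants live in the solana crates.
const PACKET_DATA_SIZE: usize = 1232;
const HEADER_OVERHEAD: usize = 115; // hypothetical per-value metadata cost

// A hypothetical DuplicateShred-style value: one chunk of the payload plus
// enough metadata to reassemble the whole proof on the receiving end.
#[derive(Debug)]
struct Chunk {
    chunk_index: u8,
    num_chunks: u8,
    data: Vec<u8>,
}

fn split_into_chunks(payload: &[u8]) -> Vec<Chunk> {
    let chunk_size = PACKET_DATA_SIZE - HEADER_OVERHEAD;
    let num_chunks = (payload.len() + chunk_size - 1) / chunk_size;
    payload
        .chunks(chunk_size)
        .enumerate()
        .map(|(i, data)| Chunk {
            chunk_index: i as u8,
            num_chunks: num_chunks as u8,
            data: data.to_vec(),
        })
        .collect()
}

fn main() {
    // Two shred payloads serve as the duplicate proof; chunk their concatenation.
    let proof = vec![0u8; 2 * 1228];
    let chunks = split_into_chunks(&proof);
    assert_eq!(chunks.len() as u8, chunks[0].num_chunks);
    println!("split proof into {} chunks", chunks.len());
}
```

Each chunk carries its own index and the total chunk count, so the receiver can tell when it has all the pieces and reassemble the payload.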
mergify[bot]
fc13c1d654 getBlockTime RPC method now falls back to BigTable in all cases (#14207)
(cherry picked from commit 0090106f60)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-18 22:23:35 +00:00
mergify[bot]
a57758e9c9 Add CPI support for upgradeable loader (bp #14193) (#14199) 2020-12-18 11:23:00 -08:00
Michael Vines
564590462a Add transactionCount field to GetEpochInfo
(cherry picked from commit efc091e28a)
2020-12-18 10:09:30 -08:00
Michael Vines
269f6af97e fix: add transactionCount field to GetEpochInfo
(cherry picked from commit 01fe835e73)
2020-12-18 10:09:30 -08:00
mergify[bot]
57b8a59632 Reject invalid --expected-shred-version (#14183) (#14202)
* Reject invalid --expected-shred-version

* less code

(cherry picked from commit 3c9b853268)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-12-18 19:19:57 +09:00
Trent Nelson
4289f52d2b net/gce.sh: Upgrade to Ubuntu 20.04
(cherry picked from commit 3322b83183)
2020-12-17 18:17:01 -07:00
Trent Nelson
573f68620b net/gce.sh: Switch to SSD boot disks
(cherry picked from commit a0507505f4)
2020-12-17 18:17:01 -07:00
Trent Nelson
4bfe64688b net/gce.sh: Bump machine type to 24-core, 64GB RAM
(cherry picked from commit ffe0532ded)
2020-12-17 18:17:01 -07:00
mergify[bot]
50034848a5 Improved Transaction Forwarding (#13944) (#14195)
* Forwarding

* Dedupe leaders

* Use consistent commitment for last_valid_slot in rpc send_transaction

* Plumb rpc send_transaction options into solana-validator

* Extend num slots banking-stage holds forwarded txs

Co-authored-by: Tyera Eulberg <tyera@solana.com>
(cherry picked from commit da7d1e2302)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-12-17 18:14:06 -07:00
Michael Vines
981294cbc6 Don't require increased open file limit at ledger creation
Follow-up to 0b92720fdb; `create_new_ledger()` does not require a higher fd limit
2020-12-17 08:49:23 -08:00
mergify[bot]
ff728e5e56 Fix program account rent exemption (#14176) (#14180)
(cherry picked from commit 593ad80954)

Co-authored-by: Jack May <jack@solana.com>
2020-12-17 03:46:43 -08:00
Michael Vines
9aaf41bef2 Don't require increased open file limit in solana-test-validator
Travis CI in particular does not allow the open file limit to be
increased.

(cherry picked from commit 0b92720fdb)
2020-12-16 22:59:56 -08:00
Michael Vines
271eec656c Use an ephemeral mint address if the client keypair is not available
Typically this can occur in a CI environment

(cherry picked from commit 8d700c3b94)
2020-12-16 22:59:56 -08:00
Trent Nelson
13d071607f Revert "Ignore RUSTSEC-2020-0077 until next 1.4 release"
This reverts commit 1792100e2b.
2020-12-17 01:54:26 +00:00
Trent Nelson
ffe35d9a10 Bump SPL crates 2020-12-17 01:54:26 +00:00
mergify[bot]
bb2fb07b39 Add blockstore skipped api (#14145) (#14167)
* Add blockstore api to determine if a slot was skipped

* Return custom rpc error if slot is skipped

(cherry picked from commit ac0d32bc7e)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-12-16 22:26:54 +00:00
mergify[bot]
85fc51dc61 fix formatting error in docs (#14163)
(cherry picked from commit 41a93ced23)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2020-12-16 18:51:24 +00:00
mergify[bot]
0276b6c4c2 Correctly show reward percent changes (#14161)
(cherry picked from commit bebfa6e93c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-12-16 18:15:36 +00:00
mergify[bot]
c481e4fe7f Partial shred deserialize cleanup and shred type differentiation (#14094) (#14139)
* Partial shred deserialize cleanup and shred type differentiation in retransmit

* consolidate packet hashing logic

(cherry picked from commit d4a174fb7c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-12-16 08:57:21 -08:00
mergify[bot]
76a3b3ad11 Remove lock files from programs/bpf/rust (#14148) (#14158)
(cherry picked from commit 49c3f14016)

Co-authored-by: Jack May <jack@solana.com>
2020-12-16 11:56:48 +00:00
mergify[bot]
356c663e88 check for resize access violations (#14142) (#14152)
(cherry picked from commit 025f886e10)

Co-authored-by: Jack May <jack@solana.com>
2020-12-16 10:28:27 +00:00
mergify[bot]
015bbc1e12 Fix up upgradeable bpf loader activation (#14149)
(cherry picked from commit 501fd83afd)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-16 07:54:44 +00:00
mergify[bot]
454a9f3175 Switch solana deploy commitment default from "max" to "singleGossip" (#14146)
(cherry picked from commit db4ac17259)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-16 04:46:45 +00:00
mergify[bot]
485b3d64a1 Add Program loader/environment instruction errors (#14120) (#14143)
(cherry picked from commit d513b0c4ca)

Co-authored-by: Jack May <jack@solana.com>
2020-12-16 03:50:04 +00:00
Michael Vines
5d170d83c0 Remove stray println 2020-12-15 16:44:56 -08:00
mergify[bot]
f54d8ea3ab solana-test-validator usability improvements (bp #14129) (#14136)
* Clean up Cargo.toml

(cherry picked from commit d2af09a647)

* Prevent multiple test-validators from using the same ledger directory

(cherry picked from commit f3272db7f7)

* Add --reset flag to allow for easy ledger reset

(cherry picked from commit 00c46c528e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-12-15 23:21:21 +00:00
mergify[bot]
ef9f54b3d4 Fix race between setting tick height and calculating accounts hash (#14101) (#14132)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 75e9e321de)

Co-authored-by: carllin <wumu727@gmail.com>
2020-12-15 22:05:44 +00:00
mergify[bot]
8d0b102b44 Cleanup ledger builtins (#14083) (#14130)
(cherry picked from commit 582418de5e)

Co-authored-by: Jack May <jack@solana.com>
2020-12-15 21:45:44 +00:00
1628 changed files with 109019 additions and 281319 deletions


@@ -2,6 +2,6 @@
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"_comment": "These credentials are encrypted and pose no risk",
"environment": {
"CODECOV_TOKEN": "EJ[1:KToenD1Sr3w82lHGxz1n+j3hwNlLk/1pYrjZHlvY6kE=:hN1Q25omtJ+4yYVn+qzIsPLKT3O6J9XN:DMLNLXi/pkWgvwF6gNIcNF222sgsRR9LnwLZYj0P0wGj7q6w8YQnd1Rskj+sRroI/z5pQg==]"
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
}
}


@@ -12,8 +12,7 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
eval "$(ci/sbf-tools-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes
@@ -37,7 +36,4 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
# `std:
# "found possibly newer version of crate `std` which `xyz` depends on
rm -rf target/bpfel-unknown-unknown
if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then
rm -rf target/release
fi
)

.gitignore

@@ -1,3 +1,7 @@
/docs/html/
/docs/src/tests.ok
/docs/src/cli/usage.md
/docs/src/.gitbook/assets/*.svg
/farf/
/solana-release/
/solana-release.tar.bz2


@@ -50,6 +50,14 @@ pull_request_rules:
label:
add:
- automerge
- name: v1.3 backport
conditions:
- label=v1.3
actions:
backport:
ignore_conflicts: true
branches:
- v1.3
- name: v1.4 backport
conditions:
- label=v1.4
@@ -65,11 +73,12 @@ pull_request_rules:
backport:
ignore_conflicts: true
branches:
- v1.9
commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
- v1.5
- name: v1.6 backport
conditions:
- author=@core-contributors
- label=v1.6
actions:
backport:
ignore_conflicts: true
branches:
- v1.6


@@ -29,7 +29,6 @@ jobs:
if: type IN (api, cron) OR tag IS present
name: "macOS release artifacts"
os: osx
osx_image: xcode12
language: rust
rust:
- stable
@@ -37,12 +36,8 @@ jobs:
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
- brew install gnu-tar
- PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
- tar --version
script:
- source ci/env.sh
- rustup set profile default
- ci/publish-tarball.sh
deploy:
- provider: s3
@@ -65,12 +60,6 @@ jobs:
- <<: *release-artifacts
name: "Windows release artifacts"
os: windows
install:
- choco install openssl
- export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
- source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
# Linux release artifacts are still built by ci/buildkite-secondary.yml
#- <<: *release-artifacts
# name: "Linux release artifacts"
@@ -84,7 +73,7 @@ jobs:
language: node_js
node_js:
- "lts/*"
- "node"
cache:
directories:
@@ -127,7 +116,7 @@ jobs:
if: type IN (push, pull_request) OR tag IS present
language: node_js
node_js:
- "lts/*"
- "node"
services:
- docker

Cargo.lock

File diff suppressed because it is too large


@@ -1,9 +1,5 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-exchange",
"bench-streamer",
"bench-tps",
@@ -12,7 +8,6 @@ members = [
"banks-client",
"banks-interface",
"banks-server",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -25,7 +20,6 @@ members = [
"perf",
"validator",
"genesis",
"genesis-utils",
"gossip",
"install",
"keygen",
@@ -34,8 +28,8 @@ members = [
"local-cluster",
"logger",
"log-analyzer",
"merkle-root-bench",
"merkle-tree",
"stake-o-matic",
"storage-bigtable",
"storage-proto",
"streamer",
@@ -43,19 +37,21 @@ members = [
"metrics",
"net-shaper",
"notifier",
"poh",
"poh-bench",
"program-test",
"programs/secp256k1",
"programs/bpf_loader",
"programs/compute-budget",
"programs/budget",
"programs/config",
"programs/exchange",
"programs/ed25519",
"programs/secp256k1",
"programs/failure",
"programs/noop",
"programs/ownable",
"programs/stake",
"programs/vest",
"programs/vote",
"remote-wallet",
"rpc",
"ramp-tps",
"runtime",
"runtime/store-tool",
"sdk",
@@ -63,6 +59,7 @@ members = [
"sdk/cargo-test-bpf",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"tokens",
"transaction-status",
@@ -78,11 +75,3 @@ members = [
exclude = [
"programs/bpf",
]
# TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
# dependency is supported on Apple M1. v2 of the feature resolver is needed to
# specify arch-specific features.
resolver = "2"
[profile.dev]
split-debuginfo = "unpacked"


@@ -1,6 +1,6 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
<img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
</a>
</p>
@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```
Please make sure you are always using the latest stable rust version by running:
Please sure you are always using the latest stable rust version by running:
```bash
$ rustup update
@@ -32,12 +32,6 @@ $ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
```
On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
```bash
$ softwareupdate --install-rosetta
```
## **2. Download the source code.**
```bash
@@ -51,6 +45,11 @@ $ cd solana
$ cargo build
```
## **4. Run a minimal local cluster.**
```bash
$ ./run.sh
```
# Testing
**Run the test suite:**
@@ -108,41 +107,6 @@ send us that patch!
# Disclaimer
All claims, content, designs, algorithms, estimates, roadmaps,
specifications, and performance measurements described in this project
are done with the Solana Foundation's ("SF") best efforts. It is up to
the reader to check and validate their accuracy and truthfulness.
Furthermore nothing in this project constitutes a solicitation for
investment.
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
Any content produced by SF or developer resources that SF provides, are
for educational and inspiration purposes only. SF does not encourage,
induce or sanction the deployment, integration or use of any such
applications (including the code comprising the Solana blockchain
protocol) in violation of applicable laws or regulations and hereby
prohibits any such deployment, integration or use. This includes use of
any such applications by the reader (a) in violation of export control
or sanctions laws of the United States or any other applicable
jurisdiction, (b) if the reader is located in or ordinarily resident in
a country or territory subject to comprehensive sanctions administered
by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the
reader is or is working on behalf of a Specially Designated National
(SDN) or a person subject to similar blocking or denied party
prohibitions.
The reader should be aware that U.S. export control and sanctions laws
prohibit U.S. persons (and other persons that are subject to such laws)
from transacting with persons in certain countries and territories or
that are on the SDN list. As a project based primarily on open-source
software, it is possible that such sanctioned persons may nevertheless
bypass prohibitions, obtain the code comprising the Solana blockchain
protocol (or other project code or applications) and deploy, integrate,
or otherwise use it. Accordingly, there is a risk to individuals that
other persons using the Solana blockchain protocol may be sanctioned
persons and that transactions with such persons would be a violation of
U.S. export controls and sanctions law. This risk applies to
individuals, organizations, and other ecosystem participants that
deploy, integrate, or use the Solana blockchain protocol code directly
(e.g., as a node operator), and individuals that transact on the Solana
blockchain through light clients, third party interfaces, and/or wallet
software.
Any content produced by Solana, or developer resources that Solana provides, are for educational and inspiration purposes only. Solana does not encourage, induce or sanction the deployment of any such applications in violation of applicable laws or regulations.


@@ -1,169 +0,0 @@
# Security Policy
1. [Reporting security problems](#reporting)
2. [Security Bug Bounties](#bounty)
3. [Incident Response Process](#process)
<a name="reporting"></a>
## Reporting security problems to Solana
**DO NOT CREATE AN ISSUE** to report a security problem. Instead, please send an
email to security@solana.com and provide your github username so we can add you
to a new draft security advisory for further discussion.
Expect a response as fast as possible, within one business day at the latest.
<a name="bounty"></a>
## Security Bug Bounties
We offer bounties for critical security issues. Please see below for more details.
Loss of Funds:
$500,000 USD in locked SOL tokens (locked for 12 months)
* Theft of funds without users signature from any account
* Theft of funds without users interaction in system, token, stake, vote programs
* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes.
Consensus/Safety Violations:
$250,000 USD in locked SOL tokens (locked for 12 months)
* Consensus safety violation
* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc..
Other Attacks:
$100,000 USD in locked SOL tokens (locked for 12 months)
* Protocol liveness attacks,
* Eclipse attacks,
* Remote attacks that partition the network,
DoS Attacks:
$25,000 USD in locked SOL tokens (locked for 12 months)
* Remote resource exhaustion via non-RPC protocols
RPC DoS/Crashes:
$5,000 USD in locked SOL tokens (locked for 12 months)
* RPC attacks
Out of Scope:
The following components are out of scope for the bounty program
* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com
* Explorer: `/explorer` in the monorepo as well as https://explorer.solana.com
* Any encrypted credentials, auth tokens, etc. checked into the repo
* Bugs in dependencies. Please take them upstream!
* Attacks that require social engineering
Eligibility:
* The participant submitting the bug report shall follow the process outlined within this document
* Valid exploits can be eligible even if they are not successfully executed on the cluster
* Multiple submissions for the same class of exploit are still eligible for compensation, though they may be compensated at a lower rate; these will be assessed on a case-by-case basis
* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
Payment of Bug Bounties:
* Payments for eligible bug reports are distributed monthly.
* Bounties for all bug reports submitted in a given month are paid out in the middle of the
following month.
* The SOL/USD conversion rate used for payments is the market price at the end of
the last day of the month for the month in which the bug was submitted.
* The reference for this price is the Closing Price given by Coingecko.com on
that date given here:
https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
payouts is the Close price on 2021-03-31 of $19.49. This applies to all bugs
submitted in March 2021, to be paid in mid-April 2021.
* Bug bounties are paid out in
[stake accounts](https://solana.com/staking) with a
[lockup](https://docs.solana.com/staking/stake-accounts#lockups)
expiring 12 months from the last day of the month in which the bug was submitted.
<a name="process"></a>
## Incident Response Process
In case an incident is discovered or reported, the following process will be
followed to contain, respond and remediate:
### 1. Establish a new draft security advisory
In response to an email to security@solana.com, a member of the `solana-labs/admins` group will
1. Create a new draft security advisory for the incident at https://github.com/solana-labs/solana/security/advisories
1. Add the reporter's github user and the `solana-labs/security-incident-response` group to the draft security advisory
1. Create a private fork of the repository (grey button towards the bottom of the page)
1. Respond to the reporter by email, sharing a link to the draft security advisory
### 2. Triage
Within the draft security advisory, discuss and determine the severity of the
issue. If necessary, members of the `solana-labs/security-incident-response`
group may add other github users to the advisory to assist.
If it is determined that this is not a critical network issue, then the advisory
should be closed, and if more follow-up is required, a normal Solana public github
issue should be created.
### 3. Prepare Fixes
For the affected branches, typically all three (edge, beta and stable), prepare
a fix for the issue and push them to the corresponding branch in the private
repository associated with the draft security advisory.
There is no CI available in the private repository so you must build from source
and manually verify fixes.
Code review from the reporter is ideal, as well as from multiple members of the
core development team.
### 4. Notify Security Group Validators
Once an ETA is available for the fix, a member of the
`solana-labs/security-incident-response` group should notify the validators so
they can prepare for an update using the "Solana Red Alert" notification system.
The teams are all over the world and it's critical to provide actionable
information at the right time. Don't be the person that wakes everybody up at
2am when a fix won't be available for hours.
### 5. Ship the patch
Once the fix is accepted, a member of the
`solana-labs/security-incident-response` group should prepare a single patch
file for each affected branch. The commit title for the patch should only
contain the advisory id, and not disclose any further details about the
incident.
Copy the patches to https://release.solana.com/ under a subdirectory named after
the advisory id (example:
https://release.solana.com/GHSA-hx59-f5g4-jghh/v1.4.patch). Contact a member of
the `solana-labs/admins` group if you require access to release.solana.com
Using the "Solana Red Alert" channel:
1. Notify validators that there's an issue and a patch will be provided in X minutes
2. If X minutes expires and there's no patch, notify of the delay and provide a
new ETA
3. Provide links to patches of https://release.solana.com/ for each affected branch
Validators can be expected to build the patch from source against the latest
release for the affected branch.
Since the software version will not change after the patch is applied, request
that each validator notify in the existing channel once they've updated. Manually
monitor the roll out until a sufficient amount of stake has updated - typically
at least 33.3% or 66.6% depending on the issue.
### 6. Public Disclosure and Release
Once the fix has been deployed to the security group validators, the patches from the security
advisory may be merged into the main source repository. A new official release
for each affected branch should be shipped and all validators requested to
upgrade as quickly as possible.
### 7. Security Advisory Bounty Accounting and Cleanup
If this issue is eligible for a bounty, prefix the title of the security
advisory with one of the following, depending on the severity:
* `[Bounty Category: Critical: Loss of Funds]`
* `[Bounty Category: Critical: Loss of Availability]`
* `[Bounty Category: Critical: DoS]`
* `[Bounty Category: Critical: Other]`
* `[Bounty Category: Non-critical]`
* `[Bounty Category: RPC]`
Confirm with the reporter that they agree with the severity assessment, and
discuss as required to reach a conclusion.
We currently do not use the Github workflow to publish security advisories.
Once the issue and fix have been disclosed, and a bounty category is assessed if
appropriate, the GitHub security advisory is no longer needed and can be closed.
Bounties are currently awarded once a quarter (TODO: link to this process, or
inline the workflow)


@@ -1,11 +1,10 @@
[package]
name = "solana-account-decoder"
version = "1.8.17"
version = "1.5.2"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-account-decoder"
license = "Apache-2.0"
edition = "2018"
@@ -16,13 +15,14 @@ bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
serde = "1.0.122"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.8.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
solana-config-program = { path = "../programs/config", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-stake-program = { path = "../programs/stake", version = "1.5.2" }
solana-vote-program = { path = "../programs/vote", version = "1.5.2" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"


@@ -1,11 +1,9 @@
#![allow(clippy::integer_arithmetic)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
pub mod parse_account_data;
pub mod parse_bpf_loader;
pub mod parse_config;
pub mod parse_nonce;
pub mod parse_stake;
@@ -16,12 +14,7 @@ pub mod validator_info;
use {
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
solana_sdk::{
account::{ReadableAccount, WritableAccount},
clock::Epoch,
fee_calculator::FeeCalculator,
pubkey::Pubkey,
},
solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey},
std::{
io::{Read, Write},
str::FromStr,
@@ -29,8 +22,6 @@ use {
};
pub type StringAmount = String;
pub type StringDecimals = String;
pub const MAX_BASE58_BYTES: usize = 128;
/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
@@ -51,7 +42,7 @@ pub enum UiAccountData {
Binary(String, UiAccountEncoding),
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary, // Legacy. Retained for RPC backwards compatibility
@@ -63,73 +54,58 @@ pub enum UiAccountEncoding {
}
impl UiAccount {
fn encode_bs58<T: ReadableAccount>(
account: &T,
data_slice_config: Option<UiDataSliceConfig>,
) -> String {
if account.data().len() <= MAX_BASE58_BYTES {
bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
} else {
"error: data too large for bs58 encoding".to_string()
}
}
pub fn encode<T: ReadableAccount>(
pub fn encode(
pubkey: &Pubkey,
account: &T,
account: Account,
encoding: UiAccountEncoding,
additional_data: Option<AccountAdditionalData>,
data_slice_config: Option<UiDataSliceConfig>,
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::LegacyBinary(data)
}
UiAccountEncoding::Base58 => {
let data = Self::encode_bs58(account, data_slice_config);
UiAccountData::Binary(data, encoding)
}
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(account.data(), data_slice_config)),
base64::encode(slice_data(&account.data, data_slice_config)),
encoding,
),
UiAccountEncoding::Base64Zstd => {
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
match encoder
.write_all(slice_data(account.data(), data_slice_config))
.write_all(slice_data(&account.data, data_slice_config))
.and_then(|()| encoder.finish())
{
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
Err(_) => UiAccountData::Binary(
base64::encode(slice_data(account.data(), data_slice_config)),
base64::encode(slice_data(&account.data, data_slice_config)),
UiAccountEncoding::Base64,
),
}
}
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) =
parse_account_data(pubkey, account.owner(), account.data(), additional_data)
parse_account_data(pubkey, &account.owner, &account.data, additional_data)
{
UiAccountData::Json(parsed_data)
} else {
UiAccountData::Binary(
base64::encode(&account.data()),
UiAccountEncoding::Base64,
)
UiAccountData::Binary(base64::encode(&account.data), UiAccountEncoding::Base64)
}
}
};
UiAccount {
lamports: account.lamports(),
lamports: account.lamports,
data,
owner: account.owner().to_string(),
executable: account.executable(),
rent_epoch: account.rent_epoch(),
owner: account.owner.to_string(),
executable: account.executable,
rent_epoch: account.rent_epoch,
}
}
pub fn decode<T: WritableAccount>(&self) -> Option<T> {
pub fn decode(&self) -> Option<Account> {
let data = match &self.data {
UiAccountData::Json(_) => None,
UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(),
@@ -149,13 +125,13 @@ impl UiAccount {
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;
Some(T::create(
self.lamports,
Some(Account {
lamports: self.lamports,
data,
Pubkey::from_str(&self.owner).ok()?,
self.executable,
self.rent_epoch,
))
owner: Pubkey::from_str(&self.owner).ok()?,
executable: self.executable,
rent_epoch: self.rent_epoch,
})
}
}
@@ -181,7 +157,7 @@ impl Default for UiFeeCalculator {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
pub offset: usize,
@@ -204,10 +180,7 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::account::{Account, AccountSharedData},
};
use super::*;
#[test]
fn test_slice_data() {
@@ -241,10 +214,10 @@ mod test {
fn test_base64_zstd() {
let encoded_account = UiAccount::encode(
&Pubkey::default(),
&AccountSharedData::from(Account {
Account {
data: vec![0; 1024],
..Account::default()
}),
},
UiAccountEncoding::Base64Zstd,
None,
None,
@@ -254,9 +227,7 @@ mod test {
UiAccountData::Binary(_, UiAccountEncoding::Base64Zstd)
));
let decoded_account = encoded_account.decode::<Account>().unwrap();
assert_eq!(decoded_account.data(), &vec![0; 1024]);
let decoded_account = encoded_account.decode::<AccountSharedData>().unwrap();
assert_eq!(decoded_account.data(), &vec![0; 1024]);
let decoded_account = encoded_account.decode().unwrap();
assert_eq!(decoded_account.data, vec![0; 1024]);
}
}


@@ -1,34 +1,26 @@
use {
crate::{
parse_bpf_loader::parse_bpf_upgradeable_loader,
parse_config::parse_config,
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id},
parse_vote::parse_vote,
},
inflector::Inflector,
serde_json::Value,
solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
std::collections::HashMap,
thiserror::Error,
use crate::{
parse_config::parse_config,
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id_v2_0},
parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;
lazy_static! {
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
m.insert(
*BPF_UPGRADEABLE_LOADER_PROGRAM_ID,
ParsableAccount::BpfUpgradeableLoader,
);
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
@@ -68,7 +60,6 @@ pub struct ParsedAccount {
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ParsableAccount {
BpfUpgradeableLoader,
Config,
Nonce,
SplToken,
@@ -93,9 +84,6 @@ pub fn parse_account_data(
.ok_or(ParseAccountError::ProgramNotParsable)?;
let additional_data = additional_data.unwrap_or_default();
let parsed_json = match program_name {
ParsableAccount::BpfUpgradeableLoader => {
serde_json::to_value(parse_bpf_upgradeable_loader(data)?)?
}
ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
ParsableAccount::SplToken => {
@@ -114,14 +102,12 @@ pub fn parse_account_data(
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::nonce::{
state::{Data, Versions},
State,
},
solana_vote_program::vote_state::{VoteState, VoteStateVersions},
use super::*;
use solana_sdk::nonce::{
state::{Data, Versions},
State,
};
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
#[test]
fn test_parse_account_data() {


@@ -1,181 +0,0 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
UiAccountData, UiAccountEncoding,
},
bincode::{deserialize, serialized_size},
solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
};
pub fn parse_bpf_upgradeable_loader(
data: &[u8],
) -> Result<BpfUpgradeableLoaderAccountType, ParseAccountError> {
let account_state: UpgradeableLoaderState = deserialize(data).map_err(|_| {
ParseAccountError::AccountNotParsable(ParsableAccount::BpfUpgradeableLoader)
})?;
let parsed_account = match account_state {
UpgradeableLoaderState::Uninitialized => BpfUpgradeableLoaderAccountType::Uninitialized,
UpgradeableLoaderState::Buffer { authority_address } => {
let offset = if authority_address.is_some() {
UpgradeableLoaderState::buffer_data_offset().unwrap()
} else {
// This case included for code completeness; in practice, a Buffer account will
// always have authority_address.is_some()
UpgradeableLoaderState::buffer_data_offset().unwrap()
- serialized_size(&Pubkey::default()).unwrap() as usize
};
BpfUpgradeableLoaderAccountType::Buffer(UiBuffer {
authority: authority_address.map(|pubkey| pubkey.to_string()),
data: UiAccountData::Binary(
base64::encode(&data[offset as usize..]),
UiAccountEncoding::Base64,
),
})
}
UpgradeableLoaderState::Program {
programdata_address,
} => BpfUpgradeableLoaderAccountType::Program(UiProgram {
program_data: programdata_address.to_string(),
}),
UpgradeableLoaderState::ProgramData {
slot,
upgrade_authority_address,
} => {
let offset = if upgrade_authority_address.is_some() {
UpgradeableLoaderState::programdata_data_offset().unwrap()
} else {
UpgradeableLoaderState::programdata_data_offset().unwrap()
- serialized_size(&Pubkey::default()).unwrap() as usize
};
BpfUpgradeableLoaderAccountType::ProgramData(UiProgramData {
slot,
authority: upgrade_authority_address.map(|pubkey| pubkey.to_string()),
data: UiAccountData::Binary(
base64::encode(&data[offset as usize..]),
UiAccountEncoding::Base64,
),
})
}
};
Ok(parsed_account)
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum BpfUpgradeableLoaderAccountType {
Uninitialized,
Buffer(UiBuffer),
Program(UiProgram),
ProgramData(UiProgramData),
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiBuffer {
pub authority: Option<String>,
pub data: UiAccountData,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiProgram {
pub program_data: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiProgramData {
pub slot: u64,
pub authority: Option<String>,
pub data: UiAccountData,
}
#[cfg(test)]
mod test {
use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
#[test]
fn test_parse_bpf_upgradeable_loader_accounts() {
let bpf_loader_state = UpgradeableLoaderState::Uninitialized;
let account_data = serialize(&bpf_loader_state).unwrap();
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::Uninitialized
);
let program = vec![7u8; 64]; // Arbitrary program data
let authority = Pubkey::new_unique();
let bpf_loader_state = UpgradeableLoaderState::Buffer {
authority_address: Some(authority),
};
let mut account_data = serialize(&bpf_loader_state).unwrap();
account_data.extend_from_slice(&program);
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::Buffer(UiBuffer {
authority: Some(authority.to_string()),
data: UiAccountData::Binary(base64::encode(&program), UiAccountEncoding::Base64),
})
);
// This case included for code completeness; in practice, a Buffer account will always have
// authority_address.is_some()
let bpf_loader_state = UpgradeableLoaderState::Buffer {
authority_address: None,
};
let mut account_data = serialize(&bpf_loader_state).unwrap();
account_data.extend_from_slice(&program);
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::Buffer(UiBuffer {
authority: None,
data: UiAccountData::Binary(base64::encode(&program), UiAccountEncoding::Base64),
})
);
let programdata_address = Pubkey::new_unique();
let bpf_loader_state = UpgradeableLoaderState::Program {
programdata_address,
};
let account_data = serialize(&bpf_loader_state).unwrap();
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::Program(UiProgram {
program_data: programdata_address.to_string(),
})
);
let authority = Pubkey::new_unique();
let slot = 42;
let bpf_loader_state = UpgradeableLoaderState::ProgramData {
slot,
upgrade_authority_address: Some(authority),
};
let mut account_data = serialize(&bpf_loader_state).unwrap();
account_data.extend_from_slice(&program);
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::ProgramData(UiProgramData {
slot,
authority: Some(authority.to_string()),
data: UiAccountData::Binary(base64::encode(&program), UiAccountEncoding::Base64),
})
);
let bpf_loader_state = UpgradeableLoaderState::ProgramData {
slot,
upgrade_authority_address: None,
};
let mut account_data = serialize(&bpf_loader_state).unwrap();
account_data.extend_from_slice(&program);
assert_eq!(
parse_bpf_upgradeable_loader(&account_data).unwrap(),
BpfUpgradeableLoaderAccountType::ProgramData(UiProgramData {
slot,
authority: None,
data: UiAccountData::Binary(base64::encode(&program), UiAccountEncoding::Base64),
})
);
}
}

View File

@@ -1,19 +1,15 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
validator_info,
},
bincode::deserialize,
serde_json::Value,
solana_config_program::{get_config_data, ConfigKeys},
solana_sdk::{
pubkey::Pubkey,
stake::config::{self as stake_config, Config as StakeConfig},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
validator_info,
};
use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::config::Config as StakeConfig;
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
let parsed_account = if pubkey == &stake_config::id() {
let parsed_account = if pubkey == &solana_stake_program::config::id() {
get_config_data(data)
.ok()
.and_then(|data| deserialize::<StakeConfig>(data).ok())
@@ -41,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
where
T: serde::de::DeserializeOwned,
{
let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
let keys = keys
.iter()
.map(|key| UiConfigKey {
@@ -91,10 +87,10 @@ pub struct UiConfig<T> {
#[cfg(test)]
mod test {
use {
super::*, crate::validator_info::ValidatorInfo, serde_json::json,
solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
};
use super::*;
use crate::validator_info::ValidatorInfo;
use serde_json::json;
use solana_config_program::create_config_account;
#[test]
fn test_parse_config() {
@@ -104,7 +100,11 @@ mod test {
};
let stake_config_account = create_config_account(vec![], &stake_config, 10);
assert_eq!(
parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
parse_config(
&stake_config_account.data,
&solana_stake_program::config::id()
)
.unwrap(),
ConfigAccountType::StakeConfig(UiStakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,
@@ -124,7 +124,7 @@ mod test {
10,
);
assert_eq!(
parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(),
ConfigAccountType::ValidatorInfo(UiConfig {
keys: vec![
UiConfigKey {

View File

@@ -1,9 +1,7 @@
use {
crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
solana_sdk::{
instruction::InstructionError,
nonce::{state::Versions, State},
},
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
instruction::InstructionError,
nonce::{state::Versions, State},
};
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
@@ -11,13 +9,7 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
let nonce_state = nonce_state.convert_to_current();
match nonce_state {
// This prevents parsing an allocated System-owned account with empty data of any non-zero
// length as `uninitialized` nonce. An empty account of the wrong length can never be
// initialized as a nonce account, and an empty account of the correct length may not be an
// uninitialized nonce account, since it can be assigned to another program.
State::Uninitialized => Err(ParseAccountError::from(
InstructionError::InvalidAccountData,
)),
State::Uninitialized => Ok(UiNonceState::Uninitialized),
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
authority: data.authority.to_string(),
blockhash: data.blockhash.to_string(),
@@ -44,16 +36,14 @@ pub struct UiNonceData {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
hash::Hash,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
use super::*;
use solana_sdk::{
hash::Hash,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
};
#[test]

View File

@@ -1,14 +1,10 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
},
bincode::deserialize,
solana_sdk::{
clock::{Epoch, UnixTimestamp},
stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
};
use bincode::deserialize;
use solana_sdk::clock::{Epoch, UnixTimestamp};
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
let stake_state: StakeState = deserialize(data)
@@ -136,7 +132,8 @@ impl From<Delegation> for UiDelegation {
#[cfg(test)]
mod test {
use {super::*, bincode::serialize};
use super::*;
use bincode::serialize;
#[test]
fn test_parse_stake() {

View File

@@ -1,20 +1,18 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, UiFeeCalculator,
},
bincode::deserialize,
bv::BitVec,
solana_sdk::{
clock::{Clock, Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
pubkey::Pubkey,
rent::Rent,
slot_hashes::SlotHashes,
slot_history::{self, SlotHistory},
stake_history::{StakeHistory, StakeHistoryEntry},
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, UiFeeCalculator,
};
use bincode::deserialize;
use bv::BitVec;
use solana_sdk::{
clock::{Clock, Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
pubkey::Pubkey,
rent::Rent,
slot_hashes::SlotHashes,
slot_history::{self, SlotHistory},
stake_history::{StakeHistory, StakeHistoryEntry},
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
};
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
@@ -214,17 +212,15 @@ pub struct UiStakeHistoryEntry {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
sysvar::recent_blockhashes::IterItem,
},
use super::*;
use solana_sdk::{
account::create_account, fee_calculator::FeeCalculator, hash::Hash,
sysvar::recent_blockhashes::IterItem,
};
#[test]
fn test_parse_sysvars() {
let clock_sysvar = create_account_for_test(&Clock::default());
let clock_sysvar = create_account(&Clock::default(), 1);
assert_eq!(
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
SysvarAccountType::Clock(UiClock::default()),
@@ -237,13 +233,13 @@ mod test {
first_normal_epoch: 1,
first_normal_slot: 12,
};
let epoch_schedule_sysvar = create_account_for_test(&epoch_schedule);
let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
assert_eq!(
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
SysvarAccountType::EpochSchedule(epoch_schedule),
);
let fees_sysvar = create_account_for_test(&Fees::default());
let fees_sysvar = create_account(&Fees::default(), 1);
assert_eq!(
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
SysvarAccountType::Fees(UiFees::default()),
@@ -256,7 +252,7 @@ mod test {
let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
.into_iter()
.collect();
let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
assert_eq!(
parse_sysvar(
&recent_blockhashes_sysvar.data,
@@ -274,13 +270,13 @@ mod test {
exemption_threshold: 2.0,
burn_percent: 5,
};
let rent_sysvar = create_account_for_test(&rent);
let rent_sysvar = create_account(&rent, 1);
assert_eq!(
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
SysvarAccountType::Rent(rent.into()),
);
let rewards_sysvar = create_account_for_test(&Rewards::default());
let rewards_sysvar = create_account(&Rewards::default(), 1);
assert_eq!(
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
SysvarAccountType::Rewards(UiRewards::default()),
@@ -288,7 +284,7 @@ mod test {
let mut slot_hashes = SlotHashes::default();
slot_hashes.add(1, hash);
let slot_hashes_sysvar = create_account_for_test(&slot_hashes);
let slot_hashes_sysvar = create_account(&slot_hashes, 1);
assert_eq!(
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
@@ -299,7 +295,7 @@ mod test {
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let slot_history_sysvar = create_account_for_test(&slot_history);
let slot_history_sysvar = create_account(&slot_history, 1);
assert_eq!(
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
SysvarAccountType::SlotHistory(UiSlotHistory {
@@ -315,7 +311,7 @@ mod test {
deactivating: 3,
};
stake_history.add(1, stake_history_entry.clone());
let stake_history_sysvar = create_account_for_test(&stake_history);
let stake_history_sysvar = create_account(&stake_history, 1);
assert_eq!(
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {

View File

@@ -1,38 +1,36 @@
use {
crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, StringDecimals,
},
solana_sdk::pubkey::Pubkey,
spl_token::{
solana_program::{
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
},
state::{Account, AccountState, Mint, Multisig},
},
std::str::FromStr,
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
};
use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{
solana_program::{
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
},
state::{Account, AccountState, Mint, Multisig},
};
use std::str::FromStr;
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id() -> Pubkey {
Pubkey::new_from_array(spl_token::id().to_bytes())
pub fn spl_token_id_v2_0() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
}
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
pub fn spl_token_v2_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
}
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::new_from_array(pubkey.to_bytes())
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
}
// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
Pubkey::new_from_array(pubkey.to_bytes())
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
Pubkey::from_str(&pubkey.to_string()).unwrap()
}
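Both variants of these helpers are loss-free: the v1.5.2 side round-trips through the base58 string while the v1.8 side copies the raw 32 bytes, but the result is the same pubkey either way. A hypothetical round-trip sketch using the v1.8 names:

fn pubkey_round_trip_demo() {
    let pk = solana_sdk::pubkey::Pubkey::new_unique();
    let spl = spl_token_pubkey(&pk); // solana_sdk -> spl_token pubkey
    let back = pubkey_from_spl_token(&spl); // spl_token -> solana_sdk pubkey
    assert_eq!(pk, back); // byte-for-byte identical in both directions
}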
pub fn parse_token(
@@ -160,66 +158,46 @@ impl From<AccountState> for UiAccountState {
}
}
pub fn real_number_string(amount: u64, decimals: u8) -> StringDecimals {
let decimals = decimals as usize;
if decimals > 0 {
// Left-pad zeros to decimals + 1, so we at least have an integer zero
let mut s = format!("{:01$}", amount, decimals + 1);
// Add the decimal point (Sorry, "," locales!)
s.insert(s.len() - decimals, '.');
s
} else {
amount.to_string()
}
}
pub fn real_number_string_trimmed(amount: u64, decimals: u8) -> StringDecimals {
let mut s = real_number_string(amount, decimals);
if decimals > 0 {
let zeros_trimmed = s.trim_end_matches('0');
s = zeros_trimmed.trim_end_matches('.').to_string();
}
s
}
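A few worked values, mirroring the tests further down, make the padding and trimming behavior concrete:

assert_eq!(&real_number_string(1, 9), "0.000000001"); // zero-padded to decimals + 1 digits
assert_eq!(&real_number_string(1_234_567_890, 3), "1234567.890");
assert_eq!(&real_number_string_trimmed(1_234_567_890, 3), "1234567.89"); // trailing zeros trimmed
assert_eq!(&real_number_string_trimmed(1_000_000_000, 9), "1"); // the trailing "." is trimmed too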
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiTokenAmount {
pub ui_amount: Option<f64>,
pub ui_amount: f64,
pub decimals: u8,
pub amount: StringAmount,
pub ui_amount_string: StringDecimals,
}
impl UiTokenAmount {
pub fn real_number_string(&self) -> String {
real_number_string(
u64::from_str(&self.amount).unwrap_or_default(),
self.decimals as u8,
)
let decimals = self.decimals as usize;
if decimals > 0 {
let amount = u64::from_str(&self.amount).unwrap_or(0);
// Left-pad zeros to decimals + 1, so we at least have an integer zero
let mut s = format!("{:01$}", amount, decimals + 1);
// Add the decimal point (Sorry, "," locales!)
s.insert(s.len() - decimals, '.');
s
} else {
self.amount.clone()
}
}
pub fn real_number_string_trimmed(&self) -> String {
if !self.ui_amount_string.is_empty() {
self.ui_amount_string.clone()
} else {
real_number_string_trimmed(
u64::from_str(&self.amount).unwrap_or_default(),
self.decimals as u8,
)
}
let s = self.real_number_string();
let zeros_trimmed = s.trim_end_matches('0');
let decimal_trimmed = zeros_trimmed.trim_end_matches('.');
decimal_trimmed.to_string()
}
}
pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
let amount_decimals = 10_usize
.checked_pow(decimals as u32)
.map(|dividend| amount as f64 / dividend as f64);
// Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
UiTokenAmount {
ui_amount: amount_decimals,
decimals,
amount: amount.to_string(),
ui_amount_string: real_number_string_trimmed(amount, decimals),
}
}
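Note the behavioral difference between the two versions above: the v1.5.2 code computes `10_usize.pow(decimals as u32)` directly, which overflows once `decimals` is large enough, while the v1.8 `checked_pow` degrades to `ui_amount: None`. On a 64-bit target, for instance:

let t = token_amount_to_ui_amount(42, 2);
assert_eq!(t.ui_amount, Some(0.42));
let t = token_amount_to_ui_amount(0, 20); // 10^20 > u64::MAX, so checked_pow yields None
assert_eq!(t.ui_amount, None);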
@@ -275,10 +253,9 @@ mod test {
mint: mint_pubkey.to_string(),
owner: owner_pubkey.to_string(),
token_amount: UiTokenAmount {
ui_amount: Some(0.42),
ui_amount: 0.42,
decimals: 2,
amount: "42".to_string(),
ui_amount_string: "0.42".to_string()
amount: "42".to_string()
},
delegate: None,
state: UiAccountState::Initialized,
@@ -359,87 +336,17 @@ mod test {
#[test]
fn test_ui_token_amount_real_string() {
assert_eq!(&real_number_string(1, 0), "1");
assert_eq!(&real_number_string_trimmed(1, 0), "1");
let token_amount = token_amount_to_ui_amount(1, 0);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(1, 0)
);
assert_eq!(token_amount.ui_amount, Some(1.0));
assert_eq!(&real_number_string(10, 0), "10");
assert_eq!(&real_number_string_trimmed(10, 0), "10");
let token_amount = token_amount_to_ui_amount(10, 0);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(10, 0)
);
assert_eq!(token_amount.ui_amount, Some(10.0));
assert_eq!(&real_number_string(1, 9), "0.000000001");
assert_eq!(&real_number_string_trimmed(1, 9), "0.000000001");
assert_eq!(&token_amount.real_number_string(), "1");
assert_eq!(&token_amount.real_number_string_trimmed(), "1");
let token_amount = token_amount_to_ui_amount(1, 9);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(1, 9)
);
assert_eq!(token_amount.ui_amount, Some(0.000000001));
assert_eq!(&real_number_string(1_000_000_000, 9), "1.000000000");
assert_eq!(&real_number_string_trimmed(1_000_000_000, 9), "1");
assert_eq!(&token_amount.real_number_string(), "0.000000001");
assert_eq!(&token_amount.real_number_string_trimmed(), "0.000000001");
let token_amount = token_amount_to_ui_amount(1_000_000_000, 9);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(1_000_000_000, 9)
);
assert_eq!(token_amount.ui_amount, Some(1.0));
assert_eq!(&real_number_string(1_234_567_890, 3), "1234567.890");
assert_eq!(&real_number_string_trimmed(1_234_567_890, 3), "1234567.89");
assert_eq!(&token_amount.real_number_string(), "1.000000000");
assert_eq!(&token_amount.real_number_string_trimmed(), "1");
let token_amount = token_amount_to_ui_amount(1_234_567_890, 3);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(1_234_567_890, 3)
);
assert_eq!(token_amount.ui_amount, Some(1234567.89));
assert_eq!(
&real_number_string(1_234_567_890, 25),
"0.0000000000000001234567890"
);
assert_eq!(
&real_number_string_trimmed(1_234_567_890, 25),
"0.000000000000000123456789"
);
let token_amount = token_amount_to_ui_amount(1_234_567_890, 20);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(1_234_567_890, 20)
);
assert_eq!(token_amount.ui_amount, None);
}
#[test]
fn test_ui_token_amount_real_string_zero() {
assert_eq!(&real_number_string(0, 0), "0");
assert_eq!(&real_number_string_trimmed(0, 0), "0");
let token_amount = token_amount_to_ui_amount(0, 0);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 0)
);
assert_eq!(token_amount.ui_amount, Some(0.0));
assert_eq!(&real_number_string(0, 9), "0.000000000");
assert_eq!(&real_number_string_trimmed(0, 9), "0");
let token_amount = token_amount_to_ui_amount(0, 9);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 9)
);
assert_eq!(token_amount.ui_amount, Some(0.0));
assert_eq!(&real_number_string(0, 25), "0.0000000000000000000000000");
assert_eq!(&real_number_string_trimmed(0, 25), "0");
let token_amount = token_amount_to_ui_amount(0, 20);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 20)
);
assert_eq!(token_amount.ui_amount, None);
assert_eq!(&token_amount.real_number_string(), "1234567.890");
assert_eq!(&token_amount.real_number_string_trimmed(), "1234567.89");
}
}

View File

@@ -1,11 +1,9 @@
use {
crate::{parse_account_data::ParseAccountError, StringAmount},
solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
},
solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
@@ -123,7 +121,8 @@ struct UiEpochCredits {
#[cfg(test)]
mod test {
use {super::*, solana_vote_program::vote_state::VoteStateVersions};
use super::*;
use solana_vote_program::vote_state::VoteStateVersions;
#[test]
fn test_parse_vote() {

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.8.17"
version = "1.5.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,12 +10,12 @@ publish = false
[dependencies]
log = "0.4.11"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.5.2" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-measure = { path = "../measure", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-version = { path = "../version", version = "1.5.2" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

View File

@@ -1,19 +1,14 @@
#![allow(clippy::integer_arithmetic)]
#[macro_use]
extern crate log;
use {
clap::{crate_description, crate_name, value_t, App, Arg},
rayon::prelude::*,
solana_measure::measure::Measure,
solana_runtime::{
accounts::{create_test_accounts, update_accounts_bench, Accounts},
accounts_db::AccountShrinkThreshold,
accounts_index::AccountSecondaryIndexes,
ancestors::Ancestors,
},
solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
std::{env, fs, path::PathBuf},
use clap::{crate_description, crate_name, value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
use std::env;
use std::fs;
use std::path::PathBuf;
fn main() {
solana_logger::setup();
@@ -58,18 +53,10 @@ fn main() {
let path = PathBuf::from(env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_owned()))
.join("accounts-bench");
println!("cleaning file system: {:?}", path);
if fs::remove_dir_all(path.clone()).is_err() {
println!("Warning: Couldn't remove {:?}", path);
}
let accounts = Accounts::new_with_config(
vec![path],
&ClusterType::Testnet,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
None,
);
let accounts = Accounts::new(vec![path], &ClusterType::Testnet);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
let pubkeys: Vec<_> = (0..num_slots)
@@ -93,61 +80,31 @@ fn main() {
num_slots,
create_time
);
let mut ancestors = Vec::with_capacity(num_slots);
ancestors.push(0);
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.push(i as u64);
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
}
let ancestors = Ancestors::from(ancestors);
let mut elapsed = vec![0; iterations];
let mut elapsed_store = vec![0; iterations];
for x in 0..iterations {
if clean {
let mut time = Measure::start("clean");
accounts.accounts_db.clean_accounts(None, false);
accounts.accounts_db.clean_accounts(None);
time.stop();
println!("{}", time);
for slot in 0..num_slots {
update_accounts_bench(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
accounts.add_root((x * num_slots + slot) as u64);
}
} else {
let mut pubkeys: Vec<Pubkey> = vec![];
let mut time = Measure::start("hash");
let results = accounts.accounts_db.update_accounts_hash(0, &ancestors);
let hash = accounts
.accounts_db
.update_accounts_hash(0, &ancestors, true)
.0;
time.stop();
let mut time_store = Measure::start("hash using store");
let results_store = accounts.accounts_db.update_accounts_hash_with_index_option(
false,
false,
solana_sdk::clock::Slot::default(),
&ancestors,
None,
false,
None,
);
time_store.stop();
if results != results_store {
error!("results different: \n{:?}\n{:?}", results, results_store);
}
println!(
"hash,{},{},{},{}%",
results.0,
time,
time_store,
(time_store.as_us() as f64 / time.as_us() as f64 * 100.0f64) as u32
);
println!("hash: {} {}", hash, time);
create_test_accounts(&accounts, &mut pubkeys, 1, 0);
elapsed[x] = time.as_us();
elapsed_store[x] = time_store.as_us();
}
}
for x in elapsed {
info!("update_accounts_hash(us),{}", x);
}
for x in elapsed_store {
info!("calculate_accounts_hash_without_index(us),{}", x);
}
}

View File

@@ -1 +0,0 @@
/farf/

View File

@@ -1,36 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-cluster-bench"
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.4.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,753 +0,0 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_account_decoder::parse_token::spl_token_pubkey,
solana_clap_utils::input_parsers::pubkey_of,
solana_client::rpc_client::RpcClient,
solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
solana_gossip::gossip_service::discover,
solana_measure::measure::Measure,
solana_runtime::inline_spl_token,
solana_sdk::{
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
rpc_port::DEFAULT_RPC_PORT,
signature::{read_keypair_file, Keypair, Signature, Signer},
system_instruction, system_program,
timing::timestamp,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::parse_token::spl_token_instruction,
std::{
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
},
};
// Create and close messages both require 2 signatures; if transaction construction changes, update
// this magic number
const NUM_SIGNATURES: u64 = 2;
pub fn airdrop_lamports(
client: &RpcClient,
faucet_addr: &SocketAddr,
id: &Keypair,
desired_balance: u64,
) -> bool {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
info!("starting balance {}", starting_balance);
if starting_balance < desired_balance {
let airdrop_amount = desired_balance - starting_balance;
info!(
"Airdropping {:?} lamports from {} for {}",
airdrop_amount,
faucet_addr,
id.pubkey(),
);
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
tries += 1;
let result = client.send_and_confirm_transaction(&transaction);
if result.is_ok() {
break;
}
if tries >= 5 {
panic!(
"Error requesting airdrop: to addr: {:?} amount: {} {:?}",
faucet_addr, airdrop_amount, result
)
}
}
}
Err(err) => {
panic!(
"Error requesting airdrop: {:?} to addr: {:?} amount: {}",
err, faucet_addr, airdrop_amount
);
}
};
let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
panic!("airdrop error {}", e);
});
info!("current balance {}...", current_balance);
if current_balance - starting_balance != airdrop_amount {
info!(
"Airdrop failed? {} {} {} {}",
id.pubkey(),
current_balance,
starting_balance,
airdrop_amount,
);
}
}
true
}
// signature, timestamp, id
type PendingQueue = Vec<(Signature, u64, u64)>;
struct TransactionExecutor {
sig_clear_t: JoinHandle<()>,
sigs: Arc<RwLock<PendingQueue>>,
cleared: Arc<RwLock<Vec<u64>>>,
exit: Arc<AtomicBool>,
counter: AtomicU64,
client: RpcClient,
}
impl TransactionExecutor {
fn new(entrypoint_addr: SocketAddr) -> Self {
let sigs = Arc::new(RwLock::new(Vec::new()));
let cleared = Arc::new(RwLock::new(Vec::new()));
let exit = Arc::new(AtomicBool::new(false));
let sig_clear_t = Self::start_sig_clear_thread(&exit, &sigs, &cleared, entrypoint_addr);
let client =
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
Self {
sigs,
cleared,
sig_clear_t,
exit,
counter: AtomicU64::new(0),
client,
}
}
fn num_outstanding(&self) -> usize {
self.sigs.read().unwrap().len()
}
fn push_transactions(&self, txs: Vec<Transaction>) -> Vec<u64> {
let mut ids = vec![];
let new_sigs = txs.into_iter().filter_map(|tx| {
let id = self.counter.fetch_add(1, Ordering::Relaxed);
ids.push(id);
match self.client.send_transaction(&tx) {
Ok(sig) => {
return Some((sig, timestamp(), id));
}
Err(e) => {
info!("error: {:#?}", e);
}
}
None
});
let mut sigs_w = self.sigs.write().unwrap();
sigs_w.extend(new_sigs);
ids
}
fn drain_cleared(&self) -> Vec<u64> {
std::mem::take(&mut *self.cleared.write().unwrap())
}
fn close(self) {
self.exit.store(true, Ordering::Relaxed);
self.sig_clear_t.join().unwrap();
}
fn start_sig_clear_thread(
exit: &Arc<AtomicBool>,
sigs: &Arc<RwLock<PendingQueue>>,
cleared: &Arc<RwLock<Vec<u64>>>,
entrypoint_addr: SocketAddr,
) -> JoinHandle<()> {
let sigs = sigs.clone();
let exit = exit.clone();
let cleared = cleared.clone();
Builder::new()
.name("sig_clear".to_string())
.spawn(move || {
let client = RpcClient::new_socket_with_commitment(
entrypoint_addr,
CommitmentConfig::confirmed(),
);
let mut success = 0;
let mut error_count = 0;
let mut timed_out = 0;
let mut last_log = Instant::now();
while !exit.load(Ordering::Relaxed) {
let sigs_len = sigs.read().unwrap().len();
if sigs_len > 0 {
let mut sigs_w = sigs.write().unwrap();
let mut start = Measure::start("sig_status");
let statuses: Vec<_> = sigs_w
.chunks(200)
.flat_map(|sig_chunk| {
let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
client
.get_signature_statuses(&only_sigs)
.expect("status fail")
.value
})
.collect();
let mut num_cleared = 0;
let start_len = sigs_w.len();
let now = timestamp();
let mut new_ids = vec![];
let mut i = 0;
let mut j = 0;
while i != sigs_w.len() {
let mut retain = true;
let sent_ts = sigs_w[i].1;
if let Some(e) = &statuses[j] {
debug!("error: {:?}", e);
if e.status.is_ok() {
success += 1;
} else {
error_count += 1;
}
num_cleared += 1;
retain = false;
} else if now - sent_ts > 30_000 {
retain = false;
timed_out += 1;
}
if !retain {
new_ids.push(sigs_w.remove(i).2);
} else {
i += 1;
}
j += 1;
}
let final_sigs_len = sigs_w.len();
drop(sigs_w);
cleared.write().unwrap().extend(new_ids);
start.stop();
debug!(
"sigs len: {:?} success: {} took: {}ms cleared: {}/{}",
final_sigs_len,
success,
start.as_ms(),
num_cleared,
start_len,
);
if last_log.elapsed().as_millis() > 5000 {
info!(
"success: {} error: {} timed_out: {}",
success, error_count, timed_out,
);
last_log = Instant::now();
}
}
sleep(Duration::from_millis(200));
}
})
.unwrap()
}
}
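The intended driving pattern is implicit in the methods above: push signed transactions, periodically drain the ids that the sig_clear thread has marked confirmed, errored, or timed out (after 30 seconds), then close. A hypothetical usage sketch, assuming every send succeeds (an id whose send fails is never cleared):

let executor = TransactionExecutor::new(entrypoint_addr);
let ids = executor.push_transactions(txs);
let mut done = 0;
while done < ids.len() {
    done += executor.drain_cleared().len(); // ids resolved by the sig_clear thread
    std::thread::sleep(std::time::Duration::from_millis(200));
}
executor.close(); // sets the exit flag and joins sig_clear_t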
struct SeedTracker {
max_created: Arc<AtomicU64>,
max_closed: Arc<AtomicU64>,
}
fn make_create_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_created_seed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
maybe_space: Option<u64>,
mint: Option<Pubkey>,
) -> Message {
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
let program_id = if mint.is_some() {
inline_spl_token::id()
} else {
system_program::id()
};
let seed = max_created_seed.fetch_add(1, Ordering::Relaxed).to_string();
let to_pubkey =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
let mut instructions = vec![system_instruction::create_account_with_seed(
&keypair.pubkey(),
&to_pubkey,
&base_keypair.pubkey(),
&seed,
balance,
space,
&program_id,
)];
if let Some(mint_address) = mint {
instructions.push(spl_token_instruction(
spl_token::instruction::initialize_account(
&spl_token::id(),
&spl_token_pubkey(&to_pubkey),
&spl_token_pubkey(&mint_address),
&spl_token_pubkey(&base_keypair.pubkey()),
)
.unwrap(),
));
}
instructions
})
.collect();
let instructions: Vec<_> = instructions.into_iter().flatten().collect();
Message::new(&instructions, Some(&keypair.pubkey()))
}
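The addresses created here are derived, not random: `Pubkey::create_with_seed` hashes the base pubkey, the seed string, and the owning program id, so `make_close_message` below can re-derive exactly the accounts this function created by replaying the same seed counter. For example:

let base = Keypair::new();
let a = Pubkey::create_with_seed(&base.pubkey(), "0", &system_program::id()).unwrap();
let b = Pubkey::create_with_seed(&base.pubkey(), "0", &system_program::id()).unwrap();
assert_eq!(a, b); // same base + seed + owner => same address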
fn make_close_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_closed_seed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
let program_id = if spl_token {
inline_spl_token::id()
} else {
system_program::id()
};
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_instruction(
spl_token::instruction::close_account(
&spl_token::id(),
&spl_token_pubkey(&address),
&spl_token_pubkey(&keypair.pubkey()),
&spl_token_pubkey(&base_keypair.pubkey()),
&[],
)
.unwrap(),
)
} else {
system_instruction::transfer_with_seed(
&address,
&base_keypair.pubkey(),
seed,
&program_id,
&keypair.pubkey(),
balance,
)
}
})
.collect();
Message::new(&instructions, Some(&keypair.pubkey()))
}
#[allow(clippy::too_many_arguments)]
fn run_accounts_bench(
entrypoint_addr: SocketAddr,
faucet_addr: SocketAddr,
payer_keypairs: &[&Keypair],
iterations: usize,
maybe_space: Option<u64>,
batch_size: usize,
close_nth_batch: u64,
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
) {
assert!(num_instructions > 0);
let client =
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
info!("Targeting {}", entrypoint_addr);
let mut last_blockhash = Instant::now();
let mut last_log = Instant::now();
let mut count = 0;
let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash");
let mut tx_sent_count = 0;
let mut total_accounts_created = 0;
let mut total_accounts_closed = 0;
let mut balances: Vec<_> = payer_keypairs
.iter()
.map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0))
.collect();
let mut last_balance = Instant::now();
let default_max_lamports = 1000;
let min_balance = maybe_lamports.unwrap_or_else(|| {
let space = maybe_space.unwrap_or(default_max_lamports);
client
.get_minimum_balance_for_rent_exemption(space as usize)
.expect("min balance")
});
let base_keypair = Keypair::new();
let seed_tracker = SeedTracker {
max_created: Arc::new(AtomicU64::default()),
max_closed: Arc::new(AtomicU64::default()),
};
info!("Starting balance(s): {:?}", balances);
let executor = TransactionExecutor::new(entrypoint_addr);
loop {
if last_blockhash.elapsed().as_millis() > 10_000 {
recent_blockhash = client.get_recent_blockhash().expect("blockhash");
last_blockhash = Instant::now();
}
let fee = recent_blockhash
.1
.lamports_per_signature
.saturating_mul(NUM_SIGNATURES);
let lamports = min_balance + fee;
for (i, balance) in balances.iter_mut().enumerate() {
if *balance < lamports || last_balance.elapsed().as_millis() > 2000 {
if let Ok(b) = client.get_balance(&payer_keypairs[i].pubkey()) {
*balance = b;
}
last_balance = Instant::now();
if *balance < lamports * 2 {
info!(
"Balance {} is less than needed: {}, doing aidrop...",
balance, lamports
);
if !airdrop_lamports(
&client,
&faucet_addr,
payer_keypairs[i],
lamports * 100_000,
) {
warn!("failed airdrop, exiting");
return;
}
}
}
}
// Create accounts
let sigs_len = executor.num_outstanding();
if sigs_len < batch_size {
let num_to_create = batch_size - sigs_len;
if num_to_create >= payer_keypairs.len() {
info!("creating {} new", num_to_create);
let chunk_size = num_to_create / payer_keypairs.len();
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
.into_par_iter()
.map(|_| {
let message = make_create_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
num_instructions,
min_balance,
maybe_space,
mint,
);
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
info!("txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_created += num_instructions * new_ids.len();
}
}
}
if close_nth_batch > 0 {
let num_batches_to_close =
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
let expected_closed = num_batches_to_close * batch_size as u64;
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
// Close every account we've created with seed between max_closed_seed..expected_closed
if max_closed_seed < expected_closed {
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
.into_par_iter()
.map(|_| {
let message = make_close_message(
payer_keypairs[0],
&base_keypair,
seed_tracker.max_closed.clone(),
1,
min_balance,
mint.is_some(),
);
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_closed += new_ids.len() as u64;
}
}
} else {
let _ = executor.drain_cleared();
}
count += 1;
if last_log.elapsed().as_millis() > 3000 {
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
);
last_log = Instant::now();
}
if iterations != 0 && count >= iterations {
break;
}
if executor.num_outstanding() >= batch_size {
sleep(Duration::from_millis(500));
}
}
executor.close();
}
fn main() {
solana_logger::setup_with_default("solana=info");
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("entrypoint")
.long("entrypoint")
.takes_value(true)
.value_name("HOST:PORT")
.help("RPC entrypoint address. Usually <ip>:8899"),
)
.arg(
Arg::with_name("faucet_addr")
.long("faucet")
.takes_value(true)
.value_name("HOST:PORT")
.help("Faucet entrypoint address. Usually <ip>:9900"),
)
.arg(
Arg::with_name("space")
.long("space")
.takes_value(true)
.value_name("BYTES")
.help("Size of accounts to create"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(true)
.value_name("LAMPORTS")
.help("How many lamports to fund each account"),
)
.arg(
Arg::with_name("identity")
.long("identity")
.takes_value(true)
.multiple(true)
.value_name("FILE")
.help("keypair file"),
)
.arg(
Arg::with_name("batch_size")
.long("batch-size")
.takes_value(true)
.value_name("BYTES")
.help("Number of transactions to send per batch"),
)
.arg(
Arg::with_name("close_nth_batch")
.long("close-frequency")
.takes_value(true)
.value_name("BYTES")
.help(
"Every `n` batches, create a batch of close transactions for
the earliest remaining batch of accounts created.
Note: Should be > 1 to avoid situations where the close \
transactions will be submitted before the corresponding \
create transactions have been confirmed",
),
)
.arg(
Arg::with_name("num_instructions")
.long("num-instructions")
.takes_value(true)
.value_name("NUM")
.help("Number of accounts to create on each transaction"),
)
.arg(
Arg::with_name("iterations")
.long("iterations")
.takes_value(true)
.value_name("NUM")
.help("Number of iterations to make"),
)
.arg(
Arg::with_name("check_gossip")
.long("check-gossip")
.help("Just use entrypoint address directly"),
)
.arg(
Arg::with_name("mint")
.long("mint")
.takes_value(true)
.help("Mint address to initialize account"),
)
.get_matches();
let skip_gossip = !matches.is_present("check_gossip");
let port = if skip_gossip { DEFAULT_RPC_PORT } else { 8001 };
let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], port));
if let Some(addr) = matches.value_of("entrypoint") {
entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
}
let mut faucet_addr = SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT));
if let Some(addr) = matches.value_of("faucet_addr") {
faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
}
let space = value_t!(matches, "space", u64).ok();
let lamports = value_t!(matches, "lamports", u64).ok();
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
eprintln!("bad num_instructions: {}", num_instructions);
exit(1);
}
let mint = pubkey_of(&matches, "mint");
let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String)
.iter()
.map(|keypair_string| {
read_keypair_file(keypair_string)
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string))
})
.collect();
let mut payer_keypair_refs: Vec<&Keypair> = vec![];
for keypair in payer_keypairs.iter() {
payer_keypair_refs.push(keypair);
}
let rpc_addr = if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
exit(1);
});
info!("done found {} nodes", gossip_nodes.len());
gossip_nodes[0].rpc
} else {
info!("Using {:?} as the RPC address", entrypoint_addr);
entrypoint_addr
};
run_accounts_bench(
rpc_addr,
faucet_addr,
&payer_keypair_refs,
iterations,
space,
batch_size,
close_nth_batch,
lamports,
num_instructions,
mint,
);
}
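Putting the flags together, a hypothetical invocation against a local validator and faucet might look like the following (binary name assumed from the package name):

solana-accounts-cluster-bench \
    --entrypoint 127.0.0.1:8899 \
    --faucet 127.0.0.1:9900 \
    --identity payer.json \
    --batch-size 4 \
    --iterations 10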
#[cfg(test)]
pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::poh_config::PohConfig,
};
#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default_for_test();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
poh_config: PohConfig::new_sleep(Duration::from_millis(50)),
node_stakes: vec![100; num_nodes],
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let iterations = 10;
let maybe_space = None;
let batch_size = 100;
let close_nth_batch = 100;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
run_accounts_bench(
cluster.entry_point_info.rpc,
faucet_addr,
&[&cluster.funding_keypair],
iterations,
maybe_space,
batch_size,
close_nth_batch,
maybe_lamports,
num_instructions,
None,
);
start.stop();
info!("{}", start);
}
}

View File

@@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana AccountsDb Plugin Interface
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait; see `accountsdb_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the account data into an
external PostgreSQL database.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)

View File

@@ -1,143 +0,0 @@
/// The interface for AccountsDb plugins. A plugin must implement
/// the AccountsDbPlugin trait to work with the runtime.
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
std::{any::Any, error, io},
thiserror::Error,
};
impl Eq for ReplicaAccountInfo<'_> {}
#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
/// The Pubkey for the account
pub pubkey: &'a [u8],
/// The lamports for the account
pub lamports: u64,
/// The Pubkey of the owner program account
pub owner: &'a [u8],
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: u64,
/// The data held in this account.
pub data: &'a [u8],
/// A global monotonically increasing atomic number, which can be used
/// to tell the order of the account update. For example, when an
/// account is updated in the same slot multiple times, the update
/// with higher write_version should supersede the one with lower
/// write_version.
pub write_version: u64,
}
/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
/// there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}
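Because plugins match on this enum exhaustively, adding a hypothetical V0_0_2 variant later would surface as a compile error in every implementation rather than as silent misbehavior. A sketch of the expected consumption pattern:

fn account_pubkey<'a>(versions: &ReplicaAccountInfoVersions<'a>) -> &'a [u8] {
    match versions {
        ReplicaAccountInfoVersions::V0_0_1(info) => info.pubkey,
    }
}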
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
ConfigFileOpenError(#[from] io::Error),
/// Error in reading the content of the config file or the content
/// is not in the expected format.
#[error("Error reading config file. Error message: ({msg})")]
ConfigFileReadError { msg: String },
/// Error when updating the account.
#[error("Error updating account. Error message: ({msg})")]
AccountsUpdateError { msg: String },
/// Error when updating the slot status
#[error("Error updating slot status. Error message: ({msg})")]
SlotStatusUpdateError { msg: String },
/// Any custom error defined by the plugin.
#[error("Plugin-defined custom error. Error message: ({0})")]
Custom(Box<dyn error::Error + Send + Sync>),
}
/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
/// the fork the validator believes is most likely to finalize.
Processed,
/// The highest slot having reached max vote lockout.
Rooted,
/// The highest slot that has been voted on by a supermajority of the cluster, i.e. is confirmed.
Confirmed,
}
impl SlotStatus {
pub fn as_str(&self) -> &'static str {
match self {
SlotStatus::Confirmed => "confirmed",
SlotStatus::Processed => "processed",
SlotStatus::Rooted => "rooted",
}
}
}
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
/// The callback called when a plugin is loaded by the system,
/// used for doing whatever initialization is required by the plugin.
/// The _config_file parameter contains the name of the config file.
/// The config must be in JSON format and
/// include a field "libpath" indicating the full path
/// name of the shared library implementing this interface.
fn on_load(&mut self, _config_file: &str) -> Result<()> {
Ok(())
}
/// The callback called right before a plugin is unloaded by the system,
/// used for doing any necessary cleanup before unload.
fn on_unload(&mut self) {}
/// Called when an account is updated at a slot.
/// When `is_startup` is true, it indicates the account is loaded from
/// snapshots when the validator starts up. When `is_startup` is false,
/// the account is updated during transaction processing.
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()>;
/// Called once all accounts have been notified during the startup restore from snapshots.
fn notify_end_of_startup(&mut self) -> Result<()>;
/// Called when a slot status is updated
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()>;
}
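To make the contract concrete, here is a minimal no-op sketch (hypothetical, not part of this crate) of a plugin implementing the trait, together with the `_create_plugin` constructor that the loader expects the `cdylib` to export:

#[derive(Debug)]
struct NoopPlugin;

impl AccountsDbPlugin for NoopPlugin {
    fn name(&self) -> &'static str {
        "noop"
    }
    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        _slot: u64,
        _is_startup: bool,
    ) -> Result<()> {
        Ok(()) // a real plugin would write the account to its data store here
    }
    fn notify_end_of_startup(&mut self) -> Result<()> {
        Ok(())
    }
    fn update_slot_status(
        &mut self,
        _slot: u64,
        _parent: Option<u64>,
        _status: SlotStatus,
    ) -> Result<()> {
        Ok(())
    }
}

#[no_mangle]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    Box::into_raw(Box::new(NoopPlugin)) // ownership passes to the loader's Box::from_raw
}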

View File

@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;

View File

@@ -1,30 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.4"
libloading = "0.7.0"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-rpc = { path = "../rpc", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,227 +0,0 @@
/// Module responsible for notifying plugins of account updates
use {
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::{
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
append_vec::{StoredAccountMeta, StoredMeta},
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,
},
std::sync::{Arc, RwLock},
};
#[derive(Debug)]
pub(crate) struct AccountsUpdateNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
self.notify_plugins_of_account_update(account_info, slot, false);
}
}
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
let account = self.accountinfo_from_stored_account_meta(account);
measure_copy.stop();
inc_new_counter_debug!(
"accountsdb-plugin-copy-stored-account-info-us",
measure_copy.as_us() as usize,
100000,
100000
);
if let Some(account_info) = account {
self.notify_plugins_of_account_update(account_info, slot, true);
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify-account-restore-all-us",
measure_all.as_us() as usize,
100000,
100000
);
}
fn notify_end_of_restore_from_snapshot(&self) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
match plugin.notify_end_of_startup() {
Err(err) => {
error!(
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully notified the end of restore from snapshot to plugin {}",
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-end-of-restore-from-snapshot",
measure.as_us() as usize
);
}
}
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
}
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Processed);
}
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
}
}
impl AccountsUpdateNotifierImpl {
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
AccountsUpdateNotifierImpl { plugin_manager }
}
fn accountinfo_from_shared_account_data<'a>(
&self,
meta: &'a StoredMeta,
account: &'a AccountSharedData,
) -> Option<ReplicaAccountInfo<'a>> {
Some(ReplicaAccountInfo {
pubkey: meta.pubkey.as_ref(),
lamports: account.lamports(),
owner: account.owner().as_ref(),
executable: account.executable(),
rent_epoch: account.rent_epoch(),
data: account.data(),
write_version: meta.write_version,
})
}
fn accountinfo_from_stored_account_meta<'a>(
&self,
stored_account_meta: &'a StoredAccountMeta,
) -> Option<ReplicaAccountInfo<'a>> {
Some(ReplicaAccountInfo {
pubkey: stored_account_meta.meta.pubkey.as_ref(),
lamports: stored_account_meta.account_meta.lamports,
owner: stored_account_meta.account_meta.owner.as_ref(),
executable: stored_account_meta.account_meta.executable,
rent_epoch: stored_account_meta.account_meta.rent_epoch,
data: stored_account_meta.data,
write_version: stored_account_meta.meta.write_version,
})
}
fn notify_plugins_of_account_update(
&self,
account: ReplicaAccountInfo,
slot: Slot,
is_startup: bool,
) {
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-account");
match plugin.update_account(
ReplicaAccountInfoVersions::V0_0_1(&account),
slot,
is_startup,
) {
Err(err) => {
error!(
"Failed to update account {} at slot {}, error: {} to plugin {}",
bs58::encode(account.pubkey).into_string(),
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated account {} at slot {} to plugin {}",
bs58::encode(account.pubkey).into_string(),
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-account-us",
measure.as_us() as usize,
100000,
100000
);
}
measure2.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify_plugins_of_account_update-us",
measure2.as_us() as usize,
100000,
100000
);
}
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-slot");
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
Err(err) => {
error!(
"Failed to update slot status at slot {}, error: {} to plugin {}",
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated slot status at slot {} to plugin {}",
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-slot-us",
measure.as_us() as usize,
1000,
1000
);
}
}
}

View File

@@ -1,55 +0,0 @@
/// Managing the AccountsDb plugins
use {
libloading::{Library, Symbol},
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
std::error::Error,
};
#[derive(Default, Debug)]
pub struct AccountsDbPluginManager {
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
libs: Vec<Library>,
}
impl AccountsDbPluginManager {
pub fn new() -> Self {
AccountsDbPluginManager {
plugins: Vec::default(),
libs: Vec::default(),
}
}
/// # Safety
///
/// This function loads the dynamically linked library specified by the path. The library
/// must perform any necessary initialization.
pub unsafe fn load_plugin(
&mut self,
libpath: &str,
config_file: &str,
) -> Result<(), Box<dyn Error>> {
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
let lib = Library::new(libpath)?;
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
let plugin_raw = constructor();
let mut plugin = Box::from_raw(plugin_raw);
plugin.on_load(config_file)?;
self.plugins.push(plugin);
self.libs.push(lib);
Ok(())
}
/// Unload all plugins and loaded plugin libraries, making sure to fire
/// their `on_unload()` methods so they can do any necessary cleanup.
pub fn unload(&mut self) {
for mut plugin in self.plugins.drain(..) {
info!("Unloading plugin for {:?}", plugin.name());
plugin.on_unload();
}
for lib in self.libs.drain(..) {
drop(lib);
}
}
}
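The config file handed to `load_plugin` is plain JSON; only the "libpath" field is interpreted by the loading machinery itself (see the LibPathNotSet error and the service documentation below), while the remaining fields are left for the plugin to read in its `on_load` callback. A minimal hypothetical example:

{
    "libpath": "/usr/lib/libsolana_accountsdb_plugin_postgres.so"
}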

View File

@@ -1,157 +0,0 @@
use {
crate::{
accounts_update_notifier::AccountsUpdateNotifierImpl,
accountsdb_plugin_manager::AccountsDbPluginManager,
slot_status_observer::SlotStatusObserver,
},
crossbeam_channel::Receiver,
log::*,
serde_json,
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
std::{
fs::File,
io::Read,
path::{Path, PathBuf},
sync::{Arc, RwLock},
thread,
},
thiserror::Error,
};
#[derive(Error, Debug)]
pub enum AccountsdbPluginServiceError {
#[error("Cannot open the the plugin config file")]
CannotOpenConfigFile(String),
#[error("Cannot read the the plugin config file")]
CannotReadConfigFile(String),
#[error("The config file is not in a valid Json format")]
InvalidConfigFileFormat(String),
#[error("Plugin library path is not specified in the config file")]
LibPathNotSet,
#[error("Invalid plugin path")]
InvalidPluginPath,
#[error("Cannot load plugin shared library")]
PluginLoadError(String),
}
/// The service managing the AccountsDb plugin workflow.
pub struct AccountsDbPluginService {
slot_status_observer: SlotStatusObserver,
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
accounts_update_notifier: AccountsUpdateNotifier,
}
impl AccountsDbPluginService {
/// Creates and returns the AccountsDbPluginService.
/// # Arguments
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notifications
/// * `accountsdb_plugin_config_files` - The config file paths for the plugins. Each
/// config file controls a plugin responsible for transporting the data to external
/// data stores, and is defined in JSON format.
/// The `libpath` field should point to the full path of the dynamic shared library
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
/// trait, and must export a `C` function `_create_plugin` which creates the
/// implementation of `AccountsDbPlugin` and returns it to the caller.
/// The definition of the remaining JSON fields is up to the concrete plugin implementation;
/// they are usually used to configure the connection information for the external data store.
pub fn new(
confirmed_bank_receiver: Receiver<BankNotification>,
accountsdb_plugin_config_files: &[PathBuf],
) -> Result<Self, AccountsdbPluginServiceError> {
info!(
"Starting AccountsDbPluginService from config files: {:?}",
accountsdb_plugin_config_files
);
let mut plugin_manager = AccountsDbPluginManager::new();
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
}
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
plugin_manager.clone(),
)));
let slot_status_observer =
SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());
info!("Started AccountsDbPluginService");
Ok(AccountsDbPluginService {
slot_status_observer,
plugin_manager,
accounts_update_notifier,
})
}
fn load_plugin(
plugin_manager: &mut AccountsDbPluginManager,
accountsdb_plugin_config_file: &Path,
) -> Result<(), AccountsdbPluginServiceError> {
let mut file = match File::open(accountsdb_plugin_config_file) {
Ok(file) => file,
Err(err) => {
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
"Failed to open the plugin config file {:?}, error: {:?}",
accountsdb_plugin_config_file, err
)));
}
};
let mut contents = String::new();
if let Err(err) = file.read_to_string(&mut contents) {
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
"Failed to read the plugin config file {:?}, error: {:?}",
accountsdb_plugin_config_file, err
)));
}
let result: serde_json::Value = match serde_json::from_str(&contents) {
Ok(value) => value,
Err(err) => {
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
format!(
"The config file {:?} is not in a valid Json format, error: {:?}",
accountsdb_plugin_config_file, err
),
));
}
};
let libpath = result["libpath"]
.as_str()
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
let config_file = accountsdb_plugin_config_file
.as_os_str()
.to_str()
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
unsafe {
let result = plugin_manager.load_plugin(libpath, config_file);
if let Err(err) = result {
let msg = format!(
"Failed to load the plugin library: {:?}, error: {:?}",
libpath, err
);
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
}
}
Ok(())
}
pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
self.accounts_update_notifier.clone()
}
pub fn join(mut self) -> thread::Result<()> {
self.slot_status_observer.join()?;
self.plugin_manager.write().unwrap().unload();
Ok(())
}
}
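
For reference, `load_plugin` above only interprets the `libpath` field of the JSON config; everything else is forwarded untouched to the plugin's `on_load`. A small sketch (the path and extra fields are hypothetical) exercising the same `serde_json` lookup:

fn main() {
    // Hypothetical config contents; only `libpath` is read by the service itself.
    let contents = r#"{
        "libpath": "/path/to/libsolana_accountsdb_plugin_postgres.so",
        "host": "host_foo",
        "user": "solana"
    }"#;
    let result: serde_json::Value = serde_json::from_str(contents).unwrap();
    // Mirrors the lookup in `load_plugin`; a missing `libpath` maps to LibPathNotSet.
    let libpath = result["libpath"].as_str().expect("libpath not set");
    assert!(libpath.ends_with(".so"));
}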


@@ -1,4 +0,0 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod slot_status_observer;


@@ -1,80 +0,0 @@
use {
crossbeam_channel::Receiver,
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
},
};
#[derive(Debug)]
pub(crate) struct SlotStatusObserver {
bank_notification_receiver_service: Option<JoinHandle<()>>,
exit_updated_slot_server: Arc<AtomicBool>,
}
impl SlotStatusObserver {
pub fn new(
bank_notification_receiver: Receiver<BankNotification>,
accounts_update_notifier: AccountsUpdateNotifier,
) -> Self {
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
Self {
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
bank_notification_receiver,
exit_updated_slot_server.clone(),
accounts_update_notifier,
)),
exit_updated_slot_server,
}
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
self.bank_notification_receiver_service
.take()
.map(JoinHandle::join)
.unwrap()
}
fn run_bank_notification_receiver(
bank_notification_receiver: Receiver<BankNotification>,
exit: Arc<AtomicBool>,
accounts_update_notifier: AccountsUpdateNotifier,
) -> JoinHandle<()> {
Builder::new()
.name("bank_notification_receiver".to_string())
.spawn(move || {
while !exit.load(Ordering::Relaxed) {
if let Ok(notification) = bank_notification_receiver.recv() {
match notification {
BankNotification::OptimisticallyConfirmed(slot) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_confirmed(slot, None);
}
BankNotification::Frozen(bank) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
}
BankNotification::Root(bank) => {
accounts_update_notifier
.read()
.unwrap()
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
}
}
}
}
})
.unwrap()
}
}


@@ -1,33 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.8.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.67"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
thiserror = "1.0.21"
tokio-postgres = "0.7.3"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,5 +0,0 @@
This is an example implementation of an AccountsDb plugin for the PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
To create the schema objects for the database, use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.


@@ -1,54 +0,0 @@
/**
* This plugin implementation for PostgreSQL requires the following tables
*/
-- The table storing accounts
CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status varchar(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);
/**
* The following is for keeping historical data for accounts and is not required for the plugin to work.
*/
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
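
Because the trigger above fires AFTER UPDATE OR DELETE, every change to `account` appends the pre-change row to `account_audit`. A hedged sketch of reading that history back with the `postgres` crate used elsewhere in this diff (the connection string and pubkey are placeholders):

use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    // Placeholder connection string; align it with your postgresql.conf (e.g. port = 5433 below).
    let mut client = Client::connect("host=localhost port=5433 user=solana", NoTls)?;
    let pubkey: &[u8] = &[0u8; 32]; // placeholder pubkey bytes
    // Each UPDATE/DELETE on `account` appended the row's pre-image here via the trigger.
    for row in client.query(
        "SELECT slot, write_version, lamports FROM account_audit \
         WHERE pubkey = $1 ORDER BY slot, write_version",
        &[&pubkey],
    )? {
        let (slot, write_version, lamports): (i64, i64, i64) =
            (row.get(0), row.get(1), row.get(2));
        println!("slot={} write_version={} lamports={}", slot, write_version, lamports);
    }
    Ok(())
}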


@@ -1,9 +0,0 @@
/**
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
*/
DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;


@@ -1,802 +0,0 @@
# This is a reference configuration file for the PostgreSQL database, version 14.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here


@@ -1,69 +0,0 @@
use {log::*, std::collections::HashSet};
#[derive(Debug)]
pub(crate) struct AccountsSelector {
pub accounts: HashSet<Vec<u8>>,
pub owners: HashSet<Vec<u8>>,
pub select_all_accounts: bool,
}
impl AccountsSelector {
pub fn default() -> Self {
AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts: true,
}
}
pub fn new(accounts: &[String], owners: &[String]) -> Self {
info!(
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
accounts, owners
);
let select_all_accounts = accounts.iter().any(|key| key == "*");
if select_all_accounts {
return AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts,
};
}
let accounts = accounts
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
let owners = owners
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
AccountsSelector {
accounts,
owners,
select_all_accounts,
}
}
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_create_accounts_selector() {
AccountsSelector::new(
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
&[],
);
AccountsSelector::new(
&[],
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
);
}
}
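
A short sketch of the selection semantics above, runnable as a test inside this crate since `AccountsSelector` is `pub(crate)` (the keys are illustrative):

#[test]
fn selection_semantics_sketch() {
    // A wildcard in `accounts` short-circuits every check:
    let select_all = AccountsSelector::new(&["*".to_string()], &[]);
    assert!(select_all.is_account_selected(b"any account", b"any owner"));

    // Otherwise selection is an OR over the account list and the owner list:
    let owner_key = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin";
    let by_owner = AccountsSelector::new(&[], &[owner_key.to_string()]);
    let owner_bytes = bs58::decode(owner_key).into_vec().unwrap();
    assert!(by_owner.is_account_selected(b"unlisted account", &owner_bytes));
    assert!(!by_owner.is_account_selected(b"unlisted account", b"other owner"));
}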


@@ -1,345 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
crate::{
accounts_selector::AccountsSelector,
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
},
bs58,
log::*,
serde_derive::{Deserialize, Serialize},
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
thiserror::Error,
};
#[derive(Default)]
pub struct AccountsDbPluginPostgres {
client: Option<ParallelPostgresClient>,
accounts_selector: Option<AccountsSelector>,
}
impl std::fmt::Debug for AccountsDbPluginPostgres {
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
pub host: Option<String>,
pub user: Option<String>,
pub port: Option<u16>,
pub connection_str: Option<String>,
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
}
#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
#[error("Error connecting to the backend data store. Error message: ({msg})")]
DataStoreConnectionError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
DataSchemaError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
ConfigurationError { msg: String },
}
impl AccountsDbPlugin for AccountsDbPluginPostgres {
fn name(&self) -> &'static str {
"AccountsDbPluginPostgres"
}
/// Do initialization for the PostgreSQL plugin.
///
/// # Format of the config file:
/// * The `accounts_selector` section allows the user to control account selection.
/// "accounts_selector" : {
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
/// or:
/// "accounts_selector" : {
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
/// }
/// Accounts satisfying either the accounts condition or the owners condition will be selected.
/// When only owners is specified,
/// all accounts belonging to the owners will be streamed.
/// The accounts field supports a wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
/// * "host", optional, specifies the PostgreSQL server.
/// * "user", optional, specifies the PostgreSQL user.
/// * "port", optional, specifies the PostgreSQL server's port.
/// * "connection_str", optional, the custom PostgreSQL connection string.
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
/// from restoring a snapshot. The default is '10'.
/// * "panic_on_db_errors", optional, contols if to panic when there are errors replicating data to the
/// PostgreSQL database. The default is 'false'.
/// # Examples
///
/// {
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
/// "host": "host_foo",
/// "user": "solana",
/// "threads": 10,
/// "accounts_selector" : {
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
/// }
/// }
fn on_load(&mut self, config_file: &str) -> Result<()> {
solana_logger::setup_with_default("info");
info!(
"Loading plugin {:?} from config_file {:?}",
self.name(),
config_file
);
let mut file = File::open(config_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
serde_json::from_str(&contents);
match result {
Err(err) => {
return Err(AccountsDbPluginError::ConfigFileReadError {
msg: format!(
"The config file is not in the JSON format expected: {:?}",
err
),
})
}
Ok(config) => {
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
self.client = Some(client);
}
}
Ok(())
}
fn on_unload(&mut self) {
info!("Unloading plugin: {:?}", self.name());
match &mut self.client {
None => {}
Some(client) => {
client.join().unwrap();
}
}
}
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()> {
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
match account {
ReplicaAccountInfoVersions::V0_0_1(account) => {
let mut measure_select =
Measure::start("accountsdb-plugin-postgres-update-account-select");
if let Some(accounts_selector) = &self.accounts_selector {
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
return Ok(());
}
} else {
return Ok(());
}
measure_select.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-select-us",
measure_select.as_us() as usize,
100000,
100000
);
debug!(
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
bs58::encode(account.pubkey).into_string(),
bs58::encode(account.owner).into_string(),
slot,
self.accounts_selector.as_ref().unwrap()
);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database."
.to_string(),
},
)));
}
Some(client) => {
let mut measure_update =
Measure::start("accountsdb-plugin-postgres-update-account-client");
let result = { client.update_account(account, slot, is_startup) };
measure_update.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-client-us",
measure_update.as_us() as usize,
100000,
100000
);
if let Err(err) = result {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
}
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-main-us",
measure_all.as_us() as usize,
100000,
100000
);
Ok(())
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()> {
info!("Updating slot {:?} at with status {:?}", slot, status);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.update_slot_status(slot, parent, status);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<()> {
info!("Notifying the end of startup for accounts notifications");
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.notify_end_of_startup();
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
});
}
}
}
Ok(())
}
}
impl AccountsDbPluginPostgres {
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
let accounts_selector = &config["accounts_selector"];
if accounts_selector.is_null() {
AccountsSelector::default()
} else {
let accounts = &accounts_selector["accounts"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
let owners = &accounts_selector["owners"];
let owners: Vec<String> = if owners.is_array() {
owners
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
AccountsSelector::new(&accounts, &owners)
}
}
pub fn new() -> Self {
AccountsDbPluginPostgres {
client: None,
accounts_selector: None,
}
}
}
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
#[cfg(test)]
pub(crate) mod tests {
use {super::*, serde_json};
#[test]
fn test_accounts_selector_from_config() {
let config = "{\"accounts_selector\" : { \
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
}}";
let config: serde_json::Value = serde_json::from_str(config).unwrap();
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
}
}


@@ -1,3 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;


@@ -1,879 +0,0 @@
#![allow(clippy::integer_arithmetic)]
/// A concurrent implementation for writing accounts into PostgreSQL in parallel.
use {
crate::accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
chrono::Utc,
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_sdk::timing::AtomicInterval,
std::{
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc, Mutex,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
},
tokio_postgres::types::ToSql,
};
/// The maximum number of asynchronous requests allowed in the channel, to avoid excessive
/// memory usage. The downside: calls after this threshold is reached can block.
const MAX_ASYNC_REQUESTS: usize = 40960;
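// Note: the request channel is presumably created with crossbeam's `bounded(MAX_ASYNC_REQUESTS)`
// (see the `bounded` import above); a send on a full bounded channel blocks the notifying
// thread until a worker drains a request, which is the blocking behavior the doc comment warns about.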
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
struct PostgresSqlClientWrapper {
client: Client,
update_account_stmt: Statement,
bulk_account_insert_stmt: Statement,
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
}
pub struct SimplePostgresClient {
batch_size: usize,
pending_account_updates: Vec<DbAccountInfo>,
client: Mutex<PostgresSqlClientWrapper>,
}
struct PostgresClientWorker {
client: SimplePostgresClient,
/// Indicates whether accounts notification during startup is done.
is_startup_done: bool,
}
impl Eq for DbAccountInfo {}
#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
pub pubkey: Vec<u8>,
pub lamports: i64,
pub owner: Vec<u8>,
pub executable: bool,
pub rent_epoch: i64,
pub data: Vec<u8>,
pub slot: i64,
pub write_version: i64,
}
pub(crate) fn abort() -> ! {
#[cfg(not(test))]
{
// standard error is usually redirected to a log file, cry for help on standard output as
// well
eprintln!("Validator process aborted. The validator log may contain further details");
std::process::exit(1);
}
#[cfg(test)]
panic!("process::exit(1) is intercepted for friendly test failure...");
}
impl DbAccountInfo {
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
let data = account.data().to_vec();
Self {
pubkey: account.pubkey().to_vec(),
lamports: account.lamports() as i64,
owner: account.owner().to_vec(),
executable: account.executable(),
rent_epoch: account.rent_epoch() as i64,
data,
slot: slot as i64,
write_version: account.write_version(),
}
}
}
pub trait ReadableAccountInfo: Sized {
fn pubkey(&self) -> &[u8];
fn owner(&self) -> &[u8];
fn lamports(&self) -> i64;
fn executable(&self) -> bool;
fn rent_epoch(&self) -> i64;
fn data(&self) -> &[u8];
fn write_version(&self) -> i64;
}
impl ReadableAccountInfo for DbAccountInfo {
fn pubkey(&self) -> &[u8] {
&self.pubkey
}
fn owner(&self) -> &[u8] {
&self.owner
}
fn lamports(&self) -> i64 {
self.lamports
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch
}
fn data(&self) -> &[u8] {
&self.data
}
fn write_version(&self) -> i64 {
self.write_version
}
}
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
fn pubkey(&self) -> &[u8] {
self.pubkey
}
fn owner(&self) -> &[u8] {
self.owner
}
fn lamports(&self) -> i64 {
self.lamports as i64
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch as i64
}
fn data(&self) -> &[u8] {
self.data
}
fn write_version(&self) -> i64 {
self.write_version as i64
}
}
pub trait PostgresClient {
fn join(&mut self) -> thread::Result<()> {
Ok(())
}
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError>;
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError>;
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
}
impl SimplePostgresClient {
fn connect_to_db(
config: &AccountsDbPluginPostgresConfig,
) -> Result<Client, AccountsDbPluginError> {
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
let connection_str = if let Some(connection_str) = &config.connection_str {
connection_str.clone()
} else {
if config.host.is_none() || config.user.is_none() {
let msg = format!(
"\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
config.connection_str, config.host, config.user
);
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::ConfigurationError { msg },
)));
}
format!(
"host={} user={} port={}",
config.host.as_ref().unwrap(),
config.user.as_ref().unwrap(),
port
)
};
match Client::connect(&connection_str, NoTls) {
Err(err) => {
let msg = format!(
"Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
err, connection_str
);
error!("{}", msg);
Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
)))
}
Ok(client) => Ok(client),
}
}
fn build_bulk_account_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
for j in 0..batch_size {
let row = j * ACCOUNT_COLUMN_COUNT;
let val_str = format!(
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
row + 1,
row + 2,
row + 3,
row + 4,
row + 5,
row + 6,
row + 7,
row + 8,
row + 9,
);
if j == 0 {
stmt = format!("{} {}", &stmt, val_str);
} else {
stmt = format!("{}, {}", &stmt, val_str);
}
}
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
stmt = format!("{} {}", stmt, handle_conflict);
info!("{}", stmt);
let bulk_stmt = client.prepare(&stmt);
match bulk_stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
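// For illustration: with batch_size = 2, the builder above yields a statement of roughly
// this shape, parameter indexes continuing across rows (ACCOUNT_COLUMN_COUNT = 9 per row):
//
//   INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch,
//       data, write_version, updated_on) VALUES
//       ($1, $2, $3, $4, $5, $6, $7, $8, $9), ($10, $11, $12, $13, $14, $15, $16, $17, $18)
//   ON CONFLICT (pubkey) DO UPDATE SET ... WHERE acct.slot < excluded.slot
//       OR (acct.slot = excluded.slot AND acct.write_version < excluded.write_version)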
fn build_single_account_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
VALUES ($1, $2, $3, $4) \
ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
fn build_slot_upsert_statement_without_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, status, updated_on) \
VALUES ($1, $2, $3) \
ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Failed to prepare the slot update statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the account update to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
}
/// Insert accounts in batches to reduce network overhead
fn insert_accounts_in_batch(
&mut self,
account: DbAccountInfo,
) -> Result<(), AccountsDbPluginError> {
self.pending_account_updates.push(account);
if self.pending_account_updates.len() == self.batch_size {
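// The prepared bulk statement expects exactly `batch_size` rows, so the
// buffer is flushed only when it is full; any remainder is written
// row-by-row in flush_buffered_writes().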
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
let mut values: Vec<&(dyn ToSql + Sync)> =
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
let updated_on = Utc::now().naive_utc();
for j in 0..self.batch_size {
let account = &self.pending_account_updates[j];
values.push(&account.pubkey);
values.push(&account.slot);
values.push(&account.owner);
values.push(&account.lamports);
values.push(&account.executable);
values.push(&account.rent_epoch);
values.push(&account.data);
values.push(&account.write_version);
values.push(&updated_on);
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-prepare-values-us",
measure.as_us() as usize,
10000,
10000
);
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
let client = self.client.get_mut().unwrap();
let result = client
.client
.query(&client.bulk_account_insert_stmt, &values);
self.pending_account_updates.clear();
if let Err(err) = result {
let msg = format!(
"Failed to persist the account update to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-us",
measure.as_us() as usize,
10000,
10000
);
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-count",
self.batch_size,
10000,
10000
);
}
Ok(())
}
/// Flush any leftover buffered accounts that were not written as part of a full batch
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
if self.pending_account_updates.is_empty() {
return Ok(());
}
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
}
Ok(())
}
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating SimplePostgresClient...");
let mut client = Self::connect_to_db(config)?;
let bulk_account_insert_stmt =
Self::build_bulk_account_insert_statement(&mut client, config)?;
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
let update_slot_with_parent_stmt =
Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
let update_slot_without_parent_stmt =
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
pending_account_updates: Vec::with_capacity(batch_size),
client: Mutex::new(PostgresSqlClientWrapper {
client,
update_account_stmt,
bulk_account_insert_stmt,
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
}),
})
}
}
impl PostgresClient for SimplePostgresClient {
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
trace!(
"Updating account {} with owner {} at slot {}",
bs58::encode(account.pubkey()).into_string(),
bs58::encode(account.owner()).into_string(),
account.slot,
);
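// During startup (e.g. while accounts are being loaded from a snapshot)
// updates are buffered and written in batches; afterwards each update is
// upserted individually so it becomes visible immediately.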
if !is_startup {
return self.upsert_account(&account);
}
self.insert_accounts_in_batch(account)
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
info!("Updating slot {:?} at with status {:?}", slot, status);
let slot = slot as i64; // postgres only supports i64
let parent = parent.map(|parent| parent as i64);
let updated_on = Utc::now().naive_utc();
let status_str = status.as_str();
let client = self.client.get_mut().unwrap();
let result = match parent {
Some(parent) => client.client.execute(
&client.update_slot_with_parent_stmt,
&[&slot, &parent, &status_str, &updated_on],
),
None => client.client.execute(
&client.update_slot_without_parent_stmt,
&[&slot, &status_str, &updated_on],
),
};
match result {
Err(err) => {
let msg = format!(
"Failed to persist the slot update to the PostgreSQL database. Error: {:?}",
err
);
error!("{:?}", msg);
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
}
Ok(rows) => {
assert_eq!(1, rows, "Expected exactly one row to be updated at a time");
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
self.flush_buffered_writes()
}
}
struct UpdateAccountRequest {
account: DbAccountInfo,
is_startup: bool,
}
struct UpdateSlotRequest {
slot: u64,
parent: Option<u64>,
slot_status: SlotStatus,
}
enum DbWorkItem {
UpdateAccount(UpdateAccountRequest),
UpdateSlot(UpdateSlotRequest),
}
impl PostgresClientWorker {
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
let result = SimplePostgresClient::new(&config);
match result {
Ok(client) => Ok(PostgresClientWorker {
client,
is_startup_done: false,
}),
Err(err) => {
error!("Error in creating SimplePostgresClient: {}", err);
Err(err)
}
}
}
fn do_work(
&mut self,
receiver: Receiver<DbWorkItem>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
panic_on_db_errors: bool,
) -> Result<(), AccountsDbPluginError> {
while !exit_worker.load(Ordering::Relaxed) {
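// Receive with a timeout so the loop can observe the exit flag and detect
// the end of startup even when no work items are arriving.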
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
let work = receiver.recv_timeout(Duration::from_millis(500));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-worker-recv-us",
measure.as_us() as usize,
100000,
100000
);
match work {
Ok(work) => match work {
DbWorkItem::UpdateAccount(request) => {
if let Err(err) = self
.client
.update_account(request.account, request.is_startup)
{
error!("Failed to update account: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::UpdateSlot(request) => {
if let Err(err) = self.client.update_slot_status(
request.slot,
request.parent,
request.slot_status,
) {
error!("Failed to update slot: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
},
Err(err) => match err {
RecvTimeoutError::Timeout => {
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
if let Err(err) = self.client.notify_end_of_startup() {
error!("Error in notifying end of startup: ({})", err);
if panic_on_db_errors {
abort();
}
}
self.is_startup_done = true;
startup_done_count.fetch_add(1, Ordering::Relaxed);
}
continue;
}
_ => {
error!("Error in receiving the item {:?}", err);
if panic_on_db_errors {
abort();
}
break;
}
},
}
}
Ok(())
}
}
pub struct ParallelPostgresClient {
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
initialized_worker_count: Arc<AtomicUsize>,
sender: Sender<DbWorkItem>,
last_report: AtomicInterval,
}
impl ParallelPostgresClient {
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating ParallelPostgresClient...");
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
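// A bounded channel applies back-pressure: once MAX_ASYNC_REQUESTS items
// are queued, senders block until a worker drains the queue.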
let exit_worker = Arc::new(AtomicBool::new(false));
let mut workers = Vec::default();
let is_startup_done = Arc::new(AtomicBool::new(false));
let startup_done_count = Arc::new(AtomicUsize::new(0));
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
for i in 0..worker_count {
let cloned_receiver = receiver.clone();
let exit_clone = exit_worker.clone();
let is_startup_done_clone = is_startup_done.clone();
let startup_done_count_clone = startup_done_count.clone();
let initialized_worker_count_clone = initialized_worker_count.clone();
let config = config.clone();
let worker = Builder::new()
.name(format!("worker-{}", i))
.spawn(move || -> Result<(), AccountsDbPluginError> {
let panic_on_db_errors = *config
.panic_on_db_errors
.as_ref()
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
let result = PostgresClientWorker::new(config);
match result {
Ok(mut worker) => {
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
worker.do_work(
cloned_receiver,
exit_clone,
is_startup_done_clone,
startup_done_count_clone,
panic_on_db_errors,
)?;
Ok(())
}
Err(err) => {
error!("Error when making connection to database: ({})", err);
if panic_on_db_errors {
abort();
}
Err(err)
}
}
})
.unwrap();
workers.push(worker);
}
info!("Created ParallelPostgresClient.");
Ok(Self {
last_report: AtomicInterval::default(),
workers,
exit_worker,
is_startup_done,
startup_done_count,
initialized_worker_count,
sender,
})
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_worker.store(true, Ordering::Relaxed);
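// With the exit flag set, each worker finishes its current item and
// returns; join the threads and log any worker that ended in error.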
while let Some(worker) = self.workers.pop() {
let result = worker.join().unwrap();
if result.is_err() {
error!("The worker thread has failed: {:?}", result);
}
}
Ok(())
}
pub fn update_account(
&mut self,
account: &ReplicaAccountInfo,
slot: u64,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
if self.last_report.should_update(30000) {
datapoint_debug!(
"postgres-plugin-stats",
("message-queue-length", self.sender.len() as i64, i64),
);
}
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
account: DbAccountInfo::new(account, slot),
is_startup,
});
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-create-work-item-us",
measure.as_us() as usize,
100000,
100000
);
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
if let Err(err) = self.sender.send(wrk_item) {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!(
"Failed to update the account {:?}, error: {:?}",
bs58::encode(account.pubkey()).into_string(),
err
),
});
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-send-msg-us",
measure.as_us() as usize,
100000,
100000
);
Ok(())
}
pub fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
slot,
parent,
slot_status: status,
})) {
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
});
}
Ok(())
}
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue have been received by the workers
while !self.sender.is_empty() {
sleep(Duration::from_millis(100));
}
self.is_startup_done.store(true, Ordering::Relaxed);
// Wait for all worker threads to be done with flushing
while self.startup_done_count.load(Ordering::Relaxed)
!= self.initialized_worker_count.load(Ordering::Relaxed)
{
info!(
"Startup done count: {}, good worker thread count: {}",
self.startup_done_count.load(Ordering::Relaxed),
self.initialized_worker_count.load(Ordering::Relaxed)
);
sleep(Duration::from_millis(100));
}
info!("Done with notifying the end of startup");
Ok(())
}
}
pub struct PostgresClientBuilder {}
impl PostgresClientBuilder {
pub fn build_pararallel_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
ParallelPostgresClient::new(config)
}
pub fn build_simple_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
SimplePostgresClient::new(config)
}
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.8.17"
version = "1.5.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,19 +13,17 @@ clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-core = { path = "../core", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-ledger = { path = "../ledger", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-poh = { path = "../poh", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
rayon = "1.4.0"
solana-core = { path = "../core", version = "1.5.2" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.2" }
solana-streamer = { path = "../streamer", version = "1.5.2" }
solana-perf = { path = "../perf", version = "1.5.2" }
solana-ledger = { path = "../ledger", version = "1.5.2" }
solana-logger = { path = "../logger", version = "1.5.2" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-measure = { path = "../measure", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-version = { path = "../version", version = "1.5.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,37 +1,37 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
crossbeam_channel::unbounded,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_core::banking_stage::BankingStage,
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
cost_model::CostModel,
},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature},
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
thread::sleep,
time::{Duration, Instant},
},
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::{
banking_stage::{create_test_recorder, BankingStage},
cluster_info::ClusterInfo,
cluster_info::Node,
poh_recorder::PohRecorder,
poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::{
accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_sdk::{
hash::Hash,
signature::Keypair,
signature::Signature,
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
fn check_txs(
@@ -77,7 +77,7 @@ fn make_accounts_txs(
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
if !same_payer {
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
}
@@ -168,7 +168,6 @@ fn main() {
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
@@ -189,7 +188,7 @@ fn main() {
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
@@ -199,7 +198,7 @@ fn main() {
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(tx);
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();
@@ -211,7 +210,7 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -219,21 +218,15 @@ fn main() {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
vote_receiver,
None,
replay_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
poh_recorder.lock().unwrap().set_bank(&bank);
@@ -332,7 +325,7 @@ fn main() {
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {
bank_forks.set_root(root, &AbsRequestSender::default(), None);
bank_forks.set_root(root, &ABSRequestSender::default(), None);
root += 1;
}
debug!(
@@ -361,10 +354,10 @@ fn main() {
if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() {
tx.message.recent_blockhash = bank.last_blockhash();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;
@@ -386,7 +379,6 @@ fn main() {
);
drop(verified_sender);
drop(tpu_vote_sender);
drop(vote_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();

View File

@@ -1,30 +1,26 @@
[package]
name = "solana-banks-client"
version = "1.8.17"
version = "1.5.2"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-client"
edition = "2018"
[dependencies]
bincode = "1.3.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-program = { path = "../sdk/program", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
solana-banks-interface = { path = "../banks-interface", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3.5", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-banks-server = { path = "../banks-server", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-banks-server = { path = "../banks-server", version = "1.5.2" }
[lib]
crate-type = ["lib"]

View File

@@ -5,38 +5,32 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
use futures::{future::join_all, Future, FutureExt};
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
solana_program::{
clock::{Clock, Slot},
fee_calculator::FeeCalculator,
hash::Hash,
program_pack::Pack,
pubkey::Pubkey,
rent::Rent,
sysvar::{self, Sysvar},
},
solana_sdk::{
account::{from_account, Account},
commitment_config::CommitmentLevel,
signature::Signature,
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
},
tokio::{net::ToSocketAddrs, time::Duration},
tokio_serde::formats::Bincode,
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_sdk::{
account::{from_account, Account},
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
rent::Rent,
signature::Signature,
sysvar,
transaction::{self, Transaction},
transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
client::{self, channel::RequestDispatch, NewClient},
context::{self, Context},
rpc::{ClientMessage, Response},
serde_transport::tcp,
Transport,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -70,7 +64,7 @@ impl BanksClient {
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
}
@@ -92,14 +86,6 @@ impl BanksClient {
self.inner.get_slot_with_context(ctx, commitment)
}
pub fn get_block_height_with_context(
&mut self,
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
}
pub fn process_transaction_with_commitment_and_context(
&mut self,
ctx: Context,
@@ -130,39 +116,24 @@ impl BanksClient {
self.send_transaction_with_context(context::current(), transaction)
}
/// Return the cluster clock
pub fn get_clock(&mut self) -> impl Future<Output = io::Result<Clock>> + '_ {
self.get_account(sysvar::clock::id()).map(|result| {
let clock_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Clock sysvar not present"))?;
from_account::<Clock, _>(&clock_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Clock sysvar")
})
})
}
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
/// will use the transaction's blockhash to look up these same fee parameters and
/// use them to calculate the transaction fee.
pub fn get_fees(
&mut self,
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
}
/// Return the cluster Sysvar
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
})
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::Root)
}
/// Return the cluster rent
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
self.get_sysvar::<Rent>()
self.get_account(sysvar::rent::id()).map(|result| {
let rent_sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
from_account::<Rent>(&rent_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
})
})
}
/// Return a recent, rooted blockhash from the server. The cluster will only accept
@@ -222,16 +193,10 @@ impl BanksClient {
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
}
/// Return the most recent rooted slot. All transactions at or below this slot
/// are said to be finalized. The cluster will not fork to a higher slot.
/// Return the most recent rooted slot height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher slot height.
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_slot_with_context(context::current(), CommitmentLevel::default())
}
/// Return the most recent rooted block height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher block height.
pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
self.get_slot_with_context(context::current(), CommitmentLevel::Root)
}
/// Return the account at the given address at the slot corresponding to the given
@@ -253,33 +218,6 @@ impl BanksClient {
self.get_account_with_commitment(address, CommitmentLevel::default())
}
/// Return the unpacked account data at the given address
/// If the account is not found, an error is returned
pub fn get_packed_account_data<T: Pack>(
&mut self,
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
T::unpack_from_slice(&account.data)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
})
}
/// Return the unpacked account data at the given address
/// If the account is not found, an error is returned
pub fn get_account_data_with_borsh<T: BorshDeserialize>(
&mut self,
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
})
}
/// Return the balance in lamports of an account at the given address at the slot
/// corresponding to the given commitment level.
pub fn get_balance_with_commitment(
@@ -349,18 +287,13 @@ pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClie
#[cfg(test)]
mod tests {
use {
super::*,
solana_banks_server::banks_server::start_local_server,
solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
genesis_utils::create_genesis_config,
},
solana_sdk::{message::Message, signature::Signer, system_instruction},
std::sync::{Arc, RwLock},
tarpc::transport,
tokio::{runtime::Runtime, time::sleep},
};
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::sleep};
#[test]
fn test_banks_client_new() {
@@ -375,12 +308,9 @@ mod tests {
// `runtime.block_on()` just once, to run all the async code.
let genesis = create_genesis_config(10);
let bank = Bank::new(&genesis.genesis_config);
let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
));
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let bob_pubkey = solana_sdk::pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey();
@@ -388,9 +318,7 @@ mod tests {
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client = start_client(client_transport).await?;
let recent_blockhash = banks_client.get_recent_blockhash().await?;
@@ -408,24 +336,19 @@ mod tests {
// server-side functionality is available to the client.
let genesis = create_genesis_config(10);
let bank = Bank::new(&genesis.genesis_config);
let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
));
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(mint_pubkey));
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport =
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
.await;
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client = start_client(client_transport).await?;
let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
let signature = transaction.signatures[0];
banks_client.send_transaction(transaction).await?;
@@ -433,8 +356,8 @@ mod tests {
let mut status = banks_client.get_transaction_status(signature).await?;
while status.is_none() {
let root_block_height = banks_client.get_root_block_height().await?;
if root_block_height > last_valid_block_height {
let root_slot = banks_client.get_root_slot().await?;
if root_slot > last_valid_slot {
break;
}
sleep(Duration::from_millis(100)).await;

View File

@@ -1,22 +1,21 @@
[package]
name = "solana-banks-interface"
version = "1.8.17"
version = "1.5.2"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-interface"
edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.5.2" }
tarpc = { version = "0.23.0", features = ["full"] }
[dev-dependencies]
tokio = { version = "1", features = ["full"] }
tokio = { version = "0.3.5", features = ["full"] }
[lib]
crate-type = ["lib"]

View File

@@ -1,30 +1,20 @@
use {
serde::{Deserialize, Serialize},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
},
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TransactionConfirmationStatus {
Processed,
Confirmed,
Finalized,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransactionStatus {
pub slot: Slot,
pub confirmations: Option<usize>, // None = rooted
pub err: Option<TransactionError>,
pub confirmation_status: Option<TransactionConfirmationStatus>,
}
#[tarpc::service]
@@ -36,7 +26,6 @@ pub trait Banks {
async fn get_transaction_status_with_context(signature: Signature)
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
@@ -49,10 +38,8 @@ pub trait Banks {
#[cfg(test)]
mod tests {
use {
super::*,
tarpc::{client, transport},
};
use super::*;
use tarpc::{client, transport};
#[test]
fn test_banks_client_new() {

View File

@@ -1,12 +1,11 @@
[package]
name = "solana-banks-server"
version = "1.8.17"
version = "1.5.2"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-banks-server"
edition = "2018"
[dependencies]
@@ -14,14 +13,13 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"
solana-banks-interface = { path = "../banks-interface", version = "1.5.2" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-metrics = { path = "../metrics", version = "1.5.2" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }
[lib]
crate-type = ["lib"]

View File

@@ -1,52 +1,46 @@
use {
crate::send_transaction_service::{SendTransactionService, TransactionInfo},
bincode::{deserialize, serialize},
futures::{
future,
prelude::stream::{self, StreamExt},
},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
feature_set::FeatureSet,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
},
std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
},
tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
},
tokio::time::sleep,
tokio_serde::formats::Bincode,
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
future,
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
};
use std::{
io,
net::{Ipv4Addr, SocketAddr},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
};
use tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
};
use tokio::time::sleep;
use tokio_serde::formats::Bincode;
#[derive(Clone)]
struct BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
}
impl BanksServer {
@@ -58,17 +52,15 @@ impl BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
Self {
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
}
}
fn run(bank_forks: Arc<RwLock<BankForks>>, transaction_receiver: Receiver<TransactionInfo>) {
fn run(bank: &Bank, transaction_receiver: Receiver<TransactionInfo>) {
while let Ok(info) = transaction_receiver.recv() {
let mut transaction_infos = vec![info];
while let Ok(info) = transaction_receiver.try_recv() {
@@ -78,36 +70,23 @@ impl BanksServer {
.into_iter()
.map(|info| deserialize(&info.wire_transaction).unwrap())
.collect();
let bank = bank_forks.read().unwrap().working_bank();
let _ = bank.process_transactions(&transactions);
}
}
/// Useful for unit-testing
fn new_loopback(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
) -> Self {
fn new_loopback(bank_forks: Arc<RwLock<BankForks>>) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot();
{
// ensure that the commitment cache and bank are synced
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
w_block_commitment_cache.set_all_slots(slot, slot);
}
let server_bank_forks = bank_forks.clone();
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
));
Builder::new()
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
.spawn(move || Self::run(&bank, transaction_receiver))
.unwrap();
Self::new(
bank_forks,
block_commitment_cache,
transaction_sender,
poll_signature_status_sleep_duration,
)
Self::new(bank_forks, block_commitment_cache, transaction_sender)
}
fn slot(&self, commitment: CommitmentLevel) -> Slot {
@@ -125,16 +104,16 @@ impl BanksServer {
self,
signature: &Signature,
blockhash: &Hash,
last_valid_block_height: u64,
last_valid_slot: Slot,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self
.bank(commitment)
.get_signature_status_with_blockhash(signature, blockhash);
while status.is_none() {
sleep(self.poll_signature_status_sleep_duration).await;
sleep(Duration::from_millis(200)).await;
let bank = self.bank(commitment);
if bank.block_height() > last_valid_block_height {
if bank.slot() > last_valid_slot {
break;
}
status = bank.get_signature_status_with_blockhash(signature, blockhash);
@@ -143,13 +122,10 @@ impl BanksServer {
}
}
fn verify_transaction(
transaction: &Transaction,
feature_set: &Arc<FeatureSet>,
) -> transaction::Result<()> {
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
if let Err(err) = transaction.verify() {
Err(err)
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
} else if let Err(err) = transaction.verify_precompiles() {
Err(err)
} else {
Ok(())
@@ -160,19 +136,16 @@ fn verify_transaction(
impl Banks for BanksServer {
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_block_height = self
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_block_height(blockhash)
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
}
@@ -180,13 +153,11 @@ impl Banks for BanksServer {
self,
_: Context,
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, u64) {
) -> (FeeCalculator, Hash, Slot) {
let bank = self.bank(commitment);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let last_valid_block_height = bank
.get_blockhash_last_valid_block_height(&blockhash)
.unwrap();
(fee_calculator, blockhash, last_valid_block_height)
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
(fee_calculator, blockhash, last_valid_slot)
}
async fn get_transaction_status_with_context(
@@ -194,17 +165,11 @@ impl Banks for BanksServer {
_: Context,
signature: Signature,
) -> Option<TransactionStatus> {
let bank = self.bank(CommitmentLevel::Processed);
let bank = self.bank(CommitmentLevel::Recent);
let (slot, status) = bank.get_signature_status_slot(&signature)?;
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
let optimistically_confirmed_bank = self.bank(CommitmentLevel::Confirmed);
let optimistically_confirmed =
optimistically_confirmed_bank.get_signature_status_slot(&signature);
let confirmations = if r_block_commitment_cache.root() >= slot
&& r_block_commitment_cache.highest_confirmed_root() >= slot
{
let confirmations = if r_block_commitment_cache.root() >= slot {
None
} else {
r_block_commitment_cache
@@ -215,13 +180,6 @@ impl Banks for BanksServer {
slot,
confirmations,
err: status.err(),
confirmation_status: if confirmations.is_none() {
Some(TransactionConfirmationStatus::Finalized)
} else if optimistically_confirmed.is_some() {
Some(TransactionConfirmationStatus::Confirmed)
} else {
Some(TransactionConfirmationStatus::Processed)
},
})
}
@@ -229,33 +187,29 @@ impl Banks for BanksServer {
self.slot(commitment)
}
async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
self.bank(commitment).block_height()
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
if let Err(err) = verify_transaction(&transaction) {
return Some(Err(err));
}
let blockhash = &transaction.message.recent_blockhash;
let last_valid_block_height = self
.bank(commitment)
.get_blockhash_last_valid_block_height(blockhash)
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info = TransactionInfo::new(
signature,
serialize(&transaction).unwrap(),
last_valid_block_height,
);
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
.await
}
@@ -266,20 +220,14 @@ impl Banks for BanksServer {
commitment: CommitmentLevel,
) -> Option<Account> {
let bank = self.bank(commitment);
bank.get_account(&address).map(Account::from)
bank.get_account(&address)
}
}
pub async fn start_local_server(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
poll_signature_status_sleep_duration: Duration,
bank_forks: &Arc<RwLock<BankForks>>,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
let banks_server = BanksServer::new_loopback(
bank_forks,
block_commitment_cache,
poll_signature_status_sleep_duration,
);
let banks_server = BanksServer::new_loopback(bank_forks.clone());
let (client_transport, server_transport) = transport::channel::unbounded();
let server = server::new(server::Config::default())
.incoming(stream::once(future::ready(server_transport)))
@@ -314,12 +262,8 @@ pub async fn start_tcp_server(
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
let server = BanksServer::new(
bank_forks.clone(),
block_commitment_cache.clone(),
sender,
Duration::from_millis(200),
);
let server =
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
chan.respond_with(server.serve()).execute()
})
// Max 10 channels.

View File

@@ -1,4 +1,3 @@
#![allow(clippy::integer_arithmetic)]
pub mod banks_server;
pub mod rpc_banks_service;
pub mod send_transaction_service;

View File

@@ -1,22 +1,19 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
use {
crate::banks_server::start_tcp_server,
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
tokio::{
runtime::Runtime,
time::{self, Duration},
},
tokio_stream::wrappers::IntervalStream,
thread::{self, Builder, JoinHandle},
};
use tokio::{
runtime::Runtime,
time::{self, Duration},
};
pub struct RpcBanksService {
@@ -38,7 +35,7 @@ async fn start_abortable_tcp_server(
block_commitment_cache.clone(),
)
.fuse();
let interval = IntervalStream::new(time::interval(Duration::from_millis(100))).fuse();
let interval = time::interval(Duration::from_millis(100)).fuse();
pin_mut!(server, interval);
loop {
select! {
@@ -103,7 +100,8 @@ impl RpcBanksService {
#[cfg(test)]
mod tests {
use {super::*, solana_runtime::bank::Bank};
use super::*;
use solana_runtime::bank::Bank;
#[test]
fn test_rpc_banks_server_exit() {

View File

@@ -1,19 +1,17 @@
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
use {
log::*,
solana_metrics::{datapoint_warn, inc_new_counter_info},
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::signature::Signature,
std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
mpsc::{Receiver, RecvTimeoutError},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
/// Maximum size of the transaction queue
@@ -26,19 +24,15 @@ pub struct SendTransactionService {
pub struct TransactionInfo {
pub signature: Signature,
pub wire_transaction: Vec<u8>,
pub last_valid_block_height: u64,
pub last_valid_slot: Slot,
}
impl TransactionInfo {
pub fn new(
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_block_height: u64,
) -> Self {
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
Self {
signature,
wire_transaction,
last_valid_block_height,
last_valid_slot,
}
}
}
@@ -130,7 +124,7 @@ impl SendTransactionService {
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_block_height < root_bank.block_height() {
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
@@ -144,8 +138,8 @@ impl SendTransactionService {
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
send_socket,
tpu_address,
&send_socket,
&tpu_address,
&transaction_info.wire_transaction,
);
true
@@ -185,14 +179,12 @@ impl SendTransactionService {
#[cfg(test)]
mod test {
use {
super::*,
solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
},
std::sync::mpsc::channel,
use super::*;
use solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
};
use std::sync::mpsc::channel;
#[test]
fn service_exit() {

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.8.17"
version = "1.5.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,26 +15,24 @@ log = "0.4.11"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.5.0"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.2" }
solana-core = { path = "../core", version = "1.5.2" }
solana-genesis = { path = "../genesis", version = "1.5.2" }
solana-client = { path = "../client", version = "1.5.2" }
solana-faucet = { path = "../faucet", version = "1.5.2" }
solana-exchange-program = { path = "../programs/exchange", version = "1.5.2" }
solana-logger = { path = "../logger", version = "1.5.2" }
solana-metrics = { path = "../metrics", version = "1.5.2" }
solana-net-utils = { path = "../net-utils", version = "1.5.2" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-version = { path = "../version", version = "1.5.2" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
solana-local-cluster = { path = "../local-cluster", version = "1.5.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,45 +1,42 @@
#![allow(clippy::useless_attribute)]
#![allow(clippy::integer_arithmetic)]
use {
crate::order_book::*,
itertools::izip,
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_exchange_program::{exchange_instruction, exchange_state::*, id},
solana_faucet::faucet::request_airdrop_transaction,
solana_genesis::Base64Account,
solana_metrics::datapoint_info,
solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_program,
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
},
std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
use crate::order_book::*;
use itertools::izip;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_genesis::Base64Account;
use solana_metrics::datapoint_info;
use solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
{system_instruction, system_program},
};
use std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
};
// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
@@ -393,7 +390,7 @@ fn swapper<T>(
while client
.get_balance_with_commitment(
&trade_infos[trade_index].trade_account,
CommitmentConfig::processed(),
CommitmentConfig::recent(),
)
.unwrap_or(0)
== 0
@@ -448,18 +445,18 @@ fn swapper<T>(
account_group = (account_group + 1) % account_groups as usize;
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
let to_swap_txs: Vec<_> = to_swap
.par_iter()
.map(|(signer, swap, profit)| {
let s: &Keypair = signer;
let s: &Keypair = &signer;
let owner = &signer.pubkey();
let instruction = exchange_instruction::swap_request(
owner,
&swap.0.pubkey,
&swap.1.pubkey,
profit,
&profit,
);
let message = Message::new(&[instruction], Some(&s.pubkey()));
Transaction::new(&[s], message, blockhash)
@@ -574,7 +571,7 @@ fn trader<T>(
account_group = (account_group + 1) % account_groups as usize;
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
trades.chunks(chunk_size).for_each(|chunk| {
@@ -602,7 +599,7 @@ fn trader<T>(
src,
),
];
let message = Message::new(&instructions, Some(owner_pubkey));
let message = Message::new(&instructions, Some(&owner_pubkey));
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
})
.collect();
@@ -661,7 +658,7 @@ where
{
for s in &tx.signatures {
if let Ok(Some(r)) =
sync_client.get_signature_status_with_commitment(s, CommitmentConfig::processed())
sync_client.get_signature_status_with_commitment(s, CommitmentConfig::recent())
{
match r {
Ok(_) => {
@@ -684,7 +681,7 @@ fn verify_funding_transfer<T: SyncClient + ?Sized>(
if verify_transaction(client, tx) {
for a in &tx.message().account_keys[1..] {
if client
.get_balance_with_commitment(a, CommitmentConfig::processed())
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
@@ -741,7 +738,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), m);
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
let message = Message::new(&instructions, Some(&k.pubkey()));
(k.clone(), Transaction::new_unsigned(message))
})
@@ -767,7 +764,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
);
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("blockhash");
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
@@ -779,7 +776,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut waits = 0;
loop {
sleep(Duration::from_millis(200));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
if to_fund_txs.is_empty() {
break;
}
@@ -806,7 +803,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
funded.append(&mut new_funded);
funded.retain(|(k, b)| {
client
.get_balance_with_commitment(&k.pubkey(), CommitmentConfig::processed())
.get_balance_with_commitment(&k.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
> lamports
&& *b > lamports
@@ -838,7 +835,7 @@ pub fn create_token_accounts<T: Client>(
);
let request_ix =
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
(
(from_keypair, new_keypair),
Transaction::new_unsigned(message),
@@ -860,7 +857,7 @@ pub fn create_token_accounts<T: Client>(
let mut retries = 0;
while !to_create_txs.is_empty() {
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
to_create_txs
.par_iter_mut()
@@ -874,7 +871,7 @@ pub fn create_token_accounts<T: Client>(
let mut waits = 0;
while !to_create_txs.is_empty() {
sleep(Duration::from_millis(200));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
if to_create_txs.is_empty() {
break;
}
@@ -906,7 +903,7 @@ pub fn create_token_accounts<T: Client>(
let mut new_notfunded: Vec<(&Arc<Keypair>, &Keypair)> = vec![];
for f in &notfunded {
if client
.get_balance_with_commitment(&f.1.pubkey(), CommitmentConfig::processed())
.get_balance_with_commitment(&f.1.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
== 0
{
@@ -960,7 +957,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot
fn generate_keypairs(num: u64) -> Vec<Keypair> {
let mut seed = [0_u8; 32];
seed.copy_from_slice(Keypair::new().pubkey().as_ref());
seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
let mut rnd = GenKeys::new(seed);
rnd.gen_n_keypairs(num)
}
@@ -971,7 +968,7 @@ pub fn airdrop_lamports<T: Client>(
id: &Keypair,
amount: u64,
) {
let balance = client.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::processed());
let balance = client.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent());
let balance = balance.unwrap_or(0);
if balance >= amount {
return;
@@ -989,23 +986,23 @@ pub fn airdrop_lamports<T: Client>(
let mut tries = 0;
loop {
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();
for _ in 0..30 {
if let Ok(Some(_)) = client.get_signature_status_with_commitment(
&signature,
CommitmentConfig::processed(),
CommitmentConfig::recent(),
) {
break;
}
sleep(Duration::from_millis(100));
}
if client
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::processed())
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{


@@ -1,10 +1,10 @@
use {
clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::FAUCET_PORT,
solana_sdk::signature::{read_keypair_file, Keypair},
std::{net::SocketAddr, process::exit, time::Duration},
};
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair};
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;
pub struct Config {
pub entrypoint_addr: SocketAddr,


@@ -1,15 +1,11 @@
#![allow(clippy::integer_arithmetic)]
pub mod bench;
mod cli;
pub mod order_book;
use {
crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config},
log::*,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_sdk::signature::Signer,
solana_streamer::socket::SocketAddrSpace,
};
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;
fn main() {
solana_logger::setup();
@@ -58,12 +54,11 @@ fn main() {
);
} else {
info!("Connecting to the cluster");
let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
info!("{} nodes found", num_clients);
if num_clients < num_nodes {
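The other systematic difference in these cluster-discovery hunks is the extra argument: on the v1.8 side, discover_cluster, get_client, and get_multi_client take a solana_streamer::socket::SocketAddrSpace (passed as Unspecified throughout), which, as far as we can tell, controls whether non-routable addresses learned from gossip are accepted; the parameter does not exist in v1.5.2. The gossip_service module itself also lives in solana_gossip on the v1.8 side versus solana_core in v1.5.2.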


@@ -1,13 +1,11 @@
use {
itertools::{
EitherOrBoth::{Both, Left, Right},
Itertools,
},
log::*,
solana_exchange_program::exchange_state::*,
solana_sdk::pubkey::Pubkey,
std::{cmp::Ordering, collections::BinaryHeap, error, fmt},
};
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::{error, fmt};
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {


@@ -1,24 +1,19 @@
use {
log::*,
solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config},
solana_core::validator::ValidatorConfig,
solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
},
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::gossip_service::{discover_cluster, get_multi_client},
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{process::exit, sync::mpsc::channel, time::Duration},
};
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_core::validator::ValidatorConfig;
use solana_exchange_program::exchange_processor::process_instruction;
use solana_exchange_program::id;
use solana_exchange_program::solana_exchange_program;
use solana_faucet::faucet::run_local_faucet;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::genesis_config::create_genesis_config;
use solana_sdk::signature::{Keypair, Signer};
use std::process::exit;
use std::sync::mpsc::channel;
use std::time::Duration;
#[test]
#[ignore]
@@ -46,19 +41,13 @@ fn test_exchange_local_cluster() {
} = config;
let accounts_in_groups = batch_size * account_groups;
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
});
let faucet_keypair = Keypair::new();
cluster.transfer(
@@ -68,24 +57,17 @@ fn test_exchange_local_cluster() {
);
let (addr_sender, addr_receiver) = channel();
run_local_faucet_with_port(faucet_keypair, addr_sender, Some(1_000_000_000_000), 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))
.expect("run_local_faucet")
.expect("faucet_addr");
run_local_faucet(faucet_keypair, addr_sender, Some(1_000_000_000_000));
let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
info!("Connecting to the cluster");
let nodes = discover_cluster(
&cluster.entry_point_info.gossip,
NUM_NODES,
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let nodes =
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
info!("clients: {}", num_clients);
assert!(num_clients >= NUM_NODES);


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.8.17"
version = "1.5.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.2" }
solana-streamer = { path = "../streamer", version = "1.5.2" }
solana-logger = { path = "../logger", version = "1.5.2" }
solana-net-utils = { path = "../net-utils", version = "1.5.2" }
solana-version = { path = "../version", version = "1.5.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
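Beyond the version numbers themselves, note the requirement syntax: the v1.8 manifests pin intra-workspace dependencies exactly (=1.8.17), while v1.5.2 uses bare versions like 1.5.2, which Cargo interprets as caret requirements that would also accept any later semver-compatible 1.x release. The exact pin keeps a workspace build from silently resolving a path dependency's published counterpart to a newer version.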


@@ -1,38 +1,31 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc,
},
thread::{sleep, spawn, JoinHandle, Result},
time::{Duration, SystemTime},
},
};
use clap::{crate_description, crate_name, App, Arg};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle, Result};
use std::time::Duration;
use std::time::SystemTime;
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
w.meta.set_addr(&addr);
}
let packet_batch = Arc::new(packet_batch);
let msgs = Arc::new(msgs);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &packet_batch.packets {
for p in &msgs.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +35,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
}
})
}
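Despite the type renames (Packets, PacketsRecycler, and PacketReceiver in v1.5.2 versus PacketBatch, PacketBatchRecycler, and PacketBatchReceiver in v1.8), the benchmark's shape is identical on both sides: producer threads repeatedly blast a fixed batch of maximum-size (PACKET_DATA_SIZE) UDP packets at the bound socket, streamer receiver threads coalesce arriving packets into batches and forward them over an mpsc channel, and sink threads drain the channel and bump a shared counter that the main loop presumably samples to report throughput.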
@@ -81,7 +74,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketBatchRecycler::default();
let recycler = PacketsRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -97,8 +90,6 @@ fn main() -> Result<()> {
s_reader,
recycler.clone(),
"bench-streamer-test",
1,
true,
));
}


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.8.17"
version = "1.5.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,27 +12,26 @@ publish = false
bincode = "1.3.1"
clap = "2.33.1"
log = "0.4.11"
rayon = "1.5.0"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.8.17" }
solana-core = { path = "../core", version = "=1.8.17" }
solana-genesis = { path = "../genesis", version = "=1.8.17" }
solana-client = { path = "../client", version = "=1.8.17" }
solana-faucet = { path = "../faucet", version = "=1.8.17" }
solana-gossip = { path = "../gossip", version = "=1.8.17" }
solana-logger = { path = "../logger", version = "=1.8.17" }
solana-metrics = { path = "../metrics", version = "=1.8.17" }
solana-measure = { path = "../measure", version = "=1.8.17" }
solana-net-utils = { path = "../net-utils", version = "=1.8.17" }
solana-runtime = { path = "../runtime", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-streamer = { path = "../streamer", version = "=1.8.17" }
solana-version = { path = "../version", version = "=1.8.17" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.2" }
solana-core = { path = "../core", version = "1.5.2" }
solana-genesis = { path = "../genesis", version = "1.5.2" }
solana-client = { path = "../client", version = "1.5.2" }
solana-faucet = { path = "../faucet", version = "1.5.2" }
solana-logger = { path = "../logger", version = "1.5.2" }
solana-metrics = { path = "../metrics", version = "1.5.2" }
solana-measure = { path = "../measure", version = "1.5.2" }
solana-net-utils = { path = "../net-utils", version = "1.5.2" }
solana-runtime = { path = "../runtime", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
solana-version = { path = "../version", version = "1.5.2" }
[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.8.17" }
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.5.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,40 +1,39 @@
use {
crate::cli::Config,
log::*,
rayon::prelude::*,
solana_client::perf_utils::{sample_txs, SampleStats},
solana_core::gen_keys::GenKeys,
solana_faucet::faucet::request_airdrop_transaction,
solana_measure::measure::Measure,
solana_metrics::{self, datapoint_info},
solana_sdk::{
client::Client,
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
},
std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
use crate::cli::Config;
use log::*;
use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_faucet::faucet::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::{self, datapoint_info};
use solana_sdk::{
client::Client,
clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};
// The point at which transactions become "too old", in seconds.
const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) as u64;
const MAX_TX_QUEUE_AGE: u64 =
MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;
pub const MAX_SPENDS_PER_TX: u64 = 4;
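Both definitions of MAX_TX_QUEUE_AGE evaluate to the same bound under the stock clock constants (MAX_PROCESSING_AGE = 150 slots, DEFAULT_TICKS_PER_SLOT = 64, DEFAULT_TICKS_PER_SECOND = 160, hence DEFAULT_S_PER_SLOT = 0.4): the v1.8 form is (150 * 0.4) s = 60 s, and the v1.5.2 form is 150 * 64 / 160 = 60 s. The v1.8 rewrite simply routes the same arithmetic through the float seconds-per-slot constant instead of integer tick math.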
@@ -49,7 +48,7 @@ pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
loop {
match client.get_recent_blockhash_with_commitment(CommitmentConfig::processed()) {
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
Ok((blockhash, fee_calculator, _last_valid_slot)) => {
return (blockhash, fee_calculator)
}
@@ -498,7 +497,7 @@ fn do_tx_transfers<T: Client>(
fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
match client.get_balance_with_commitment(a, CommitmentConfig::processed()) {
match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
Ok(balance) => return balance >= amount,
Err(err) => error!("failed to get balance {:?}", err),
}
@@ -546,12 +545,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(client);
self.send(&client);
// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));
self.verify(client, to_lamports);
self.verify(&client, to_lamports);
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
@@ -566,7 +565,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
let message = Message::new(&instructions, Some(&k.pubkey()));
(*k, Transaction::new_unsigned(message))
})
@@ -619,7 +618,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
return None;
}
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
@@ -735,7 +734,7 @@ pub fn airdrop_lamports<T: Client>(
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
@@ -763,7 +762,7 @@ pub fn airdrop_lamports<T: Client>(
};
let current_balance = client
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::processed())
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
.unwrap_or_else(|e| {
info!("airdrop error {}", e);
starting_balance
@@ -926,14 +925,12 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
#[cfg(test)]
mod tests {
use {
super::*,
solana_runtime::{bank::Bank, bank_client::BankClient},
solana_sdk::{
client::SyncClient, fee_calculator::FeeRateGovernor,
genesis_config::create_genesis_config,
},
};
use super::*;
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::genesis_config::create_genesis_config;
#[test]
fn test_bench_tps_bank_client() {
@@ -970,7 +967,7 @@ mod tests {
for kp in &keypairs {
assert_eq!(
client
.get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::processed())
.get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::recent())
.unwrap(),
lamports
);


@@ -1,13 +1,11 @@
use {
clap::{crate_description, crate_name, App, Arg, ArgMatches},
solana_faucet::faucet::FAUCET_PORT,
solana_sdk::{
fee_calculator::FeeRateGovernor,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
},
std::{net::SocketAddr, process::exit, time::Duration},
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_faucet::faucet::FAUCET_PORT;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::{
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
};
use std::{net::SocketAddr, process::exit, time::Duration};
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;


@@ -1,3 +1,2 @@
#![allow(clippy::integer_arithmetic)]
pub mod bench;
pub mod cli;


@@ -1,20 +1,12 @@
#![allow(clippy::integer_arithmetic)]
use {
log::*,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
cli,
},
solana_genesis::Base64Account,
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
solana_sdk::{
fee_calculator::FeeRateGovernor,
signature::{Keypair, Signer},
system_program,
},
solana_streamer::socket::SocketAddrSpace,
std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
};
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
@@ -46,7 +38,7 @@ fn main() {
let keypair_count = *tx_count * keypair_multiplier;
if *write_to_client_file {
info!("Generating {} keypairs", keypair_count);
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
let num_accounts = keypairs.len() as u64;
let max_fee =
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@@ -75,14 +67,13 @@ fn main() {
}
info!("Connecting to the cluster");
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let client = if *multi_client {
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
let (client, num_clients) = get_multi_client(&nodes);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
@@ -96,7 +87,7 @@ fn main() {
let mut target_client = None;
for node in nodes {
if node.id == *target_node {
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
target_client = Some(Arc::new(get_client(&[node])));
break;
}
}
@@ -105,7 +96,7 @@ fn main() {
exit(1);
})
} else {
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
Arc::new(get_client(&nodes))
};
let keypairs = if *read_from_client_file {
@@ -143,7 +134,7 @@ fn main() {
generate_and_fund_keypairs(
client.clone(),
Some(*faucet_addr),
id,
&id,
keypair_count,
*num_lamports_per_account,
)


@@ -1,44 +1,27 @@
#![allow(clippy::integer_arithmetic)]
use {
serial_test::serial,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
cli::Config,
},
solana_client::thin_client::create_client,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{mpsc::channel, Arc},
time::Duration,
},
};
use serial_test_derive::serial;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs};
use solana_bench_tps::cli::Config;
use solana_client::thin_client::create_client;
use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_faucet::faucet::run_local_faucet;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
use solana_sdk::signature::{Keypair, Signer};
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;
fn test_bench_tps_local_cluster(config: Config) {
let native_instruction_processors = vec![];
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
native_instruction_processors,
..ClusterConfig::default()
});
let faucet_keypair = Keypair::new();
cluster.transfer(
@@ -53,11 +36,8 @@ fn test_bench_tps_local_cluster(config: Config) {
));
let (addr_sender, addr_receiver) = channel();
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))
.expect("run_local_faucet")
.expect("faucet_addr");
run_local_faucet(faucet_keypair, addr_sender, None);
let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let lamports_per_account = 100;
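The faucet startup handoff is the same on both sides: create an mpsc channel, hand the sender to the faucet thread, and block on the receiver for the bound address. The difference is that v1.8's run_local_faucet_with_port sends a Result over the channel (hence the double expect in the hunk above), while v1.5.2's run_local_faucet sends the SocketAddr directly. A runnable sketch of the handoff shape, with a stand-in thread and an illustrative address in place of the real faucet:

```rust
use std::net::SocketAddr;
use std::sync::mpsc::channel;
use std::{thread, time::Duration};

fn main() {
    let (addr_sender, addr_receiver) = channel::<SocketAddr>();
    // Stand-in for run_local_faucet: report the bound address once listening.
    thread::spawn(move || {
        let bound: SocketAddr = "127.0.0.1:9900".parse().unwrap(); // illustrative
        addr_sender.send(bound).unwrap();
    });
    let faucet_addr = addr_receiver
        .recv_timeout(Duration::from_secs(2)) // fail fast if the faucet never binds
        .expect("faucet did not report its address");
    println!("faucet at {}", faucet_addr);
}
```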


@@ -1,32 +0,0 @@
[package]
name = "solana-bloom"
version = "1.8.17"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2018"
[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
log = "0.4.14"
[lib]
crate-type = ["lib"]
name = "solana_bloom"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"


@@ -1 +0,0 @@
../frozen-abi/build.rs


@@ -1,5 +0,0 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;
#[macro_use]
extern crate solana_frozen_abi_macro;

cargo

@@ -3,22 +3,25 @@
# shellcheck source=ci/rust-version.sh
here=$(dirname "$0")
source "${here}"/ci/rust-version.sh all
toolchain=
case "$1" in
stable)
source "${here}"/ci/rust-version.sh stable
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
shift
;;
nightly)
source "${here}"/ci/rust-version.sh nightly
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
toolchain="$rust_nightly"
shift
;;
+*)
toolchain="${1#+}"
shift
;;
*)
source "${here}"/ci/rust-version.sh stable
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
;;
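The practical effect of this hunk: the wrapper gains a rustup-style passthrough arm, apparently on the v1.8 side, so an invocation like ./cargo +1.52.1 build strips the leading + and uses that exact toolchain, while the other side recognizes only the literal stable and nightly keywords before defaulting to stable.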


@@ -102,8 +102,6 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}
@@ -139,7 +137,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
wait_step
else
annotate --style info --context test-coverage \
@@ -150,33 +148,6 @@ all_test_steps() {
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
wait_step
# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=solana"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi
# Perf test suite
if affects \
.rs$ \
@@ -194,7 +165,7 @@ EOF
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
@@ -223,8 +194,6 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
@@ -247,15 +216,7 @@ EOF
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
45
}
pull_or_push_steps() {


@@ -3,24 +3,16 @@
# Pull requests do not run these steps.
steps:
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- wait
- command: "sdk/docker-solana/build.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish docker"
- command: "ci/publish-crate.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build-aarch64-apple-darwin"
timeout_in_minutes: 60
name: "publish tarball (aarch64-apple-darwin)"


@@ -105,18 +105,11 @@ if [[ -z "$CHANNEL" ]]; then
fi
fi
if [[ $CHANNEL = beta ]]; then
CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
elif [[ $CHANNEL = stable ]]; then
CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
fi
echo EDGE_CHANNEL="$EDGE_CHANNEL"
echo BETA_CHANNEL="$BETA_CHANNEL"
echo BETA_CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
echo STABLE_CHANNEL="$STABLE_CHANNEL"
echo STABLE_CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
echo CHANNEL="$CHANNEL"
echo CHANNEL_LATEST_TAG="$CHANNEL_LATEST_TAG"
exit 0


@@ -1,56 +0,0 @@
#!/usr/bin/env bash
set -e
here="$(dirname "$0")"
src_root="$(readlink -f "${here}/..")"
cd "${src_root}"
cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
--ignore RUSTSEC-2020-0016
# stdweb is unmaintained
#
# Blocked on multiple upstream crates removing their `stdweb` dependency.
--ignore RUSTSEC-2020-0056
# Potential segfault in the time crate
#
# Blocked on multiple crates updating `time` to >= 0.2.23
--ignore RUSTSEC-2020-0071
# generic-array: arr! macro erases lifetimes
#
# Blocked on libsecp256k1 releasing with upgraded dependencies
# https://github.com/paritytech/libsecp256k1/issues/66
--ignore RUSTSEC-2020-0146
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0078
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0079
# chrono: Potential segfault in `localtime_r` invocations
#
# Blocked due to no safe upgrade
# https://github.com/chronotope/chrono/issues/499
--ignore RUSTSEC-2020-0159
)
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"


@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.52.1
FROM solanalabs/rust:1.48.0
ARG date
RUN set -x \


@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.52.1
FROM rust:1.48.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0


@@ -23,9 +23,6 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
fi
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -38,18 +35,7 @@ if [[ -n $CI ]]; then
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
export CI_PULL_REQUEST=
fi
case "$(uname -s)" in
Linux)
export CI_OS_NAME=linux
;;
Darwin)
export CI_OS_NAME=osx
;;
*)
;;
esac
export CI_OS_NAME=linux
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
@@ -88,13 +74,10 @@ else
export CI_BUILD_ID=
export CI_COMMIT=
export CI_JOB_ID=
export CI_OS_NAME=
export CI_PULL_REQUEST=
export CI_REPO_SLUG=
export CI_TAG=
# Don't override ci/run-local.sh
if [[ -z $CI_LOCAL_RUN ]]; then
export CI_OS_NAME=
fi
fi
cat <<EOF


@@ -70,7 +70,7 @@ done
source ci/upload-ci-artifact.sh
source scripts/configure-metrics.sh
source multinode-demo/common.sh --prebuild
source multinode-demo/common.sh
nodes=(
"multinode-demo/bootstrap-validator.sh \
@@ -78,6 +78,7 @@ nodes=(
--init-complete-file init-complete-node0.log \
--dynamic-port-range 8000-8050"
"multinode-demo/validator.sh \
--enable-rpc-exit \
--no-restart \
--dynamic-port-range 8050-8100
--init-complete-file init-complete-node1.log \
@@ -127,7 +128,7 @@ startNode() {
waitForNodeToInit() {
declare initCompleteFile=$1
while [[ ! -r $initCompleteFile ]]; do
if [[ $SECONDS -ge 300 ]]; then
if [[ $SECONDS -ge 240 ]]; then
echo "^^^ +++"
echo "Error: $initCompleteFile not found in $SECONDS seconds"
exit 1
@@ -200,10 +201,17 @@ killNodes() {
[[ ${#pids[@]} -gt 0 ]] || return
# Try to use the RPC exit API to cleanly exit the first two nodes
# (dynamic nodes, -x, are just killed)
# (dynamic nodes, -x, are just killed since their RPC port is not known)
echo "--- RPC exit"
$solana_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true
$solana_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true
for port in 8899 18899; do
(
set -x
curl --retry 5 --retry-delay 2 --retry-connrefused \
-X POST -H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' \
http://localhost:$port
)
done
# Give the nodes a splash of time to cleanly exit before killing them
sleep 2


@@ -19,7 +19,6 @@ declare prints=(
# Parts of the tree that are expected to be print free
declare print_free_tree=(
':core/src/**.rs'
':^core/src/validator.rs'
':faucet/src/**.rs'
':ledger/src/**.rs'
':metrics/src/**.rs'


@@ -12,14 +12,10 @@ import json
import subprocess
import sys;
real_file = os.path.realpath(__file__)
ci_path = os.path.dirname(real_file)
src_root = os.path.dirname(ci_path)
def load_metadata():
cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
return json.loads(subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
'cargo metadata --no-deps --format-version=1',
shell=True, stdout=subprocess.PIPE).communicate()[0])
def get_packages():
metadata = load_metadata()

ci/publish-bpf-sdk.sh Executable file

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"
if [[ -n "$CI_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
source ci/upload-ci-artifact.sh
echo --- AWS S3 Store
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Skipped
else
upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
fi
exit 0


@@ -39,11 +39,7 @@ fi
case "$CI_OS_NAME" in
osx)
_cputype="$(uname -m)"
if [[ $_cputype = arm64 ]]; then
_cputype=aarch64
fi
TARGET=${_cputype}-apple-darwin
TARGET=x86_64-apple-darwin
;;
linux)
TARGET=x86_64-unknown-linux-gnu
@@ -87,7 +83,7 @@ echo --- Creating release tarball
export CHANNEL
source ci/rust-version.sh stable
scripts/cargo-install-all.sh stable "${RELEASE_BASENAME}"
scripts/cargo-install-all.sh +"$rust_stable" "${RELEASE_BASENAME}"
tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}"
bzip2 "${TARBALL_BASENAME}"-$TARGET.tar
@@ -150,7 +146,7 @@ elif [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install


@@ -1,57 +0,0 @@
#!/usr/bin/env bash
cd "$(dirname "$0")/.."
export CI_LOCAL_RUN=true
set -e
case $(uname -o) in
*/Linux)
export CI_OS_NAME=linux
;;
*)
echo "local CI runs are only supported on Linux" 1>&2
exit 1
;;
esac
steps=()
steps+=(test-sanity)
steps+=(shellcheck)
steps+=(test-checks)
steps+=(test-coverage)
steps+=(test-stable)
steps+=(test-stable-bpf)
steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)
step_index=0
if [[ -n "$1" ]]; then
start_step="$1"
while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
if [[ "$step" = "$start_step" ]]; then
break
fi
step_index=$((step_index + 1))
done
if [[ $step_index -eq ${#steps[@]} ]]; then
echo "unexpected start step: \"$start_step\"" 1>&2
exit 1
else
echo "** starting at step: \"$start_step\" **"
echo
fi
fi
while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
cmd="ci/${step}.sh"
$cmd
step_index=$((step_index + 1))
done


@@ -7,7 +7,7 @@ source multinode-demo/common.sh
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
timeout 120 ./run.sh &
pid=$!
attempts=20
@@ -16,20 +16,16 @@ while [[ ! -f config/run/init-completed ]]; do
if ((--attempts == 0)); then
echo "Error: validator failed to boot"
exit 1
else
echo "Checking init"
fi
done
snapshot_slot=1
# wait a bit longer than snapshot_slot
while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
sleep 1
echo "Checking slot"
done
$solana_validator --ledger config/ledger exit --force || true
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid


@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.52.1
stable_version=1.48.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-05-18
nightly_version=2020-12-13
fi


@@ -1,24 +0,0 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#
here="$(dirname "$0")"
SBF_TOOLS_VERSION=unknown
cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
if [[ ${version} != '' ]]; then
SBF_TOOLS_VERSION="${version}"
else
echo '--- unable to parse SBF_TOOLS_VERSION'
fi
else
echo "--- '${cargo_build_bpf_main}' not present"
fi
echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"


@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=1000000
LimitNOFILE=500000
[Install]
WantedBy=multi-user.target


@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
ensure_env || exit 1
# Allow more files to be opened by a user
echo "* - nofile 1000000" > /etc/security/limits.d/90-solana-nofiles.conf
echo "* - nofile 500000" > /etc/security/limits.d/90-solana-nofiles.conf


@@ -23,11 +23,7 @@ fi
BENCH_FILE=bench_output.log
BENCH_ARTIFACT=current_bench_results.log
# solana-keygen required when building C programs
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH
# Clear the C dependency files, if dependency moves these files are not regenerated
# Clear the C dependency files, if dependeny moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
@@ -45,14 +41,6 @@ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run gossip benches
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run poh benches
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run core benches
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"


@@ -12,16 +12,6 @@ cargo="$(readlink -f "./cargo")"
scripts/increment-cargo-version.sh check
# Disallow uncommitted Cargo.lock changes
(
_ scripts/cargo-for-all-lock-files.sh tree >/dev/null
set +e
if ! _ git diff --exit-code; then
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
exit 1
fi
)
echo --- build environment
(
set -x
@@ -35,10 +25,8 @@ echo --- build environment
"$cargo" stable clippy --version --verbose
"$cargo" nightly clippy --version --verbose
# audit is done only with "$cargo stable"
# audit is done only with stable
"$cargo" stable audit --version
grcov --version
)
export RUST_BACKTRACE=1
@@ -47,7 +35,7 @@ export RUSTFLAGS="-D warnings -A incomplete_features"
# Only force up-to-date lock files on edge
if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
# Exclude --benches as it's not available in rust stable yet
if _ scripts/cargo-for-all-lock-files.sh stable check --locked --tests --bins --examples; then
if _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" check --locked --tests --bins --examples; then
true
else
check_status=$?
@@ -58,30 +46,57 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh nightly check --locked --all-targets
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi
_ ci/order-crates-for-publishing.py
_ "$cargo" stable fmt --all -- --check
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings
_ "$cargo" stable fmt --all -- --check
cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
_ ci/do-audit.sh
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
--ignore RUSTSEC-2020-0016
# stdweb is unmaintained
#
# Blocked on multiple upstream crates removing their `stdweb` dependency.
--ignore RUSTSEC-2020-0056
# Potential segfault in the time crate
#
# Blocked on multiple crates updating `time` to >= 0.2.23
--ignore RUSTSEC-2020-0071
# difference is unmaintained
#
# Blocked on predicates v1.0.6 removing its dependency on `difference`
--ignore RUSTSEC-2020-0095
)
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
{
cd programs/bpf
_ "$cargo" stable audit
for project in rust/*/ ; do
echo "+++ do_bpf_checks $project"
(
cd "$project"
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" stable fmt -- --check
_ "$cargo" nightly test
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
)
done
}


@@ -1,9 +0,0 @@
#!/usr/bin/env bash
cd "$(dirname "$0")/.."
export CI_LOCAL_RUN=true
set -ex
scripts/build-downstream-projects.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -1 +0,0 @@
test-stable.sh


@@ -25,29 +25,4 @@ echo
_ ci/nits.sh
_ ci/check-ssh-keys.sh
# Ensure the current channel version is not equal ("greater") than
# the version of the latest tag
if [[ -z $CI_TAG ]]; then
echo "--- channel version check"
(
eval "$(ci/channel-info.sh)"
if [[ -n $CHANNEL_LATEST_TAG ]]; then
source scripts/read-cargo-variable.sh
version=$(readCargoVariable version "version/Cargo.toml")
echo "version: v$version"
echo "latest channel tag: $CHANNEL_LATEST_TAG"
if [[ $CHANNEL_LATEST_TAG = v$version ]]; then
echo "Error: please run ./scripts/increment-cargo-version.sh"
exit 1
fi
else
echo "Skipped. CHANNEL_LATEST_TAG (CHANNEL=$CHANNEL) unset"
fi
)
fi
echo --- ok


@@ -1 +0,0 @@
test-stable.sh


@@ -21,6 +21,13 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh
# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
@@ -31,58 +38,21 @@ case $testName in
test-stable)
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-bpf)
# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# rustfilt required for dumping BPF assembly listings
"$cargo" install rustfilt
# solana-keygen required when building C programs
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH
cargo_build_bpf="$(realpath ./cargo-build-bpf)"
test-stable-perf)
# BPF solana-sdk legacy compile test
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml
./cargo-build-bpf --manifest-path sdk/Cargo.toml
# BPF Program unit tests
"$cargo" test --manifest-path programs/bpf/Cargo.toml
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
# BPF program system tests
# BPF program tests
_ make -C programs/bpf/c tests
_ "$cargo" stable test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
# Dump BPF program assembly listings
for bpf_test in programs/bpf/rust/*; do
if pushd "$bpf_test"; then
"$cargo_build_bpf" --dump
popd
fi
done
# BPF program instruction count assertion
bpf_target_path=programs/bpf/target
_ "$cargo" stable test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt
bpf_dump_archive="bpf-dumps.tar.bz2"
rm -f "$bpf_dump_archive"
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
exit 0
;;
test-stable-perf)
if [[ $(uname) = Linux ]]; then
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
# lengthy and unexpected delay the first time CUDA is involved when the driver
# is not yet loaded.
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
rm -rf target/perf-libs
./fetch-perf-libs.sh
@@ -100,17 +70,7 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
*)


@@ -19,24 +19,13 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"
)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
}


@@ -1,29 +1,23 @@
[package]
name = "solana-clap-utils"
version = "1.8.17"
version = "1.5.2"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-clap-utils"
edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-perf = { path = "../perf", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.17" }
solana-sdk = { path = "../sdk", version = "=1.8.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.5.2" }
solana-sdk = { path = "../sdk", version = "1.5.2" }
thiserror = "1.0.21"
tiny-bip39 = "0.8.1"
uriparse = "0.6.3"
tiny-bip39 = "0.7.0"
url = "2.1.0"
chrono = "0.4"
[dev-dependencies]
tempfile = "3.1.0"
[lib]
name = "solana_clap_utils"


@@ -0,0 +1,22 @@
use crate::ArgConstant;
use clap::Arg;
pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
name: "commitment",
long: "commitment",
help: "Return information at the selected commitment level",
};
pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
commitment_arg_with_default("recent")
}
pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'a, 'b> {
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["recent", "single", "singleGossip", "root", "max"])
.default_value(default_value)
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)
}
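A hedged usage sketch for this new helper; the App wiring and the mapping back onto CommitmentConfig constructors are illustrative assumptions, not part of the diff (commitment_arg and COMMITMENT_ARG are assumed to be in scope from the module above):

```rust
use clap::App;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    let matches = App::new("example")
        .arg(commitment_arg()) // defaults to "recent"
        .get_matches();

    // Translate the validated CLI string into v1.5-era constructors.
    let commitment = match matches.value_of(COMMITMENT_ARG.name).unwrap() {
        "single" => CommitmentConfig::single(),
        "singleGossip" => CommitmentConfig::single_gossip(),
        "root" => CommitmentConfig::root(),
        "max" => CommitmentConfig::max(),
        _ => CommitmentConfig::recent(),
    };
    let _ = commitment;
}
```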

Some files were not shown because too many files have changed in this diff.