Compare commits

..

94 Commits

Author SHA1 Message Date
mergify[bot]
58fe45f3e5 Don't query modern Ledger wallet app version with deprecated payload size (#12031)
(cherry picked from commit dff8242887)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-03 21:55:55 +00:00
mergify[bot]
01a9360dbe builds crds filters without looping over filters (#11998) (#12028)
(cherry picked from commit bc7adb97ed)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-03 21:44:00 +00:00
mergify[bot]
f44d9d770a Rpc: add getMultipleAccounts endpoint (bp #12005) (#12024)
* Rpc: add getMultipleAccounts endpoint (#12005)

* Add rpc endpoint to return the state of multiple accounts from the same bank

* Add docs

* Review comments: Dedupe account code, default to base64, add max const

* Add get_multiple_accounts to rpc-client

(cherry picked from commit b22de369b7)

* Use new_response for consistency

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-09-03 19:52:46 +00:00
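The getMultipleAccounts entry above adds both a JSON-RPC endpoint and a `get_multiple_accounts` helper on the Rust `RpcClient`. A minimal, hypothetical sketch of the client-side call, assuming the helper returns one `Option<Account>` per requested pubkey as the commit describes; the URL is a placeholder and the sysvar addresses are used only because they always exist:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::sysvar;

fn main() {
    // Placeholder URL; point this at any RPC node running a release that includes #12005.
    let rpc = RpcClient::new("http://127.0.0.1:8899".to_string());
    let keys = vec![sysvar::clock::id(), sysvar::rent::id()];
    // One Option<Account> per requested pubkey, all read from the same bank.
    match rpc.get_multiple_accounts(&keys) {
        Ok(accounts) => {
            for (key, account) in keys.iter().zip(accounts) {
                match account {
                    Some(acc) => println!("{}: {} lamports, owner {}", key, acc.lamports, acc.owner),
                    None => println!("{}: not found", key),
                }
            }
        }
        Err(err) => eprintln!("request failed: {}", err),
    }
}
```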
mergify[bot]
79d9d28e7d Update token amounts in parsed instructions to retain full precision (#12020) (#12022)
(cherry picked from commit b940da4040)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-03 19:22:24 +00:00
mergify[bot]
d273d1acb2 Fix forwarding calculation (#12014) (#12019)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 3f39ab1e04)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-03 10:36:56 +00:00
mergify[bot]
991f188f23 Fix test (#12013) (#12017)
(cherry picked from commit 36a294aae0)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 17:16:59 +09:00
mergify[bot]
8f02fdcc11 Purge storage rewards from accounts db for testnet (#11996) (#12011)
* Purge storage rewards from accounts db for testnet

* Fix test failing only on stable

(cherry picked from commit fb71ee60aa)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 08:01:37 +00:00
mergify[bot]
3e78850331 Move forward token2 native mint testnet epoch (#12007) (#12010)
(cherry picked from commit 4b1cb51a3e)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 07:48:55 +00:00
mergify[bot]
f29a741582 Clarify comments and names in inflation code (#11977) (#12008)
(cherry picked from commit 89bca6110a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 06:47:13 +00:00
mergify[bot]
3e6052faca Bigtable method to return a single row of data (#11999) (#12002)
(cherry picked from commit b041afe1be)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-02 21:01:58 +00:00
mergify[bot]
0b0710d522 Use conventional special self notation (#11990) (#11997)
(cherry picked from commit 46aac4819a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-02 11:59:16 +00:00
mergify[bot]
2f3fced8a8 Switch account hashing to blake3 (#11969) (#11992)
* Switch account hashing to blake3

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit af08221aec)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-02 08:40:07 +00:00
mergify[bot]
a5832366a7 Detect and notify when deserializable shreds are available (bp #11816) (#11988)
* Detect and notify when deserializable shreds are available (#11816)

* Add logic to check for complete data ranges

* Add RPC signature notification

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 1c1a3f979d)

# Conflicts:
#	accounts-bench/Cargo.toml
#	core/src/rpc_pubsub.rs

* Fix conflicts

Co-authored-by: carllin <wumu727@gmail.com>
Co-authored-by: Carl <carl@solana.com>
2020-09-02 06:38:10 +00:00
mergify[bot]
f26ff6d6b2 Docs.rs version replacement (#11981) (#11982)
(cherry picked from commit b720921c83)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-01 13:52:57 -06:00
mergify[bot]
57f149e415 Ensure that the spl-token 2 native mint account is owned by the spl-token 2 program. (#11974)
Workaround for https://github.com/solana-labs/solana-program-library/issues/374 until spl-token 3 is shipped

(cherry picked from commit 7341e60043)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-01 18:16:22 +00:00
mergify[bot]
8ece3847f9 Add tests for the Debug and activation Vecs (#11926) (#11968)
* Add tests for the Debug and activation Vecs

* Rename a bit

(cherry picked from commit 11ac4eb21d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-01 09:54:27 +00:00
mergify[bot]
0d23ad00b1 Update to rayon 1.4.0 (#11898) (#11902)
(cherry picked from commit c4253dc0f9)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-09-01 04:53:39 +00:00
mergify[bot]
d9684f99c3 Remove log (#11949) (#11961)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 7641b60a2b)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-01 01:05:20 +00:00
mergify[bot]
6ea9c249d7 Add missing backslash to solana-validator command (#11958)
(cherry picked from commit a19f696a42)

Co-authored-by: Richard Ayotte <rich.ayotte@gmail.com>
2020-08-31 23:43:22 +00:00
mergify[bot]
1e02069f86 Increase message_processor logging to error level (#11945) (#11948)
(cherry picked from commit 9b9d559312)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-31 21:26:35 +00:00
mergify[bot]
b179ed0b90 Remove encrypted secrets not required by the main public CI (bp #11942) (#11944)
* Remove unused GEOLOCATION_API_KEY

(cherry picked from commit f78594dfc1)

* Remove secrets not required by the main public CI

(cherry picked from commit 278f2fe078)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 18:59:52 +00:00
mergify[bot]
eb65ff750e Simplify get_programs(), specify a real Preview activation epoch for new BPFLoader (#11930)
(cherry picked from commit f385af25e5)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 17:21:07 +00:00
Michael Vines
663dc9959f Bump version to v1.3.8 2020-08-31 10:18:42 -07:00
mergify[bot]
53ed6a2298 Fix use-deprecated-loader arg (#11921) (#11929)
(cherry picked from commit 6234909373)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-31 09:53:34 +00:00
mergify[bot]
47962f3e80 Use DNS for devnet/testnet entrypoints (#11922)
(cherry picked from commit e4d7e1fe3f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 07:53:02 +00:00
Michael Vines
d3a16e4e13 Bump RPC banks up 1 port to avoid web3.js wss port conflict
(cherry picked from commit f8bb93a0f4)
2020-08-30 23:56:19 -07:00
Michael Vines
250f5a8196 Add new trusted validator for testnet 2020-08-30 22:51:42 -07:00
Michael Vines
83a17acc17 Add methods to adjust rent burn percentage, and hashes per tick 2020-08-30 16:50:43 -07:00
Michael Vines
ed34b930e5 modify-genesis now writes elsewhere and produces a full genesis.tar.bz2 2020-08-30 16:47:27 -07:00
Michael Vines
95816f7db9 Add ability to fork a local cluster from the latest mainnet-beta snapshot 2020-08-30 16:47:18 -07:00
mergify[bot]
e652c27142 Add bank-hash subcommand (#11915)
(cherry picked from commit a07980536a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-30 19:05:04 +00:00
mergify[bot]
0811cb2966 Fix get_parsed_token_accounts (#11907) (#11909)
(cherry picked from commit 60c7ac6f95)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-29 19:53:05 +00:00
Michael Vines
311b419f8a Bump version to v1.3.7 2020-08-29 11:42:14 -07:00
Michael Vines
acb992c3be Update to spl-token 2 2020-08-29 09:23:20 -07:00
Tyera Eulberg
61aca235a3 Bump spl-token version 2020-08-29 03:24:36 -06:00
mergify[bot]
bad0709ff1 Update to token pack/unpack changes (bp #11900) (#11903)
* Update to token pack/unpack changes (#11900)

(cherry picked from commit 2eff9a19c3)

# Conflicts:
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-29 05:46:07 +00:00
mergify[bot]
2a649e990d Update spl-token to v2.0 (bp #11884) (#11897)
* Update spl-token to v2.0 (#11884)

* Update account-decoder to spl-token v2.0

* Update transaction-status to spl-token v2.0

* Update rpc to spl-token v2.0

* Update getTokenSupply to pull from Mint directly

* Fixup to spl-token v2.0.1

(cherry picked from commit 76be36c9ce)

# Conflicts:
#	Cargo.lock
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-28 23:36:53 +00:00
mergify[bot]
1a25889f72 Take v0.19.3 of perf libs which improves sigverify perf 2x (#11894) (#11895)
(cherry picked from commit 9393dce1ff)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-28 20:37:26 +00:00
Trent Nelson
e15dca6961 Update devnet cluster docs since reboot 2020-08-28 10:36:11 -07:00
mergify[bot]
bb12f48014 Add SolFlare as stake-supporting wallet (#11891) (#11892)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 8ba3a33129)

Co-authored-by: Dan Albert <dan@solana.com>
2020-08-28 16:17:24 +00:00
Jon Cinque
c6416fca6e Bump version to 1.3.6 (#11890) 2020-08-28 16:58:59 +02:00
Jack May
2f5d60bef7 Update epoch gating (#11880) 2020-08-27 21:17:45 -07:00
mergify[bot]
ef1a9df507 Small cleaning around consensus/bank_forks (#11873) (#11881)
(cherry picked from commit d8c529a9b8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-28 01:06:21 +00:00
mergify[bot]
d2611f54a0 Make ledger-tool accounts print rent_epoch and slot (#11845) (#11870)
(cherry picked from commit 57174cdabe)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-27 04:49:24 +00:00
mergify[bot]
6117f8d64e Add SolFlare guide to docs (#11843) (#11868)
(cherry picked from commit 36e8441149)

Co-authored-by: Dan Albert <dan@solana.com>
2020-08-26 23:13:43 +00:00
mergify[bot]
67d9faaefc Bump compute budget (#11864) (#11867)
* Bump compute budget

* nudge

(cherry picked from commit ea179ad762)

Co-authored-by: Jack May <jack@solana.com>
2020-08-26 23:12:01 +00:00
mergify[bot]
aaa551ca7c Merge pull request #11857 from mvines/cache (#11863)
ci: cargo-target-cache is now channel specific
(cherry picked from commit 5c7080c1f4)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-26 19:50:06 +00:00
mergify[bot]
85df8cb4c5 Accounts hash calculation metrics (#11433) (#11860)
(cherry picked from commit 770d3d383c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-26 19:31:52 +00:00
mergify[bot]
65f189d932 Rpc: Filter accounts with invalid mints from get_parsed_token_accounts (#11844) (#11859)
* Filter out accounts with invalid mints from get_parsed_token_accounts

* Explicit docs

(cherry picked from commit 1988ee9cd6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-26 19:02:48 +00:00
mergify[bot]
99001f7f2e Bump MacOS nofile recommendation message (#11835)
(cherry picked from commit 8841c3398c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-25 19:57:08 +00:00
mergify[bot]
39eeb0142e Add SystemInstruction::CreateAccount support to CPI (bp #11649) (#11831)
* Add SystemInstruction::CreateAccount support to CPI (#11649)

(cherry picked from commit e9b610b8df)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/bank.rs
#	sdk/src/instruction.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 19:52:27 +00:00
mergify[bot]
d5d1a344c3 Switch programs activation to whole-set based gating (bp #11750) (#11837)
* Switch programs activation to whole-set based gating (#11750)

* Implement Debug for MessageProcessor

* Switch from delta-based gating to whole-set gating

* Remove dbg!

* Fix clippy

* Clippy

* Add test

* add loader to stable operating mode at proper epoch

* refresh_programs_and_inflation after ancestor setup

* Callback via snapshot; avoid account re-add; Debug

* Fix test

* Fix test and fix the past history

* Make callback management stricter and cleaner

* Fix test

* Test overwrite and frozen for native programs

* Test epoch callback with genesis-programs

* Add assertions for parent bank

* Add tests and some minor cleaning

* Remove unsteady assertion...

* Fix test...

* Fix DOS

* Skip ensuring account by dual (whole/delta) gating

* Fix frozen abi implementation...

* Move compute budget constant init back into bank

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
(cherry picked from commit db4bbb3569)

# Conflicts:
#	genesis-programs/src/lib.rs

* Fix conflicts

Co-authored-by: Jack May <jack@solana.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 19:00:41 +00:00
mergify[bot]
62e3c084d3 Update system tuning and docs (bp #11680) (#11830)
* Sync FD limit and max maps to 500k

(cherry picked from commit 11951eb009)

* Expand system tuning docs

(cherry picked from commit 5354df8c1c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-25 17:13:27 +00:00
mergify[bot]
3027ceb53a Document how to validate account pubkey (#11821) (#11833)
(cherry picked from commit 2c5366f259)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 17:11:09 +00:00
mergify[bot]
4e604e1211 Add (hidden) --use-deprecated-loader flag to solana deploy (#11828)
(cherry picked from commit de736e00ad)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-25 16:56:29 +00:00
mergify[bot]
6749bfd1d2 Gate aligned program heap (bp #11808) (#11814)
* Gate aligned program heap (#11808)


(cherry picked from commit c2e5dae7ba)

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 15:57:15 +00:00
mergify[bot]
d05b39c4f6 Specify loader when bootstrapping bpf programs (#11571) (#11822)
(cherry picked from commit 0a94e7e7fa)

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 07:56:11 -07:00
mergify[bot]
c8e1fbd568 Update comment (#11826) (#11827)
(cherry picked from commit dbd079f54c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 14:48:00 +00:00
mergify[bot]
ad36ddedf1 CPI support for bpf_loader_deprecated (bp #11695) (#11824)
* CPI support for bpf_loader_deprecated (#11695)

(cherry picked from commit 46830124f8)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 11:02:06 +00:00
mergify[bot]
08bece7651 More efficient padding (#11656) (#11823)
(cherry picked from commit f1ba2387d3)

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 10:10:59 +00:00
mergify[bot]
f162c6d1d0 Align host addresses (bp #11384) (#11817)
* Align host addresses (#11384)

* Align host addresses

* support new program abi

* update epoch rollout

* Enforce aligned pointers in cross-program invocations

(cherry picked from commit 9290e561e1)

# Conflicts:
#	core/src/validator.rs
#	genesis-programs/src/lib.rs
#	programs/bpf_loader/src/deprecated.rs
#	programs/bpf_loader/src/lib.rs
#	sdk/src/entrypoint_native.rs
#	sdk/src/lib.rs

* resolve conflicts

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 07:23:20 +00:00
mergify[bot]
f3904b5765 sdk: Make PubKey::create_program_address available in program unit tests (bp #11745) (#11810)
* sdk: Make PubKey::create_program_address available in program unit tests (#11745)

* sdk: Make PubKey::create_program_address available in program unit tests

This finishes the work started in #11604 to have
`create_program_address` available when `target_arch` is not `bpf` and
`program` is enabled.  Otherwise, there is an undefined reference error
to `sol_create_program_address`, which is only defined in `bpf`.

A small test to simply call the function has been added in order to catch
the problem in the future.

The default dependency to `solana-sdk/default` doesn't cause a problem with
existing programs since `build.sh` always specifies
`--no-default-features`, and programs in `solana-program-library` all
use it too.

* Add `default-features = false` for inter-program dependencies

Fix the build error found during CI. The `--no-default-features` flag
only applies to the top-level package, not to dependencies. A program that
depends on another program, e.g. `128bit`, which depends on `128bit_dep`,
must specify `default-features = false` when including that package;
otherwise the `bpf` build will try to pull in default packages, which
include `std`.

(cherry picked from commit 9a366281d3)

# Conflicts:
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml

* resolve conflicts

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
Co-authored-by: Jack May <jack@solana.com>
2020-08-24 20:41:54 +00:00
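The commit body above explains that `Pubkey::create_program_address` is now compiled for non-BPF targets when the `program` feature is enabled, so program crates can exercise it in ordinary `cargo test` runs on the host. A hedged sketch of such a test; the seeds and program id below are made up for illustration:

```rust
#[cfg(test)]
mod tests {
    use solana_sdk::pubkey::Pubkey;

    #[test]
    fn derive_program_address_on_host() {
        // Illustrative program id; a real test would use its own program's id.
        let program_id = Pubkey::new(&[42u8; 32]);
        // Derivation fails for seed sets that land on the ed25519 curve,
        // so handle the Result rather than unwrapping blindly.
        match Pubkey::create_program_address(&[b"escrow", &[7u8]], &program_id) {
            Ok(derived) => println!("derived address: {}", derived),
            Err(err) => println!("no valid address for these seeds: {:?}", err),
        }
    }
}
```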
mergify[bot]
67bf7515a7 Aligned program heap (#11657) (#11812)
(cherry picked from commit f8606fca4f)

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 20:12:58 +00:00
mergify[bot]
0dcbc6d4d1 The constraints on compute power a program can consume are limited only to its instruction count (bp #11717) (#11800)
* The constraints on compute power a program can consume are limited only to its instruction count (#11717)

(cherry picked from commit 8d362f682b)

# Conflicts:
#	programs/bpf/Cargo.toml
#	programs/bpf_loader/Cargo.toml
#	programs/bpf_loader/src/lib.rs
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/bank.rs
#	sdk/src/instruction.rs

* Resolve conflicts

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 17:27:40 +00:00
mergify[bot]
79a2ccabb4 Return an error from create_program_address syscall (bp #11658) (#11789)
* Return an error from create_program_address syscall (#11658)

(cherry picked from commit 750e5344f1)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs

* resolve conflicts

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 04:30:29 +00:00
mergify[bot]
f5e583ef0d solana-gossip spy can now be given an identity keypair (--identity argument) (#11797)
(cherry picked from commit a1e2357d12)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-23 01:14:42 +00:00
mergify[bot]
132550cd7a RPC: Allow the sendTransaction preflight commitment level to be configured (bp #11792) (#11794)
* Allow the sendTransaction preflight commitment level to be configured

(cherry picked from commit b660704faa)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-22 16:41:25 +00:00
Michael Vines
08a789323f Fix typo 2020-08-22 09:22:41 -07:00
mergify[bot]
2d9781b101 Add option for repairing only from trusted validators (#11752) (#11773)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit c8d67aa8eb)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-21 20:40:37 -07:00
mergify[bot]
6540d3c63e Make BPF Loader static (bp #11516) (#11790)
* Make BPF Loader static (#11516)

(cherry picked from commit 7c736f71fe)

# Conflicts:
#	Cargo.lock
#	core/Cargo.toml
#	core/src/lib.rs
#	core/src/validator.rs
#	genesis-programs/src/lib.rs
#	programs/bpf_loader/src/deprecated.rs
#	programs/bpf_loader/src/lib.rs
#	sdk/src/entrypoint_native.rs
#	sdk/src/lib.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:54:50 +00:00
mergify[bot]
a227b813d8 Feature check CPI up front (#11652) (#11787)
(cherry picked from commit 4196686acf)

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:27:33 +00:00
mergify[bot]
2b4e0abb43 fix region checks (#11651) (#11785)
(cherry picked from commit 768b386f0a)

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:14:13 +00:00
mergify[bot]
cdf6ff7907 Fix filter_crds_values output alignment with the inputs (#11734) (#11781)
(cherry picked from commit 418b483af6)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-08-21 20:56:26 +00:00
mergify[bot]
6775e01747 Add StakeInstruction::AuthorizeWithSeed (#11700) (#11779)
* Add StakeInstruction::AuthorizeWithSeed

* chore: add authorize-with-seed to web.js

* fix: add address_owner

* Add SystemInstruction::TransferWithSeed

* Update ABI hash

* chore: better variable names

* Add AuthorizeWithSeedArgs

* Reorder and rename arguments for clarity

(cherry picked from commit f02a78d8ff)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-08-21 19:39:36 +00:00
mergify[bot]
4be9d5030d Squash supermajority root on blockstore replay at startup (#11727) (#11765)
(cherry picked from commit f7adb68599)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-21 08:20:09 +00:00
carllin
a9f914da8e Cleanup test utilities (#11766)
* Add voting utility

* Add blockstore utility

Co-authored-by: Carl <carl@solana.com>
2020-08-21 06:52:01 +00:00
mergify[bot]
096375584b Rpc: Return error if block does not exist (#11743) (#11749)
* Return error if block does not exist

* Update docs

(cherry picked from commit 747f8d5877)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-20 23:26:55 +00:00
mergify[bot]
d3ab1ec9cf Do not delete any ledger when --limit-ledger-size is not provided (#11741)
(cherry picked from commit ea88bbdc33)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-20 19:27:48 +00:00
mergify[bot]
e41d9c87c5 Bump spl-token to clean up magic number (bp #11726) (#11738)
* Bump spl-token to clean up magic number (#11726)

(cherry picked from commit 2fd2aceeb2)

# Conflicts:
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts and toml order

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-20 18:02:28 +00:00
Greg Fitzgerald
e938925b95 Add BanksClient (#11721)
Cherry-picked from #10728, but without the changes to solana-tokens
2020-08-19 22:24:24 -06:00
mergify[bot]
6c03e6c4b5 Allow votes to timestamp subsequent slots with the same timestamp (#11715) (#11720)
(cherry picked from commit b1bc901a66)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-20 00:40:04 +00:00
Trent Nelson
abce60efdf Bump version to 1.3.5 2020-08-19 20:19:34 +00:00
mergify[bot]
b36510a565 Skip grace blocks if previous leader was on different fork (#11679) (#11709)
* Start leader blocks if previous leader was on different fork

* Fix test

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 5f8d34feb3)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-19 09:29:08 +00:00
mergify[bot]
a8220ae653 The end_slot argument to purge is now optional (#11707)
(cherry picked from commit d1500ae229)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-19 04:41:54 +00:00
mergify[bot]
2c642d4639 Filter push/pulls from spies (#11620) (#11703)
* Filter push/pulls from spies

* Don't pull from peers with shred version == 0, don't push to people with shred_version == 0

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 0f0a2ddafe)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-19 03:20:43 +00:00
mergify[bot]
04eb35e679 Remove old signatureSubscribe info (#11704) (#11706)
(cherry picked from commit 35828e8fe7)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-19 02:44:29 +00:00
mergify[bot]
8738241567 Get index (#11694) (#11697)
(cherry picked from commit 55ce2ebd53)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-18 18:54:17 +00:00
Justin Starry
0ef9185c9e Fully enable cross program support in mainnet-beta
(cherry picked from commit 9e89a963d9)
2020-08-18 06:39:24 -07:00
mergify[bot]
e41004f185 rpc: rework binary encoding. BREAKING CHANGE (bp #11646) (#11674)
* Add base64 (binary64) encoding for getConfirmedTransaction/getConfirmedBlock

(cherry picked from commit b5f3ced860)

* decode-transaction now supports binary64

(cherry picked from commit 2ebc68a9e2)

# Conflicts:
#	cli/src/cli.rs

* Rework UiAccountData encode/decode such that it works from Rust

(cherry picked from commit 757e147b3b)

# Conflicts:
#	account-decoder/src/lib.rs
#	cli/src/cli.rs

* Rename Binary64 to Base64.  Establish Base58 encoding

(cherry picked from commit adc984a225)

# Conflicts:
#	account-decoder/src/lib.rs

* Remove "binary" encoding. Document "encoding" as required

(cherry picked from commit e5281157fa)

* resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-18 02:53:10 +00:00
mergify[bot]
41cad9ccd5 Faucet: Add per-request cap (#11665) (#11669)
* Add per-request cap; also use clap-utils

* Clean up arg names and take cap inputs as SOL

(cherry picked from commit 71d5409b3b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-18 01:00:11 +00:00
mergify[bot]
d9ae092637 Re-do rent collection check on rent-exempt account (bp #11349) (#11655)
* Re-do rent collection check on rent-exempt account (#11349)

* wip: re-do rent collection check on rent-exempt account

* Let's see how the ci goes

* Restore previous code

* Well, almost all new changes are revertable

* Update doc

* Add test and gating

* Fix tests

* Fix tests, especially avoid to change abi...

* Fix more tests...

* Fix snapshot restore

* Align to _new_ with better uninitialized detection

(cherry picked from commit 23fa84b322)

# Conflicts:
#	core/src/rpc_subscriptions.rs

* Fix conflicts

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-17 07:20:24 +00:00
mergify[bot]
9015e47cc5 Rpc: Add until parameter for getConfirmedSignaturesForAddress2 (#11644) (#11648)
* Refactor bigtable apis to accept start and end keys

* Make helper fn to deserialize cell data

* Refactor get_confirmed_signatures_for_address to use get_row_data range

* Add until param to get_confirmed_signatures_for_address

* Add until param to blockstore api

* Plumb until through client/cli

* Simplify client params

(cherry picked from commit 6c5b8f324a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-15 17:59:35 +00:00
mergify[bot]
15b92e9c8d Bigtable: Use index to filter address-signatures correctly (#11622) (#11643)
* Use index to filter address-signatures correctly

* Pull additional keys to account for filtered records

* Clarify variable name

(cherry picked from commit 820af533a4)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-14 21:02:13 +00:00
Trent Nelson
c72bd900cd Bump version to 1.3.4 2020-08-14 18:29:46 +00:00
240 changed files with 10464 additions and 3255 deletions


@@ -1,12 +1,6 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
}
}


@@ -3,16 +3,19 @@
#
# Save target/ for the next CI build on this machine
#
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
mkdir -p "$d"
set -x
rsync -a --delete --link-dest="$PWD" target "$d"
du -hs "$d"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$d")
echo "--- ${cacheSizeInGB}GB: $d"
)
if [[ -z $CARGO_TARGET_CACHE ]]; then
echo "+++ CARGO_TARGET_CACHE not defined" # pre-command should have defined it
else
(
set -x
mkdir -p "$CARGO_TARGET_CACHE"
set -x
rsync -a --delete --link-dest="$PWD" target "$CARGO_TARGET_CACHE"
du -hs "$CARGO_TARGET_CACHE"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$CARGO_TARGET_CACHE")
echo "--- ${cacheSizeInGB}GB: $CARGO_TARGET_CACHE"
)
fi
#
# Add job_stats data point


@@ -11,23 +11,24 @@ export PS4="++"
#
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
MAX_CACHE_SIZE=18 # gigabytes
if [[ -d $d ]]; then
du -hs "$d"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$d")
echo "--- ${cacheSizeInGB}GB: $d"
if [[ -d $CARGO_TARGET_CACHE ]]; then
du -hs "$CARGO_TARGET_CACHE"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$CARGO_TARGET_CACHE")
echo "--- ${cacheSizeInGB}GB: $CARGO_TARGET_CACHE"
if [[ $cacheSizeInGB -gt $MAX_CACHE_SIZE ]]; then
echo "--- $d is too large, removing it"
rm -rf "$d"
echo "--- $CARGO_TARGET_CACHE is too large, removing it"
rm -rf "$CARGO_TARGET_CACHE"
fi
else
echo "--- $d not present"
echo "--- $CARGO_TARGET_CACHE not present"
fi
mkdir -p "$d"/target
rsync -a --delete --link-dest="$d" "$d"/target .
mkdir -p "$CARGO_TARGET_CACHE"/target
rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .
)

Cargo.lock (generated, 657 lines changed): diff suppressed because it is too large.


@@ -5,6 +5,9 @@ members = [
"bench-tps",
"accounts-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"clap-utils",
"cli-config",
"client",


@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.3.3"
version = "1.3.8"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,14 +15,14 @@ bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-config-program = { path = "../programs/config", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-stake-program = { path = "../programs/stake", version = "1.3.3" }
solana-vote-program = { path = "../programs/vote", version = "1.3.3" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-stake-program = { path = "../programs/stake", version = "1.3.8" }
solana-vote-program = { path = "../programs/vote", version = "1.3.8" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.3", features = ["skip-no-mangle"] }
thiserror = "1.0"
[package.metadata.docs.rs]


@@ -32,17 +32,18 @@ pub struct UiAccount {
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
Binary(String),
LegacyBinary(String), // Legacy. Retained for RPC backwards compatibility
Json(ParsedAccount),
Binary64(String),
Binary(String, UiAccountEncoding),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary,
Binary, // Legacy. Retained for RPC backwards compatibility
Base58,
Base64,
JsonParsed,
Binary64,
}
impl UiAccount {
@@ -54,20 +55,24 @@ impl UiAccount {
data_slice_config: Option<UiDataSliceConfig>,
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => UiAccountData::Binary(
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
),
UiAccountEncoding::Binary64 => UiAccountData::Binary64(base64::encode(slice_data(
&account.data,
data_slice_config,
))),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(&account.data, data_slice_config)),
encoding,
),
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) =
parse_account_data(pubkey, &account.owner, &account.data, additional_data)
{
UiAccountData::Json(parsed_data)
} else {
UiAccountData::Binary64(base64::encode(&account.data))
UiAccountData::Binary(base64::encode(&account.data), UiAccountEncoding::Base64)
}
}
};
@@ -83,8 +88,12 @@ impl UiAccount {
pub fn decode(&self) -> Option<Account> {
let data = match &self.data {
UiAccountData::Json(_) => None,
UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
UiAccountData::Binary64(blob) => base64::decode(blob).ok(),
UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(),
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;
Some(Account {
lamports: self.lamports,


@@ -3,7 +3,7 @@ use crate::{
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id_v1_0},
parse_token::{parse_token, spl_token_id_v2_0},
parse_vote::parse_vote,
};
use inflector::Inflector;
@@ -17,7 +17,7 @@ lazy_static! {
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();


@@ -3,32 +3,32 @@ use crate::{
StringAmount,
};
use solana_sdk::pubkey::Pubkey;
use spl_token_v1_0::{
use spl_token_v2_0::{
option::COption,
pack::Pack,
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
state::{unpack, Account, Mint, Multisig},
state::{Account, AccountState, Mint, Multisig},
};
use std::{mem::size_of, str::FromStr};
use std::str::FromStr;
// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id_v1_0() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
pub fn spl_token_id_v2_0() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
}
// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_v1_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
pub fn spl_token_v2_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
}
pub fn parse_token(
data: &[u8],
mint_decimals: Option<u8>,
) -> Result<TokenAccountType, ParseAccountError> {
let mut data = data.to_vec();
if data.len() == size_of::<Account>() {
let account: Account = *unpack(&mut data)
if data.len() == Account::get_packed_len() {
let account = Account::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
let decimals = mint_decimals.ok_or_else(|| {
ParseAccountError::AdditionalDataMissing(
@@ -43,8 +43,12 @@ pub fn parse_token(
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
is_initialized: account.is_initialized,
is_native: account.is_native,
state: account.state.into(),
is_native: account.is_native(),
rent_exempt_reserve: match account.is_native {
COption::Some(reserve) => Some(token_amount_to_ui_amount(reserve, decimals)),
COption::None => None,
},
delegated_amount: if account.delegate.is_none() {
None
} else {
@@ -53,20 +57,29 @@ pub fn parse_token(
decimals,
))
},
}))
} else if data.len() == size_of::<Mint>() {
let mint: Mint = *unpack(&mut data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Mint(UiMint {
owner: match mint.owner {
close_authority: match account.close_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
}))
} else if data.len() == Mint::get_packed_len() {
let mint = Mint::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Mint(UiMint {
mint_authority: match mint.mint_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
supply: mint.supply.to_string(),
decimals: mint.decimals,
is_initialized: mint.is_initialized,
freeze_authority: match mint.freeze_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
}))
} else if data.len() == size_of::<Multisig>() {
let multisig: Multisig = *unpack(&mut data)
} else if data.len() == Multisig::get_packed_len() {
let multisig = Multisig::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Multisig(UiMultisig {
num_required_signers: multisig.m,
@@ -107,10 +120,32 @@ pub struct UiTokenAccount {
pub token_amount: UiTokenAmount,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegate: Option<String>,
pub is_initialized: bool,
pub state: UiAccountState,
pub is_native: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub rent_exempt_reserve: Option<UiTokenAmount>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_amount: Option<UiTokenAmount>,
#[serde(skip_serializing_if = "Option::is_none")]
pub close_authority: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountState {
Uninitialized,
Initialized,
Frozen,
}
impl From<AccountState> for UiAccountState {
fn from(state: AccountState) -> Self {
match state {
AccountState::Uninitialized => UiAccountState::Uninitialized,
AccountState::Initialized => UiAccountState::Initialized,
AccountState::Frozen => UiAccountState::Frozen,
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -134,9 +169,11 @@ pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMint {
pub owner: Option<String>,
pub mint_authority: Option<String>,
pub supply: StringAmount,
pub decimals: u8,
pub is_initialized: bool,
pub freeze_authority: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
@@ -149,7 +186,7 @@ pub struct UiMultisig {
}
pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
if data.len() == size_of::<Account>() {
if data.len() == Account::get_packed_len() {
Some(Pubkey::new(&data[0..32]))
} else {
None
@@ -159,18 +196,23 @@ pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
#[cfg(test)]
mod test {
use super::*;
use spl_token_v1_0::state::unpack_unchecked;
#[test]
fn test_parse_token() {
let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
let mut account_data = [0; size_of::<Account>()];
let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
account.mint = mint_pubkey;
account.owner = owner_pubkey;
account.amount = 42;
account.is_initialized = true;
let mut account_data = vec![0; Account::get_packed_len()];
Account::unpack_unchecked_mut(&mut account_data, &mut |account: &mut Account| {
account.mint = mint_pubkey;
account.owner = owner_pubkey;
account.amount = 42;
account.state = AccountState::Initialized;
account.is_native = COption::None;
account.close_authority = COption::Some(owner_pubkey);
Ok(())
})
.unwrap();
assert!(parse_token(&account_data, None).is_err());
assert_eq!(
parse_token(&account_data, Some(2)).unwrap(),
@@ -183,39 +225,52 @@ mod test {
amount: "42".to_string()
},
delegate: None,
is_initialized: true,
state: UiAccountState::Initialized,
is_native: false,
rent_exempt_reserve: None,
delegated_amount: None,
close_authority: Some(owner_pubkey.to_string()),
}),
);
let mut mint_data = [0; size_of::<Mint>()];
let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
mint.owner = COption::Some(owner_pubkey);
mint.decimals = 3;
mint.is_initialized = true;
let mut mint_data = vec![0; Mint::get_packed_len()];
Mint::unpack_unchecked_mut(&mut mint_data, &mut |mint: &mut Mint| {
mint.mint_authority = COption::Some(owner_pubkey);
mint.supply = 42;
mint.decimals = 3;
mint.is_initialized = true;
mint.freeze_authority = COption::Some(owner_pubkey);
Ok(())
})
.unwrap();
assert_eq!(
parse_token(&mint_data, None).unwrap(),
TokenAccountType::Mint(UiMint {
owner: Some(owner_pubkey.to_string()),
mint_authority: Some(owner_pubkey.to_string()),
supply: 42.to_string(),
decimals: 3,
is_initialized: true,
freeze_authority: Some(owner_pubkey.to_string()),
}),
);
let signer1 = SplTokenPubkey::new(&[1; 32]);
let signer2 = SplTokenPubkey::new(&[2; 32]);
let signer3 = SplTokenPubkey::new(&[3; 32]);
let mut multisig_data = [0; size_of::<Multisig>()];
let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
let mut multisig_data = vec![0; Multisig::get_packed_len()];
let mut signers = [SplTokenPubkey::default(); 11];
signers[0] = signer1;
signers[1] = signer2;
signers[2] = signer3;
multisig.m = 2;
multisig.n = 3;
multisig.is_initialized = true;
multisig.signers = signers;
Multisig::unpack_unchecked_mut(&mut multisig_data, &mut |multisig: &mut Multisig| {
multisig.m = 2;
multisig.n = 3;
multisig.is_initialized = true;
multisig.signers = signers;
Ok(())
})
.unwrap();
assert_eq!(
parse_token(&multisig_data, None).unwrap(),
TokenAccountType::Multisig(UiMultisig {
@@ -237,9 +292,12 @@ mod test {
#[test]
fn test_get_token_account_mint() {
let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
let mut account_data = [0; size_of::<Account>()];
let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
account.mint = mint_pubkey;
let mut account_data = vec![0; Account::get_packed_len()];
Account::unpack_unchecked_mut(&mut account_data, &mut |account: &mut Account| {
account.mint = mint_pubkey;
Ok(())
})
.unwrap();
let expected_mint_pubkey = Pubkey::new(&[2; 32]);
assert_eq!(


@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.1"
solana-logger = { path = "../logger", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-measure = { path = "../measure", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-measure = { path = "../measure", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"


@@ -5,7 +5,7 @@ use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::{genesis_config::OperatingMode, pubkey::Pubkey};
use std::fs;
use std::path::PathBuf;
@@ -54,7 +54,7 @@ fn main() {
if fs::remove_dir_all(path.clone()).is_err() {
println!("Warning: Couldn't remove {:?}", path);
}
let accounts = Accounts::new(vec![path]);
let accounts = Accounts::new(vec![path], &OperatingMode::Preview);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
let pubkeys: Vec<_> = (0..num_slots)


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,17 +12,17 @@ clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.1"
solana-core = { path = "../core", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-streamer = { path = "../streamer", version = "1.3.3" }
solana-perf = { path = "../perf", version = "1.3.3" }
solana-ledger = { path = "../ledger", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-measure = { path = "../measure", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
rayon = "1.4.0"
solana-core = { path = "../core", version = "1.3.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-streamer = { path = "../streamer", version = "1.3.8" }
solana-perf = { path = "../perf", version = "1.3.8" }
solana-ledger = { path = "../ledger", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-measure = { path = "../measure", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

banks-client/Cargo.toml (new file, 30 lines)

@@ -0,0 +1,30 @@
[package]
name = "solana-banks-client"
version = "1.3.8"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
async-trait = "0.1.36"
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-banks-server = { path = "../banks-server", version = "1.3.8" }
[lib]
crate-type = ["lib"]
name = "solana_banks_client"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

banks-client/src/lib.rs (new file, 283 lines)

@@ -0,0 +1,283 @@
//! A client for the ledger state, from the perspective of an arbitrary validator.
//!
//! Use start_tcp_client() to create a client and then import BanksClientExt to
//! access its methods. Additional "*_with_context" methods are also available,
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
use async_trait::async_trait;
use futures::future::join_all;
pub use solana_banks_interface::{BanksClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_sdk::{
account::Account, clock::Slot, commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature,
transaction::Transaction, transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
client, context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;
#[async_trait]
pub trait BanksClientExt {
/// Send a transaction and return immediately. The server will resend the
/// transaction until either it is accepted by the cluster or the transaction's
/// blockhash expires.
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()>;
/// Return a recent, rooted blockhash from the server. The cluster will only accept
/// transactions with a blockhash that has not yet expired. Use the `get_fees`
/// method to get both a blockhash and the blockhash's last valid slot.
async fn get_recent_blockhash(&mut self) -> io::Result<Hash>;
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
/// will use the transaction's blockhash to look up these same fee parameters and
/// use them to calculate the transaction fee.
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>;
/// Send a transaction and return after the transaction has been rejected or
/// reached the given level of commitment.
async fn process_transaction_with_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> transport::Result<()>;
/// Send a transaction and return after the transaction has been finalized or rejected.
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()>;
/// Return the status of a transaction with a signature matching the transaction's first
/// signature. Return None if the transaction is not found, which may be because the
/// blockhash was expired or the fee-paying account had insufficient funds to pay the
/// transaction fee. Note that servers rarely store the full transaction history. This
/// method may return None if the transaction status has been discarded.
async fn get_transaction_status(
&mut self,
signature: Signature,
) -> io::Result<Option<TransactionStatus>>;
/// Same as get_transaction_status, but for multiple transactions.
async fn get_transaction_statuses(
&mut self,
signatures: Vec<Signature>,
) -> io::Result<Vec<Option<TransactionStatus>>>;
/// Return the most recent rooted slot height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher slot height.
async fn get_root_slot(&mut self) -> io::Result<Slot>;
/// Return the account at the given address at the time of the most recent root slot.
/// If the account is not found, None is returned.
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>>;
/// Return the balance in lamports of an account at the given address at the slot
/// corresponding to the given commitment level.
async fn get_balance_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<u64>;
/// Return the balance in lamports of an account at the given address at the time
/// of the most recent root slot.
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64>;
}
#[async_trait]
impl BanksClientExt for BanksClient {
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()> {
self.send_transaction_with_context(context::current(), transaction)
.await
}
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)> {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::Root)
.await
}
async fn get_recent_blockhash(&mut self) -> io::Result<Hash> {
Ok(self.get_fees().await?.1)
}
async fn process_transaction_with_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> transport::Result<()> {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
let result = self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await?;
match result {
None => Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into()),
Some(transaction_result) => Ok(transaction_result?),
}
}
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()> {
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
.await
}
async fn get_root_slot(&mut self) -> io::Result<Slot> {
self.get_slot_with_context(context::current(), CommitmentLevel::Root)
.await
}
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>> {
self.get_account_with_commitment_and_context(
context::current(),
address,
CommitmentLevel::default(),
)
.await
}
async fn get_balance_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<u64> {
let account = self
.get_account_with_commitment_and_context(context::current(), address, commitment)
.await?;
Ok(account.map(|x| x.lamports).unwrap_or(0))
}
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64> {
self.get_balance_with_commitment(address, CommitmentLevel::default())
.await
}
async fn get_transaction_status(
&mut self,
signature: Signature,
) -> io::Result<Option<TransactionStatus>> {
self.get_transaction_status_with_context(context::current(), signature)
.await
}
async fn get_transaction_statuses(
&mut self,
signatures: Vec<Signature>,
) -> io::Result<Vec<Option<TransactionStatus>>> {
// tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
let mut clients_and_signatures: Vec<_> = signatures
.into_iter()
.map(|signature| (self.clone(), signature))
.collect();
let futs = clients_and_signatures
.iter_mut()
.map(|(client, signature)| client.get_transaction_status(*signature));
let statuses = join_all(futs).await;
// Convert Vec<Result<_, _>> to Result<Vec<_>>
statuses.into_iter().collect()
}
}
pub async fn start_client(
transport: UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>>,
) -> io::Result<BanksClient> {
BanksClient::new(client::Config::default(), transport).spawn()
}
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
let transport = tcp::connect(addr, Bincode::default()).await?;
BanksClient::new(client::Config::default(), transport).spawn()
}
#[cfg(test)]
mod tests {
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, pubkey::Pubkey, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::delay_for};
#[test]
fn test_banks_client_new() {
let (client_transport, _server_transport) = transport::channel::unbounded();
BanksClient::new(client::Config::default(), client_transport);
}
#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
let genesis = create_genesis_config(10);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let bob_pubkey = Pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client =
BanksClient::new(client::Config::default(), client_transport).spawn()?;
let recent_blockhash = banks_client.get_recent_blockhash().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
banks_client.process_transaction(transaction).await.unwrap();
assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
Ok(())
})
}
#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.
let genesis = create_genesis_config(10);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = Pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client =
BanksClient::new(client::Config::default(), client_transport).spawn()?;
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
let signature = transaction.signatures[0];
banks_client.send_transaction(transaction).await?;
let mut status = banks_client.get_transaction_status(signature).await?;
while status.is_none() {
let root_slot = banks_client.get_root_slot().await?;
if root_slot > last_valid_slot {
break;
}
delay_for(Duration::from_millis(100)).await;
status = banks_client.get_transaction_status(signature).await?;
}
assert!(status.unwrap().err.is_none());
assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
Ok(())
})
}
}
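The doc comment at the top of the new banks-client crate above points at `start_tcp_client()` plus the `BanksClientExt` trait. A hedged, self-contained sketch of that flow; the TCP address is a placeholder, and it assumes a banks server is listening there and that the payer keypair is already funded:

```rust
use solana_banks_client::{start_tcp_client, BanksClientExt};
use solana_sdk::{
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};
use tokio::runtime::Runtime;

fn main() -> std::io::Result<()> {
    let payer = Keypair::new(); // assumed to be funded out of band
    let recipient = Pubkey::new_rand();
    Runtime::new()?.block_on(async {
        let mut banks_client = start_tcp_client("127.0.0.1:8901").await?;
        let recent_blockhash = banks_client.get_recent_blockhash().await?;
        let instruction = system_instruction::transfer(&payer.pubkey(), &recipient, 1);
        let message = Message::new(&[instruction], Some(&payer.pubkey()));
        let transaction = Transaction::new(&[&payer], message, recent_blockhash);
        // Blocks until the transaction is finalized or rejected.
        banks_client.process_transaction(transaction).await.unwrap();
        println!("recipient balance: {}", banks_client.get_balance(recipient).await?);
        Ok(())
    })
}
```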


@@ -0,0 +1,21 @@
[package]
name = "solana-banks-interface"
version = "1.3.8"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.3.8" }
tarpc = { version = "0.21.0", features = ["full"] }
[lib]
crate-type = ["lib"]
name = "solana_banks_interface"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -0,0 +1,49 @@
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransactionStatus {
pub slot: Slot,
pub confirmations: Option<usize>, // None = rooted
pub err: Option<TransactionError>,
}
#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
async fn get_fees_with_commitment_and_context(
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, Slot);
async fn get_transaction_status_with_context(signature: Signature)
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>>;
async fn get_account_with_commitment_and_context(
address: Pubkey,
commitment: CommitmentLevel,
) -> Option<Account>;
}
#[cfg(test)]
mod tests {
use super::*;
use tarpc::{client, transport};
#[test]
fn test_banks_client_new() {
let (client_transport, _server_transport) = transport::channel::unbounded();
BanksClient::new(client::Config::default(), client_transport);
}
}
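For orientation, the `#[tarpc::service]` attribute above also generates a `BanksClient` stub whose methods mirror this trait (the test above constructs one over a dummy transport). A rough sketch of calling it over the loopback transport provided by `solana-banks-server`, from a crate that depends on both; the explicit tarpc `context` argument and exact call shape are assumptions based on tarpc 0.21, not something this diff shows:

use solana_banks_interface::BanksClient;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::commitment_config::CommitmentLevel;
use std::sync::{Arc, RwLock};
use tarpc::{client, context};
use tokio::runtime::Runtime;

fn main() -> std::io::Result<()> {
    // Same loopback setup as the banks-client tests earlier in this change
    let genesis = create_genesis_config(10);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(&genesis.genesis_config))));
    Runtime::new()?.block_on(async {
        let transport = start_local_server(&bank_forks).await;
        let mut banks_client = BanksClient::new(client::Config::default(), transport).spawn()?;
        // Each generated method takes a tarpc context plus the arguments declared in the trait
        let slot = banks_client
            .get_slot_with_context(context::current(), CommitmentLevel::Recent)
            .await?;
        println!("current slot: {}", slot);
        Ok(())
    })
}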

banks-server/Cargo.toml

@@ -0,0 +1,26 @@
[package]
name = "solana-banks-server"
version = "1.3.8"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }
[lib]
crate-type = ["lib"]
name = "solana_banks_server"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -0,0 +1,272 @@
use bincode::{deserialize, serialize};
use futures::{
future,
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
commitment::{BlockCommitmentCache, CommitmentSlots},
send_transaction_service::{SendTransactionService, TransactionInfo},
};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
};
use std::{
collections::HashMap,
io,
net::SocketAddr,
sync::{
atomic::AtomicBool,
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
};
use tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
};
use tokio::time::delay_for;
use tokio_serde::formats::Bincode;
#[derive(Clone)]
struct BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
}
impl BanksServer {
/// Return a BanksServer that forwards transactions to the
/// given sender. If unit-testing, those transactions can go to
/// a bank in the given BankForks. Otherwise, the receiver should
/// forward them to a validator in the leader schedule.
fn new(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
) -> Self {
Self {
bank_forks,
block_commitment_cache,
transaction_sender,
}
}
fn run(bank: &Bank, transaction_receiver: Receiver<TransactionInfo>) {
while let Ok(info) = transaction_receiver.recv() {
let mut transaction_infos = vec![info];
while let Ok(info) = transaction_receiver.try_recv() {
transaction_infos.push(info);
}
let transactions: Vec<_> = transaction_infos
.into_iter()
.map(|info| deserialize(&info.wire_transaction).unwrap())
.collect();
let _ = bank.process_transactions(&transactions);
}
}
/// Useful for unit-testing
fn new_loopback(bank_forks: Arc<RwLock<BankForks>>) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
HashMap::default(),
0,
CommitmentSlots {
slot,
root: 0,
highest_confirmed_slot: 0,
highest_confirmed_root: 0,
},
)));
Builder::new()
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(&bank, transaction_receiver))
.unwrap();
Self::new(bank_forks, block_commitment_cache, transaction_sender)
}
fn slot(&self, commitment: CommitmentLevel) -> Slot {
self.block_commitment_cache
.read()
.unwrap()
.slot_with_commitment(commitment)
}
fn bank(&self, commitment: CommitmentLevel) -> Arc<Bank> {
self.bank_forks.read().unwrap()[self.slot(commitment)].clone()
}
async fn poll_signature_status(
self,
signature: Signature,
last_valid_slot: Slot,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self.bank(commitment).get_signature_status(&signature);
while status.is_none() {
delay_for(Duration::from_millis(200)).await;
let bank = self.bank(commitment);
if bank.slot() > last_valid_slot {
break;
}
status = bank.get_signature_status(&signature);
}
status
}
}
#[tarpc::server]
impl Banks for BanksServer {
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
}
async fn get_fees_with_commitment_and_context(
self,
_: Context,
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, Slot) {
let bank = self.bank(commitment);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
(fee_calculator, blockhash, last_valid_slot)
}
async fn get_transaction_status_with_context(
self,
_: Context,
signature: Signature,
) -> Option<TransactionStatus> {
let bank = self.bank(CommitmentLevel::Recent);
let (slot, status) = bank.get_signature_status_slot(&signature)?;
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
let confirmations = if r_block_commitment_cache.root() >= slot {
None
} else {
r_block_commitment_cache
.get_confirmation_count(slot)
.or(Some(0))
};
Some(TransactionStatus {
slot,
confirmations,
err: status.err(),
})
}
async fn get_slot_with_context(self, _: Context, commitment: CommitmentLevel) -> Slot {
self.slot(commitment)
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(signature, last_valid_slot, commitment)
.await
}
async fn get_account_with_commitment_and_context(
self,
_: Context,
address: Pubkey,
commitment: CommitmentLevel,
) -> Option<Account> {
let bank = self.bank(commitment);
bank.get_account(&address)
}
}
pub async fn start_local_server(
bank_forks: &Arc<RwLock<BankForks>>,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
let banks_server = BanksServer::new_loopback(bank_forks.clone());
let (client_transport, server_transport) = transport::channel::unbounded();
let server = server::new(server::Config::default())
.incoming(stream::once(future::ready(server_transport)))
.respond_with(banks_server.serve());
tokio::spawn(server);
client_transport
}
pub async fn start_tcp_server(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> io::Result<()> {
// Note: These settings are copied straight from the tarpc example.
let server = tcp::listen(listen_addr, Bincode::default)
.await?
// Ignore accept errors.
.filter_map(|r| future::ready(r.ok()))
.map(server::BaseChannel::with_defaults)
// Limit channels to 1 per IP.
.max_channels_per_key(1, |t| t.as_ref().peer_addr().unwrap().ip())
// serve is generated by the service attribute. It takes as input any type implementing
// the generated Banks trait.
.map(move |chan| {
let (sender, receiver) = channel();
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
SendTransactionService::new(
tpu_addr,
&bank_forks,
&exit_send_transaction_service,
receiver,
);
let server =
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
chan.respond_with(server.serve()).execute()
})
// Max 10 channels.
.buffer_unordered(10)
.for_each(|_| async {});
server.await;
Ok(())
}
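As a point of reference, a minimal sketch of driving `start_tcp_server` directly on a Tokio runtime, outside the `RpcBanksService` wrapper introduced below; the socket addresses are placeholders, and the loopback constructors mirror the tests elsewhere in this change:

use solana_banks_server::banks_server::start_tcp_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::net::SocketAddr;
use std::sync::{Arc, RwLock};
use tokio::runtime::Runtime;

fn main() -> std::io::Result<()> {
    let listen_addr: SocketAddr = "127.0.0.1:8901".parse().unwrap(); // placeholder port
    let tpu_addr: SocketAddr = "127.0.0.1:8003".parse().unwrap(); // placeholder port
    let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default())));
    let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
    // Blocks until the TCP listener shuts down; RpcBanksService wraps this call with an exit flag
    Runtime::new()?.block_on(start_tcp_server(
        listen_addr,
        tpu_addr,
        bank_forks,
        block_commitment_cache,
    ))
}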

banks-server/src/lib.rs

@@ -0,0 +1,2 @@
pub mod banks_server;
pub mod rpc_banks_service;

@@ -0,0 +1,116 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
};
use tokio::{
runtime::Runtime,
time::{self, Duration},
};
pub struct RpcBanksService {
thread_hdl: JoinHandle<()>,
}
/// Run the TCP service until `exit` is set to true
async fn start_abortable_tcp_server(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) {
let server = start_tcp_server(
listen_addr,
tpu_addr,
bank_forks.clone(),
block_commitment_cache.clone(),
)
.fuse();
let interval = time::interval(Duration::from_millis(100)).fuse();
pin_mut!(server, interval);
loop {
select! {
_ = server => {},
_ = interval.select_next_some() => {
if exit.load(Ordering::Relaxed) {
break;
}
}
}
}
}
impl RpcBanksService {
fn run(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) {
let server = start_abortable_tcp_server(
listen_addr,
tpu_addr,
bank_forks,
block_commitment_cache,
exit,
);
Runtime::new().unwrap().block_on(server);
}
pub fn new(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
exit: &Arc<AtomicBool>,
) -> Self {
let bank_forks = bank_forks.clone();
let block_commitment_cache = block_commitment_cache.clone();
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-rpc-banks".to_string())
.spawn(move || {
Self::run(
listen_addr,
tpu_addr,
bank_forks,
block_commitment_cache,
exit,
)
})
.unwrap();
Self { thread_hdl }
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::bank::Bank;
#[test]
fn test_rpc_banks_server_exit() {
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default())));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let exit = Arc::new(AtomicBool::new(false));
let addr = "127.0.0.1:0".parse().unwrap();
let service = RpcBanksService::new(addr, addr, &bank_forks, &block_commitment_cache, &exit);
exit.store(true, Ordering::Relaxed);
service.join().unwrap();
}
}

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,24 +15,24 @@ log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.3.1"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-core = { path = "../core", version = "1.3.3" }
solana-genesis = { path = "../genesis", version = "1.3.3" }
solana-client = { path = "../client", version = "1.3.3" }
solana-faucet = { path = "../faucet", version = "1.3.3" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-metrics = { path = "../metrics", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-core = { path = "../core", version = "1.3.8" }
solana-genesis = { path = "../genesis", version = "1.3.8" }
solana-client = { path = "../client", version = "1.3.8" }
solana-faucet = { path = "../faucet", version = "1.3.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-metrics = { path = "../metrics", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.3.3" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-streamer = { path = "../streamer", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-streamer = { path = "../streamer", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,26 +11,26 @@ homepage = "https://solana.com/"
bincode = "1.3.1"
clap = "2.33.1"
log = "0.4.8"
rayon = "1.3.1"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-core = { path = "../core", version = "1.3.3" }
solana-genesis = { path = "../genesis", version = "1.3.3" }
solana-client = { path = "../client", version = "1.3.3" }
solana-faucet = { path = "../faucet", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-metrics = { path = "../metrics", version = "1.3.3" }
solana-measure = { path = "../measure", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-core = { path = "../core", version = "1.3.8" }
solana-genesis = { path = "../genesis", version = "1.3.8" }
solana-client = { path = "../client", version = "1.3.8" }
solana-faucet = { path = "../faucet", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-metrics = { path = "../metrics", version = "1.3.8" }
solana-measure = { path = "../measure", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.3.3" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -89,11 +89,17 @@ BETA_CHANNEL_LATEST_TAG=${beta_tag:+v$beta_tag}
STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag}
if [[ $CI_BRANCH = "$STABLE_CHANNEL" ]]; then
if [[ -n $CI_BASE_BRANCH ]]; then
BRANCH="$CI_BASE_BRANCH"
elif [[ -n $CI_BRANCH ]]; then
BRANCH="$CI_BRANCH"
fi
if [[ $BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $CI_BRANCH = "$EDGE_CHANNEL" ]]; then
elif [[ $BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $CI_BRANCH = "$BETA_CHANNEL" ]]; then
elif [[ $BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi

@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=65536
LimitNOFILE=500000
[Install]
WantedBy=multi-user.target

@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
ensure_env || exit 1
# Allow more files to be opened by a user
sed -i 's/^\(# End of file\)/* soft nofile 65535\n\n\1/' /etc/security/limits.conf
echo "* - nofile 500000" > /etc/security/limits.d/90-solana-nofiles.conf

@@ -41,13 +41,14 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
echo "$0: [tree (for outdated Cargo.lock sync)|check (for compilation error)|update -p foo --precise x.y.z (for your Cargo.toml update)] ..." >&2
exit "$check_status"
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
_ ci/order-crates-for-publishing.py
_ cargo +"$rust_stable" fmt --all -- --check

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.3.3"
version = "1.3.8"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.0"

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -76,6 +76,17 @@ impl Config {
ws_url.to_string()
}
pub fn compute_rpc_banks_url(json_rpc_url: &str) -> String {
let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
if json_rpc_url.is_none() {
return "".to_string();
}
let mut url = json_rpc_url.unwrap();
let port = url.port_or_known_default().unwrap_or(80);
url.set_port(Some(port + 2)).expect("unable to set port");
url.to_string()
}
pub fn import_address_labels<P>(&mut self, filename: P) -> Result<(), io::Error>
where
P: AsRef<Path>,
@@ -122,4 +133,28 @@ mod test {
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
#[test]
fn compute_rpc_banks_url() {
assert_eq!(
Config::compute_rpc_banks_url(&"http://devnet.solana.com"),
"http://devnet.solana.com:82/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"https://devnet.solana.com"),
"https://devnet.solana.com:445/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"http://example.com:8899"),
"http://example.com:8901/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"https://example.com:1234"),
"https://example.com:1236/".to_string()
);
assert_eq!(Config::compute_rpc_banks_url(&"garbage"), String::new());
}
}

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,29 +27,29 @@ reqwest = { version = "0.10.6", default-features = false, features = ["blocking"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.3" }
solana-budget-program = { path = "../programs/budget", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-cli-config = { path = "../cli-config", version = "1.3.3" }
solana-client = { path = "../client", version = "1.3.3" }
solana-config-program = { path = "../programs/config", version = "1.3.3" }
solana-faucet = { path = "../faucet", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-stake-program = { path = "../programs/stake", version = "1.3.3" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-vote-program = { path = "../programs/vote", version = "1.3.3" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.3" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.8" }
solana-budget-program = { path = "../programs/budget", version = "1.3.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-cli-config = { path = "../cli-config", version = "1.3.8" }
solana-client = { path = "../client", version = "1.3.8" }
solana-config-program = { path = "../programs/config", version = "1.3.8" }
solana-faucet = { path = "../faucet", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-stake-program = { path = "../programs/stake", version = "1.3.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
solana-vote-program = { path = "../programs/vote", version = "1.3.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.8" }
thiserror = "1.0.20"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.3.3" }
solana-budget-program = { path = "../programs/budget", version = "1.3.3" }
solana-core = { path = "../core", version = "1.3.8" }
solana-budget-program = { path = "../programs/budget", version = "1.3.8" }
tempfile = "3.1.0"
[[bin]]

@@ -11,7 +11,7 @@ use crate::{
vote::*,
};
use chrono::prelude::*;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
@@ -33,7 +33,7 @@ use solana_faucet::faucet::request_airdrop_transaction;
use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
bpf_loader,
bpf_loader, bpf_loader_deprecated,
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
commitment_config::CommitmentConfig,
decode_error::DecodeError,
@@ -232,6 +232,7 @@ pub enum CliCommand {
TransactionHistory {
address: Pubkey,
before: Option<Signature>,
until: Option<Signature>,
limit: usize,
},
// Nonce commands
@@ -265,6 +266,7 @@ pub enum CliCommand {
Deploy {
program_location: String,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
},
// Stake Commands
CreateStakeAccount {
@@ -585,6 +587,7 @@ impl CliConfig<'_> {
config.commitment = CommitmentConfig::recent();
config.send_transaction_config = RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
};
config
}
@@ -695,11 +698,13 @@ pub fn parse_command(
signers.push(signer);
1
});
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
program_location: matches.value_of("program_location").unwrap().to_string(),
address,
use_deprecated_loader,
},
signers,
})
@@ -850,9 +855,14 @@ pub fn parse_command(
_ => Err(CliError::BadParameter("Invalid signature".to_string())),
},
("decode-transaction", Some(matches)) => {
let encoded_transaction = EncodedTransaction::Binary(
matches.value_of("base58_transaction").unwrap().to_string(),
);
let blob = value_t_or_exit!(matches, "transaction", String);
let encoding = match matches.value_of("encoding").unwrap() {
"base58" => UiTransactionEncoding::Binary,
"base64" => UiTransactionEncoding::Base64,
_ => unreachable!(),
};
let encoded_transaction = EncodedTransaction::Binary(blob, encoding);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
command: CliCommand::DecodeTransaction(transaction),
@@ -1182,7 +1192,7 @@ fn process_confirm(
if let Some(transaction_status) = status {
if config.verbose {
match rpc_client
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
.get_confirmed_transaction(signature, UiTransactionEncoding::Base64)
{
Ok(confirmed_transaction) => {
println!(
@@ -1238,7 +1248,7 @@ fn process_show_account(
account: UiAccount::encode(
account_pubkey,
account,
UiAccountEncoding::Binary64,
UiAccountEncoding::Base64,
None,
None,
),
@@ -1289,6 +1299,7 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
&transaction,
RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
},
)
.ok();
@@ -1358,6 +1369,7 @@ fn process_deploy(
config: &CliConfig,
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
) -> ProcessResult {
let new_keypair = Keypair::new(); // Create ephemeral keypair to use for program address, if not provided
let program_id = if let Some(i) = address {
@@ -1373,6 +1385,12 @@ fn process_deploy(
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
})?;
let loader_id = if use_deprecated_loader {
bpf_loader_deprecated::id()
} else {
bpf_loader::id()
};
// Build transactions to calculate fees
let mut messages: Vec<&Message> = Vec::new();
let (blockhash, fee_calculator, _) = rpc_client
@@ -1384,7 +1402,7 @@ fn process_deploy(
&program_id.pubkey(),
minimum_balance.max(1),
program_data.len() as u64,
&bpf_loader::id(),
&loader_id,
);
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
let mut create_account_tx = Transaction::new_unsigned(message);
@@ -1395,7 +1413,7 @@ fn process_deploy(
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
let instruction = loader_instruction::write(
&program_id.pubkey(),
&bpf_loader::id(),
&loader_id,
(i * DATA_CHUNK_SIZE) as u32,
chunk.to_vec(),
);
@@ -1408,7 +1426,7 @@ fn process_deploy(
}
messages.append(&mut write_message_refs);
let instruction = loader_instruction::finalize(&program_id.pubkey(), &bpf_loader::id());
let instruction = loader_instruction::finalize(&program_id.pubkey(), &loader_id);
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
messages.push(&finalize_message);
@@ -1459,6 +1477,7 @@ fn process_deploy(
config.commitment,
RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
},
)
.map_err(|e| {
@@ -1886,8 +1905,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::TransactionHistory {
address,
before,
until,
limit,
} => process_transaction_history(&rpc_client, config, address, *before, *limit),
} => process_transaction_history(&rpc_client, config, address, *before, *until, *limit),
// Nonce Commands
@@ -1957,7 +1977,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Deploy {
program_location,
address,
} => process_deploy(&rpc_client, config, program_location, *address),
use_deprecated_loader,
} => process_deploy(
&rpc_client,
config,
program_location,
*address,
*use_deprecated_loader,
),
// Stake Commands
@@ -2578,12 +2605,22 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
SubCommand::with_name("decode-transaction")
.about("Decode a base-58 binary transaction")
.arg(
Arg::with_name("base58_transaction")
Arg::with_name("transaction")
.index(1)
.value_name("BASE58_TRANSACTION")
.value_name("TRANSACTION")
.takes_value(true)
.required(true)
.help("The transaction to decode"),
.help("transaction to decode"),
)
.arg(
Arg::with_name("encoding")
.index(2)
.value_name("ENCODING")
.possible_values(&["base58", "base64"]) // Subset of `UiTransactionEncoding` enum
.default_value("base58")
.takes_value(true)
.required(true)
.help("transaction encoding"),
),
)
.subcommand(
@@ -2634,6 +2671,13 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(true)
.validator(is_valid_signer)
.help("The signer for the desired address of the program [default: new random address]")
)
.arg(
Arg::with_name("use_deprecated_loader")
.long("use-deprecated-loader")
.takes_value(false)
.hidden(true) // Don't document this argument to discourage its use
.help("Use the deprecated BPF loader")
),
)
.subcommand(
@@ -3078,6 +3122,7 @@ mod tests {
command: CliCommand::Deploy {
program_location: "/Users/test/program.o".to_string(),
address: None,
use_deprecated_loader: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -3098,6 +3143,7 @@ mod tests {
command: CliCommand::Deploy {
program_location: "/Users/test/program.o".to_string(),
address: Some(1),
use_deprecated_loader: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -3817,6 +3863,7 @@ mod tests {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
};
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
@@ -3834,6 +3881,7 @@ mod tests {
config.command = CliCommand::Deploy {
program_location: "bad/file/location.so".to_string(),
address: None,
use_deprecated_loader: false,
};
assert!(process_command(&config).is_err());
}

@@ -11,7 +11,7 @@ use solana_clap_utils::{
};
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
@@ -444,12 +444,21 @@ pub fn parse_transaction_history(
),
None => None,
};
let until = match matches.value_of("until") {
Some(signature) => Some(
signature
.parse()
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
),
None => None,
};
let limit = value_t_or_exit!(matches, "limit", usize);
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
before,
until,
limit,
},
signers: vec![],
@@ -1311,12 +1320,16 @@ pub fn process_transaction_history(
config: &CliConfig,
address: &Pubkey,
before: Option<Signature>,
until: Option<Signature>,
limit: usize,
) -> ProcessResult {
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
address,
before,
Some(limit),
GetConfirmedSignaturesForAddress2Config {
before,
until,
limit: Some(limit),
},
)?;
let transactions_found = format!("{} transactions found", results.len());

@@ -353,7 +353,7 @@ mod tests {
let rpc_nonce_account = UiAccount::encode(
&nonce_pubkey,
nonce_account,
UiAccountEncoding::Binary64,
UiAccountEncoding::Base64,
None,
None,
);

@@ -63,6 +63,7 @@ fn test_cli_deploy_program() {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
};
let response = process_command(&config);
@@ -96,6 +97,7 @@ fn test_cli_deploy_program() {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
};
process_command(&config).unwrap();
let account1 = rpc_client

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.3.3"
version = "1.3.8"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,16 +14,16 @@ bs58 = "0.3.1"
indicatif = "0.15.0"
jsonrpc-core = "14.2.0"
log = "0.4.8"
rayon = "1.3.1"
rayon = "1.4.0"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.3" }
solana-vote-program = { path = "../programs/vote", version = "1.3.3" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.8" }
solana-vote-program = { path = "../programs/vote", version = "1.3.8" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -32,7 +32,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.2.0"
jsonrpc-http-server = "14.2.0"
solana-logger = { path = "../logger", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -5,6 +5,9 @@ use serde_json::{
value::Value::{Number, Object},
Map, Value,
};
use solana_sdk::{
commitment_config::CommitmentConfig, signature::Signature, transaction::TransactionError,
};
use std::{
marker::PhantomData,
sync::{
@@ -18,6 +21,8 @@ use thiserror::Error;
use tungstenite::{client::AutoStream, connect, Message, WebSocket};
use url::{ParseError, Url};
type PubsubSignatureResponse = PubsubClientSubscription<RpcResponse<SignatureResult>>;
#[derive(Debug, Error)]
pub enum PubsubClientError {
#[error("url parse error")]
@@ -33,6 +38,30 @@ pub enum PubsubClientError {
UnexpectedMessageError,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "result")]
pub enum SignatureResult {
ProcessedSignatureResult(ProcessedSignatureResult),
ReceivedSignature,
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct RpcResponseContext {
pub slot: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ProcessedSignatureResult {
pub err: Option<TransactionError>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponse<T> {
pub context: RpcResponseContext,
pub value: T,
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct SlotInfoMessage {
pub parent: u64,
@@ -40,6 +69,14 @@ pub struct SlotInfoMessage {
pub slot: u64,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub enable_received_notification: Option<bool>,
}
pub struct PubsubClientSubscription<T>
where
T: DeserializeOwned,
@@ -73,18 +110,12 @@ where
{
fn send_subscribe(
writable_socket: &Arc<RwLock<WebSocket<AutoStream>>>,
operation: &str,
body: String,
) -> Result<u64, PubsubClientError> {
let method = format!("{}Subscribe", operation);
writable_socket
.write()
.unwrap()
.write_message(Message::Text(
json!({
"jsonrpc":"2.0","id":1,"method":method,"params":[]
})
.to_string(),
))?;
.write_message(Message::Text(body))?;
let message = writable_socket.write().unwrap().read_message()?;
Self::extract_subscription_id(message)
}
@@ -148,6 +179,7 @@ where
}
const SLOT_OPERATION: &str = "slot";
const SIGNATURE_OPERATION: &str = "signature";
pub struct PubsubClient {}
@@ -171,7 +203,10 @@ impl PubsubClient {
let exit_clone = exit.clone();
let subscription_id = PubsubClientSubscription::<SlotInfoMessage>::send_subscribe(
&socket_clone,
SLOT_OPERATION,
json!({
"jsonrpc":"2.0","id":1,"method":format!("{}Subscribe", SLOT_OPERATION),"params":[]
})
.to_string(),
)
.unwrap();
@@ -212,6 +247,80 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn signature_subscribe(
url: &str,
signature: &Signature,
) -> Result<
(
PubsubSignatureResponse,
Receiver<RpcResponse<SignatureResult>>,
),
PubsubClientError,
> {
let url = Url::parse(url)?;
let (socket, _response) = connect(url)?;
let (sender, receiver) = channel::<RpcResponse<SignatureResult>>();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":format!("{}Subscribe", SIGNATURE_OPERATION),
"params":[
signature.to_string(),
{"enableReceivedNotification": true }
]
})
.to_string();
let subscription_id =
PubsubClientSubscription::<RpcResponse<SignatureResult>>::send_subscribe(
&socket_clone,
body,
)
.unwrap();
let t_cleanup = std::thread::spawn(move || {
loop {
if exit_clone.load(Ordering::Relaxed) {
break;
}
let message: Result<RpcResponse<SignatureResult>, PubsubClientError> =
PubsubClientSubscription::read_message(&socket_clone);
if let Ok(msg) = message {
match sender.send(msg.clone()) {
Ok(_) => (),
Err(err) => {
info!("receive error: {:?}", err);
break;
}
}
} else {
info!("receive error: {:?}", message);
break;
}
}
info!("websocket - exited receive loop");
});
let result: PubsubClientSubscription<RpcResponse<SignatureResult>> =
PubsubClientSubscription {
message_type: PhantomData,
operation: SIGNATURE_OPERATION,
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
}
#[cfg(test)]
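The pubsub client hunk ends here. For orientation, a rough sketch of consuming the new `signature_subscribe` helper from calling code; the websocket URL and the default signature are placeholders, and error handling is elided:

use solana_client::pubsub_client::{PubsubClient, SignatureResult};
use solana_sdk::signature::Signature;

fn main() {
    // Placeholder; normally the signature of a transaction that was just sent
    let signature = Signature::default();
    let (_subscription, receiver) =
        PubsubClient::signature_subscribe("ws://127.0.0.1:8900", &signature).unwrap();
    // Each notification is an RpcResponse<SignatureResult>; with enableReceivedNotification
    // set (as in the request body above), a ReceivedSignature event may precede the result
    for response in receiver.iter() {
        match response.value {
            SignatureResult::ReceivedSignature => println!("signature received by cluster"),
            SignatureResult::ProcessedSignatureResult(result) => {
                println!("processed: err = {:?}", result.err);
                break;
            }
        }
    }
}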

@@ -15,12 +15,7 @@ use bincode::serialize;
use indicatif::{ProgressBar, ProgressStyle};
use log::*;
use serde_json::{json, Value};
use solana_account_decoder::{
parse_token::UiTokenAmount,
UiAccount,
UiAccountData::{Binary, Binary64},
UiAccountEncoding,
};
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount, UiAccountEncoding};
use solana_sdk::{
account::Account,
clock::{
@@ -299,18 +294,21 @@ impl RpcClient {
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(address, None, None)
self.get_confirmed_signatures_for_address2_with_config(
address,
GetConfirmedSignaturesForAddress2Config::default(),
)
}
pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
before: Option<Signature>,
limit: Option<usize>,
config: GetConfirmedSignaturesForAddress2Config,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: before.map(|signature| signature.to_string()),
limit,
before: config.before.map(|signature| signature.to_string()),
until: config.until.map(|signature| signature.to_string()),
limit: config.limit,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
@@ -466,7 +464,7 @@ impl RpcClient {
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Binary64),
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
@@ -484,17 +482,8 @@ impl RpcClient {
}
let Response {
context,
value: mut rpc_account,
value: rpc_account,
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
if let Some(ref mut account) = rpc_account {
if let Binary(_) = &account.data {
let tmp = Binary64(String::new());
match std::mem::replace(&mut account.data, tmp) {
Binary(new_data) => account.data = Binary64(new_data),
_ => panic!("should have gotten binary here."),
}
}
}
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
Ok(Response {
@@ -510,6 +499,38 @@ impl RpcClient {
})?
}
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
Ok(self
.get_multiple_accounts_with_commitment(pubkeys, CommitmentConfig::default())?
.value)
}
pub fn get_multiple_accounts_with_commitment(
&self,
pubkeys: &[Pubkey],
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<Option<Account>>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
let pubkeys: Vec<_> = pubkeys.iter().map(|pubkey| pubkey.to_string()).collect();
let response = self.send(RpcRequest::GetMultipleAccounts, json!([[pubkeys], config]))?;
let Response {
context,
value: accounts,
} = serde_json::from_value::<Response<Option<UiAccount>>>(response)?;
let accounts: Vec<Option<Account>> = accounts
.iter()
.map(|rpc_account| rpc_account.decode())
.collect();
Ok(Response {
context,
value: accounts,
})
}
pub fn get_account_data(&self, pubkey: &Pubkey) -> ClientResult<Vec<u8>> {
Ok(self.get_account(pubkey)?.data)
}
@@ -1134,6 +1155,13 @@ impl RpcClient {
}
}
#[derive(Debug, Default)]
pub struct GetConfirmedSignaturesForAddress2Config {
pub before: Option<Signature>,
pub until: Option<Signature>,
pub limit: Option<usize>,
}
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
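For orientation, a short sketch of the two `RpcClient` additions shown above, `get_multiple_accounts` and the config-based confirmed-signature query; the RPC URL and keys are placeholders:

use solana_client::rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient};
use solana_sdk::pubkey::Pubkey;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string()); // placeholder URL
    let keys = vec![Pubkey::new_rand(), Pubkey::new_rand()];
    // One request returns Vec<Option<Account>>, with None for keys that don't exist
    let accounts = rpc_client.get_multiple_accounts(&keys)?;
    println!("fetched {} account slots", accounts.len());
    // `until` bounds the scan from the opposite end of the history than `before`
    let signatures = rpc_client.get_confirmed_signatures_for_address2_with_config(
        &keys[0],
        GetConfirmedSignaturesForAddress2Config {
            before: None,
            until: None,
            limit: Some(10),
        },
    )?;
    println!("{} transactions found", signatures.len());
    Ok(())
}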

@@ -12,12 +12,14 @@ pub struct RpcSignatureStatusConfig {
#[serde(rename_all = "camelCase")]
pub struct RpcSendTransactionConfig {
pub skip_preflight: bool,
pub preflight_commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
@@ -67,9 +69,18 @@ pub enum RpcTokenAccountsFilter {
ProgramId(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub enable_received_notification: Option<bool>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
pub before: Option<String>, // Signature as base-58 string
pub until: Option<String>, // Signature as base-58 string
pub limit: Option<usize>,
}
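Both send and simulate configs gain an optional commitment field while keeping `Default`, so call sites can opt in with struct-update syntax, the pattern used throughout the CLI changes above. A minimal sketch:

use solana_client::rpc_config::RpcSendTransactionConfig;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    // Skip preflight here, and pin the preflight commitment for call sites that do run it
    let config = RpcSendTransactionConfig {
        skip_preflight: true,
        preflight_commitment: Some(CommitmentConfig::recent()),
        ..RpcSendTransactionConfig::default()
    };
    // `config` would then be passed to a send_transaction_with_config-style RpcClient call
    let _ = config;
}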

@@ -28,6 +28,7 @@ pub enum RpcRequest {
GetLargestAccounts,
GetLeaderSchedule,
GetMinimumBalanceForRentExemption,
GetMultipleAccounts,
GetProgramAccounts,
GetRecentBlockhash,
GetSignatureStatuses,
@@ -80,6 +81,7 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::GetMultipleAccounts => "getMultipleAccounts",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
@@ -110,11 +112,12 @@ impl fmt::Display for RpcRequest {
}
}
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
pub const MAX_MULTIPLE_ACCOUNTS: usize = 20;
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
// Validators that are this number of slots behind are considered delinquent
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;

@@ -94,9 +94,16 @@ pub struct RpcKeyedAccount {
pub account: UiAccount,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase", tag = "type", content = "result")]
pub enum RpcSignatureResult {
ProcessedSignatureResult(ProcessedSignatureResult),
ReceivedSignature,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureResult {
pub struct ProcessedSignatureResult {
pub err: Option<TransactionError>,
}

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.3"
version = "1.3.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -38,43 +38,44 @@ num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.4.2"
rayon = "1.3.1"
rayon = "1.4.0"
regex = "1.3.9"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.3" }
solana-budget-program = { path = "../programs/budget", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-client = { path = "../client", version = "1.3.3" }
solana-faucet = { path = "../faucet", version = "1.3.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.3" }
solana-ledger = { path = "../ledger", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.3" }
solana-metrics = { path = "../metrics", version = "1.3.3" }
solana-measure = { path = "../measure", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-perf = { path = "../perf", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.3" }
solana-stake-program = { path = "../programs/stake", version = "1.3.3" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.3" }
solana-streamer = { path = "../streamer", version = "1.3.3" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.3" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-vote-program = { path = "../programs/vote", version = "1.3.3" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.3" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
solana-account-decoder = { path = "../account-decoder", version = "1.3.8" }
solana-banks-server = { path = "../banks-server", version = "1.3.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.8" }
solana-budget-program = { path = "../programs/budget", version = "1.3.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-client = { path = "../client", version = "1.3.8" }
solana-faucet = { path = "../faucet", version = "1.3.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.8" }
solana-ledger = { path = "../ledger", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.8" }
solana-metrics = { path = "../metrics", version = "1.3.8" }
solana-measure = { path = "../measure", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-perf = { path = "../perf", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.8" }
solana-stake-program = { path = "../programs/stake", version = "1.3.8" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.8" }
solana-streamer = { path = "../streamer", version = "1.3.8" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
solana-vote-program = { path = "../programs/vote", version = "1.3.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.8" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.3", features = ["skip-no-mangle"] }
tempfile = "3.1.0"
thiserror = "1.0"
tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.8" }
tokio = { version = "0.2.22", features = ["full"] }
trees = "0.2.1"

@@ -30,8 +30,8 @@ use solana_runtime::{
};
use solana_sdk::{
clock::{
Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
},
poh_config::PohConfig,
pubkey::Pubkey,
@@ -768,10 +768,7 @@ impl BankingStage {
&filter,
(MAX_PROCESSING_AGE)
.saturating_sub(max_tx_fwd_delay)
.saturating_sub(
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET * bank.ticks_per_slot()
/ DEFAULT_TICKS_PER_SECOND) as usize,
),
.saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize),
&mut error_counters,
);

@@ -358,7 +358,7 @@ pub fn make_accounts_hashes_message(
}
// TODO These messages should go through the gpu pipeline for spam filtering
#[frozen_abi(digest = "6qRS1ZwydpdSqzeyRdDvn5uwfDdFYkuUz4K4jSkd1oFW")]
#[frozen_abi(digest = "CnN1gW2K2TRydGc84eYnQJwdTADPjQf6LJLZ4RP1QeoH")]
#[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)]
#[allow(clippy::large_enum_variant)]
enum Protocol {
@@ -491,6 +491,21 @@ impl ClusterInfo {
.map(map)
}
pub fn lookup_contact_info_by_gossip_addr(
&self,
gossip_addr: &SocketAddr,
) -> Option<ContactInfo> {
for versioned_value in self.gossip.read().unwrap().crds.table.values() {
if let Some(contact_info) = CrdsValue::contact_info(&versioned_value.value) {
if contact_info.gossip == *gossip_addr {
return Some(contact_info.clone());
}
}
}
None
}
pub fn my_contact_info(&self) -> ContactInfo {
self.my_contact_info.read().unwrap().clone()
}
@@ -543,7 +558,7 @@ impl ClusterInfo {
}
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@@ -566,6 +581,7 @@ impl ClusterInfo {
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
addr_to_string(&ip_addr, &node.rpc_banks),
node.shred_version,
))
}
@@ -1445,11 +1461,13 @@ impl ClusterInfo {
fn handle_adopt_shred_version(self: &Arc<Self>, adopt_shred_version: &mut bool) {
// Adopt the entrypoint's `shred_version` if ours is unset
if *adopt_shred_version {
// If gossip was given an entrypoint, lookup its id
let entrypoint_id = self.entrypoint.read().unwrap().as_ref().map(|e| e.id);
if let Some(entrypoint_id) = entrypoint_id {
// If gossip was given an entrypoint, look up the ContactInfo by the given
// entrypoint gossip address
let gossip_addr = self.entrypoint.read().unwrap().as_ref().map(|e| e.gossip);
if let Some(gossip_addr) = gossip_addr {
// If a pull from the entrypoint was successful, it should exist in the crds table
let entrypoint = self.lookup_contact_info(&entrypoint_id, |ci| ci.clone());
let entrypoint = self.lookup_contact_info_by_gossip_addr(&gossip_addr);
if let Some(entrypoint) = entrypoint {
if entrypoint.shred_version == 0 {
info!("Unable to adopt entrypoint's shred version");
@@ -1465,6 +1483,7 @@ impl ClusterInfo {
.unwrap()
.set_shred_version(entrypoint.shred_version);
self.insert_self();
*self.entrypoint.write().unwrap() = Some(entrypoint);
*adopt_shred_version = false;
}
}
@@ -1853,15 +1872,15 @@ impl ClusterInfo {
) -> (usize, usize, usize) {
let len = crds_values.len();
trace!("PullResponse me: {} from: {} len={}", self.id, from, len);
if let Some(shred_version) = self.lookup_contact_info(from, |ci| ci.shred_version) {
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
}
let shred_version = self
.lookup_contact_info(from, |ci| ci.shred_version)
.unwrap_or(0);
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
let filtered_len = crds_values.len();
let mut pull_stats = ProcessPullStats::default();
@@ -1913,7 +1932,8 @@ impl ClusterInfo {
shred_version: u16,
my_shred_version: u16,
) {
if my_shred_version != 0 && shred_version != 0 && shred_version != my_shred_version {
// Always run filter on spies
if my_shred_version != 0 && shred_version != my_shred_version {
// Allow someone to update their own ContactInfo so they
// can change shred versions if needed.
crds_values.retain(|crds_value| match &crds_value.data {
@@ -1934,14 +1954,15 @@ impl ClusterInfo {
self.stats.push_message_count.add_relaxed(1);
let len = crds_values.len();
if let Some(shred_version) = self.lookup_contact_info(from, |ci| ci.shred_version) {
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
}
let shred_version = self
.lookup_contact_info(from, |ci| ci.shred_version)
.unwrap_or(0);
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
let filtered_len = crds_values.len();
self.stats
.push_message_value_count
@@ -2433,10 +2454,12 @@ impl Node {
let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap();
let rpc_pubsub_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_pubsub_port);
let rpc_banks_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap();
let rpc_banks_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_banks_port);
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let unused = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let info = ContactInfo {
id: *pubkey,
@@ -2446,7 +2469,7 @@ impl Node {
repair: repair.local_addr().unwrap(),
tpu: tpu.local_addr().unwrap(),
tpu_forwards: tpu_forwards.local_addr().unwrap(),
unused: unused.local_addr().unwrap(),
rpc_banks: rpc_banks_addr,
rpc: rpc_addr,
rpc_pubsub: rpc_pubsub_addr,
serve_repair: serve_repair.local_addr().unwrap(),
@@ -2527,7 +2550,7 @@ impl Node {
repair: SocketAddr::new(gossip_addr.ip(), repair_port),
tpu: SocketAddr::new(gossip_addr.ip(), tpu_port),
tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
unused: socketaddr_any!(),
rpc_banks: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port),
@@ -3389,4 +3412,36 @@ mod tests {
let vote = CrdsValue::new_signed(CrdsData::Vote(1, vote), &Keypair::new());
assert!(bincode::serialized_size(&vote).unwrap() <= MAX_PROTOCOL_PAYLOAD_SIZE);
}
#[test]
fn test_handle_adopt_shred_version() {
let node_keypair = Arc::new(Keypair::new());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
));
// Simulating starting up with default entrypoint, no known id, only a gossip
// address
let entrypoint_gossip_addr = socketaddr!("127.0.0.2:1234");
let mut entrypoint = ContactInfo::new_localhost(&Pubkey::default(), timestamp());
entrypoint.gossip = entrypoint_gossip_addr;
assert_eq!(entrypoint.shred_version, 0);
cluster_info.set_entrypoint(entrypoint);
// Simulate getting entrypoint ContactInfo from gossip
let mut gossiped_entrypoint_info =
ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
gossiped_entrypoint_info.gossip = entrypoint_gossip_addr;
gossiped_entrypoint_info.shred_version = 1;
cluster_info.insert_info(gossiped_entrypoint_info.clone());
// Adopt the entrypoint's gossiped contact info and verify
ClusterInfo::handle_adopt_shred_version(&cluster_info, &mut true);
assert_eq!(
cluster_info.entrypoint.read().unwrap().as_ref().unwrap(),
&gossiped_entrypoint_info
);
assert_eq!(cluster_info.my_shred_version(), 1);
}
}
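The change above makes shred-version adoption key off the entrypoint's gossip address rather than its id: once a gossiped ContactInfo with a matching gossip address and a nonzero shred_version shows up, it is adopted and stored back as the entrypoint. A minimal standalone sketch of that selection step, using an illustrative Peer struct rather than the crate's ContactInfo:

use std::net::SocketAddr;

#[derive(Clone, Debug, PartialEq)]
struct Peer {
    gossip: SocketAddr,
    shred_version: u16,
}

// Return the gossiped peer matching the entrypoint's gossip address,
// but only once it advertises a nonzero shred version worth adopting.
fn adoptable_entrypoint(peers: &[Peer], entrypoint_gossip: SocketAddr) -> Option<Peer> {
    peers
        .iter()
        .find(|p| p.gossip == entrypoint_gossip && p.shred_version != 0)
        .cloned()
}

fn main() {
    let entrypoint_gossip: SocketAddr = "127.0.0.2:1234".parse().unwrap();
    let peers = vec![
        Peer { gossip: "127.0.0.1:8000".parse().unwrap(), shred_version: 2 },
        Peer { gossip: entrypoint_gossip, shred_version: 1 },
    ];
    let adopted = adoptable_entrypoint(&peers, entrypoint_gossip).unwrap();
    assert_eq!(adopted.shred_version, 1);
}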

View File

@@ -246,10 +246,7 @@ mod tests {
bank_forks::BankForks,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
};
use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_sdk::{pubkey::Pubkey, signature::Signer};
use solana_stake_program::stake_state;
use solana_vote_program::{
vote_state::{self, VoteStateVersions},
@@ -488,14 +485,8 @@ mod tests {
let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
let node_keypair = Arc::new(Keypair::new());
let vote_keypair = Arc::new(Keypair::new());
let stake_keypair = Arc::new(Keypair::new());
let validator_keypairs = vec![ValidatorVoteKeypairs {
node_keypair: node_keypair.clone(),
vote_keypair: vote_keypair.clone(),
stake_keypair,
}];
let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand();
let validator_keypairs = vec![&validator_vote_keypairs];
let GenesisConfigInfo {
genesis_config,
mint_keypair: _,
@@ -518,9 +509,9 @@ mod tests {
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&node_keypair,
&vote_keypair,
&vote_keypair,
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
@@ -528,7 +519,10 @@ mod tests {
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
for x in 0..root {
bank_forks.set_root(x, &None, None);
}
@@ -540,16 +534,19 @@ mod tests {
vec![33],
bank33.hash(),
bank33.last_blockhash(),
&node_keypair,
&vote_keypair,
&vote_keypair,
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank34.process_transaction(&vote33).unwrap();
bank_forks.insert(bank34);
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
@@ -601,9 +598,9 @@ mod tests {
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&node_keypair,
&vote_keypair,
&vote_keypair,
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
@@ -611,7 +608,10 @@ mod tests {
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,

View File

@@ -0,0 +1,82 @@
use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
use solana_sdk::signature::Signature;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub type CompletedDataSetsReceiver = Receiver<Vec<CompletedDataSetInfo>>;
pub type CompletedDataSetsSender = Sender<Vec<CompletedDataSetInfo>>;
pub struct CompletedDataSetsService {
thread_hdl: JoinHandle<()>,
}
impl CompletedDataSetsService {
pub fn new(
completed_sets_receiver: CompletedDataSetsReceiver,
blockstore: Arc<Blockstore>,
rpc_subscriptions: Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("completed-data-set-service".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(RecvTimeoutError::Disconnected) = Self::recv_completed_data_sets(
&completed_sets_receiver,
&blockstore,
&rpc_subscriptions,
) {
break;
}
})
.unwrap();
Self { thread_hdl }
}
fn recv_completed_data_sets(
completed_sets_receiver: &CompletedDataSetsReceiver,
blockstore: &Blockstore,
rpc_subscriptions: &RpcSubscriptions,
) -> Result<(), RecvTimeoutError> {
let completed_data_sets = completed_sets_receiver.recv_timeout(Duration::from_secs(1))?;
for completed_set_info in std::iter::once(completed_data_sets)
.chain(completed_sets_receiver.try_iter())
.flatten()
{
let CompletedDataSetInfo {
slot,
start_index,
end_index,
} = completed_set_info;
match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) {
Ok(entries) => {
let transactions = entries
.into_iter()
.flat_map(|e| e.transactions.into_iter().map(|t| t.signatures[0]))
.collect::<Vec<Signature>>();
if !transactions.is_empty() {
rpc_subscriptions.notify_signatures_received((slot, transactions));
}
}
Err(e) => warn!("completed-data-set-service deserialize error: {:?}", e),
}
}
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
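The receive loop above blocks for up to one second on the channel, then drains any queued batches with try_iter before touching the blockstore, so bursts of completed data sets are handled in a single pass. A standalone sketch of that drain pattern with crossbeam_channel (generic u64 payloads in place of CompletedDataSetInfo):

use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block up to one second for the first batch, then drain whatever else is queued.
fn drain_batches(rx: &Receiver<Vec<u64>>) -> Result<Vec<u64>, RecvTimeoutError> {
    let first = rx.recv_timeout(Duration::from_secs(1))?;
    Ok(std::iter::once(first).chain(rx.try_iter()).flatten().collect())
}

fn main() {
    let (tx, rx) = unbounded();
    tx.send(vec![1, 2]).unwrap();
    tx.send(vec![3]).unwrap();
    assert_eq!(drain_batches(&rx).unwrap(), vec![1, 2, 3]);
}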

View File

@@ -18,7 +18,7 @@ use solana_vote_program::{
},
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
collections::{HashMap, HashSet},
ops::Bound::{Included, Unbounded},
sync::{Arc, RwLock},
};
@@ -128,9 +128,9 @@ impl Tower {
pub(crate) fn collect_vote_lockouts<F>(
node_pubkey: &Pubkey,
bank_slot: u64,
bank_slot: Slot,
vote_accounts: F,
ancestors: &HashMap<Slot, HashSet<u64>>,
ancestors: &HashMap<Slot, HashSet<Slot>>,
all_pubkeys: &mut PubkeyReferences,
) -> ComputedBankState
where
@@ -141,13 +141,13 @@ impl Tower {
let mut bank_weight = 0;
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
// keyed by end of the range
let mut lockout_intervals = BTreeMap::new();
let mut lockout_intervals = LockoutIntervals::new();
let mut pubkey_votes = vec![];
for (key, (lamports, account)) in vote_accounts {
if lamports == 0 {
for (key, (voted_stake, account)) in vote_accounts {
if voted_stake == 0 {
continue;
}
trace!("{} {} with stake {}", node_pubkey, key, lamports);
trace!("{} {} with stake {}", node_pubkey, key, voted_stake);
let vote_state = VoteState::from(&account);
if vote_state.is_none() {
datapoint_warn!(
@@ -197,7 +197,7 @@ impl Tower {
vote_state.process_slot_vote_unchecked(bank_slot);
for vote in &vote_state.votes {
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
@@ -208,7 +208,7 @@ impl Tower {
slot: root,
};
trace!("ROOT: {}", vote.slot);
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
}
@@ -217,7 +217,7 @@ impl Tower {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
@@ -239,11 +239,11 @@ impl Tower {
Self::update_ancestor_voted_stakes(
&mut voted_stakes,
vote.slot,
lamports,
voted_stake,
ancestors,
);
}
total_stake += lamports;
total_stake += voted_stake;
}
ComputedBankState {
@@ -473,8 +473,12 @@ impl Tower {
.lockout_intervals;
// Find any locked out intervals in this bank with endpoint >= last_vote,
// implies they are locked out at last_vote
for (_lockout_interval_end, value) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in value {
for (_lockout_interval_end, intervals_keyed_by_end) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
if locked_out_vote_accounts.contains(vote_account_pubkey) {
continue;
}
// Only count lockouts on slots that are:
// 1) Not ancestors of `last_vote`
// 2) Not from before the current root as we can't determine if
@@ -485,7 +489,6 @@ impl Tower {
// is an ancestor of the current root, because `candidate_slot` is a
// descendant of the current root
&& *lockout_interval_start > root
&& !locked_out_vote_accounts.contains(vote_account_pubkey)
{
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
@@ -587,21 +590,21 @@ impl Tower {
/// Note, stake is the same for all the ancestors.
fn update_ancestor_voted_stakes(
voted_stakes: &mut VotedStakes,
slot: Slot,
lamports: u64,
voted_slot: Slot,
voted_stake: u64,
ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
// If there are no ancestors, that means this slot must be from
// before the current root, so ignore this slot
let vote_slot_ancestors = ancestors.get(&slot);
let vote_slot_ancestors = ancestors.get(&voted_slot);
if vote_slot_ancestors.is_none() {
return;
}
let mut slot_with_ancestors = vec![slot];
let mut slot_with_ancestors = vec![voted_slot];
slot_with_ancestors.extend(vote_slot_ancestors.unwrap());
for slot in slot_with_ancestors {
let current = voted_stakes.entry(slot).or_default();
*current += lamports;
*current += voted_stake;
}
}
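Renamed parameters aside, update_ancestor_voted_stakes credits voted_stake to the voted slot and to every known ancestor of it. A minimal standalone sketch of that accumulation, with simplified HashMap-based types rather than the crate's actual signatures:

use std::collections::{HashMap, HashSet};

type Slot = u64;

// Add `voted_stake` to `voted_slot` and every ancestor we know about.
// Slots with no ancestry entry are treated as older than the root and skipped.
fn add_voted_stake(
    voted_stakes: &mut HashMap<Slot, u64>,
    voted_slot: Slot,
    voted_stake: u64,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
    let slot_ancestors = match ancestors.get(&voted_slot) {
        Some(a) => a,
        None => return,
    };
    for slot in std::iter::once(voted_slot).chain(slot_ancestors.iter().copied()) {
        *voted_stakes.entry(slot).or_default() += voted_stake;
    }
}

fn main() {
    let ancestors: HashMap<Slot, HashSet<Slot>> =
        [(3, [1, 2].into_iter().collect())].into_iter().collect();
    let mut voted_stakes = HashMap::new();
    add_voted_stake(&mut voted_stakes, 3, 10, &ancestors);
    assert_eq!(voted_stakes[&1], 10);
    assert_eq!(voted_stakes[&2], 10);
    assert_eq!(voted_stakes[&3], 10);
}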

View File

@@ -25,8 +25,8 @@ pub struct ContactInfo {
pub tpu: SocketAddr,
/// address to forward unprocessed transactions to
pub tpu_forwards: SocketAddr,
/// unused address
pub unused: SocketAddr,
/// address to which to send bank state requests
pub rpc_banks: SocketAddr,
/// address to which to send JSON-RPC requests
pub rpc: SocketAddr,
/// websocket for JSON-RPC push notifications
@@ -95,7 +95,7 @@ impl Default for ContactInfo {
repair: socketaddr_any!(),
tpu: socketaddr_any!(),
tpu_forwards: socketaddr_any!(),
unused: socketaddr_any!(),
rpc_banks: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
serve_repair: socketaddr_any!(),
@@ -115,7 +115,7 @@ impl ContactInfo {
repair: socketaddr!("127.0.0.1:1237"),
tpu: socketaddr!("127.0.0.1:1238"),
tpu_forwards: socketaddr!("127.0.0.1:1239"),
unused: socketaddr!("127.0.0.1:1240"),
rpc_banks: socketaddr!("127.0.0.1:1240"),
rpc: socketaddr!("127.0.0.1:1241"),
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
serve_repair: socketaddr!("127.0.0.1:1243"),
@@ -137,7 +137,7 @@ impl ContactInfo {
repair: addr,
tpu: addr,
tpu_forwards: addr,
unused: addr,
rpc_banks: addr,
rpc: addr,
rpc_pubsub: addr,
serve_repair: addr,
@@ -162,6 +162,7 @@ impl ContactInfo {
let repair = next_port(&bind_addr, 5);
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
let rpc_banks = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_BANKS_PORT);
let serve_repair = next_port(&bind_addr, 6);
Self {
id: *pubkey,
@@ -171,7 +172,7 @@ impl ContactInfo {
repair,
tpu,
tpu_forwards,
unused: "0.0.0.0:0".parse().unwrap(),
rpc_banks,
rpc,
rpc_pubsub,
serve_repair,
@@ -248,7 +249,7 @@ mod tests {
assert!(ci.rpc.ip().is_unspecified());
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.unused.ip().is_unspecified());
assert!(ci.rpc_banks.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
@@ -260,7 +261,7 @@ mod tests {
assert!(ci.rpc.ip().is_multicast());
assert!(ci.rpc_pubsub.ip().is_multicast());
assert!(ci.tpu.ip().is_multicast());
assert!(ci.unused.ip().is_multicast());
assert!(ci.rpc_banks.ip().is_multicast());
assert!(ci.serve_repair.ip().is_multicast());
}
#[test]
@@ -273,7 +274,7 @@ mod tests {
assert!(ci.rpc.ip().is_unspecified());
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.unused.ip().is_unspecified());
assert!(ci.rpc_banks.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
@@ -286,7 +287,7 @@ mod tests {
assert_eq!(ci.tpu_forwards.port(), 13);
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
assert!(ci.unused.ip().is_unspecified());
assert_eq!(ci.rpc_banks.port(), rpc_port::DEFAULT_RPC_BANKS_PORT);
assert_eq!(ci.serve_repair.port(), 16);
}
@@ -310,6 +311,10 @@ mod tests {
d1.rpc_pubsub,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
);
assert_eq!(
d1.rpc_banks,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_BANKS_PORT))
);
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));

View File

@@ -57,8 +57,32 @@ impl CrdsFilter {
mask_bits,
}
}
// CrdsFilter::new_complete_set returns a vector of filters. Given a hash
// value, this function returns the filter within that vector which
// corresponds to the item.
// TODO: Consider making Vec<CrdsFilter> a separate type, and implementing
// this (and new_complete_set) function as methods on that type.
pub fn get_crds_filter<'a>(
filters: &'a mut Vec<CrdsFilter>,
item: &Hash,
) -> Option<&'a mut CrdsFilter> {
if let Some(filter) = filters.first() {
let shift = 64 - filter.mask_bits.min(64);
let index = CrdsFilter::hash_as_u64(item)
.checked_shr(shift)
.unwrap_or(0u64);
filters.get_mut(index as usize)
} else {
None
}
}
// generates a vec of filters that together hold a complete set of Hashes
pub fn new_complete_set(num_items: usize, max_bytes: usize) -> Vec<Self> {
// Note: get_crds_filter above relies on the order of filters and
// bit-mask pattern here. If changed, update get_crds_filter
// accordingly.
let max_bits = (max_bytes * 8) as f64;
let max_items = Self::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = Self::mask_bits(num_items as f64, max_items as f64);
@@ -193,9 +217,7 @@ impl CrdsGossipPull {
.filter(|v| {
v.id != *self_id
&& ContactInfo::is_valid_address(&v.gossip)
&& (self_shred_version == 0
|| v.shred_version == 0
|| self_shred_version == v.shred_version)
&& (self_shred_version == 0 || self_shred_version == v.shred_version)
})
.map(|item| {
let max_weight = f32::from(u16::max_value()) - 1.0;
@@ -364,13 +386,17 @@ impl CrdsGossipPull {
crds.table.values().count() + self.purged_values.len(),
);
let mut filters = CrdsFilter::new_complete_set(num, bloom_size);
let mut add_value_hash = |value_hash| {
if let Some(filter) = CrdsFilter::get_crds_filter(&mut filters, value_hash) {
debug_assert!(filter.test_mask(value_hash));
filter.filter.add(value_hash);
}
};
for v in crds.table.values() {
filters
.iter_mut()
.for_each(|filter| filter.add(&v.value_hash));
add_value_hash(&v.value_hash);
}
for (value_hash, _insert_timestamp) in &self.purged_values {
filters.iter_mut().for_each(|filter| filter.add(value_hash));
add_value_hash(&value_hash);
}
filters
@@ -392,7 +418,8 @@ impl CrdsGossipPull {
let past = now.saturating_sub(msg_timeout);
let recent: Vec<_> = filters
.iter()
.filter(|(caller, _)| caller.wallclock() < future && caller.wallclock() >= past)
.enumerate()
.filter(|(_, (caller, _))| caller.wallclock() < future && caller.wallclock() >= past)
.collect();
inc_new_counter_info!(
"gossip_filter_crds_values-dropped_requests",
@@ -404,11 +431,13 @@ impl CrdsGossipPull {
let mut total_skipped = 0;
let mask_ones: Vec<_> = recent
.iter()
.map(|(_caller, filter)| (!0u64).checked_shr(filter.mask_bits).unwrap_or(!0u64))
.map(|(_i, (_caller, filter))| (!0u64).checked_shr(filter.mask_bits).unwrap_or(!0u64))
.collect();
for (label, mask) in crds.masks.iter() {
recent.iter().zip(mask_ones.iter()).enumerate().for_each(
|(i, ((caller, filter), mask_ones))| {
recent
.iter()
.zip(mask_ones.iter())
.for_each(|((i, (caller, filter)), mask_ones)| {
if filter.test_mask_u64(*mask, *mask_ones) {
let item = crds.table.get(label).unwrap();
@@ -421,11 +450,10 @@ impl CrdsGossipPull {
}
if !filter.filter_contains(&item.value_hash) {
ret[i].push(item.value.clone());
ret[*i].push(item.value.clone());
}
}
},
);
});
}
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
ret
@@ -519,8 +547,9 @@ mod test {
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use rand::{thread_rng, RngCore};
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::hash;
use solana_sdk::hash::{hash, HASH_BYTES};
use solana_sdk::packet::PACKET_DATA_SIZE;
#[test]
@@ -591,14 +620,14 @@ mod test {
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
// shred version 123 should ignore 456 nodes
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert_eq!(options.len(), 1);
assert!(!options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
@@ -613,6 +642,53 @@ mod test {
assert!(options.contains(&node_456.pubkey()));
}
#[test]
fn test_crds_filter_get_crds_filter() {
let mut filters =
CrdsFilter::new_complete_set(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
assert_eq!(filters.len(), 1024);
let mut bytes = [0u8; HASH_BYTES];
let mut rng = thread_rng();
for _ in 0..100 {
rng.fill_bytes(&mut bytes);
let hash_value = Hash::new(&bytes);
let filter = CrdsFilter::get_crds_filter(&mut filters, &hash_value)
.unwrap()
.clone();
assert!(filter.test_mask(&hash_value));
// Validate that the returned filter is the *unique* filter which
// corresponds to the hash value (i.e. test_mask returns true).
let mut num_hits = 0;
for f in &filters {
if *f == filter {
num_hits += 1;
assert!(f.test_mask(&hash_value));
} else {
assert!(!f.test_mask(&hash_value));
}
}
assert_eq!(num_hits, 1);
}
}
#[test]
fn test_crds_filter_new_complete_set() {
// Validates invariants required by CrdsFilter::get_crds_filter in the
// vector of filters generated by CrdsFilter::new_complete_set.
let filters =
CrdsFilter::new_complete_set(/*num_items=*/ 55345017, /*max_bytes=*/ 4098);
assert_eq!(filters.len(), 16384);
let mask_bits = filters[0].mask_bits;
let right_shift = 64 - mask_bits;
let ones = !0u64 >> mask_bits;
for (i, filter) in filters.iter().enumerate() {
// Check that all mask_bits are equal.
assert_eq!(mask_bits, filter.mask_bits);
assert_eq!(i as u64, filter.mask >> right_shift);
assert_eq!(ones, ones & filter.mask);
}
}
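The two tests above pin down the contract between get_crds_filter and new_complete_set: every filter shares one mask_bits value, filter i owns the hashes whose top mask_bits bits equal i, and its stored mask is that index followed by ones. A small self-contained sketch of the index and mask arithmetic (plain u64 values stand in for the 32-byte hash):

// Index of the filter responsible for a hash whose leading 8 bytes are `hash_u64`,
// given that every filter in the set uses the same `mask_bits`.
fn filter_index(hash_u64: u64, mask_bits: u32) -> usize {
    let shift = 64 - mask_bits.min(64);
    hash_u64.checked_shr(shift).unwrap_or(0) as usize
}

// Mask stored by filter `index`: the index in the high bits, ones in the low bits.
// Valid for 1 <= mask_bits <= 63.
fn filter_mask(index: u64, mask_bits: u32) -> u64 {
    let ones = !0u64 >> mask_bits;
    (index << (64 - mask_bits)) | ones
}

fn main() {
    let mask_bits = 10; // 2^10 = 1024 filters
    let hash_u64 = 0xdead_beef_dead_beefu64;
    let i = filter_index(hash_u64, mask_bits);
    // The chosen filter's mask agrees with the hash on the top `mask_bits` bits.
    assert_eq!(filter_mask(i as u64, mask_bits) >> (64 - mask_bits), i as u64);
}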
#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
@@ -729,15 +805,19 @@ mod test {
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
assert_eq!(rsp[0].len(), 0);
assert_eq!(filters.len(), 1);
filters.push(filters[0].clone());
//should return new value since caller is new
filters[0].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
)));
let rsp =
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
assert_eq!(rsp[0].len(), 1);
assert_eq!(rsp.len(), 2);
assert_eq!(rsp[0].len(), 0);
assert_eq!(rsp[1].len(), 1); // Orders are also preserved.
}
#[test]

View File

@@ -344,9 +344,7 @@ impl CrdsGossipPush {
.filter(|(info, _)| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& (self_shred_version == 0
|| info.shred_version == 0
|| self_shred_version == info.shred_version)
&& self_shred_version == info.shred_version
})
.map(|(info, value)| {
let max_weight = f32::from(u16::max_value()) - 1.0;
@@ -641,28 +639,25 @@ mod test {
crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
crds.insert(node_456, 0).unwrap();
// shred version 123 should ignore 456 nodes
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert_eq!(options.len(), 1);
assert!(!options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
// spy nodes should not push to people on different shred versions
let options = node
.push_options(&crds, &spy.label().pubkey(), 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
assert!(options.is_empty());
}
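With this change, push peer selection requires an exact shred_version match, so peers advertising shred_version 0 (spies) are no longer exempt; on the pull side the only remaining exception is when the local node itself is a version-0 spy. A minimal sketch of the stricter predicate, with an illustrative Peer struct rather than the crate's ContactInfo:

#[derive(Debug, PartialEq)]
struct Peer {
    id: u32,
    shred_version: u16,
}

// Keep only peers on the same shred version; version-0 "spy" peers no longer pass.
fn push_candidates(peers: &[Peer], self_id: u32, self_shred_version: u16) -> Vec<&Peer> {
    peers
        .iter()
        .filter(|p| p.id != self_id && p.shred_version == self_shred_version)
        .collect()
}

fn main() {
    let peers = vec![
        Peer { id: 1, shred_version: 123 },
        Peer { id: 2, shred_version: 0 },   // spy: filtered out now
        Peer { id: 3, shred_version: 456 },
    ];
    let picked = push_candidates(&peers, 0, 123);
    assert_eq!(picked.len(), 1);
    assert_eq!(picked[0].id, 1);
}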
#[test]
fn test_new_push_messages() {

View File

@@ -69,6 +69,7 @@ pub fn discover_cluster(
num_nodes: usize,
) -> std::io::Result<Vec<ContactInfo>> {
discover(
None,
Some(entrypoint),
Some(num_nodes),
Some(30),
@@ -81,6 +82,7 @@ pub fn discover_cluster(
}
pub fn discover(
keypair: Option<Arc<Keypair>>,
entrypoint: Option<&SocketAddr>,
num_nodes: Option<usize>, // num_nodes only counts validators, excludes spy nodes
timeout: Option<u64>,
@@ -89,9 +91,11 @@ pub fn discover(
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
let keypair = keypair.unwrap_or_else(|| Arc::new(Keypair::new()));
let exit = Arc::new(AtomicBool::new(false));
let (gossip_service, ip_echo, spy_ref) =
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
make_gossip_node(keypair, entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.id();
info!("Entrypoint: {:?}", entrypoint);
@@ -245,12 +249,12 @@ fn spy(
/// Makes a spy or gossip node based on whether or not a gossip_addr was passed in
/// Pass in a gossip addr to fully participate in gossip instead of relying on just pulls
fn make_gossip_node(
keypair: Arc<Keypair>,
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {

View File

@@ -372,15 +372,8 @@ impl HeaviestSubtreeForkChoice {
stake_voted_subtree = fork_info.stake_voted_at;
let mut best_child_stake_voted_subtree = 0;
let mut best_child_slot = slot;
let should_print = fork_info.children.len() > 1;
for &child in &fork_info.children {
let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap();
if should_print {
info!(
"child: {} of slot: {} has weight: {}",
child, slot, child_stake_voted_subtree
);
}
stake_voted_subtree += child_stake_voted_subtree;
if best_child_slot == slot ||
child_stake_voted_subtree > best_child_stake_voted_subtree ||

View File

@@ -12,6 +12,7 @@ pub mod banking_stage;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;
pub mod commitment_service;
pub mod completed_data_sets_service;
mod deprecated;
pub mod shred_fetch_stage;
#[macro_use]
@@ -72,9 +73,6 @@ pub mod vote_stake_tracker;
pub mod weighted_shuffle;
pub mod window_service;
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
extern crate solana_budget_program;

View File

@@ -96,7 +96,9 @@ solana_sdk::pubkeys!(
mod tests {
use super::*;
use solana_sdk::{
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
account::Account,
epoch_schedule::EpochSchedule,
genesis_config::{GenesisConfig, OperatingMode},
};
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
use std::{collections::BTreeMap, sync::Arc};
@@ -147,6 +149,7 @@ mod tests {
let genesis_config = GenesisConfig {
accounts,
epoch_schedule: EpochSchedule::new(slots_per_epoch),
operating_mode: OperatingMode::Stable,
..GenesisConfig::default()
};
let mut bank = Arc::new(Bank::new(&genesis_config));

View File

@@ -137,18 +137,13 @@ impl PohRecorder {
self.ticks_per_slot
}
fn received_any_previous_leader_data(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|i| {
// Check if we have received any data in previous leader's slots
if let Ok(slot_meta) = self.blockstore.meta(i as Slot) {
if let Some(slot_meta) = slot_meta {
slot_meta.received > 0
} else {
false
}
} else {
false
}
fn is_same_fork_as_previous_leader(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|slot| {
// Check if the last slot Poh reset to was any of the
// previous leader's slots.
// If so, PoH is currently building on the previous leader's blocks
// If not, PoH is building on a different fork
slot == self.start_slot
})
}
@@ -167,13 +162,13 @@ impl PohRecorder {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
let current_slot = self.tick_height / self.ticks_per_slot;
// we've approached target_tick_height OR poh was reset to run immediately
// We've approached target_tick_height OR poh was reset to run immediately
// Or, previous leader didn't transmit in any of its leader slots, so ignore grace ticks
self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
|| (self.tick_height >= ideal_target_tick_height
&& (self.prev_slot_was_mine(current_slot)
|| !self.received_any_previous_leader_data(current_slot)))
|| !self.is_same_fork_as_previous_leader(current_slot)))
}
/// returns if leader slot has been reached, how many grace ticks were afforded,
@@ -1159,28 +1154,29 @@ mod tests {
}
poh_recorder.grace_ticks = grace_ticks;
// True, as previous leader did not transmit in its slots
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
true
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// False, because the Poh was reset on slot 0, which
// is a block produced by the previous leader, so a grace
// period must be given
assert!(!poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
// False, as previous leader transmitted in one of its recent slots
// and grace ticks have not expired
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
false
);
// Tick `NUM_CONSECUTIVE_LEADER_SLOTS` more times
let new_tick_height = 2 * NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot();
for _ in 0..new_tick_height {
poh_recorder.tick();
}
// True, because
// 1) the Poh was reset on slot 0
// 2) Our slot starts at 2 * NUM_CONSECUTIVE_LEADER_SLOTS, which means
// none of the previous leader's `NUM_CONSECUTIVE_LEADER_SLOTS` were slots
// this Poh built on (previous leader was on different fork). Thus, skip the
// grace period.
assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
// From the bootstrap validator's perspective, it should have reached
// the tick
// the tick because the previous slot was also its own slot (all slots
// belong to the bootstrap leader b/c it's the only staked node!), and
// validators don't give grace periods if previous slot was also their own.
poh_recorder.id = bootstrap_validator_id;
assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
}

View File

@@ -18,7 +18,7 @@ use solana_measure::measure::Measure;
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
iter::Iterator,
net::SocketAddr,
net::UdpSocket,
@@ -108,6 +108,7 @@ pub struct RepairInfo {
pub bank_forks: Arc<RwLock<BankForks>>,
pub epoch_schedule: EpochSchedule,
pub duplicate_slots_reset_sender: DuplicateSlotsResetSender,
pub repair_validators: Option<HashSet<Pubkey>>,
}
pub struct RepairSlotRange {
@@ -234,6 +235,7 @@ impl RepairService {
blockstore,
&serve_repair,
&repair_info.duplicate_slots_reset_sender,
&repair_info.repair_validators,
);
Self::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
@@ -242,6 +244,7 @@ impl RepairService {
&serve_repair,
&mut repair_stats,
&repair_socket,
&repair_info.repair_validators,
);*/
repair_weight.get_best_weighted_repairs(
@@ -263,6 +266,7 @@ impl RepairService {
repair_request,
&mut cache,
&mut repair_stats,
&repair_info.repair_validators,
) {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
@@ -444,9 +448,16 @@ impl RepairService {
serve_repair: &ServeRepair,
repair_stats: &mut RepairStats,
repair_socket: &UdpSocket,
repair_validators: &Option<HashSet<Pubkey>>,
) {
duplicate_slot_repair_statuses.retain(|slot, status| {
Self::update_duplicate_slot_repair_addr(*slot, status, cluster_slots, serve_repair);
Self::update_duplicate_slot_repair_addr(
*slot,
status,
cluster_slots,
serve_repair,
repair_validators,
);
if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr {
let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
@@ -499,13 +510,17 @@ impl RepairService {
status: &mut DuplicateSlotRepairStatus,
cluster_slots: &ClusterSlots,
serve_repair: &ServeRepair,
repair_validators: &Option<HashSet<Pubkey>>,
) {
let now = timestamp();
if status.repair_pubkey_and_addr.is_none()
|| now.saturating_sub(status.start) >= MAX_DUPLICATE_WAIT_MS as u64
{
let repair_pubkey_and_addr =
serve_repair.repair_request_duplicate_compute_best_peer(slot, cluster_slots);
let repair_pubkey_and_addr = serve_repair.repair_request_duplicate_compute_best_peer(
slot,
cluster_slots,
repair_validators,
);
status.repair_pubkey_and_addr = repair_pubkey_and_addr.ok();
status.start = timestamp();
}
@@ -520,6 +535,7 @@ impl RepairService {
blockstore: &Blockstore,
serve_repair: &ServeRepair,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
repair_validators: &Option<HashSet<Pubkey>>,
) {
for slot in new_duplicate_slots {
warn!(
@@ -545,7 +561,7 @@ impl RepairService {
// Mark this slot as special repair, try to download from a single
// validator to avoid corruption
let repair_pubkey_and_addr = serve_repair
.repair_request_duplicate_compute_best_peer(*slot, cluster_slots)
.repair_request_duplicate_compute_best_peer(*slot, cluster_slots, repair_validators)
.ok();
let new_duplicate_slot_repair_status = DuplicateSlotRepairStatus {
start: timestamp(),
@@ -953,6 +969,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert!(duplicate_slot_repair_statuses
.get(&dead_slot)
@@ -976,6 +993,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert_eq!(duplicate_slot_repair_statuses.len(), 1);
assert!(duplicate_slot_repair_statuses.get(&dead_slot).is_some());
@@ -992,6 +1010,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert!(duplicate_slot_repair_statuses.is_empty());
}
@@ -1026,6 +1045,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert_eq!(duplicate_status.repair_pubkey_and_addr, dummy_addr);
@@ -1039,6 +1059,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert!(duplicate_status.repair_pubkey_and_addr.is_some());
@@ -1052,6 +1073,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert_ne!(duplicate_status.repair_pubkey_and_addr, dummy_addr);
}
@@ -1108,6 +1130,7 @@ mod test {
&blockstore,
&serve_repair,
&reset_sender,
&None,
);
// Blockstore should have been cleared

View File

@@ -511,6 +511,7 @@ mod test {
use super::*;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::{bank::Bank, bank_utils};
use solana_sdk::hash::Hash;
use trees::tr;
#[test]
@@ -658,7 +659,7 @@ mod test {
assert_eq!(repair_weight.trees.get(&8).unwrap().ancestors(10), vec![8]);
// Connect orphan back to main fork
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
assert_eq!(
AncestorIterator::new(8, &blockstore).collect::<Vec<_>>(),
vec![6, 5, 3, 1, 0]
@@ -742,8 +743,8 @@ mod test {
assert!(repair_weight.trees.contains_key(&20));
// Resolve orphans in blockstore
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(11) / (tr(20)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
blockstore.add_tree(tr(11) / (tr(20)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
&blockstore,
@@ -853,8 +854,8 @@ mod test {
// Resolve the orphans, should now return no
// orphans
repairs = vec![];
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(11) / (tr(20)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
blockstore.add_tree(tr(11) / (tr(20)), true, true, 2, Hash::default());
repair_weight.get_best_orphans(
&blockstore,
&mut repairs,
@@ -889,7 +890,7 @@ mod test {
// orphan in the `trees` map, we should search for
// exactly one more of the remaining two
let mut repairs = vec![];
blockstore.add_tree(tr(100) / (tr(101)), true, true);
blockstore.add_tree(tr(100) / (tr(101)), true, true, 2, Hash::default());
repair_weight.get_best_orphans(
&blockstore,
&mut repairs,
@@ -991,7 +992,7 @@ mod test {
// Chain orphan 8 back to the main fork, but don't
// touch orphan 20
blockstore.add_tree(tr(4) / (tr(8)), true, true);
blockstore.add_tree(tr(4) / (tr(8)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
@@ -1061,10 +1062,10 @@ mod test {
}
// Chain orphan 8 back to slot `old_parent`
blockstore.add_tree(tr(*old_parent) / (tr(8)), true, true);
blockstore.add_tree(tr(*old_parent) / (tr(8)), true, true, 2, Hash::default());
// Chain orphan 20 back to orphan 8
blockstore.add_tree(tr(8) / (tr(20)), true, true);
blockstore.add_tree(tr(8) / (tr(20)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
@@ -1089,7 +1090,13 @@ mod test {
// Add a vote that chains back to `old_parent`, should be purged
let new_vote_slot = 100;
blockstore.add_tree(tr(*old_parent) / tr(new_vote_slot), true, true);
blockstore.add_tree(
tr(*old_parent) / tr(new_vote_slot),
true,
true,
2,
Hash::default(),
);
repair_weight.add_votes(
&blockstore,
vec![(new_vote_slot, vec![Pubkey::default()])].into_iter(),
@@ -1137,7 +1144,7 @@ mod test {
);
// Ancestors of slot 31 are [30], with no existing subtree
blockstore.add_tree(tr(30) / (tr(31)), true, true);
blockstore.add_tree(tr(30) / (tr(31)), true, true, 2, Hash::default());
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 31),
(vec![30].into_iter().collect::<VecDeque<_>>(), None)
@@ -1155,7 +1162,7 @@ mod test {
// Chain orphan 8 back to slot 4 on a different fork, ancestor search
// should not return ancestors earlier than the root
blockstore.add_tree(tr(4) / (tr(8)), true, true);
blockstore.add_tree(tr(4) / (tr(8)), true, true, 2, Hash::default());
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 8),
(vec![4].into_iter().collect::<VecDeque<_>>(), None)
@@ -1242,8 +1249,8 @@ mod test {
*/
let blockstore = setup_forks();
blockstore.add_tree(tr(8) / (tr(10) / (tr(11))), true, true);
blockstore.add_tree(tr(20) / (tr(22) / (tr(23))), true, true);
blockstore.add_tree(tr(8) / (tr(10) / (tr(11))), true, true, 2, Hash::default());
blockstore.add_tree(tr(20) / (tr(22) / (tr(23))), true, true, 2, Hash::default());
assert!(blockstore.orphan(8).unwrap().is_some());
blockstore
}
@@ -1265,7 +1272,7 @@ mod test {
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6)))));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.add_tree(forks, false, true);
blockstore.add_tree(forks, false, true, 2, Hash::default());
blockstore
}
}

View File

@@ -150,6 +150,7 @@ pub mod test {
use super::*;
use solana_ledger::{get_tmp_ledger_path, shred::Shred};
use solana_runtime::bank_utils;
use solana_sdk::hash::Hash;
use trees::tr;
#[test]
@@ -246,7 +247,13 @@ pub mod test {
repairs = vec![];
let best_overall_slot = heaviest_subtree_fork_choice.best_overall_slot();
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4);
blockstore.add_tree(tr(best_overall_slot) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(
tr(best_overall_slot) / (tr(6) / tr(7)),
true,
false,
2,
Hash::default(),
);
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
&blockstore,
@@ -300,7 +307,7 @@ pub mod test {
// Adding incomplete children with higher weighted parents, even if
// the parents are complete should still be repaired
repairs = vec![];
blockstore.add_tree(tr(2) / (tr(8)), true, false);
blockstore.add_tree(tr(2) / (tr(8)), true, false, 2, Hash::default());
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
&blockstore,
@@ -322,7 +329,7 @@ pub mod test {
let (blockstore, heaviest_subtree_fork_choice) = setup_forks();
// Add a branch to slot 2, make sure it doesn't repair child
// 4 again when the Unvisited(2) event happens
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false, 2, Hash::default());
let mut repairs = vec![];
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
@@ -368,7 +375,7 @@ pub mod test {
// Adding slot 2 to ignore should not remove its unexplored children from
// the repair set
repairs = vec![];
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false, 2, Hash::default());
ignore_set.insert(2);
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
@@ -420,7 +427,7 @@ pub mod test {
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5))));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.add_tree(forks.clone(), false, false);
blockstore.add_tree(forks.clone(), false, false, 2, Hash::default());
(blockstore, HeaviestSubtreeForkChoice::new_from_tree(forks))
}

View File

@@ -20,6 +20,7 @@ pub enum Error {
ReadyTimeoutError,
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
CrossbeamSendError,
TryCrossbeamSendError,
TryRecvError(std::sync::mpsc::TryRecvError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
TransactionError(transaction::TransactionError),
@@ -87,6 +88,11 @@ impl<T> std::convert::From<crossbeam_channel::SendError<T>> for Error {
Error::CrossbeamSendError
}
}
impl<T> std::convert::From<crossbeam_channel::TrySendError<T>> for Error {
fn from(_e: crossbeam_channel::TrySendError<T>) -> Error {
Error::TryCrossbeamSendError
}
}
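The new TryCrossbeamSendError variant and From impl let the ? operator convert a failed crossbeam try_send directly into the crate's error type. A self-contained sketch of the same pattern against a local error enum (names here are illustrative):

use crossbeam_channel::{bounded, TrySendError};

#[derive(Debug)]
enum MyError {
    TryCrossbeamSend,
}

impl<T> From<TrySendError<T>> for MyError {
    fn from(_e: TrySendError<T>) -> Self {
        MyError::TryCrossbeamSend
    }
}

// `?` now maps a full or disconnected channel straight into MyError.
fn try_send_value(tx: &crossbeam_channel::Sender<u64>, v: u64) -> Result<(), MyError> {
    tx.try_send(v)?;
    Ok(())
}

fn main() {
    let (tx, _rx) = bounded(1);
    try_send_value(&tx, 1).unwrap();
    // Second send fails because the bounded(1) channel is full.
    assert!(try_send_value(&tx, 2).is_err());
}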
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError

View File

@@ -5,6 +5,7 @@ use crate::{
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
cluster_slots_service::ClusterSlotsService,
completed_data_sets_service::CompletedDataSetsSender,
contact_info::ContactInfo,
repair_service::DuplicateSlotsResetSender,
repair_service::RepairInfo,
@@ -28,6 +29,7 @@ use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::hash_set::HashSet,
collections::{BTreeMap, HashMap},
net::UdpSocket,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
@@ -417,6 +419,8 @@ impl RetransmitStage {
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@@ -442,6 +446,7 @@ impl RetransmitStage {
bank_forks,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
};
let window_service = WindowService::new(
blockstore,
@@ -469,6 +474,7 @@ impl RetransmitStage {
},
cluster_slots,
verified_vote_receiver,
completed_data_sets_sender,
);
let thread_hdls = t_retransmit;

File diff suppressed because it is too large

View File

@@ -4,6 +4,7 @@ use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;
const JSON_RPC_SERVER_ERROR_4: i64 = -32004;
pub enum RpcCustomError {
BlockCleanedUp {
@@ -14,6 +15,9 @@ pub enum RpcCustomError {
message: String,
},
SendTransactionIsNotSigned,
BlockNotAvailable {
slot: Slot,
},
}
impl From<RpcCustomError> for Error {
@@ -40,6 +44,11 @@ impl From<RpcCustomError> for Error {
message: "Transaction is not signed".to_string(),
data: None,
},
RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_4),
message: format!("Block not available for slot {}", slot,),
data: None,
},
}
}
}

View File

@@ -6,14 +6,12 @@ use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_account_decoder::UiAccount;
use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult},
};
#[cfg(test)]
use solana_runtime::bank_forks::BankForks;
use solana_sdk::{
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
};
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
#[cfg(test)]
use std::sync::RwLock;
use std::{
@@ -89,7 +87,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
config: Option<RpcSignatureSubscribeConfig>,
);
// Unsubscribe from signature notification subscription.
@@ -248,7 +246,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
signature_subscribe_config: Option<RpcSignatureSubscribeConfig>,
) {
info!("signature_subscribe");
match param::<Signature>(&signature_str, "signature") {
@@ -259,8 +257,12 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
self.subscriptions
.add_signature_subscription(signature, commitment, sub_id, subscriber);
self.subscriptions.add_signature_subscription(
signature,
signature_subscribe_config,
sub_id,
subscriber,
);
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -360,6 +362,7 @@ mod tests {
use serial_test_derive::serial;
use solana_account_decoder::{parse_account_data::parse_account_data, UiAccountEncoding};
use solana_budget_program::{self, budget_instruction};
use solana_client::rpc_response::ProcessedSignatureResult;
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
@@ -370,6 +373,7 @@ mod tests {
},
};
use solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
message::Message,
pubkey::Pubkey,
@@ -439,7 +443,8 @@ mod tests {
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = RpcSignatureResult { err: None };
let expected_res =
RpcSignatureResult::ProcessedSignatureResult(ProcessedSignatureResult { err: None });
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
@@ -451,6 +456,38 @@ mod tests {
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
// Test "received"
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("signatureNotification");
rpc.signature_subscribe(
session,
subscriber,
tx.signatures[0].to_string(),
Some(RpcSignatureSubscribeConfig {
commitment: None,
enable_received_notification: Some(true),
}),
);
let received_slot = 1;
rpc.subscriptions
.notify_signatures_received((received_slot, vec![tx.signatures[0]]));
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = RpcSignatureResult::ReceivedSignature;
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": received_slot },
"value": expected_res,
},
"subscription": 1,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
@@ -584,7 +621,7 @@ mod tests {
"lamports": 51,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -696,7 +733,7 @@ mod tests {
"lamports": 100,
"data": expected_data,
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -859,7 +896,7 @@ mod tests {
"lamports": 100,
"data": "",
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,

View File

@@ -386,7 +386,7 @@ mod tests {
use solana_runtime::{
bank::Bank, bank_forks::CompressionType, snapshot_utils::SnapshotVersion,
};
use solana_sdk::signature::Signer;
use solana_sdk::{genesis_config::OperatingMode, signature::Signer};
use std::net::{IpAddr, Ipv4Addr};
#[test]
@@ -438,7 +438,10 @@ mod tests {
}
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(10_000);
genesis_config.operating_mode = OperatingMode::Stable;
let bank = Bank::new(&genesis_config);
Arc::new(RwLock::new(BankForks::new(bank)))
}

View File

@@ -8,11 +8,13 @@ use jsonrpc_pubsub::{
SubscriptionId,
};
use serde::Serialize;
use solana_account_decoder::{parse_token::spl_token_id_v1_0, UiAccount, UiAccountEncoding};
use solana_account_decoder::{parse_token::spl_token_id_v2_0, UiAccount, UiAccountEncoding};
use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_filter::RpcFilterType,
rpc_response::{Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult},
rpc_response::{
ProcessedSignatureResult, Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
},
};
use solana_runtime::{
bank::Bank,
@@ -67,6 +69,7 @@ enum NotificationEntry {
Frozen(Slot),
Bank(CommitmentSlots),
Gossip(Slot),
SignaturesReceived((Slot, Vec<Signature>)),
}
impl std::fmt::Debug for NotificationEntry {
@@ -79,6 +82,9 @@ impl std::fmt::Debug for NotificationEntry {
NotificationEntry::Bank(commitment_slots) => {
write!(f, "Bank({{slot: {:?}}})", commitment_slots.slot)
}
NotificationEntry::SignaturesReceived(slot_signatures) => {
write!(f, "SignaturesReceived({:?})", slot_signatures)
}
NotificationEntry::Gossip(slot) => write!(f, "Gossip({:?})", slot),
}
}
@@ -108,7 +114,10 @@ type RpcProgramSubscriptions = RwLock<
>,
>;
type RpcSignatureSubscriptions = RwLock<
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, ()>>>,
HashMap<
Signature,
HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, bool>>,
>,
>;
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>;
@@ -134,13 +143,11 @@ fn add_subscription<K, S, T>(
last_notified_slot: RwLock::new(last_notified_slot),
config,
};
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
current_hashmap.insert(sub_id, subscription_data);
return;
}
let mut hashmap = HashMap::new();
hashmap.insert(sub_id, subscription_data);
subscriptions.insert(hashmap_key, hashmap);
subscriptions
.entry(hashmap_key)
.or_default()
.insert(sub_id, subscription_data);
}
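The add_subscription refactor above swaps the manual get_mut-or-insert dance for entry().or_default(), the idiomatic way to insert into a nested map. A tiny standalone illustration:

use std::collections::HashMap;

fn main() {
    let mut subs: HashMap<&str, HashMap<u64, &str>> = HashMap::new();

    // One line creates the inner map on first use and inserts into it either way.
    subs.entry("signature-A").or_default().insert(1, "sub-1");
    subs.entry("signature-A").or_default().insert(2, "sub-2");

    assert_eq!(subs["signature-A"].len(), 2);
}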
fn remove_subscription<K, S, T>(
@@ -256,7 +263,7 @@ fn filter_account_result(
// and should notify that the account state has been reverted.
if fork != last_notified_slot {
let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
if account.owner == spl_token_id_v1_0() && encoding == UiAccountEncoding::JsonParsed {
if account.owner == spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed {
let bank = bank.unwrap(); // If result.is_some(), bank must also be Some
return (
Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))),
@@ -279,15 +286,15 @@ fn filter_signature_result(
result: Option<transaction::Result<()>>,
_signature: &Signature,
last_notified_slot: Slot,
_config: Option<()>,
_config: Option<bool>,
_bank: Option<Arc<Bank>>,
) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) {
(
Box::new(
result
.into_iter()
.map(|result| RpcSignatureResult { err: result.err() }),
),
Box::new(result.into_iter().map(|result| {
RpcSignatureResult::ProcessedSignatureResult(ProcessedSignatureResult {
err: result.err(),
})
})),
last_notified_slot,
)
}
@@ -629,13 +636,18 @@ impl RpcSubscriptions {
pub fn add_signature_subscription(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
signature_subscribe_config: Option<RpcSignatureSubscribeConfig>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcSignatureResult>>,
) {
let (commitment, enable_received_notification) = signature_subscribe_config
.map(|config| (config.commitment, config.enable_received_notification))
.unwrap_or((None, Some(false)));
let commitment_level = commitment
.unwrap_or_else(CommitmentConfig::recent)
.commitment;
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
self.subscriptions
.gossip_signature_subscriptions
@@ -651,7 +663,7 @@ impl RpcSubscriptions {
sub_id,
subscriber,
0, // last_notified_slot is not utilized for signature subscriptions
None,
enable_received_notification,
);
}
@@ -696,6 +708,10 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Slot(SlotInfo { slot, parent, root }));
}
pub fn notify_signatures_received(&self, slot_signatures: (Slot, Vec<Signature>)) {
self.enqueue_notification(NotificationEntry::SignaturesReceived(slot_signatures));
}
pub fn add_vote_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<RpcVote>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap();
@@ -840,6 +856,13 @@ impl RpcSubscriptions {
);
}
}
NotificationEntry::SignaturesReceived(slot_signatures) => {
RpcSubscriptions::process_signatures_received(
&slot_signatures,
&subscriptions.signature_subscriptions,
&notifier,
)
}
},
Err(RecvTimeoutError::Timeout) => {
// not a problem - try reading again
@@ -939,6 +962,40 @@ impl RpcSubscriptions {
}
}
fn process_signatures_received(
(received_slot, signatures): &(Slot, Vec<Signature>),
signature_subscriptions: &Arc<RpcSignatureSubscriptions>,
notifier: &RpcNotifier,
) {
for signature in signatures {
if let Some(hashmap) = signature_subscriptions.read().unwrap().get(signature) {
for (
_,
SubscriptionData {
sink,
config: is_received_notification_enabled,
..
},
) in hashmap.iter()
{
if is_received_notification_enabled
.expect("All signature subscriptions must have this config field set")
{
notifier.notify(
Response {
context: RpcResponseContext {
slot: *received_slot,
},
value: RpcSignatureResult::ReceivedSignature,
},
&sink,
);
}
}
}
}
}
fn shutdown(&mut self) -> std::thread::Result<()> {
if let Some(runtime) = self.notifier_runtime.take() {
info!("RPC Notifier runtime - shutting down");
@@ -1076,7 +1133,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -1158,7 +1215,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
"pubkey": alice.pubkey().to_string(),
},
@@ -1247,31 +1304,55 @@ pub(crate) mod tests {
Subscriber::new_test("signatureNotification");
let (processed_sub, _id_receiver, processed_recv) =
Subscriber::new_test("signatureNotification");
let (processed_sub3, _id_receiver, processed_recv3) =
Subscriber::new_test("signatureNotification");
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(1 as u64),
past_bank_sub1,
);
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(CommitmentConfig::root()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::root()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(2 as u64),
past_bank_sub2,
);
subscriptions.add_signature_subscription(
processed_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(3 as u64),
processed_sub,
);
subscriptions.add_signature_subscription(
unprocessed_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(4 as u64),
Subscriber::new_test("signatureNotification").0,
);
// Add a subscription that gets `received` notifications
subscriptions.add_signature_subscription(
unprocessed_tx.signatures[0],
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(true),
}),
SubscriptionId::Number(5 as u64),
processed_sub3,
);
{
let sig_subs = subscriptions
@@ -1284,46 +1365,62 @@ pub(crate) mod tests {
assert!(sig_subs.contains_key(&processed_tx.signatures[0]));
}
let mut commitment_slots = CommitmentSlots::default();
commitment_slots.slot = 1;
let received_slot = 1;
commitment_slots.slot = received_slot;
subscriptions
.notify_signatures_received((received_slot, vec![unprocessed_tx.signatures[0]]));
subscriptions.notify_subscribers(commitment_slots);
let expected_res = RpcSignatureResult { err: None };
let expected_res =
RpcSignatureResult::ProcessedSignatureResult(ProcessedSignatureResult { err: None });
let received_expected_res = RpcSignatureResult::ReceivedSignature;
struct Notification {
slot: Slot,
id: u64,
}
let expected_notification = |exp: Notification| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": &expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
let expected_notification =
|exp: Notification, expected_res: &RpcSignatureResult| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
// Expect to receive a notification from bank 1 because this subscription is
// looking for 0 confirmations and so checks the current bank
let expected = expected_notification(Notification { slot: 1, id: 1 });
let expected = expected_notification(Notification { slot: 1, id: 1 }, &expected_res);
let (response, _) = robust_poll_or_panic(past_bank_recv1);
assert_eq!(expected, response);
// Expect to receive a notification from bank 0 because this subscription is
// looking for 1 confirmation and so checks the past bank
let expected = expected_notification(Notification { slot: 0, id: 2 });
let expected = expected_notification(Notification { slot: 0, id: 2 }, &expected_res);
let (response, _) = robust_poll_or_panic(past_bank_recv2);
assert_eq!(expected, response);
let expected = expected_notification(Notification { slot: 1, id: 3 });
let expected = expected_notification(Notification { slot: 1, id: 3 }, &expected_res);
let (response, _) = robust_poll_or_panic(processed_recv);
assert_eq!(expected, response);
// Expect a "received" notification
let expected = expected_notification(
Notification {
slot: received_slot,
id: 5,
},
&received_expected_res,
);
let (response, _) = robust_poll_or_panic(processed_recv3);
assert_eq!(expected, response);
// Subscription should be automatically removed after notification
let sig_subs = subscriptions
.subscriptions
@@ -1336,7 +1433,7 @@ pub(crate) mod tests {
// Unprocessed signature subscription should not be removed
assert_eq!(
sig_subs.get(&unprocessed_tx.signatures[0]).unwrap().len(),
1
2
);
}
@@ -1572,7 +1669,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -1606,7 +1703,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 1,


@@ -21,7 +21,7 @@ use solana_sdk::{
};
use solana_streamer::streamer::{PacketReceiver, PacketSender};
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
net::SocketAddr,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
@@ -382,12 +382,13 @@ impl ServeRepair {
repair_request: RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
repair_validators: &Option<HashSet<Pubkey>>,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let slot = repair_request.slot();
if cache.get(&slot).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
let repair_peers = self.repair_peers(&repair_validators, slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
@@ -411,8 +412,9 @@ impl ServeRepair {
&self,
slot: Slot,
cluster_slots: &ClusterSlots,
repair_validators: &Option<HashSet<Pubkey>>,
) -> Result<(Pubkey, SocketAddr)> {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
let repair_peers: Vec<_> = self.repair_peers(repair_validators, slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
@@ -448,6 +450,27 @@ impl ServeRepair {
}
}
fn repair_peers(
&self,
repair_validators: &Option<HashSet<Pubkey>>,
slot: Slot,
) -> Vec<ContactInfo> {
if let Some(repair_validators) = repair_validators {
repair_validators
.iter()
.filter_map(|key| {
if *key != self.my_info.id {
self.cluster_info.lookup_contact_info(key, |ci| ci.clone())
} else {
None
}
})
.collect()
} else {
self.cluster_info.repair_peers(slot)
}
}
fn run_window_request(
recycler: &PacketsRecycler,
from: &ContactInfo,
@@ -662,7 +685,7 @@ mod tests {
repair: socketaddr!("127.0.0.1:1237"),
tpu: socketaddr!("127.0.0.1:1238"),
tpu_forwards: socketaddr!("127.0.0.1:1239"),
unused: socketaddr!("127.0.0.1:1240"),
rpc_banks: socketaddr!("127.0.0.1:1240"),
rpc: socketaddr!("127.0.0.1:1241"),
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
serve_repair: socketaddr!("127.0.0.1:1243"),
@@ -733,6 +756,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
);
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
@@ -745,7 +769,7 @@ mod tests {
repair: socketaddr!([127, 0, 0, 1], 1237),
tpu: socketaddr!([127, 0, 0, 1], 1238),
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
unused: socketaddr!([127, 0, 0, 1], 1240),
rpc_banks: socketaddr!([127, 0, 0, 1], 1240),
rpc: socketaddr!([127, 0, 0, 1], 1241),
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
serve_repair: serve_repair_addr,
@@ -759,6 +783,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
@@ -773,7 +798,7 @@ mod tests {
repair: socketaddr!([127, 0, 0, 1], 1237),
tpu: socketaddr!([127, 0, 0, 1], 1238),
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
unused: socketaddr!([127, 0, 0, 1], 1240),
rpc_banks: socketaddr!([127, 0, 0, 1], 1240),
rpc: socketaddr!([127, 0, 0, 1], 1241),
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
serve_repair: serve_repair_addr2,
@@ -791,6 +816,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.unwrap();
if rv.0 == serve_repair_addr {
@@ -937,4 +963,71 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_repair_with_repair_validators() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone()));
// Insert two peers on the network
let contact_info2 = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let contact_info3 = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
cluster_info.insert_info(contact_info2.clone());
cluster_info.insert_info(contact_info3.clone());
let serve_repair = ServeRepair::new(cluster_info);
// If:
// 1) repair validator set doesn't exist in gossip
// 2) repair validator set only includes our own id
// then no repairs should be generated
for pubkey in &[Pubkey::new_rand(), me.id] {
let trusted_validators = Some(vec![*pubkey].into_iter().collect());
assert!(serve_repair.repair_peers(&trusted_validators, 1).is_empty());
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&trusted_validators,
)
.is_err());
}
// If trusted validator exists in gossip, should return repair successfully
let trusted_validators = Some(vec![contact_info2.id].into_iter().collect());
let repair_peers = serve_repair.repair_peers(&trusted_validators, 1);
assert_eq!(repair_peers.len(), 1);
assert_eq!(repair_peers[0].id, contact_info2.id);
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&trusted_validators,
)
.is_ok());
// Using no trusted validators should default to all
// validators available in gossip, excluding myself
let repair_peers: HashSet<Pubkey> = serve_repair
.repair_peers(&None, 1)
.into_iter()
.map(|c| c.id)
.collect();
assert_eq!(repair_peers.len(), 2);
assert!(repair_peers.contains(&contact_info2.id));
assert!(repair_peers.contains(&contact_info3.id));
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.is_ok());
}
}
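The `repair_peers` change above filters repair targets through an optional allow-list of validator identities. A minimal sketch, not from this PR, of how a CLI layer might build that `Option<HashSet<Pubkey>>`; the function name and the flag-style input are hypothetical:

```rust
use std::collections::HashSet;
use std::str::FromStr;

use solana_sdk::pubkey::Pubkey;

/// Hypothetical helper: turn base-58 pubkey strings (e.g. from a repair allow-list
/// flag) into the `Option<HashSet<Pubkey>>` that `repair_request` now expects.
/// `None` keeps the old behavior: repair from any peer in gossip.
fn parse_repair_validators(args: &[&str]) -> Option<HashSet<Pubkey>> {
    if args.is_empty() {
        return None;
    }
    Some(
        args.iter()
            .map(|s| Pubkey::from_str(s).expect("invalid validator pubkey"))
            .collect(),
    )
}

fn main() {
    assert!(parse_repair_validators(&[]).is_none());
    let allow_list = parse_repair_validators(&["Vote111111111111111111111111111111111111111"]);
    assert_eq!(allow_list.unwrap().len(), 1);
}
```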


@@ -8,6 +8,7 @@ use crate::{
cluster_info::ClusterInfo,
cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker},
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
ledger_cleanup_service::LedgerCleanupService,
poh_recorder::PohRecorder,
replay_stage::{ReplayStage, ReplayStageConfig},
@@ -66,6 +67,7 @@ pub struct TvuConfig {
pub shred_version: u16,
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub trusted_validators: Option<HashSet<Pubkey>>,
pub repair_validators: Option<HashSet<Pubkey>>,
pub accounts_hash_fault_injection_slots: u64,
}
@@ -99,6 +101,7 @@ impl Tvu {
retransmit_slots_sender: RetransmitSlotsSender,
verified_vote_receiver: VerifiedVoteReceiver,
replay_vote_sender: ReplayVoteSender,
completed_data_sets_sender: CompletedDataSetsSender,
tvu_config: TvuConfig,
) -> Self {
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
@@ -150,6 +153,8 @@ impl Tvu {
cluster_slots.clone(),
duplicate_slots_reset_sender,
verified_vote_receiver,
tvu_config.repair_validators,
completed_data_sets_sender,
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@@ -247,6 +252,7 @@ pub mod tests {
};
use serial_test_derive::serial;
use solana_ledger::{
blockstore::BlockstoreSignals,
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
@@ -273,9 +279,13 @@ pub mod tests {
let cref1 = Arc::new(cluster_info1);
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blockstore, l_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path, None)
.expect("Expected to successfully open ledger");
let BlockstoreSignals {
blockstore,
ledger_signal_receiver,
completed_slots_receiver,
..
} = Blockstore::open_with_signal(&blockstore_path, None)
.expect("Expected to successfully open ledger");
let blockstore = Arc::new(blockstore);
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) =
@@ -286,6 +296,7 @@ pub mod tests {
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let (completed_data_sets_sender, _completed_data_sets_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
@@ -301,7 +312,7 @@ pub mod tests {
}
},
blockstore,
l_receiver,
ledger_signal_receiver,
&Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
@@ -320,6 +331,7 @@ pub mod tests {
retransmit_slots_sender,
verified_vote_receiver,
replay_vote_sender,
completed_data_sets_sender,
TvuConfig::default(),
);
exit.store(true, Ordering::Relaxed);
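`TvuConfig` above gains a `repair_validators` field alongside `trusted_validators`. A small sketch, under the assumption that `TvuConfig` is reachable as `solana_core::tvu::TvuConfig`, of filling both allow-lists with struct-update syntax:

```rust
use std::collections::HashSet;

// Path assumed from the file above (core/src/tvu.rs); not part of this PR.
use solana_core::tvu::TvuConfig;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Restrict both the accounts-hash cross-check and shred repair to one validator;
    // all other fields keep their defaults via struct-update syntax.
    let allow_list: HashSet<Pubkey> = vec![Pubkey::new_rand()].into_iter().collect();
    let _tvu_config = TvuConfig {
        trusted_validators: Some(allow_list.clone()),
        repair_validators: Some(allow_list),
        ..TvuConfig::default()
    };
}
```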


@@ -4,6 +4,7 @@ use crate::{
broadcast_stage::BroadcastStageType,
cluster_info::{ClusterInfo, Node},
cluster_info_vote_listener::VoteTracker,
completed_data_sets_service::CompletedDataSetsService,
contact_info::ContactInfo,
gossip_service::{discover_cluster, GossipService},
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
@@ -21,11 +22,12 @@ use crate::{
transaction_status_service::TransactionStatusService,
tvu::{Sockets, Tvu, TvuConfig},
};
use crossbeam_channel::unbounded;
use crossbeam_channel::{bounded, unbounded};
use rand::{thread_rng, Rng};
use solana_banks_server::rpc_banks_service::RpcBanksService;
use solana_ledger::{
bank_forks_utils,
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
blockstore::{Blockstore, BlockstoreSignals, CompletedSlotsReceiver, PurgeType},
blockstore_db::BlockstoreRecoveryMode,
blockstore_processor::{self, TransactionStatusSender},
create_new_tmp_ledger,
@@ -63,6 +65,8 @@ use std::{
time::Duration,
};
pub const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
#[derive(Clone, Debug)]
pub struct ValidatorConfig {
pub dev_halt_at_slot: Option<Slot>,
@@ -72,7 +76,7 @@ pub struct ValidatorConfig {
pub voting_disabled: bool,
pub account_paths: Vec<PathBuf>,
pub rpc_config: JsonRpcConfig,
pub rpc_ports: Option<(u16, u16)>, // (API, PubSub)
pub rpc_ports: Option<(u16, u16, u16)>, // (JsonRpc, JsonRpcPubSub, Banks)
pub snapshot_config: Option<SnapshotConfig>,
pub max_ledger_shreds: Option<u64>,
pub broadcast_stage_type: BroadcastStageType,
@@ -81,6 +85,7 @@ pub struct ValidatorConfig {
pub wait_for_supermajority: Option<Slot>,
pub new_hard_forks: Option<Vec<Slot>>,
pub trusted_validators: Option<HashSet<Pubkey>>, // None = trust all
pub repair_validators: Option<HashSet<Pubkey>>, // None = repair from all
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
@@ -109,6 +114,7 @@ impl Default for ValidatorConfig {
wait_for_supermajority: None,
new_hard_forks: None,
trusted_validators: None,
repair_validators: None,
halt_on_trusted_validators_accounts_hash_mismatch: false,
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
@@ -148,11 +154,12 @@ struct TransactionHistoryServices {
pub struct Validator {
pub id: Pubkey,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
rpc_service: Option<(JsonRpcService, PubSubService)>,
rpc_service: Option<(JsonRpcService, PubSubService, RpcBanksService)>,
transaction_status_service: Option<TransactionStatusService>,
rewards_recorder_service: Option<RewardsRecorderService>,
gossip_service: GossipService,
serve_repair_service: ServeRepairService,
completed_data_sets_service: CompletedDataSetsService,
snapshot_packager_service: Option<SnapshotPackagerService>,
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_service: PohService,
@@ -281,37 +288,57 @@ impl Validator {
block_commitment_cache.clone(),
));
let (completed_data_sets_sender, completed_data_sets_receiver) =
bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL);
let completed_data_sets_service = CompletedDataSetsService::new(
completed_data_sets_receiver,
blockstore.clone(),
subscriptions.clone(),
&exit,
);
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert_eq!(rpc_port, node.info.rpc.port());
assert_eq!(rpc_pubsub_port, node.info.rpc_pubsub.port());
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
}
(
JsonRpcService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port),
config.rpc_config.clone(),
config.snapshot_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_config.hash(),
ledger_path,
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_pubsub_port),
&exit,
),
)
});
let rpc_service = config
.rpc_ports
.map(|(rpc_port, rpc_pubsub_port, rpc_banks_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert_eq!(rpc_port, node.info.rpc.port());
assert_eq!(rpc_pubsub_port, node.info.rpc_pubsub.port());
assert_eq!(rpc_banks_port, node.info.rpc_banks.port());
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
}
let tpu_address = cluster_info.my_contact_info().tpu;
(
JsonRpcService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port),
config.rpc_config.clone(),
config.snapshot_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_config.hash(),
ledger_path,
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_pubsub_port),
&exit,
),
RpcBanksService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_banks_port),
tpu_address,
&bank_forks,
&block_commitment_cache,
&exit,
),
)
});
info!(
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
@@ -454,12 +481,14 @@ impl Validator {
retransmit_slots_sender,
verified_vote_receiver,
replay_vote_sender.clone(),
completed_data_sets_sender,
TvuConfig {
max_ledger_shreds: config.max_ledger_shreds,
halt_on_trusted_validators_accounts_hash_mismatch: config
.halt_on_trusted_validators_accounts_hash_mismatch,
shred_version: node.info.shred_version,
trusted_validators: config.trusted_validators.clone(),
repair_validators: config.repair_validators.clone(),
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
},
);
@@ -494,6 +523,7 @@ impl Validator {
transaction_status_service,
rewards_recorder_service,
snapshot_packager_service,
completed_data_sets_service,
tpu,
tvu,
poh_service,
@@ -543,9 +573,10 @@ impl Validator {
pub fn join(self) -> Result<()> {
self.poh_service.join()?;
drop(self.poh_recorder);
if let Some((rpc_service, rpc_pubsub_service)) = self.rpc_service {
if let Some((rpc_service, rpc_pubsub_service, rpc_banks_service)) = self.rpc_service {
rpc_service.join()?;
rpc_pubsub_service.join()?;
rpc_banks_service.join()?;
}
if let Some(transaction_status_service) = self.transaction_status_service {
transaction_status_service.join()?;
@@ -563,6 +594,7 @@ impl Validator {
self.serve_repair_service.join()?;
self.tpu.join()?;
self.tvu.join()?;
self.completed_data_sets_service.join()?;
self.ip_echo_server.shutdown_now();
Ok(())
@@ -606,9 +638,13 @@ fn new_banks_from_ledger(
}
}
let (mut blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
.expect("Failed to open ledger database");
let BlockstoreSignals {
mut blockstore,
ledger_signal_receiver,
completed_slots_receiver,
..
} = Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
.expect("Failed to open ledger database");
blockstore.set_no_compaction(config.no_rocksdb_compaction);
let process_options = blockstore_processor::ProcessOptions {
@@ -850,18 +886,14 @@ impl TestValidator {
} = create_genesis_config_with_leader_ex(
mint_lamports,
&contact_info.id,
Arc::new(Keypair::new()),
Arc::new(Keypair::new()),
&Keypair::new(),
&Pubkey::new_rand(),
42,
bootstrap_validator_lamports,
);
genesis_config
.native_instruction_processors
.push(solana_budget_program!());
genesis_config
.native_instruction_processors
.push(solana_bpf_loader_program!());
genesis_config.rent.lamports_per_byte_year = 1;
genesis_config.rent.exemption_threshold = 1.0;
genesis_config.fee_rate_governor = FeeRateGovernor::new(fees, 0);
@@ -869,15 +901,20 @@ impl TestValidator {
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let config = ValidatorConfig {
rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())),
rpc_ports: Some((
node.info.rpc.port(),
node.info.rpc_pubsub.port(),
node.info.rpc_banks.port(),
)),
..ValidatorConfig::default()
};
let vote_pubkey = voting_keypair.pubkey();
let node = Validator::new(
node,
&node_keypair,
&ledger_path,
&voting_keypair.pubkey(),
vec![voting_keypair.clone()],
vec![Arc::new(voting_keypair)],
None,
true,
&config,
@@ -889,7 +926,7 @@ impl TestValidator {
alice: mint_keypair,
ledger_path,
genesis_hash: blockhash,
vote_pubkey: voting_keypair.pubkey(),
vote_pubkey,
}
}
}
@@ -1037,6 +1074,7 @@ mod tests {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
validator_node.info.rpc_banks.port(),
)),
..ValidatorConfig::default()
};
@@ -1106,11 +1144,12 @@ mod tests {
.genesis_config;
let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
ledger_paths.push(validator_ledger_path.clone());
let vote_account_keypair = Arc::new(Keypair::new());
let vote_account_keypair = Keypair::new();
let config = ValidatorConfig {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
validator_node.info.rpc_banks.port(),
)),
..ValidatorConfig::default()
};
@@ -1119,7 +1158,7 @@ mod tests {
&Arc::new(validator_keypair),
&validator_ledger_path,
&vote_account_keypair.pubkey(),
vec![vote_account_keypair.clone()],
vec![Arc::new(vote_account_keypair)],
Some(&leader_node.info),
true,
&config,
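With the validator diff above, `rpc_ports` becomes a three-element tuple so the new `RpcBanksService` gets its own port. A minimal sketch, assuming `ValidatorConfig` is reachable as `solana_core::validator::ValidatorConfig`; the port numbers are placeholders:

```rust
// Path assumed from the file above (core/src/validator.rs); not part of this PR.
use solana_core::validator::ValidatorConfig;

fn main() {
    // rpc_ports now carries (JsonRpc, JsonRpcPubSub, Banks); the numbers are placeholders.
    let _config = ValidatorConfig {
        rpc_ports: Some((8899, 8900, 8901)),
        ..ValidatorConfig::default()
    };
}
```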


@@ -5,6 +5,7 @@ use crate::{
cluster_info::ClusterInfo,
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
repair_response,
repair_service::{RepairInfo, RepairService},
result::{Error, Result},
@@ -123,6 +124,7 @@ fn run_insert<F>(
leader_schedule_cache: &Arc<LeaderScheduleCache>,
handle_duplicate: F,
metrics: &mut BlockstoreInsertionMetrics,
completed_data_sets_sender: &CompletedDataSetsSender,
) -> Result<()>
where
F: Fn(Shred),
@@ -138,13 +140,13 @@ where
let mut i = 0;
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
blockstore.insert_shreds_handle_duplicate(
completed_data_sets_sender.try_send(blockstore.insert_shreds_handle_duplicate(
shreds,
Some(leader_schedule_cache),
false,
&handle_duplicate,
metrics,
)?;
)?)?;
Ok(())
}
@@ -302,6 +304,7 @@ impl WindowService {
shred_filter: F,
cluster_slots: Arc<ClusterSlots>,
verified_vote_receiver: VerifiedVoteReceiver,
completed_data_sets_sender: CompletedDataSetsSender,
) -> WindowService
where
F: 'static
@@ -333,6 +336,7 @@ impl WindowService {
leader_schedule_cache,
insert_receiver,
duplicate_sender,
completed_data_sets_sender,
);
let t_window = Self::start_recv_window_thread(
@@ -387,6 +391,7 @@ impl WindowService {
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
completed_data_sets_sender: CompletedDataSetsSender,
) -> JoinHandle<()> {
let exit = exit.clone();
let blockstore = blockstore.clone();
@@ -415,6 +420,7 @@ impl WindowService {
&leader_schedule_cache,
&handle_duplicate,
&mut metrics,
&completed_data_sets_sender,
) {
if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
break;
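`run_insert` above now forwards the blockstore's completed-data-set output through a bounded crossbeam channel (capacity `MAX_COMPLETED_DATA_SETS_IN_CHANNEL` in the validator diff). A standalone sketch of that producer/consumer pattern; the payload type is a stand-in, not the real completed-data-set metadata:

```rust
use crossbeam_channel::{bounded, TrySendError};

fn main() {
    // Stand-in payload; the real channel carries completed-data-set metadata.
    let (sender, receiver) = bounded::<Vec<u64>>(100_000);

    // Producer side (what run_insert does after this change): try_send never blocks
    // shred insertion, and a full channel is surfaced as an error instead.
    if let Err(TrySendError::Full(_)) = sender.try_send(vec![1, 2, 3]) {
        eprintln!("completed-data-set channel is full; notification dropped");
    }

    // Consumer side (what CompletedDataSetsService drains to notify subscribers).
    while let Ok(completed) = receiver.try_recv() {
        println!("completed data sets: {:?}", completed);
    }
}
```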


@@ -1,31 +1,32 @@
// Long-running bank_forks tests
macro_rules! DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS {
($x:ident) => {
($x:ident, $y:ident, $z:ident) => {
#[allow(non_snake_case)]
mod $x {
mod $z {
use super::*;
const SNAPSHOT_VERSION: SnapshotVersion = SnapshotVersion::$x;
const OPERATING_MODE: OperatingMode = OperatingMode::$y;
#[test]
fn test_bank_forks_status_cache_snapshot_n() {
run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION)
run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION, OPERATING_MODE)
}
#[test]
fn test_bank_forks_snapshot_n() {
run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION)
run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION, OPERATING_MODE)
}
#[test]
fn test_concurrent_snapshot_packaging() {
run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION)
run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION, OPERATING_MODE)
}
#[test]
fn test_slots_to_snapshot() {
run_test_slots_to_snapshot(SNAPSHOT_VERSION)
run_test_slots_to_snapshot(SNAPSHOT_VERSION, OPERATING_MODE)
}
}
};
@@ -49,7 +50,7 @@ mod tests {
};
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
genesis_config::{GenesisConfig, OperatingMode},
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -58,7 +59,9 @@ mod tests {
use std::{fs, path::PathBuf, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc};
use tempfile::TempDir;
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Development, V1_2_0_Development);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Preview, V1_2_0_Preview);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Stable, V1_2_0_Stable);
struct SnapshotTestConfig {
accounts_dir: TempDir,
@@ -72,12 +75,14 @@ mod tests {
impl SnapshotTestConfig {
fn new(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
snapshot_interval_slots: u64,
) -> SnapshotTestConfig {
let accounts_dir = TempDir::new().unwrap();
let snapshot_dir = TempDir::new().unwrap();
let snapshot_output_path = TempDir::new().unwrap();
let genesis_config_info = create_genesis_config(10_000);
let mut genesis_config_info = create_genesis_config(10_000);
genesis_config_info.genesis_config.operating_mode = operating_mode;
let bank0 = Bank::new_with_paths(
&genesis_config_info.genesis_config,
vec![accounts_dir.path().to_path_buf()],
@@ -158,6 +163,7 @@ mod tests {
// `last_slot` bank
fn run_bank_forks_snapshot_n<F>(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
last_slot: Slot,
f: F,
set_root_interval: u64,
@@ -166,7 +172,7 @@ mod tests {
{
solana_logger::setup();
// Set up snapshotting config
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, 1);
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, operating_mode, 1);
let bank_forks = &mut snapshot_test_config.bank_forks;
let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair;
@@ -211,11 +217,15 @@ mod tests {
restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths);
}
fn run_test_bank_forks_snapshot_n(snapshot_version: SnapshotVersion) {
fn run_test_bank_forks_snapshot_n(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
) {
// create banks up to slot 4 and create 1 new account in each bank. test that bank 4 snapshots
// and restores correctly
run_bank_forks_snapshot_n(
snapshot_version,
operating_mode,
4,
|bank, mint_keypair| {
let key1 = Keypair::new().pubkey();
@@ -246,11 +256,14 @@ mod tests {
}
}
fn run_test_concurrent_snapshot_packaging(snapshot_version: SnapshotVersion) {
fn run_test_concurrent_snapshot_packaging(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
) {
solana_logger::setup();
// Set up snapshotting config
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, 1);
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, operating_mode, 1);
let bank_forks = &mut snapshot_test_config.bank_forks;
let accounts_dir = &snapshot_test_config.accounts_dir;
@@ -395,7 +408,10 @@ mod tests {
);
}
fn run_test_slots_to_snapshot(snapshot_version: SnapshotVersion) {
fn run_test_slots_to_snapshot(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
) {
solana_logger::setup();
let num_set_roots = MAX_CACHE_ENTRIES * 2;
@@ -404,6 +420,7 @@ mod tests {
// Make sure this test never clears bank.slots_since_snapshot
let mut snapshot_test_config = SnapshotTestConfig::new(
snapshot_version,
operating_mode,
(*add_root_interval * num_set_roots * 2) as u64,
);
let mut current_bank = snapshot_test_config.bank_forks[0].clone();
@@ -437,7 +454,10 @@ mod tests {
}
}
fn run_test_bank_forks_status_cache_snapshot_n(snapshot_version: SnapshotVersion) {
fn run_test_bank_forks_status_cache_snapshot_n(
snapshot_version: SnapshotVersion,
operating_mode: OperatingMode,
) {
// create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
// this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
// ahead. Also tests the status_cache purge and status cache snapshotting.
@@ -447,6 +467,7 @@ mod tests {
for set_root_interval in &[1, 4] {
run_bank_forks_snapshot_n(
snapshot_version,
operating_mode,
(MAX_CACHE_ENTRIES * 2 + 1) as u64,
|bank, mint_keypair| {
let tx = system_transaction::transfer(
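The macro above is re-parameterized so every snapshot test runs once per `OperatingMode`. A stripped-down illustration of the same macro_rules! pattern, with toy string parameters rather than the real `SnapshotVersion`/`OperatingMode` types; run it with `cargo test`:

```rust
// A stripped-down illustration (not the macro from the diff) of generating one test
// module per (version, mode) pair, so each combination shows up as a named module.
macro_rules! define_mode_tests {
    ($version:expr, $mode:expr, $module:ident) => {
        mod $module {
            #[test]
            fn smoke() {
                // Both parameters are baked into the generated test at expansion time.
                assert!(!$version.is_empty());
                assert!(!$mode.is_empty());
            }
        }
    };
}

define_mode_tests!("V1_2_0", "Development", v1_2_0_development);
define_mode_tests!("V1_2_0", "Stable", v1_2_0_stable);
```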


@@ -233,7 +233,9 @@ pub fn cluster_info_scale() {
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| test_node_with_bank(keypairs.node_keypair, &exit, bank_forks.clone()))
.map(|keypairs| {
test_node_with_bank(Arc::new(keypairs.node_keypair), &exit, bank_forks.clone())
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {


@@ -103,7 +103,7 @@ fn test_rpc_send_tx() {
use solana_account_decoder::UiAccountEncoding;
use solana_client::rpc_config::RpcAccountInfoConfig;
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Binary64),
encoding: Some(UiAccountEncoding::Base64),
commitment: None,
data_slice: None,
};
@@ -285,8 +285,12 @@ fn test_rpc_subscriptions() {
let timeout = deadline.saturating_duration_since(Instant::now());
match status_receiver.recv_timeout(timeout) {
Ok((sig, result)) => {
assert!(result.value.err.is_none());
assert!(signature_set.remove(&sig));
if let RpcSignatureResult::ProcessedSignatureResult(result) = result.value {
assert!(result.err.is_none());
assert!(signature_set.remove(&sig));
} else {
panic!("Unexpected result");
}
}
Err(_err) => {
assert!(


@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.3.3"
version = "1.3.8"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"


@@ -22,7 +22,10 @@ if [[ -z "$LATEST_SOLANA_RELEASE_VERSION" ]]; then
exit 1
fi
VERSION_FOR_DOCS_RS="${LATEST_SOLANA_RELEASE_VERSION:1}"
set -x
if [[ -n $CI ]]; then
find src/ -name \*.md -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
find src/ -name \*.md -exec sed -i "s/VERSION_FOR_DOCS_RS/$VERSION_FOR_DOCS_RS/g" {} \;
fi


@@ -16,6 +16,14 @@ module.exports = {
"wallet-guide/ledger-live",
],
},
{
type: "category",
label: "Web Wallets",
items: [
"wallet-guide/web-wallets",
"wallet-guide/solflare",
],
},
{
type: "category",
label: "Command-line Wallets",


@@ -21,14 +21,14 @@ programs, as well include instructions from third-party programs.
Create accounts and transfer lamports between them
- Program ID: `11111111111111111111111111111111`
- Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html)
- Instructions: [SystemInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/system_instruction/enum.SystemInstruction.html)
## Config Program
Add configuration data to the chain and the list of public keys that are permitted to modify it
- Program ID: `Config1111111111111111111111111111111111111`
- Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html)
- Instructions: [config_instruction](https://docs.rs/solana-config-program/VERSION_FOR_DOCS_RS/solana_config_program/config_instruction/index.html)
Unlike the other programs, the Config program does not define any individual
instructions. It has just one implicit instruction, a "store" instruction. Its
@@ -40,21 +40,21 @@ data to store in it.
Create stake accounts and delegate it to validators
- Program ID: `Stake11111111111111111111111111111111111111`
- Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html)
- Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/VERSION_FOR_DOCS_RS/solana_stake_program/stake_instruction/enum.StakeInstruction.html)
## Vote Program
Create vote accounts and vote on blocks
- Program ID: `Vote111111111111111111111111111111111111111`
- Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html)
- Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/VERSION_FOR_DOCS_RS/solana_vote_program/vote_instruction/enum.VoteInstruction.html)
## BPF Loader
Add programs to the chain.
- Program ID: `BPFLoader1111111111111111111111111111111111`
- Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html)
- Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/loader_instruction/enum.LoaderInstruction.html)
The BPF Loader marks itself as its "owner" of the executable account it
creates to store your program. When a user invokes an instruction via a


@@ -39,6 +39,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
- [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
- [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
- [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
- [getMultipleAccounts](jsonrpc-api.md#getmultipleaccounts)
- [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
- [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
- [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
@@ -157,9 +158,9 @@ Returns all information associated with the account of provided Pubkey
- `<string>` - Pubkey of account to query, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary", "binary64", or jsonParsed". If parameter not provided, the default encoding is "binary". "binary" is base-58 encoded and limited to Account data of less than 128 bytes. "binary64" will return base64 encoded data for Account data of any size.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed". "base58" is limited to Account data of less than 128 bytes. "base64" will return base64 encoded data for Account data of any size.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58" or "base64" encoding.
#### Results:
@@ -169,7 +170,7 @@ The result will be an RpcResponse JSON object with `value` equal to:
- `<object>` - otherwise, a JSON object containing:
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
- `data: <string|object>`, data associated with the account, either as base-58 encoded binary data or JSON format `{<program>: <state>}`, depending on encoding parameter
- `data: <[string, encoding]|object>`, data associated with the account, either as encoded binary data or JSON format `{<program>: <state>}`, depending on encoding parameter
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
@@ -177,10 +178,10 @@ The result will be an RpcResponse JSON object with `value` equal to:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg",{"encoding": "base58"}]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"data":"11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHRTPuR3oZ1EioKtYGiYxpxMG5vpbZLsbcBYBEmZZcMKaSoGx9JZeAuWf","executable":false,"lamports":1000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"data":["11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHRTPuR3oZ1EioKtYGiYxpxMG5vpbZLsbcBYBEmZZcMKaSoGx9JZeAuWf","base58"],"executable":false,"lamports":1000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}},"id":1}
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA",{"encoding":"json"}]}' http://localhost:8899
@@ -241,7 +242,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getBlockTime
Returns the estimated production time of a block.
Returns the estimated production time of a confirmed block.
Each validator reports their UTC time to the ledger on a regular interval by
intermittently adding a timestamp to a Vote for a particular block. A requested
@@ -259,8 +260,8 @@ query a node that is built from genesis and retains the entire ledger.
#### Results:
- `<null>` - block has not yet been produced
- `<i64>` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
* `<i64>` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
* `<null>` - timestamp is not available for this block
#### Example:
@@ -307,7 +308,7 @@ Returns identity and transaction information about a confirmed block in the ledg
#### Parameters:
- `<u64>` - slot, as u64 integer
- `<string>` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. **jsonParsed encoding is UNSTABLE**
- `<string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (*slow*), or "base64". If parameter not provided, the default encoding is JSON. **jsonParsed encoding is UNSTABLE**
Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
#### Results:
@@ -320,7 +321,7 @@ The result field will be an object with the following fields:
- `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111"
- `parentSlot: <u64>` - the slot index of this block's parent
- `transactions: <array>` - an array of JSON objects containing:
- `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
- `transaction: <object|[string,encoding]>` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter
- `meta: <object>` - transaction status metadata object, containing `null` or:
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
- `fee: <u64>` - fee this transaction was charged, as u64 integer
@@ -341,13 +342,13 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
{"jsonrpc":"2.0","result":{"blockTime":null,"blockhash":"3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA","parentSlot":429,"previousBlockhash":"mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B","rewards":[],"transactions":[{"meta":{"err":null,"fee":5000,"postBalances":[499998932500,26858640,1,1,1],"preBalances":[499998937500,26858640,1,1,1],"status":{"Ok":null}},"transaction":{"message":{"accountKeys":["3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe","AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":1},"instructions":[{"accounts":[1,2,3,0],"data":"37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1","programIdIndex":4}],"recentBlockhash":"mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B"},"signatures":["2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv"]}}]},"id":1}
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "base64"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
{"jsonrpc":"2.0","result":{"blockTime":null,"blockhash":"3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA","parentSlot":429,"previousBlockhash":"mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B","rewards":[],"transactions":[{"meta":{"err":null,"fee":5000,"postBalances":[499998932500,26858640,1,1,1],"preBalances":[499998937500,26858640,1,1,1],"status":{"Ok":null}},"transaction":["AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==","base64"]}]},"id":1}
```
#### Transaction Structure
@@ -465,14 +466,14 @@ Returns transaction details for a confirmed transaction
- `<string>` - transaction signature as base-58 encoded string
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary". **jsonParsed encoding is UNSTABLE**
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", "base58" (*slow*), or "base64". If parameter not provided, the default encoding is JSON. **jsonParsed encoding is UNSTABLE**
Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
#### Results:
- `<null>` - if transaction is not found or not confirmed
- `<object>` - if transaction is confirmed, an object with the following fields:
- `slot: <u64>` - the slot this transaction was processed in
- `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
- `transaction: <object|[string,encoding]>` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter
- `meta: <object | null>` - transaction status metadata object:
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
- `fee: <u64>` - fee this transaction was charged, as u64 integer
@@ -486,16 +487,16 @@ N encoding attempts to use program-specific instruction parsers to return more h
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "json"]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", "json"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"slot":430,"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
{"jsonrpc":"2.0","result":{"meta":{"err":null,"fee":5000,"postBalances":[499998932500,26858640,1,1,1],"preBalances":[499998937500,26858640,1,1,1],"status":{"Ok":null}},"slot":430,"transaction":{"message":{"accountKeys":["3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe","AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":1},"instructions":[{"accounts":[1,2,3,0],"data":"37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1","programIdIndex":4}],"recentBlockhash":"mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B"},"signatures":["2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv"]}},"id":1}
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "binary"]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", "base64"]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"slot":430,"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
{"jsonrpc":"2.0","result":{"meta":{"err":null,"fee":5000,"postBalances":[499998932500,26858640,1,1,1],"preBalances":[499998937500,26858640,1,1,1],"status":{"Ok":null}},"slot":430,"transaction":["AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==","base64"]},"id":1}
```
### getEpochInfo
@@ -836,6 +837,49 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
{"jsonrpc":"2.0","result":500,"id":1}
```
### getMultipleAccounts
Returns the account information for a list of Pubkeys
#### Parameters:
- `<array>` - An array of Pubkeys to query, as base-58 encoded strings
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed". "base58" is limited to Account data of less than 128 bytes. "base64" will return base64 encoded data for Account data of any size.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58" or "base64" encoding.
#### Results:
The result will be an RpcResponse JSON object with `value` equal to:
An array of:
- `<null>` - if the account at that Pubkey doesn't exist
- `<object>` - otherwise, a JSON object containing:
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
- `data: <[string, encoding]|object>`, data associated with the account, either as encoded binary data or JSON format `{<program>: <state>}`, depending on encoding parameter
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getMultipleAccounts", "params":[["vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", "4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA"],{"dataSlice":{"offset":0,"length":0}}]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":[{"data":["AAAAAAEAAAACtzNsyJrW0g==","base64"],"executable":false,"lamports":1000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}},{"data":["","base64"],"executable":false,"lamports":5000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}}],"id":1}
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getMultipleAccounts", "params":[["vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", "4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA"],{"encoding": "base58"}]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":[{"data":["11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHRTPuR3oZ1EioKtYGiYxpxMG5vpbZLsbcBYBEmZZcMKaSoGx9JZeAuWf","base58"],"executable":false,"lamports":1000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}},{"data":["","base58"],"executable":false,"lamports":5000000000,"owner":"11111111111111111111111111111111","rentEpoch":2}}],"id":1}
```
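For callers using the Rust client instead of raw JSON-RPC, a minimal sketch of the same query; it assumes the rpc-client wrapper exposes a `get_multiple_accounts` method returning one `Option<Account>` per requested pubkey, and reuses the example pubkeys above:

```rust
use std::str::FromStr;

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // get_multiple_accounts on RpcClient is assumed here; the endpoint itself is
    // documented above.
    let client = RpcClient::new("http://localhost:8899".to_string());
    let pubkeys = vec![
        Pubkey::from_str("vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg").unwrap(),
        Pubkey::from_str("4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA").unwrap(),
    ];
    let accounts = client
        .get_multiple_accounts(&pubkeys)
        .expect("getMultipleAccounts request failed");
    for (pubkey, account) in pubkeys.iter().zip(accounts) {
        match account {
            Some(account) => println!("{}: {} lamports", pubkey, account.lamports),
            None => println!("{}: account not found", pubkey),
        }
    }
}
```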
### getProgramAccounts
Returns all accounts owned by the provided program Pubkey
@@ -845,9 +889,9 @@ Returns all accounts owned by the provided program Pubkey
- `<string>` - Pubkey of program, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed".
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. If parsed-JSON is requested for the SPL Token program, when a valid mint cannot be found for a particular account, that account will be filtered out from results. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58" or "base64" encoding.
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
##### Filters:
@@ -865,7 +909,7 @@ The result field will be an array of JSON objects, which will contain:
- `account: <object>` - a JSON object, with the following sub fields:
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
- `data: <string|object>`, data associated with the account, either as base-58 encoded binary data or JSON format `{<program>: <state>}`, depending on encoding parameter
- `data: <[string,encoding]|object>`, data associated with the account, either as encoded binary data or JSON format `{<program>: <state>}`, depending on encoding parameter
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
@@ -1100,9 +1144,9 @@ Returns all SPL Token accounts by approved Delegate. **UNSTABLE**
* `programId: <string>` - Pubkey of the Token program ID that owns the accounts, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed".
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a valid mint cannot be found for a particular account, that account will be filtered out from results. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58" or "base64" encoding.
#### Results:
@@ -1112,7 +1156,7 @@ The result will be an RpcResponse JSON object with `value` equal to an array of
- `account: <object>` - a JSON object, with the following sub fields:
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
- `data: <object>`, Token state data associated with the account, either as base-58 encoded binary data or in JSON format `{<program>: <state>}`
- `data: <object>`, Token state data associated with the account, either as encoded binary data or in JSON format `{<program>: <state>}`
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
@@ -1137,9 +1181,9 @@ Returns all SPL Token accounts by token owner. **UNSTABLE**
* `programId: <string>` - Pubkey of the Token program ID that owns the accounts, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed".
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a valid mint cannot be found for a particular account, that account will be filtered out from results. **jsonParsed encoding is UNSTABLE**
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58" or "base64" encoding.
#### Results:
@@ -1149,7 +1193,7 @@ The result will be an RpcResponse JSON object with `value` equal to an array of
- `account: <object>` - a JSON object, with the following sub fields:
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
- `data: <object>`, Token state data associated with the account, either as base-58 encoded binary data or in JSON format `{<program>: <state>}`
- `data: <object>`, Token state data associated with the account, either as encoded binary data or in JSON format `{<program>: <state>}`
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
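For illustration, a jsonParsed query for all accounts a wallet owns under the SPL Token program might look like the following sketch; the owner address is only a placeholder, and the second parameter is assumed to be the `programId` selector described above:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1,"method":"getTokenAccountsByOwner","params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12",{"programId":"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA"},{"encoding":"jsonParsed"}]}' http://localhost:8899
```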
@@ -1257,7 +1301,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.3.3"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.3.8"},"id":1}
```
### getVoteAccounts
@@ -1353,6 +1397,7 @@ Before submitting, the following preflight checks are performed:
- `<string>` - fully-signed Transaction, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following field:
- `skipPreflight: <bool>` - if true, skip the preflight transaction checks (default: false)
- `preflightCommitment: <object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) level to use for preflight (default: max).
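For illustration, a request that skips the preflight checks might look like this sketch, with a placeholder standing in for the fully-signed transaction string:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["<base-58 encoded signed transaction>",{"skipPreflight":true}]}' http://localhost:8899
```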
#### Results:
@@ -1377,6 +1422,7 @@ Simulate sending a transaction
- `<string>` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed.
- `<object>` - (optional) Configuration object containing the following field:
- `sigVerify: <bool>` - if true the transaction signatures will be verified (default: false)
- `commitment: <object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) level to simulate the transaction at (default: max).
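Similarly, a simulation request that also verifies the transaction signatures might look like this sketch, again with a placeholder transaction string:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["<base-58 encoded transaction>",{"sigVerify":true}]}' http://localhost:8899
```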
#### Results:
@@ -1457,7 +1503,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
- `<string>` - account Pubkey, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed".
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
#### Results:
@@ -1468,9 +1514,9 @@ Subscribe to an account to receive notifications when the lamports or data for a
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"encoding":"base58"}]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"commitment": "single"}]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"encoding":"base64", "commitment": "single"}]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"encoding":"jsonParsed"}]}
@@ -1481,7 +1527,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:
```bash
// Binary encoding
// Base58 encoding
{
"jsonrpc": "2.0",
"method": "accountNotification",
@@ -1491,7 +1537,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
"slot": 5199307
},
"value": {
"data": "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR",
"data": ["11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", "base58"],
"executable": false,
"lamports": 33594,
"owner": "11111111111111111111111111111111",
@@ -1567,8 +1613,8 @@ Subscribe to a program to receive notifications when the lamports or data for a
- `<string>` - program_id Pubkey, as base-58 encoded string
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", or "jsonParsed".
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
#### Results:
@@ -1579,13 +1625,11 @@ Subscribe to a program to receive notifications when the lamports or data for a
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111"]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111", {"commitment": "single"}]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111", {"encoding":"base64", "commitment": "single"}]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111", {"encoding":"jsonParsed"}]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111", {"filters":[{"dataSize":80}]}]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["11111111111111111111111111111111", {"encoding":"base64", "filters":[{"dataSize":80}]}]}
// Result
{"jsonrpc": "2.0","result": 24040,"id": 1}
@@ -1594,7 +1638,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
#### Notification Format:
```bash
// Binary encoding
// Base58 encoding
{
"jsonrpc": "2.0",
"method": "programNotification",
@@ -1606,7 +1650,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
"value": {
"pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq"
"account": {
"data": "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR",
"data": ["11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", "base58"],
"executable": false,
"lamports": 33594,
"owner": "11111111111111111111111111111111",
@@ -1686,8 +1730,6 @@ Subscribe to a transaction signature to receive notification when the transactio
- `<string>` - Transaction Signature, as base-58 encoded string
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
#### Results:
- `integer` - subscription id \(needed to unsubscribe\)

View File

@@ -23,7 +23,6 @@ Explorers:
- Devnet includes a token faucet for airdrops for application testing
- Devnet may be subject to ledger resets
- Devnet typically runs a newer software version than Mainnet Beta
- Devnet may be maintained by different validators than Mainnet Beta
- Gossip entrypoint for Devnet: `devnet.solana.com:8001`
- RPC URL for Devnet: `https://devnet.solana.com`
@@ -39,14 +38,14 @@ solana config set --url https://devnet.solana.com
$ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--trusted-validator 47Sbuv6jL7CViK9F2NMW51aQGhfdpUu7WNvKyH645Rfi \
--trusted-validator dv1LfzJvDF7S1fBKpFgKoKXK5yoSosmkAdfbxBo1GqJ \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--rpc-port 8899 \
--dynamic-port-range 8000-8010 \
--entrypoint devnet.solana.com:8001 \
--expected-genesis-hash HzyuivuNXMHJKjM6q6BE2qBsR3etqW21BSvuJTpJFj9A \
--expected-shred-version 61357 \
--entrypoint entrypoint.devnet.solana.com:8001 \
--expected-genesis-hash Ap36zrBt2jLWpwUjaF48hRULVgmvSE3ViFxiQgjZX2XC \
--expected-shred-version 37460 \
--limit-ledger-size
```
@@ -62,9 +61,9 @@ The `--trusted-validator`s is operated by Solana
squash bugs or network vulnerabilities.
- Testnet tokens are **not real**
- Testnet may be subject to ledger resets.
- Testnet includes a token faucet for airdrops for application testing
- Testnet typically runs a newer software release than both Devnet and
Mainnet Beta
- Testnet may be maintained by different validators than Mainnet Beta
- Gossip entrypoint for Testnet: `35.203.170.30:8001`
- RPC URL for Testnet: `https://testnet.solana.com`
@@ -81,13 +80,14 @@ $ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQAD \
--trusted-validator ta1Uvfb7W5BRPrdGnhP9RmeCGKzBySGM1hTE4rBRy6T \
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \
--trusted-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--rpc-port 8899 \
--dynamic-port-range 8000-8010 \
--entrypoint 35.203.170.30:8001 \
--entrypoint entrypoint.testnet.solana.com:8001 \
--expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
--expected-shred-version 1579 \
--limit-ledger-size
@@ -96,6 +96,7 @@ $ solana-validator \
The identities of the `--trusted-validator`s are:
- `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana)
- `ta1Uvfb7W5BRPrdGnhP9RmeCGKzBySGM1hTE4rBRy6T` - Break RPC node (Solana)
- `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One
- `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake

View File

@@ -24,11 +24,11 @@ Even if those processes are out of scope of rent collection, all of manipulated
## Actual processing of collecting rent
Rent is due for one epoch's worth of time, and accounts always have `Account::rent_epoch` of `current_epoch + 1`.
Rent is due for one epoch's worth of time, and accounts have `Account::rent_epoch` of `current_epoch` or `current_epoch + 1` depending on the rent regime.
If the account is in the exempt regime, `Account::rent_epoch` is simply pushed to `current_epoch + 1`.
If the account is in the exempt regime, `Account::rent_epoch` is simply updated to `current_epoch`.
If the account is non-exempt, the difference between the next epoch and `Account::rent_epoch` is used to calculate the amount of rent owed by this account \(via `Rent::due()`\). Any fractional lamports of the calculation are truncated. Rent due is deducted from `Account::lamports` and `Account::rent_epoch` is updated to the next epoch. If the amount of rent due is less than one lamport, no changes are made to the account.
If the account is non-exempt, the difference between the next epoch and `Account::rent_epoch` is used to calculate the amount of rent owed by this account \(via `Rent::due()`\). Any fractional lamports of the calculation are truncated. Rent due is deducted from `Account::lamports` and `Account::rent_epoch` is updated to `current_epoch + 1` (= next epoch). If the amount of rent due is less than one lamport, no changes are made to the account.
Accounts whose balance is insufficient to satisfy the rent that would be due simply fail to load.

View File

@@ -100,6 +100,77 @@ greater security. If so, you will need to move SOL to hot accounts using our
When a user wants to deposit SOL into your exchange, instruct them to send a
transfer to the appropriate deposit address.
## Validating User-supplied Account Addresses for Withdrawals in SOL
As withdrawals are irreversible, it is good practice to validate user-supplied
account addresses before authorizing a withdrawal, to prevent accidental loss of
user funds.
The address of a normal Solana account is simply the Base58 encoding of an
actual 256-bit ed25519 public key. Because not every bit pattern is a valid
ed25519 public key, you can verify that a user-supplied account address is at
least something that could be a valid ed25519 public key.
### Java
You can check whether a normal Solana account address is valid by first decoding
the Base58 string and then ensuring the decoded bytes form a valid ed25519 public
key, like this:
The following code sample assumes you're using Maven.
`pom.xml`:
```xml
<repositories>
...
<repository>
<id>spring</id>
<url>https://repo.spring.io/libs-release/</url>
</repository>
</repositories>
...
<dependencies>
...
<dependency>
<groupId>io.github.novacrypto</groupId>
<artifactId>Base58</artifactId>
<version>0.1.3</version>
</dependency>
<dependency>
<groupId>cafe.cryptography</groupId>
<artifactId>curve25519-elisabeth</artifactId>
<version>0.1.0</version>
</dependency>
</dependencies>
```
```java
import io.github.novacrypto.base58.Base58;
import cafe.cryptography.curve25519.CompressedEdwardsY;
public class PubkeyValidator
{
public static boolean verifyPubkey(String userProvidedPubkey)
{
try {
return _verifyPubkeyInternal(userProvidedPubkey);
} catch (Exception e) {
return false;
}
}
public static boolean _verifyPubkeyInternal(String maybePubkey) throws Exception
{
byte[] bytes = Base58.base58Decode(maybePubkey);
return !(new CompressedEdwardsY(bytes)).decompress().isSmallOrder();
}
}
```
### Poll for Blocks
The easiest way to track all the deposit accounts for your exchange is to poll

View File

@@ -46,10 +46,14 @@ that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`
## System Tuning
For Linux validators, the solana repo includes a daemon to adjust system settings to optimize
performance (namely by increasing the OS UDP buffer limits, and scheduling PoH with realtime policy).
### Linux
#### Automatic
The solana repo includes a daemon to adjust system settings to optimize performance
(namely by increasing the OS UDP buffer limits, and scheduling PoH with realtime policy).
The daemon (`solana-sys-tuner`) is included in the solana binary release.
The daemon (`solana-sys-tuner`) is included in the solana binary release. After each
software upgrade, restart it *before* restarting your validator to ensure that the
latest recommended settings are applied.
To run it:
@@ -57,6 +61,53 @@ To run it:
sudo solana-sys-tuner --user $(whoami) > sys-tuner.log 2>&1 &
```
#### Manual
If you would prefer to manage system settings on your own, you may do so with
the following commands.
##### **Increase UDP buffers**
```bash
sudo bash -c "cat >/etc/sysctl.d/20-solana-udp-buffers.conf <<EOF
# Increase UDP buffer size
net.core.rmem_default = 134217728
net.core.rmem_max = 134217728
net.core.wmem_default = 134217728
net.core.wmem_max = 134217728
EOF"
```
```bash
sudo sysctl -p /etc/sysctl.d/20-solana-udp-buffers.conf
```
##### **Increased memory mapped files limit**
```bash
sudo bash -c "cat >/etc/sysctl.d/20-solana-mmaps.conf <<EOF
# Increase memory mapped files limit
vm.max_map_count = 500000
EOF"
```
```bash
sudo sysctl -p /etc/sysctl.d/20-solana-mmaps.conf
```
Add
```
LimitNOFILE=500000
```
to the `[Service]` section of your systemd service file, if you use one;
otherwise, add it to `/etc/systemd/system.conf`.
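If you run the validator under a systemd unit, one way to apply the limit is a drop-in override; the unit name `sol.service` below is only an example and should be replaced with your own unit name:

```bash
sudo mkdir -p /etc/systemd/system/sol.service.d
sudo bash -c "cat >/etc/systemd/system/sol.service.d/nofiles.conf <<EOF
[Service]
LimitNOFILE=500000
EOF"
```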
```bash
sudo systemctl daemon-reload
```
```bash
sudo bash -c "cat >/etc/security/limits.d/90-solana-nofiles.conf <<EOF
# Increase process file descriptor count limit
* - nofile 500000
EOF"
```
```bash
### Close all open sessions (log out, then log back in) ###
```
## Generate identity
Create an identity keypair for your validator by running:
@@ -202,7 +253,7 @@ solana-validator \
--ledger ~/validator-ledger \
--rpc-port 8899 \
--entrypoint devnet.solana.com:8001 \
--limit-ledger-size
--limit-ledger-size \
--log ~/solana-validator.log
```

View File

@@ -64,10 +64,15 @@ Different wallets will vary slightly in their process for this but the general
description is below.
#### Supported Wallets
Currently, staking operation are only supported by wallets that can interact
with the Solana command line tools, including Ledger Nano S and paper wallet.
Staking operations are supported by the following wallet solutions:
[Staking commands using the Solana Command Line Tools](cli/delegate-stake.md)
- SolFlare.com in conjunction with a keystore file or a Ledger Nano S. Check
out our [guide to using SolFlare](wallet-guide/solflare.md) for details.
- Solana command line tools can perform all stake operations in conjunction
with a CLI-generated keypair file wallet, a paper wallet, or with a connected
Ledger Nano S.
[Staking commands using the Solana Command Line Tools](cli/delegate-stake.md).
#### Create a Stake Account
A stake account is a different type of account from a wallet address

View File

@@ -53,8 +53,9 @@ Solana supports several types of wallets in the Solana native
command-line app as well as wallets from third-parties.
For the majority of users, we recommend using one of the
[app wallets](wallet-guide/apps.md), which will provide a more familiar user
experience rather than needing to learn command line tools.
[app wallets](wallet-guide/apps.md) or a browser-based
[web wallet](wallet-guide/web-wallets.md), which will provide a more familiar
user experience rather than needing to learn command line tools.
For advanced users or developers, the [command-line wallets](wallet-guide/cli.md)
may be more appropriate, as new features on the Solana blockchain will always be

View File

@@ -6,14 +6,9 @@ This document describes how to set up a
[Ledger Nano S hardware wallet](https://shop.ledger.com/products/ledger-nano-s)
with the [Ledger Live](https://www.ledger.com/ledger-live) software.
**NOTE: While Solana tools are fully integrated with the Ledger Nano S device,
and the Solana App can be installed on the Nano S using Ledger Live, adding and
managing wallet accounts currently requires use of our command-line tools.
Integration with Ledger Live to use Solana wallet accounts on Ledger Live
will be available in the future.**
Users may [use a Ledger Nano S with the Solana command
line tools](hardware-wallets/ledger.md).
Once the setup steps shown below are complete and the Solana app is installed
on your Nano S device, users have several options of how to
[use the Nano S to interact with the Solana Network](#interact-with-the-solana-network)
## Set up a Ledger Nano S
@@ -66,11 +61,20 @@ of the Solana App, please upgrade to version v0.2.2 by following these steps.
![Upgrade complete](/img/ledger-live-latest-version-installed.png)
## Interact with Solana network
## Interact with the Solana network
- To interact with your Ledger wallet on our live network, please see our
instructions on how to
[use a Ledger Nano S with the Solana command line tools](hardware-wallets/ledger.md).
Users can use any of the following options to sign and submit transactions with
the Ledger Nano S to interact with the Solana network:
- [SolFlare.com](https://solflare.com/) is a non-custodial web wallet built
specifically for Solana and supports basic transfers and staking operations
with the Ledger device.
Check out our guide for [using a Ledger Nano S with SolFlare](solflare.md).
- Developers and advanced users may
[use a Ledger Nano S with the Solana command line tools](hardware-wallets/ledger.md).
New wallet features are almost always supported in the native command line tools
before being supported by third-party wallets.
## Support

View File

@@ -0,0 +1,185 @@
---
title: SolFlare Web Wallet
---
## Introduction
[SolFlare.com](https://solflare.com/) is a community-created web wallet built
specifically for Solana.
SolFlare supports sending and receiving native SOL tokens as well as sending and
receiving SPL Tokens (Solana's ERC-20 equivalent).
SolFlare also supports staking of SOL tokens.
As a _non-custodial_ wallet, your private keys are not stored by the SolFlare
site itself, but rather they are stored in an encrypted
[Keystore File](#using-a-keystore-file) or on a
[Ledger Nano S hardware wallet](#using-a-ledger-nano-s-hardware-wallet).
This guide describes how to set up a wallet using SolFlare, how to send and
receive SOL tokens, and how to create and manage a stake account.
## Getting Started
Go to https://www.solflare.com in a supported browser. Most popular web browsers
should work when interacting with a Keystore File, but currently only
Chrome and Brave are supported when interacting with a Ledger Nano S.
### Using a Keystore File
#### Create a new Keystore File
To create a wallet with a Keystore file, click on "Create a Wallet" and select
"Using Keystore File". Follow the prompts to create a password which will be
used to encrypt your Keystore file, and then to download the new file to your
computer. You will be prompted to then upload the Keystore file back to the site
to verify that the download was saved correctly.
**NOTE: If you lose your Keystore file or the password used to encrypt it, any
funds in that wallet will be lost permanently. Neither the Solana team nor the
SolFlare developers can help you recover lost keys.**
You may want to consider saving a backup copy of your Keystore file on an
external drive separate from your main computer, and storing your password in a
separate location.
#### Access your wallet with a Keystore File
To use SolFlare with a previously created Keystore file, click on
"Access a Wallet" and select "Using Keystore File". If you just created a new
Keystore file, you will be taken to the Access page directly.
You will be prompted to enter the password and upload your Keystore file,
then you will be taken to the wallet interface main page.
### Using a Ledger Nano S hardware wallet
#### Initial Device Setup
To use a Ledger Nano S with SolFlare, first ensure you have
[set up your Nano S](ledger-live.md) and have [installed the latest version of
the Solana app](ledger-live.md#upgrade-to-the-latest-version-of-the-solana-app)
on your device.
#### Select a Ledger address to access
Plug in your Nano S and open the Solana app. Acknowledge a message of "Pending
Ledger Review" by tapping both buttons at once so the device screen displays
"Application is Ready".
From the SolFlare home page, click "Access a Wallet" then select "Using Ledger
Nano S". Under "Select derivation path", select the only option:
```Solana - 44`/501`/```
Note: Your browser may prompt you to ask if SolFlare may communicate with your
Ledger device. Click to allow this.
Select an address to interact with from the lower drop down box then click "Access".
The Ledger device can derive a large number of private keys and associated
public addresses. This allows you to manage and interact with an arbitrary
number of different accounts from the same device.
If you deposit funds to an address derived from your Ledger device,
make sure to access the same address when using SolFlare to be able to access
those funds. If you connect to the incorrect address,
simply click Logout and re-connect with the correct address.
## Select a Network
Solana maintains [three distinct networks](../clusters.md), each of which has
its own purpose in supporting the Solana ecosystem. Mainnet Beta is selected by
default on SolFlare, as this is the permanent network where exchanges and other
production apps are deployed. To select a different network, click on the name
of the currently selected network at the top of the wallet dashboard, either
Mainnet, Testnet or Devnet, then click on the name of the network you wish to use.
## Sending and Receiving SOL Tokens
### Receiving
To receive tokens into your wallet, someone must transfer some to your wallet's
address. The address is displayed at the top-left on the screen, and you can
click the Copy icon to copy the address and provide it to whoever is sending you
tokens. If you hold tokens in a different wallet or on an exchange, you can
withdraw to this address as well. Once the transfer is made, the balance shown
on SolFlare should update within a few seconds.
### Sending
Once you have some tokens at your wallet address, you can send them to any other
wallet address or an exchange deposit address by clicking "Transfer SOL" in the
upper-right corner. Enter the recipient address and the amount of SOL to
transfer and click "Submit". You will be prompted to confirm the details of the
transaction before you [use your key to sign the transaction](#signing-a-transaction)
and then it will be submitted to the network.
## Staking SOL Tokens
SolFlare supports creating and managing stake accounts and delegations. To learn
about how staking on Solana works in general, check out our
[Staking Guide](../staking.md).
### Create a Stake Account
You can use some of the SOL tokens in your wallet to create a new stake account.
From the wallet main page click "Staking" at the top of the page. In the upper-
right, click "Create Account". Enter the amount of SOL you want to use to
fund your new stake account. This amount will be withdrawn from your wallet
and transferred to the stake account. Do not transfer your entire wallet balance
to a stake account, as the wallet is still used to pay any transaction fees
associated with your stake account. Consider leaving at least 1 SOL in your
wallet account.
After you submit and [sign the transaction](#signing-a-transaction) you will see
your new stake account appear in the box labeled "Your Staking Accounts".
Stake accounts created on SolFlare set your wallet address as the
[staking and withdrawing authority](../staking/stake-accounts.md#understanding-account-authorities)
for your new account, which gives your wallet's key the authority to sign
for any transactions related to the new stake account.
### View your Stake Accounts
On the main Wallet dashboard page or on the Staking dashboard page, your stake
accounts will be visible in the "Your Staking Accounts" box. Stake accounts
exist at a different address from your wallet.
SolFlare will locate and display all stake accounts on the
[selected network](#select-a-network)
for which your wallet address is assigned as the
[stake authority](../staking/stake-accounts.md#understanding-account-authorities).
Stake accounts that were created outside of SolFlare will also be displayed and
can be managed as long as the wallet you logged in with is assigned as the stake
authority.
### Delegate tokens in a Stake Account
Once you have [selected a validator](../staking.md#select-a-validator), you may
delegate the tokens in one of your stake accounts to them. From the Staking
dashboard, click "Delegate" at the right side of a displayed stake account.
Select the validator you wish to delegate to from the drop down list and click
Delegate.
To un-delegate your staked tokens (also called deactivating your stake), the
process is similar. On the Staking page, at the right side of a delegated stake
account, click the "Undelegate" button and follow the prompts.
### Split a Stake Account
You may split an existing stake account into two stake accounts. Click on the
address of a stake account controlled by your wallet, and under the Actions bar,
click "Split". Specify the amount of SOL tokens you want to split. This will be
the amount of tokens in your new stake account and your existing stake account
balance will be reduced by the same amount. Splitting your stake account
allows you to delegate to multiple different validators with different amounts
of tokens. You may split a stake account as many times as you want, to create
as many stake accounts as you want.
## Signing a Transaction
Any time you submit a transaction such as sending tokens to another wallet or
delegating stake, you need to use your private key to sign the transaction so
it will be accepted by the network.
### Using a Keystore File
If you accessed your wallet using a Keystore file, you will be prompted to enter
your password any time the key is needed to sign a transaction.
### Using a Ledger Nano S
If you accessed your wallet with a Ledger Nano S, you will be prompted to confirm
the pending transaction details on your device whenever the key is needed to sign.
On the Nano S, use the left and right buttons to view and confirm all of the
transaction details. If everything looks correct, keep clicking the right button
until the screen shows "Approve". Click both buttons to approve the transaction.
If something looks incorrect, press the right button once more so the screen shows
"Reject" and press both buttons to reject the transaction. After you approve
or reject a transaction, you will see this reflected on the SolFlare page.

View File

@@ -0,0 +1,10 @@
---
title: Web Wallets
---
Solana is supported by the following web wallets.
## SolFlare
[SolFlare.com](https://solflare.com/) is a community-created non-custodial
web wallet that was built specifically for Solana. Check out our guide for
[using SolFlare](solflare.md).

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-dos"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,15 +12,15 @@ bincode = "1.3.1"
clap = "2.33.1"
log = "0.4.8"
rand = "0.7.0"
rayon = "1.3.1"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-core = { path = "../core", version = "1.3.3" }
solana-ledger = { path = "../ledger", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-net-utils = { path = "../net-utils", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
rayon = "1.4.0"
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-core = { path = "../core", version = "1.3.8" }
solana-ledger = { path = "../ledger", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-net-utils = { path = "../net-utils", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -149,6 +149,7 @@ fn main() {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let (nodes, _validators) = discover(
None,
Some(&entrypoint_addr),
None,
Some(60),

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.3.3"
version = "1.3.8"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.11.3"
indicatif = "0.15.0"
log = "0.4.8"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
tar = "0.4.28"
[lib]

View File

@@ -98,7 +98,7 @@ dependencies = [
[[package]]
name = "solana-sdk-wasm"
version = "1.3.3"
version = "1.3.7"
dependencies = [
"bincode",
"bs58",

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-sdk-wasm"
version = "1.3.3"
version = "1.3.8"
description = "Solana SDK Wasm"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.3.3"
version = "1.3.8"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,11 +16,11 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.112"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-metrics = { path = "../metrics", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-metrics = { path = "../metrics", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@@ -1,4 +1,5 @@
use clap::{crate_description, crate_name, App, Arg};
use solana_clap_utils::input_parsers::{lamports_of_sol, value_of};
use solana_faucet::{
faucet::{run_faucet, Faucet, FAUCET_PORT},
socketaddr,
@@ -34,36 +35,36 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.help("Time slice over which to limit requests to faucet"),
)
.arg(
Arg::with_name("cap")
.long("cap")
Arg::with_name("per_time_cap")
.long("per-time-cap")
.alias("cap")
.value_name("NUM")
.takes_value(true)
.help("Request limit for time slice"),
.help("Request limit for time slice, in SOL"),
)
.arg(
Arg::with_name("per_request_cap")
.long("per-request-cap")
.value_name("NUM")
.takes_value(true)
.help("Request limit for a single request, in SOL"),
)
.get_matches();
let mint_keypair = read_keypair_file(matches.value_of("keypair").unwrap())
.expect("failed to read client keypair");
let time_slice: Option<u64>;
if let Some(secs) = matches.value_of("slice") {
time_slice = Some(secs.to_string().parse().expect("failed to parse slice"));
} else {
time_slice = None;
}
let request_cap: Option<u64>;
if let Some(c) = matches.value_of("cap") {
request_cap = Some(c.to_string().parse().expect("failed to parse cap"));
} else {
request_cap = None;
}
let time_slice = value_of(&matches, "slice");
let per_time_cap = lamports_of_sol(&matches, "per_time_cap");
let per_request_cap = lamports_of_sol(&matches, "per_request_cap");
let faucet_addr = socketaddr!(0, FAUCET_PORT);
let faucet = Arc::new(Mutex::new(Faucet::new(
mint_keypair,
time_slice,
request_cap,
per_time_cap,
per_request_cap,
)));
let faucet1 = faucet.clone();

View File

@@ -62,7 +62,8 @@ pub struct Faucet {
mint_keypair: Keypair,
ip_cache: Vec<IpAddr>,
pub time_slice: Duration,
request_cap: u64,
per_time_cap: u64,
per_request_cap: Option<u64>,
pub request_current: u64,
}
@@ -70,27 +71,23 @@ impl Faucet {
pub fn new(
mint_keypair: Keypair,
time_input: Option<u64>,
request_cap_input: Option<u64>,
per_time_cap: Option<u64>,
per_request_cap: Option<u64>,
) -> Faucet {
let time_slice = match time_input {
Some(time) => Duration::new(time, 0),
None => Duration::new(TIME_SLICE, 0),
};
let request_cap = match request_cap_input {
Some(cap) => cap,
None => REQUEST_CAP,
};
let time_slice = Duration::new(time_input.unwrap_or(TIME_SLICE), 0);
let per_time_cap = per_time_cap.unwrap_or(REQUEST_CAP);
Faucet {
mint_keypair,
ip_cache: Vec::new(),
time_slice,
request_cap,
per_time_cap,
per_request_cap,
request_current: 0,
}
}
pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
(self.request_current + request_amount) <= self.request_cap
pub fn check_time_request_limit(&mut self, request_amount: u64) -> bool {
(self.request_current + request_amount) <= self.per_time_cap
}
pub fn clear_request_count(&mut self) {
@@ -116,7 +113,15 @@ impl Faucet {
to,
blockhash,
} => {
if self.check_request_limit(lamports) {
if let Some(cap) = self.per_request_cap {
if lamports > cap {
return Err(Error::new(
ErrorKind::Other,
format!("request too large; req: {} cap: {}", lamports, cap),
));
}
}
if self.check_time_request_limit(lamports) {
self.request_current += lamports;
datapoint_info!(
"faucet-airdrop",
@@ -135,7 +140,7 @@ impl Faucet {
ErrorKind::Other,
format!(
"token limit reached; req: {} current: {} cap: {}",
lamports, self.request_current, self.request_cap
lamports, self.request_current, self.per_time_cap
),
))
}
@@ -248,14 +253,15 @@ pub fn request_airdrop_transaction(
pub fn run_local_faucet(
mint_keypair: Keypair,
sender: Sender<SocketAddr>,
request_cap_input: Option<u64>,
per_time_cap: Option<u64>,
) {
thread::spawn(move || {
let faucet_addr = socketaddr!(0, 0);
let faucet = Arc::new(Mutex::new(Faucet::new(
mint_keypair,
None,
request_cap_input,
per_time_cap,
None,
)));
run_faucet(faucet, faucet_addr, Some(sender));
});
@@ -312,18 +318,18 @@ mod tests {
use std::time::Duration;
#[test]
fn test_check_request_limit() {
fn test_check_time_request_limit() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, Some(3));
assert!(faucet.check_request_limit(1));
let mut faucet = Faucet::new(keypair, None, Some(3), None);
assert!(faucet.check_time_request_limit(1));
faucet.request_current = 3;
assert!(!faucet.check_request_limit(1));
assert!(!faucet.check_time_request_limit(1));
}
#[test]
fn test_clear_request_count() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, None);
let mut faucet = Faucet::new(keypair, None, None, None);
faucet.request_current += 256;
assert_eq!(faucet.request_current, 256);
faucet.clear_request_count();
@@ -333,7 +339,7 @@ mod tests {
#[test]
fn test_add_ip_to_cache() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, None);
let mut faucet = Faucet::new(keypair, None, None, None);
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
assert_eq!(faucet.ip_cache.len(), 0);
faucet.add_ip_to_cache(ip);
@@ -344,7 +350,7 @@ mod tests {
#[test]
fn test_clear_ip_cache() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, None);
let mut faucet = Faucet::new(keypair, None, None, None);
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
assert_eq!(faucet.ip_cache.len(), 0);
faucet.add_ip_to_cache(ip);
@@ -359,9 +365,10 @@ mod tests {
let keypair = Keypair::new();
let time_slice: Option<u64> = None;
let request_cap: Option<u64> = None;
let faucet = Faucet::new(keypair, time_slice, request_cap);
let faucet = Faucet::new(keypair, time_slice, request_cap, Some(100));
assert_eq!(faucet.time_slice, Duration::new(TIME_SLICE, 0));
assert_eq!(faucet.request_cap, REQUEST_CAP);
assert_eq!(faucet.per_time_cap, REQUEST_CAP);
assert_eq!(faucet.per_request_cap, Some(100));
}
#[test]
@@ -376,7 +383,7 @@ mod tests {
let mint = Keypair::new();
let mint_pubkey = mint.pubkey();
let mut faucet = Faucet::new(mint, None, None);
let mut faucet = Faucet::new(mint, None, None, None);
let tx = faucet.build_airdrop_transaction(request).unwrap();
let message = tx.message();
@@ -392,8 +399,15 @@ mod tests {
let instruction: SystemInstruction = deserialize(&message.instructions[0].data).unwrap();
assert_eq!(instruction, SystemInstruction::Transfer { lamports: 2 });
// Test per-time request cap
let mint = Keypair::new();
faucet = Faucet::new(mint, None, Some(1));
faucet = Faucet::new(mint, None, Some(1), None);
let tx = faucet.build_airdrop_transaction(request);
assert!(tx.is_err());
// Test per-request cap
let mint = Keypair::new();
faucet = Faucet::new(mint, None, None, Some(1));
let tx = faucet.build_airdrop_transaction(request);
assert!(tx.is_err());
}
@@ -421,7 +435,7 @@ mod tests {
LittleEndian::write_u16(&mut expected_vec_with_length, expected_bytes.len() as u16);
expected_vec_with_length.extend_from_slice(&expected_bytes);
let mut faucet = Faucet::new(keypair, None, None);
let mut faucet = Faucet::new(keypair, None, None, None);
let response = faucet.process_faucet_request(&bytes);
let response_vec = response.unwrap().to_vec();
assert_eq!(expected_vec_with_length, response_vec);

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
PERF_LIBS_VERSION=v0.19.1
PERF_LIBS_VERSION=v0.19.3
VERSION=$PERF_LIBS_VERSION-1
set -e

View File

@@ -10,10 +10,11 @@ fetch_program() {
declare name=$1
declare version=$2
declare address=$3
declare loader=$4
declare so=spl_$name-$version.so
genesis_args+=(--bpf-program "$address" "$so")
genesis_args+=(--bpf-program "$address" "$loader" "$so")
if [[ -r $so ]]; then
return
@@ -36,8 +37,8 @@ fetch_program() {
}
fetch_program token 1.0.0 TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o
fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo
fetch_program token 2.0.3 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader1111111111111111111111111111111111
fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111
echo "${genesis_args[@]}" > spl-genesis-args.sh

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.3.3"
version = "1.3.8"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.3" }
solana-budget-program = { path = "../programs/budget", version = "1.3.3" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-vest-program = { path = "../programs/vest", version = "1.3.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.8" }
solana-budget-program = { path = "../programs/budget", version = "1.3.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-vest-program = { path = "../programs/vest", version = "1.3.8" }
[lib]
crate-type = ["lib"]

View File

@@ -1,7 +1,3 @@
use solana_sdk::{
clock::Epoch, genesis_config::OperatingMode, inflation::Inflation, pubkey::Pubkey,
};
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
@@ -13,6 +9,13 @@ extern crate solana_vest_program;
use log::*;
use solana_runtime::bank::{Bank, EnteredEpochCallback};
use solana_sdk::{
clock::{Epoch, GENESIS_EPOCH},
entrypoint_native::{ErasedProcessInstructionWithContext, ProcessInstructionWithContext},
genesis_config::OperatingMode,
inflation::Inflation,
pubkey::Pubkey,
};
pub fn get_inflation(operating_mode: OperatingMode, epoch: Epoch) -> Option<Inflation> {
match operating_mode {
@@ -36,80 +39,122 @@ pub fn get_inflation(operating_mode: OperatingMode, epoch: Epoch) -> Option<Infl
OperatingMode::Stable => match epoch {
// No inflation at epoch 0
0 => Some(Inflation::new_disabled()),
// Inflation starts
// The epoch of Epoch::MAX is a placeholder and is expected to be reduced in
// a future hard fork.
// Inflation starts. The epoch of Epoch::MAX is a placeholder and is
// expected to be reduced in a future hard fork.
Epoch::MAX => Some(Inflation::default()),
_ => None,
},
}
}
pub fn get_programs(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(String, Pubkey)>> {
match operating_mode {
OperatingMode::Development => {
if epoch == 0 {
Some(vec![
// Enable all Stable programs
solana_bpf_loader_program!(),
solana_vest_program!(),
// Programs that are only available in Development mode
solana_budget_program!(),
solana_exchange_program!(),
])
} else {
None
}
enum Program {
Native((String, Pubkey)),
BuiltinLoader((String, Pubkey, ProcessInstructionWithContext)),
}
impl std::fmt::Debug for Program {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
#[derive(Debug)]
enum Program {
Native((String, Pubkey)),
BuiltinLoader((String, Pubkey, String)),
}
OperatingMode::Stable => {
if epoch == std::u64::MAX - 1 {
// The epoch of std::u64::MAX - 1 is a placeholder and is expected to be reduced in
// a future hard fork.
Some(vec![solana_bpf_loader_program!()])
} else if epoch == std::u64::MAX {
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in a
// future hard fork.
Some(vec![solana_vest_program!()])
} else {
None
let program = match self {
crate::Program::Native((string, pubkey)) => Program::Native((string.clone(), *pubkey)),
crate::Program::BuiltinLoader((string, pubkey, instruction)) => {
let erased: ErasedProcessInstructionWithContext = *instruction;
Program::BuiltinLoader((string.clone(), *pubkey, format!("{:p}", erased)))
}
}
OperatingMode::Preview => {
if epoch == 0 {
Some(vec![solana_bpf_loader_program!()])
} else if epoch == std::u64::MAX {
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in a
// future hard fork.
Some(vec![solana_vest_program!()])
} else {
None
}
}
};
write!(f, "{:?}", program)
}
}
// given operating_mode, return the entire set of enabled programs
fn get_programs(operating_mode: OperatingMode) -> Vec<(Program, Epoch)> {
match operating_mode {
OperatingMode::Development => vec![
// Programs used for testing
Program::BuiltinLoader(solana_bpf_loader_program!()),
Program::BuiltinLoader(solana_bpf_loader_deprecated_program!()),
Program::Native(solana_vest_program!()),
Program::Native(solana_budget_program!()),
Program::Native(solana_exchange_program!()),
]
.into_iter()
.map(|program| (program, GENESIS_EPOCH))
.collect::<Vec<_>>(),
OperatingMode::Preview => vec![
(
Program::BuiltinLoader(solana_bpf_loader_deprecated_program!()),
GENESIS_EPOCH,
),
(Program::BuiltinLoader(solana_bpf_loader_program!()), 89),
],
OperatingMode::Stable => vec![
(
Program::BuiltinLoader(solana_bpf_loader_deprecated_program!()),
34,
),
(
Program::BuiltinLoader(solana_bpf_loader_program!()),
// The epoch of std::u64::MAX is a placeholder and is expected
// to be reduced in a future cluster update.
Epoch::MAX,
),
],
}
}
pub fn get_native_programs_for_genesis(operating_mode: OperatingMode) -> Vec<(String, Pubkey)> {
let mut native_programs = vec![];
for (program, start_epoch) in get_programs(operating_mode) {
if let Program::Native((string, key)) = program {
if start_epoch == GENESIS_EPOCH {
native_programs.push((string, key));
}
}
}
native_programs
}
pub fn get_entered_epoch_callback(operating_mode: OperatingMode) -> EnteredEpochCallback {
Box::new(move |bank: &mut Bank| {
info!(
"Entering epoch {} with operating_mode {:?}",
bank.epoch(),
operating_mode
);
Box::new(move |bank: &mut Bank, initial: bool| {
// Be careful about adding arbitrary logic here; this closure should be idempotent and can be
// called at an arbitrary point in an epoch, not only at epoch boundaries.
// That's because it needs to be executed immediately after snapshot restoration,
// in addition to the usual epoch boundaries.
// In other words, this callback initializes some skip(serde) fields, regardless of whether
// the bank is frozen or not.
if let Some(inflation) = get_inflation(operating_mode, bank.epoch()) {
info!("Entering new epoch with inflation {:?}", inflation);
bank.set_inflation(inflation);
}
if let Some(new_programs) = get_programs(operating_mode, bank.epoch()) {
for (name, program_id) in new_programs.iter() {
info!("Registering {} at {}", name, program_id);
bank.add_native_program(name, program_id);
for (program, start_epoch) in get_programs(operating_mode) {
let should_populate =
initial && bank.epoch() >= start_epoch || !initial && bank.epoch() == start_epoch;
if should_populate {
match program {
Program::Native((name, program_id)) => {
bank.add_native_program(&name, &program_id);
}
Program::BuiltinLoader((
name,
program_id,
process_instruction_with_context,
)) => {
bank.add_builtin_loader(
&name,
program_id,
process_instruction_with_context,
);
}
}
}
}
if OperatingMode::Stable == operating_mode {
bank.set_cross_program_support(bank.epoch() >= 63);
} else {
bank.set_cross_program_support(true);
}
})
}
@@ -118,11 +163,31 @@ mod tests {
use super::*;
use std::collections::HashSet;
fn do_test_uniqueness(programs: Vec<(Program, Epoch)>) {
let mut unique_ids = HashSet::new();
let mut unique_names = HashSet::new();
let mut prev_start_epoch = GENESIS_EPOCH;
for (program, next_start_epoch) in programs {
assert!(next_start_epoch >= prev_start_epoch);
match program {
Program::Native((name, id)) => {
assert!(unique_ids.insert(id));
assert!(unique_names.insert(name));
}
Program::BuiltinLoader((name, id, _)) => {
assert!(unique_ids.insert(id));
assert!(unique_names.insert(name));
}
}
prev_start_epoch = next_start_epoch;
}
}
#[test]
fn test_id_uniqueness() {
let mut unique = HashSet::new();
let ids = get_programs(OperatingMode::Development, 0).unwrap();
assert!(ids.into_iter().all(move |id| unique.insert(id)));
fn test_uniqueness() {
do_test_uniqueness(get_programs(OperatingMode::Development));
do_test_uniqueness(get_programs(OperatingMode::Preview));
do_test_uniqueness(get_programs(OperatingMode::Stable));
}
#[test]
@@ -136,11 +201,15 @@ mod tests {
#[test]
fn test_development_programs() {
assert_eq!(get_programs(OperatingMode::Development).len(), 5);
}
#[test]
fn test_native_development_programs() {
assert_eq!(
get_programs(OperatingMode::Development, 0).unwrap().len(),
4
get_native_programs_for_genesis(OperatingMode::Development).len(),
3
);
assert_eq!(get_programs(OperatingMode::Development, 1), None);
}
#[test]
@@ -158,8 +227,11 @@ mod tests {
#[test]
fn test_softlaunch_programs() {
assert_eq!(get_programs(OperatingMode::Stable, 1), None);
assert!(get_programs(OperatingMode::Stable, std::u64::MAX - 1).is_some());
assert!(get_programs(OperatingMode::Stable, std::u64::MAX).is_some());
assert!(!get_programs(OperatingMode::Stable).is_empty());
}
#[test]
fn test_debug() {
assert!(!format!("{:?}", get_programs(OperatingMode::Development)).is_empty());
}
}

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.3"
version = "1.3.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,15 +15,15 @@ chrono = "0.4"
serde = "1.0.112"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.3" }
solana-ledger = { path = "../ledger", version = "1.3.3" }
solana-logger = { path = "../logger", version = "1.3.3" }
solana-runtime = { path = "../runtime", version = "1.3.3" }
solana-sdk = { path = "../sdk", version = "1.3.3" }
solana-stake-program = { path = "../programs/stake", version = "1.3.3" }
solana-vote-program = { path = "../programs/vote", version = "1.3.3" }
solana-version = { path = "../version", version = "1.3.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.8" }
solana-ledger = { path = "../ledger", version = "1.3.8" }
solana-logger = { path = "../logger", version = "1.3.8" }
solana-runtime = { path = "../runtime", version = "1.3.8" }
solana-sdk = { path = "../sdk", version = "1.3.8" }
solana-stake-program = { path = "../programs/stake", version = "1.3.8" }
solana-vote-program = { path = "../programs/vote", version = "1.3.8" }
solana-version = { path = "../version", version = "1.3.8" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -12,7 +12,7 @@ use solana_ledger::{
use solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE;
use solana_sdk::{
account::Account,
bpf_loader, clock,
clock,
epoch_schedule::EpochSchedule,
fee_calculator::FeeRateGovernor,
genesis_config::{GenesisConfig, OperatingMode},
@@ -353,7 +353,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.long("bpf-program")
.value_name("ADDRESS BPF_PROGRAM.SO")
.takes_value(true)
.number_of_values(2)
.number_of_values(3)
.multiple(true)
.help("Install a BPF program at the given address"),
)
@@ -468,7 +468,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
);
let native_instruction_processors =
solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(Vec::new);
solana_genesis_programs::get_native_programs_for_genesis(operating_mode);
let inflation = solana_genesis_programs::get_inflation(operating_mode, 0).unwrap();
let mut genesis_config = GenesisConfig {
@@ -553,14 +553,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
if let Some(values) = matches.values_of("bpf_program") {
let values: Vec<&str> = values.collect::<Vec<_>>();
for address_program in values.chunks(2) {
match address_program {
[address, program] => {
for address_loader_program in values.chunks(3) {
match address_loader_program {
[address, loader, program] => {
let address = address.parse::<Pubkey>().unwrap_or_else(|err| {
eprintln!("Error: invalid address {}: {}", address, err);
process::exit(1);
});
let loader = loader.parse::<Pubkey>().unwrap_or_else(|err| {
eprintln!("Error: invalid loader {}: {}", loader, err);
process::exit(1);
});
let mut program_data = vec![];
File::open(program)
.and_then(|mut file| file.read_to_end(&mut program_data))
@@ -574,7 +579,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
lamports: genesis_config.rent.minimum_balance(program_data.len()),
data: program_data,
executable: true,
owner: bpf_loader::id(),
owner: loader,
rent_epoch: 0,
},
);

Some files were not shown because too many files have changed in this diff.