Compare commits

...

559 Commits

SHA1 Message Date
49ad5e0b69 Preserve extra dependency annotations (optional=,features=) during version bump (#3811) 2019-04-16 15:12:03 -07:00
3c49d48666 Selectively deploy beta testnet to GCE/AWS or both clouds (#3805) (#3806) 2019-04-16 11:58:12 -07:00
2fe93101cc Remove stake from ./net sanity ephemeral validator (#3799) 2019-04-15 22:36:29 -07:00
e90d97e244 Revert "Revert "disable staking of blockstreamer node""
This reverts commit 03da63b41b.
2019-04-15 20:11:29 -07:00
819a0c5c7e Update testnet automation script to reflect changes in metrics (#3779) 2019-04-15 18:56:04 -07:00
7afd8644b3 Clarify release instructions (#3792) 2019-04-15 19:05:15 -06:00
68fc303b9b Rework Accounts for fast squash, hashing state and checkpoint recovery. (#3613)
* accounts rewrite

* ignore grow tests

* skip duplicate roots

* allow for a root race

* logger

* accounts_index tests

* tests

* tests
2019-04-15 17:15:50 -07:00
2bbed7727f Wait a bit for the funding transactions to go through (#3788) 2019-04-15 16:30:00 -07:00
63b1fd3675 Correctly fill out IP address for rpc ports (#3791) 2019-04-15 16:21:06 -07:00
3fcf03ff3e Refactor LocalCluster and add support for listener nodes (#3790) 2019-04-15 15:27:45 -07:00
80f3568062 Upgrade to Rust 1.34.0 (#3781)
* Upgrade to Rust 1.34.0

* Remove redundant closures

Thanks Clippy!
2019-04-15 15:56:08 -06:00
3e1214a871 Don't add reviewers to draft PRs (#3780) 2019-04-15 15:03:44 -06:00
149d809e86 Minor cli help cleanup (#3786) 2019-04-15 13:36:14 -07:00
784dbb00ab Bump reqwest from 0.9.14 to 0.9.15 (#3785)
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.14 to 0.9.15.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.9.14...v0.9.15)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-15 13:31:33 -07:00
87aef92e71 Fix up bash array handling (#3771) 2019-04-15 13:25:44 -07:00
d026ebb83a Use tvu_peers() since validators no longer run an RPC port by default (#3784) 2019-04-15 13:25:09 -07:00
64c6f05da2 persist set_root() and use it in blocktree_processor to limit squashes (#3782)
* rename locktower's slot to epoch

* persist set_root() and use it in blocktree_processor to limit squashes
2019-04-15 13:12:28 -07:00
8963500aa8 Bump generic-array from 0.12.0 to 0.13.0
Bumps [generic-array](https://github.com/fizyk20/generic-array) from 0.12.0 to 0.13.0.
- [Release notes](https://github.com/fizyk20/generic-array/releases)
- [Changelog](https://github.com/fizyk20/generic-array/blob/master/CHANGELOG.md)
- [Commits](https://github.com/fizyk20/generic-array/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-15 12:35:06 -06:00
175c0090de Bump hashbrown from 0.2.0 to 0.2.1
Bumps [hashbrown](https://github.com/Amanieu/hashbrown) from 0.2.0 to 0.2.1.
- [Release notes](https://github.com/Amanieu/hashbrown/releases)
- [Changelog](https://github.com/Amanieu/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Amanieu/hashbrown/compare/v0.2.0...v0.2.1)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-15 12:18:44 -06:00
5c4689a326 rename locktower's slot to epoch (#3776) 2019-04-15 10:46:14 -07:00
5e2831f09e Disable cluster restart attempt 2019-04-15 09:59:53 -07:00
666882fbbd -r does not require an argument 2019-04-15 09:40:34 -07:00
6c9fba058b Reenable validator sanity check for testnet-{beta,edge} 2019-04-15 08:58:29 -07:00
0767c0c07f Add DNS resolution to cli tools 2019-04-14 21:25:46 -07:00
6859907df9 more rigorous erasure constants, comments (#3766)
* more rigorous erasure constants, comments

* new header size means new golden
2019-04-14 21:10:09 -07:00
de52747950 remove max_tick_height replicode (#3765) 2019-04-14 19:15:31 -07:00
bd1db51e07 delete db_window.rs, move contents to window_service, clean up process_blobs (#3746) 2019-04-14 18:52:05 -07:00
dd005fb50e fix broadcast to *always* call erasure generation, simplify generator, test slot reset better (#3764) 2019-04-14 18:12:37 -07:00
542bafeb71 groom packet.rs, add blob.data alignment (#3763) 2019-04-14 17:30:08 -07:00
e57a0ab05d test some bits (#3762) 2019-04-14 17:10:30 -07:00
2c745ce108 Shorten recv wait when there are buffered packets in banking stage (#3757)
- packets are buffered on leader rotation, when the next leader is
  unknown
- shortening the wait allows the banking stage to poll for next
  leader more frequently
2019-04-14 12:34:07 -07:00
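A minimal sketch of that pattern with std channels; the durations and names here are illustrative, not the actual banking-stage code:

```rust
use std::sync::mpsc::{channel, RecvTimeoutError};
use std::time::Duration;

fn main() {
    let (_sender, receiver) = channel::<Vec<u8>>();
    // Pretend some packets were buffered during a leader transition.
    let buffered_packets: Vec<Vec<u8>> = vec![vec![0u8; 8]];

    // Wait a shorter time when packets are already buffered, so the loop
    // can re-check the leader schedule more frequently instead of blocking.
    let timeout = if buffered_packets.is_empty() {
        Duration::from_millis(100)
    } else {
        Duration::from_millis(10)
    };

    match receiver.recv_timeout(timeout) {
        Ok(packets) => println!("received {} bytes", packets.len()),
        Err(RecvTimeoutError::Timeout) => println!("timed out; poll for next leader"),
        Err(RecvTimeoutError::Disconnected) => println!("channel closed"),
    }
}
```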
f6aa90e193 Add fullnode --dynamic-port-range option 2019-04-14 07:08:29 -07:00
c7a7d6db84 Use |solana-keygen pubkey| instead of |solana-wallet address|
Same end result but solana-keygen is a smaller program that builds
faster
2019-04-14 07:08:29 -07:00
2277a39dd2 Default solana-gossip log-level to 'info' 2019-04-14 07:07:15 -07:00
ee35ed5250 Refactored buffered packet forwarding code (#3750)
- Added unit tests
- Don't consume packets if bank is not known
2019-04-13 23:19:54 -07:00
92b5e131fe Name sigverify threads 2019-04-13 11:24:36 -07:00
1f35779821 Add solana-install usage info 2019-04-12 17:08:18 -07:00
5b438d917d Create fullnode-x.sh wrapper script for use with |solana-install run ...| 2019-04-12 17:08:18 -07:00
bf4d5745c9 Symlink the entire release to preserve relative paths from bin/ 2019-04-12 17:08:18 -07:00
1e8f83a74a Use a better name for new api 2019-04-12 14:58:22 -07:00
1db80d79fc Update get recent blockhashes to return confirmed blockhashes only 2019-04-12 14:58:22 -07:00
1dac4c33b8 Change sigverify counter from entries to packets
batch or entry counts are kind of useless since each can contain a
variable number of packets
2019-04-12 13:19:46 -07:00
656b3139e3 see perf-libs all the time (#3748) 2019-04-12 08:38:14 -07:00
8b08fe265a AppendVec PR using "/tmp" as the default directory and a random file (#3743)
* AppendVec with raw pointers
* fixed test target directory
2019-04-12 04:30:17 -07:00
29dc139a22 shellcheck 2019-04-11 17:39:04 -07:00
44ebfa736a Don't forward buffered packet to the same node (#3712)
- instead, process the packets
2019-04-11 17:23:45 -07:00
b001685e7b Added missing feature flag for erasure (#3741) 2019-04-11 15:25:32 -07:00
ca6290b117 remove wallet stuff, bootstrap node is already staked (#3744) 2019-04-11 15:16:38 -07:00
767e0a201e stak*->vote (#3740) 2019-04-11 14:52:56 -07:00
877ec08280 Send recent votes in Vote Transactions (#3734) 2019-04-11 14:48:36 -07:00
485013b7ce Revert "AppendVecs that can return references and read/append without locks (#3713)"
This reverts commit f669ae5868.
2019-04-11 14:47:30 -07:00
efd19b07e7 implement erasure-based recovery inside blocktree (#3739)
* implement recover in blocktree

* erasures metric

* erasure metrics only

* fixup
2019-04-11 14:14:57 -07:00
d31989f878 CustomError from Vec->u32 2019-04-11 13:59:48 -07:00
f669ae5868 AppendVecs that can return references and read/append without locks (#3713)
* AppendVec with raw pointers

* appendvecs

* imports

* review

* review comments

* clippy
2019-04-11 13:16:56 -07:00
a28c3b0e9a Consume Bank in BankClient
This will allow BankClient to spin up a thread to use the Bank.
It'll also ease the transaction from BankClient to ThinClient since
it won't let you depend on Bank.

Drawback: the transition from Bank to BankClient will be harder
because the Bank methods are inaccessible.
2019-04-11 12:16:33 -07:00
0aa05158c9 Adjust noop/failure program names to be consistent with all other programs 2019-04-11 11:59:56 -07:00
787dc5748a Fixed DuplicateSigs (#3727)
* Fixed DuplicateSigs by not recording errors in signature cache of bank
2019-04-11 11:51:34 -07:00
8ada4bfd1f Remove test now covered by Vote crate 2019-04-11 10:53:11 -07:00
5d4624e75f Use Bank::add_instruction_processor to bypass manual build step 2019-04-11 10:53:11 -07:00
2f1b0bf4f5 Add solana-install deployments to the testnets 2019-04-11 10:03:35 -07:00
e1d5bb1a26 add redundant broadcast (#3724)
* add redundant broadcast

* crank up to full redundancy

* Update broadcast_stage.rs

* Update broadcast_stage.rs

* Update broadcast_stage.rs

* Update broadcast_stage.rs
2019-04-11 09:15:17 -07:00
d0f46d6a8a Cleanup client traits and create super trait (#3728) 2019-04-11 00:25:14 -07:00
4b6c0198ad reset coding generator on slot boundaries (#3726) 2019-04-10 18:18:55 -07:00
f1e7237c09 vote_api cleanup (#3710)
* vote_api cleanup

* fixups

* fixup

* remove unused code

* revert removal of serialize and deserialize

* ...

* increase coverage, bootstrap staking

* Sagar's STAKE to my VOTE
2019-04-10 17:52:47 -07:00
1b5845ac3e Fix getting votes from gossip (#3723) 2019-04-10 17:16:08 -07:00
58a049ebe5 pick up logs as artifacts (#3721) 2019-04-10 17:05:39 -07:00
c0808d01f8 Add tests 2019-04-10 15:51:00 -07:00
7fd5e51168 Make sure bank 0 is votable and correctly designate signer 2019-04-10 15:51:00 -07:00
d2ea782372 Always use bootstrap vote account for leader 2019-04-10 15:51:00 -07:00
e6f02d1a10 Use latest release for testnet doc (#3711)
* Use latest release for testnet doc

* Clean up markdown
2019-04-10 15:01:37 -07:00
894135a084 Less pub in PohRecorder 2019-04-10 12:50:45 -07:00
df9cf92782 testnet-participation.md is now /implemented/ 2019-04-10 12:26:47 -07:00
f243a96e01 Remove testnet/metrics server debug info from book 2019-04-10 12:26:47 -07:00
842d146b0d Limit USE_INSTALL scope 2019-04-10 11:50:23 -07:00
81d43c57a2 Add missing feature flags to gossip (#3708) 2019-04-10 06:52:46 -07:00
7da4142d33 Process votes from gossip only in leader node (#3707) 2019-04-09 22:06:32 -07:00
88e5b14afc Exit faster on sanity failures 2019-04-09 17:16:15 -07:00
0b95a5c121 Include blockstreamer node in sanity 2019-04-09 16:52:57 -07:00
62c28a8592 Bump reqwest from 0.9.13 to 0.9.14
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.13 to 0.9.14.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.9.13...v0.9.14)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-09 16:22:17 -07:00
b80c6840da Metrics dashboard publish script updates for influx cloud 2019-04-09 15:44:01 -07:00
003fd6545c Logging for unexpected validator errors (#3697) 2019-04-09 15:05:43 -07:00
393ed978d1 Update testnet monitor to use influx cloud end point (#3700) 2019-04-09 14:56:21 -07:00
2c93062f54 Improve banking_stage performance messages
Use transaction count instead of batch count,
and set the recv_start from when we finished processing
the previous batch to get a more accurate number.
2019-04-09 14:54:12 -07:00
7b2abf2087 Update count for the right store (#3683) 2019-04-09 13:48:13 -07:00
a5254a3f7a Add TESTNET_TAG Env var to buildkite (#3692)
* Add TESTNET_TAG Env var to buildkite
2019-04-09 13:00:45 -07:00
dc6c34da5d Fast-track vote signature verification and processing (#3695) 2019-04-09 12:57:12 -07:00
d4eebcc2aa Check for frozen in confirm_forks (#3678) 2019-04-09 11:45:38 -07:00
4f232cbc27 Make MAX_RECENT_BLOCKHASHES <= MAX_HASH_AGE_IN_SECONDS (#3679)
* Make MAX_RECENT_BLOCKHASHES == MAX_HASH_AGE_IN_SECONDS
2019-04-09 11:45:25 -07:00
76e524ae48 Remove check for 0 additional nodes
Network with 1 leader is valid.
2019-04-09 11:16:55 -07:00
6ac919c71a Set warn log level only for perf testnets 2019-04-09 11:09:16 -07:00
1ba4806f8c Document recent -z and -x command-line arg changes 2019-04-09 10:39:55 -07:00
20a2c59b70 Reduce udp read/write buffer sizes
With Ubuntu 18.04, these large values cause packet errors and mess up the system.
2019-04-08 15:21:45 -07:00
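For illustration, shrinking a UDP socket's kernel buffers could look like this with the socket2 crate (an assumption; the sizes below are arbitrary):

```rust
use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
    // Request smaller kernel read/write buffers for this socket.
    socket.set_recv_buffer_size(64 * 1024)?;
    socket.set_send_buffer_size(64 * 1024)?;
    println!("rcvbuf = {} bytes", socket.recv_buffer_size()?);
    Ok(())
}
```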
6540fa9121 Add sleep for check_signature 2019-04-08 15:09:02 -07:00
21287ba554 Bump clap from 2.32.0 to 2.33.0
Bumps [clap](https://github.com/clap-rs/clap) from 2.32.0 to 2.33.0.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-08 12:56:44 -07:00
7295a84d69 Bump bincode from 1.1.2 to 1.1.3 (#3672)
Bumps [bincode](https://github.com/TyOverby/bincode) from 1.1.2 to 1.1.3.
- [Release notes](https://github.com/TyOverby/bincode/releases)
- [Commits](https://github.com/TyOverby/bincode/compare/v1.1.2...v1.1.3)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-08 12:55:18 -07:00
483cc2fa4e Support old repair strategy for repairing slots in a range for supporting replicators (#3665) 2019-04-08 12:46:23 -07:00
e551f6b552 Support settable drone lamport cap (#3675) 2019-04-08 12:37:01 -07:00
44b391096d Configurable local cluster native processors (#3676) 2019-04-08 11:15:58 -07:00
d45d8e9670 s/credit/read/ 2019-04-08 08:39:59 -07:00
88bda58836 remove unused (#3674) 2019-04-08 04:50:42 -07:00
79bf3cf70d add rewards math (#3673)
* add rewards math

* fixup
2019-04-07 21:45:28 -07:00
72b7419e1c Define list of valid cloud regions for GCE and AWS (#3670) 2019-04-07 14:29:09 -07:00
7baff0920c Propagate cloud env variables to buildkite job 2019-04-07 11:48:25 -07:00
d9ecc278b4 Configure cloud zones and nodes from buildkite for beta testnet (#3666) 2019-04-07 08:25:34 -07:00
0904df327d Parallelize cloud node deployment commands in case of multiple zones (#3657) 2019-04-07 08:13:48 -07:00
444e87f888 Fix metric (#3664) 2019-04-06 21:57:01 -07:00
20aa4434e2 Fix repair (#3581)
Add DetachedHeads repair protocol

Add DetachedHeads repair test

Repair starting from root
2019-04-06 19:41:22 -07:00
03da63b41b Revert "disable staking of blockstreamer node"
This reverts commit 42d8a7d9e7.
2019-04-06 08:57:06 -07:00
878a842611 Move append_vec bench to the crate with append_vec (#3650)
* Move append_vec bench to the crate with append_vec

* Use black_box to tell the compiler not to optimize away test data

```
pub fn black_box<T>(dummy: T) -> T {
    unsafe {
        let ret = std::ptr::read_volatile(&dummy);
        std::mem::forget(dummy);
        ret
    }
}
```

* Revert "Use black_box to tell the compiler not to optimize away test data"

This reverts commit 5610b8ee95.

* Use black_box to tell the compiler not to optimize away test data

* Create bench directories
2019-04-06 07:18:56 -06:00
f3eda38b65 Fix broken lockout doubling 2019-04-05 23:15:46 -07:00
68e21911eb Remove redundant transfer_signed 2019-04-05 22:04:32 -07:00
95cc36af96 Impl SyncClient and AsyncClient for ThinClient 2019-04-05 22:04:32 -07:00
d3c4e4f7b3 Update docs 2019-04-05 22:09:29 -06:00
4068612300 Remove RpcSignatureStatus 2019-04-05 22:09:29 -06:00
f349c1f0dc Get everything off RpcSignatureStatus 2019-04-05 22:09:29 -06:00
90c1300bb6 Plumb TransactionError through Rpc 2019-04-05 22:09:29 -06:00
569a289a6f Cargo.lock, serde_derive 2019-04-05 22:09:29 -06:00
89efe67e73 Fix the ordering of beta testnet zones 2019-04-05 17:53:31 -07:00
c3654b0f65 Add sdk benches to ci
And add `-a` to `tee` for more reliable copypasta.
2019-04-05 17:58:11 -06:00
f5f4434e0a Remove unnecessary lock in sigverify 2019-04-05 16:57:45 -07:00
d30049b8eb test for debit of TX fees on full process_transaction() (#3643)
* fix double debit of TX fees

* add test that fails when removing that line

* put that line back in

* comments
2019-04-05 16:55:58 -07:00
42d8a7d9e7 disable staking of blockstreamer node
- this will stop it from entering the leader rotation schedule
2019-04-05 16:48:52 -07:00
adcda3c715 Remove airdrop dependency from replicators 2019-04-05 16:11:39 -07:00
a5b5248a09 move vote_accounts up (#3647) 2019-04-05 14:23:00 -07:00
3fcca5bc0a Suppress shellcheck array expansion warnings 2019-04-05 13:25:14 -07:00
9d4c6f6aaa Appease shellcheck 2019-04-05 13:25:14 -07:00
d570b08134 Clean up array expansion 2019-04-05 13:25:14 -07:00
8b6d7129f3 Fix option flag lettering 2019-04-05 13:25:14 -07:00
50444181c5 Fix arg array ordering and rename network-name option 2019-04-05 13:25:14 -07:00
0c51f156ae Reverse order of zone arg array building 2019-04-05 13:25:14 -07:00
fe2fb40d88 Add multi-region deploy functionality 2019-04-05 13:25:14 -07:00
9ba0439593 Add multi-region deploy functionality 2019-04-05 13:25:14 -07:00
b33a1fa019 Fix clippy errors 2019-04-05 12:22:10 -07:00
63fd4222aa Fix testnet sanity check for beta testnet 2019-04-05 12:22:10 -07:00
ef5df6f3fe Add server specification 2019-04-05 11:44:57 -07:00
2f90f9fbd4 Version all jsonrpc crates, in core too 2019-04-05 11:44:57 -07:00
12b099ea78 Bump jsonrpc-core from 10.1.0 to 11.0.0
Bumps [jsonrpc-core](https://github.com/paritytech/jsonrpc) from 10.1.0 to 11.0.0.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-05 11:44:57 -07:00
9f046a023e move transaction_count up (#3618)
* move transaction_count up

* fixup
2019-04-05 10:42:25 -07:00
46e6911ec1 Add get_signature_status() to SyncClient
And move bank::Result to transaction module.
2019-04-05 10:22:05 -07:00
d3844ef32a Add AsyncClient 2019-04-05 10:22:05 -07:00
4507dca342 Boot exchange_transaction. No tests depend on it. 2019-04-05 11:10:57 -06:00
c2fdd1362a bump release version in testnet participation document 2019-04-05 08:30:42 -07:00
4ea19b90a4 Fix update_ancestor_stakes in locktower (#3631)
* Fix update_ancestor_stakes in locktower

* Add test for vote threshold
2019-04-05 03:05:31 -07:00
9cd555cad5 AWS script change for additional zones and regions 2019-04-04 15:59:59 -07:00
ed78c8d3bb Fix beta testnet launch script 2019-04-04 15:16:01 -07:00
0b23af324b Refactor Storage Program (#3622)
* Refactor Storage Program

* Replace KeyedAccount trait with StorageAccount struct

* Implement State for Account, not StorageAccount

* Make State trait more generic

* Move validation check into function
2019-04-04 12:01:09 -07:00
1598a02a7a Wrap all client errors with TransportError 2019-04-04 12:00:19 -06:00
167f5bdc58 Add get_balance() and get_account_data() to SyncClient
Migrate tests to use them.
2019-04-04 12:00:19 -06:00
5cd7bccdf3 Add SyncClient and use from BankClient 2019-04-04 12:00:19 -06:00
acbc261891 Add gossip to build script, and fix bash strings 2019-04-04 00:18:48 -07:00
f97f0c4758 Bump serde_derive from 1.0.89 to 1.0.90
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.89 to 1.0.90.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.89...v1.0.90)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-03 19:54:50 -07:00
e6ac5bc546 Bump serde from 1.0.89 to 1.0.90
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.89 to 1.0.90.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.89...v1.0.90)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-03 20:26:48 -06:00
ef1e5db0ee Force delete all beta testnet nodes before restarting them 2019-04-03 17:58:54 -07:00
5cdfd79e96 Replace print with debug 2019-04-03 16:23:02 -07:00
b441bac7b2 Add separate Struct for Replicator submissions 2019-04-03 16:23:02 -07:00
00cb52c444 Update Storage Program to support multiple accounts 2019-04-03 16:23:02 -07:00
9323a3e257 Use keyed_account index names (#3555) 2019-04-03 15:57:26 -07:00
35298e01a8 Remove Instruction wrapper structs and name functions after enum fields 2019-04-03 13:34:27 -07:00
867f6f107b Rename SystemInstruction::Move to SystemInstruction::Transfer 2019-04-03 08:35:57 -06:00
43bb813cbe Rename 'new_account' to 'new_user_account'
And 'new_program_account' to 'new_account'
2019-04-02 21:24:42 -06:00
7b82e96467 Bump libc from 0.2.50 to 0.2.51 (#3554)
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.50 to 0.2.51.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.50...0.2.51)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-02 20:32:35 -05:00
978ff87b76 Fix potential storage bug
The previous code was assuming the instruction index and the
program_id index were the same. That's always true for
single-instruction transactions, but not for multiples.
2019-04-02 19:00:35 -06:00
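Why the assumption breaks: program ids are stored de-duplicated in a per-transaction table, and each instruction carries its own index into that table. A hypothetical sketch (field names invented for illustration):

```rust
struct CompiledInstruction {
    program_ids_index: u8, // index into the transaction's program id table
}

struct Transaction {
    program_ids: Vec<[u8; 32]>,
    instructions: Vec<CompiledInstruction>,
}

impl Transaction {
    // Correct: resolve through the instruction's own index,
    // never through `instruction_index` itself.
    fn program_id(&self, instruction_index: usize) -> &[u8; 32] {
        let ix = &self.instructions[instruction_index];
        &self.program_ids[ix.program_ids_index as usize]
    }
}

fn main() {
    // Two instructions sharing one program: instruction 1 maps to
    // program id 0, so instruction index != program id index here.
    let tx = Transaction {
        program_ids: vec![[1u8; 32]],
        instructions: vec![
            CompiledInstruction { program_ids_index: 0 },
            CompiledInstruction { program_ids_index: 0 },
        ],
    };
    assert_eq!(tx.program_id(1), &[1u8; 32]);
}
```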
4c0bc1fd88 Add program_ids() methods
Added CompiledInstruction::program_id() so that we don't need to pass
around instruction indexes just for Message::program_id().

Also added Message.program_ids() that returns a slice so that we
can move those pubkeys into Message::account_keys.
2019-04-02 19:00:35 -06:00
025b4f90de Pre-populate tokens (#3605) 2019-04-02 16:50:53 -07:00
20189c5d45 Bump hashbrown to 0.2.0 2019-04-02 16:37:21 -06:00
2e4acba579 Remove second block streamer from testnet beta 2019-04-02 15:15:11 -07:00
d90b8c331d Refactor blocktree storage abstraction (#3588) 2019-04-02 16:58:07 -05:00
efbb49d579 Don't use external node ssh key if one is not configured 2019-04-02 14:20:00 -07:00
f0079cd7b3 Bump hashbrown from 0.1.8 to 0.2.0
Bumps [hashbrown](https://github.com/Amanieu/hashbrown) from 0.1.8 to 0.2.0.
- [Release notes](https://github.com/Amanieu/hashbrown/releases)
- [Changelog](https://github.com/Amanieu/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Amanieu/hashbrown/compare/v0.1.8...v0.2.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-02 13:45:49 -06:00
a0041cec97 Rename Runtime to MessageProcessor 2019-04-02 12:49:26 -06:00
77bb9e7ffc Fix the release number in testnet participation document 2019-04-02 11:26:54 -07:00
f441177840 Deploy beta testnet with 100 nodes across AWS and GCP 2019-04-02 11:21:57 -07:00
cd634801a2 Re-enable test but remove replicators from config 2019-04-02 10:38:30 -07:00
5f10a87dec Ignore Flaky Local Cluster test 2019-04-02 10:56:29 -06:00
fa1c1e3734 Rename native programs to native instruction processors 2019-04-02 10:36:19 -06:00
947cdd8748 Rename system_program to system_instruction_processor 2019-04-02 10:36:19 -06:00
0a9f063d3e Rename native_program.rs to instruction_processor_utils.rs
Prefer the term "instruction processor" over "program". Reserve
the term "native" for the loader and shared object it loads.
Compiling an instruction processor to BPF shouldn't imply changing
to a non-native entrypoint.
2019-04-02 10:36:19 -06:00
dd4c512954 Rename Wallet's id to keypair 2019-04-02 07:38:28 -06:00
d228b6467c Implement finalizer so that all locked accounts are dropped (#3585)
* Implement finalizer so that all locked accounts are dropped when finalizer goes out of scope

* Add test for tx error with lock conflict

* Fix double unlock from destructor running after a call to unlock
2019-04-02 03:55:42 -07:00
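A self-contained sketch of the finalizer idea: an RAII guard whose `Drop` impl releases the locks even on an early return (the closure stands in for the real unlock call):

```rust
/// Runs a cleanup closure when dropped.
struct Finalizer<F: FnMut()>(F);

impl<F: FnMut()> Drop for Finalizer<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn process_batch(fail_early: bool) -> Result<(), String> {
    // Imagine the accounts were locked just before this point.
    let _unlock = Finalizer(|| println!("accounts unlocked"));
    if fail_early {
        return Err("lock conflict".to_string()); // finalizer still runs
    }
    Ok(())
}

fn main() {
    let _ = process_batch(true);
    let _ = process_batch(false);
}
```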
92c66a411b Remove bench-tps converge-only 2019-04-01 23:05:25 -06:00
af97ad3d68 Add solana-gossip module 2019-04-01 23:05:25 -06:00
6ff2a0a75e Rework discover to handle additional parameters, and be unit-testable 2019-04-01 23:05:25 -06:00
5b7d5e2e02 Bump reqwest from 0.9.12 to 0.9.13
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.12 to 0.9.13.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.9.12...v0.9.13)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-04-01 20:34:08 -06:00
97bd7a00f1 Support for configuring testnet nodes across multiple cloud services 2019-04-01 17:11:41 -07:00
25a2f08f8d add passive staking and rewards (#3579)
* add stake stuff

* more generic

* test decode bail cases

* favor early returns
2019-04-01 16:45:53 -07:00
3152090a66 update with review comments 2019-04-01 15:54:53 -06:00
9a0f9b910e add bench tests for squash operations 2019-04-01 15:54:53 -06:00
f853c39169 remove unused member 2019-04-01 15:54:53 -06:00
75ad1305c0 Cache vote accounts and optimize squash 2019-04-01 15:54:53 -06:00
cb3adea94f Increase node count in beta testnet 2019-04-01 11:06:24 -07:00
fcef54d062 Add a constructor to generate random pubkeys 2019-03-31 16:23:18 -06:00
32683cac7c Move markdown into book 2019-03-31 16:23:06 -06:00
15947b8642 Congestion stats, take 3 2019-03-31 16:23:06 -06:00
4e0316f792 Apply review feedback 2019-03-31 16:23:06 -06:00
9594b7fdce Use stake-weighted congestion statistics 2019-03-31 16:23:06 -06:00
1adf8355f2 Add design proposal for deterministic transaction fees 2019-03-31 16:23:06 -06:00
8660c3581e Add squashing metrics (#3573) 2019-03-29 21:21:59 -07:00
f886b3b12b Fix resetting PohRecorder to wrong bank (#3553)
* Check whether future slot already has transmission
2019-03-29 20:00:36 -07:00
5646daa820 Delete lots of fee parameters
So many zeros!
2019-03-29 19:21:51 -06:00
7896e8288d Replace Transaction::fee with a FeeCalculator 2019-03-29 19:21:51 -06:00
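A minimal sketch of what replacing a per-transaction fee field with a calculator might look like, charging per required signature (names and rates illustrative):

```rust
struct FeeCalculator {
    lamports_per_signature: u64,
}

impl FeeCalculator {
    // The fee is derived from the message contents rather than
    // stored as a field on the transaction itself.
    fn calculate_fee(&self, num_required_signatures: u64) -> u64 {
        num_required_signatures * self.lamports_per_signature
    }
}

fn main() {
    let fees = FeeCalculator { lamports_per_signature: 10 };
    assert_eq!(fees.calculate_fee(2), 20);
}
```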
9369ea86ea Track detached slots in blocktree (#3536)
* Add contains_all_parents flag to SlotMeta to prep for tracking detached heads

* Add new DetachedHeads column family

* Remove has_complete_parents

* Fix test
2019-03-29 16:07:24 -07:00
dee5ede16d Get rid of unnecessary frozen banks (#3572) 2019-03-29 16:06:48 -07:00
3b516c0710 Fix build 2019-03-29 14:56:29 -06:00
0887832b00 Early exit if buffered packets is empty 2019-03-29 13:40:07 -07:00
8e04fadb05 Cleanup magic numbers
Rename `num_signatures` to `num_required_signatures` to
disambiguate it from `tx.signatures.len()`.
2019-03-29 13:03:29 -07:00
31f8b6d352 Integrate Message into Transaction 2019-03-29 13:03:29 -07:00
98d60e6124 Expose a method for getting the Message from a Transaction
This currently constructs the message, but when message
is integrated, it can return a `&Message`.
2019-03-29 13:03:29 -07:00
fc678f53ba Send metrics data to the correct/configured database host 2019-03-29 12:14:15 -07:00
8e25c39564 fix formatting of numbered list 2019-03-29 11:46:21 -07:00
78ab79c322 fix build failure 2019-03-29 11:46:21 -07:00
052fc9b74f Information on how to debug testnet issues 2019-03-29 11:46:21 -07:00
f482c9ab61 Functionalize tx serialization; make testing more explicit 2019-03-29 11:31:46 -06:00
75dcd97f5f Update test to deserialize txs 2019-03-29 11:31:46 -06:00
4776dc36ab Map entry txs to serialized txs in blockstream 2019-03-29 11:31:46 -06:00
10239c3b3c Replace recursive status cache with a single global fast status cache (#3541)
Fast Status Cache
2019-03-29 10:03:55 -07:00
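One plausible reading of a single global fast status cache: a flat map from recent blockhash to the signatures seen with it, consulted for duplicate detection without walking a fork hierarchy (types simplified):

```rust
use std::collections::{HashMap, HashSet};

type Hash = [u8; 32];
type Signature = [u8; 64];

/// Simplified: signatures recorded per recent blockhash.
struct StatusCache {
    entries: HashMap<Hash, HashSet<Vec<u8>>>,
}

impl StatusCache {
    fn new() -> Self {
        Self { entries: HashMap::new() }
    }

    /// Returns false if the signature was already present (a duplicate).
    fn insert(&mut self, blockhash: &Hash, signature: &Signature) -> bool {
        self.entries
            .entry(*blockhash)
            .or_insert_with(HashSet::new)
            .insert(signature.to_vec())
    }
}

fn main() {
    let mut cache = StatusCache::new();
    let (bh, sig) = ([0u8; 32], [1u8; 64]);
    assert!(cache.insert(&bh, &sig));
    assert!(!cache.insert(&bh, &sig)); // duplicate detected
}
```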
753d0dcabe Fix the cuda build
And add a test to check the condition that the cuda tests are
exercising.
2019-03-29 08:25:56 -06:00
b708998d9d Fix chacha build 2019-03-29 08:25:56 -06:00
3759b0d2a5 Fix Blockstreamer test 2019-03-29 08:25:56 -06:00
c4bc710d3a Use Serde's with attribute to shorten length encodings in Transaction 2019-03-29 08:25:56 -06:00
857dc2ba47 Remove custom serialization 2019-03-29 08:25:56 -06:00
981e057363 Just test features in core 2019-03-28 21:40:52 -07:00
37494c67d0 Add pubkey read/write tools
Co-authored-by: Tyera Eulberg <tyera@solana.com>
Co-authored-by: Tristan Debrunner <tristan@solana.com>
2019-03-28 20:04:32 -06:00
7a81f327ce Add sigverify tests 2019-03-28 19:42:11 -06:00
845ddc3496 Fixup wallet-sanity to match new balance string 2019-03-28 16:56:27 -07:00
c61bb16fdf Fix manifest path for cargo commands (#3549) 2019-03-28 15:56:08 -07:00
15b945a652 Fix EC2 scripts for blockstream startup 2019-03-28 15:37:23 -07:00
1d48c4dd45 enable leader rotation in beta testnet 2019-03-28 13:44:44 -07:00
2ab50cbae8 Move untested code out of SDK
verify_signature() was only used in a test that was testing
binary layout. It only worked because the test transaction only
had one signature.

from() was only used by verify_signature() and that's something
we'd typically call `pubkey()`.

hash() didn't return the hash of the Transaction, as you might
guess. It's only used for PoH, so move it into Entry.
2019-03-28 14:24:59 -06:00
0482f153d0 Lower a bunch of debug
Can't afford to be printing on every transaction error, it will slow
the system down.
2019-03-28 12:24:47 -07:00
92e1c4c531 Report which account is in use (#3539) 2019-03-28 08:17:49 -07:00
4bca60861e Specialize GenericInstruction 2019-03-28 05:45:46 -06:00
50b0a5ae83 Blocktree+Erasure tests of basic erasure functionality (#3535)
* Remove WindowSlot; add Blocktree based tests to erasure
2019-03-28 01:55:51 -05:00
c30eb6185c Enable logging in exchange program (#3538) 2019-03-27 23:02:05 -07:00
a94bc80383 fix clippy errors 2019-03-27 18:05:17 -07:00
586b6fc3d7 review comments 2019-03-27 18:05:17 -07:00
a14c202d60 fix the ip address that's stored in the config file 2019-03-27 18:05:17 -07:00
ed48c495a3 fix shell-check errors 2019-03-27 18:05:17 -07:00
f0abd06a46 Added support for multi-region cloud testnet 2019-03-27 18:05:17 -07:00
7d0ff8e713 Re-enable Replicator test (#3534) 2019-03-27 17:21:49 -07:00
e8cc566b2b Storage Account setup for replicators and validators (#3516)
* Setup Storage Accounts for replicators

* Setup Storage Accounts for validators

* Add Replicator Info to Local Cluster and Add test
2019-03-27 15:54:09 -07:00
e45f7afd85 use the right id for delegate id 2019-03-27 15:04:09 -07:00
054ae3a3e3 Document current transaction size awkwardness 2019-03-27 14:27:20 -06:00
36ea088387 Fix Storage Stage not receiving entries when node is leader (#3528) 2019-03-27 13:10:33 -07:00
47b6707c07 Don't use a loader to test Storage instruction processor 2019-03-27 11:02:41 -06:00
0346b9cb5c hang out on progress until fork is confirmed 2019-03-27 08:41:41 -07:00
6bfe497ab5 remove leader confirmation 2019-03-27 08:41:41 -07:00
6956bf635e validator confirmation 2019-03-27 08:41:41 -07:00
e27d6d0988 validator confirmation 2019-03-27 08:41:41 -07:00
3fc09fb23f Remove keypairs from BankClient
Bring its interface closer to the other clients.
2019-03-27 09:37:19 -06:00
cecdb7061e Remove blockhash parameter from Bank::transfer
That parameter is an artifact from the Loom days, when I thought
Bank should implement the same interface as ThinClient.
2019-03-27 08:51:10 -06:00
0ac865f08c Remove BankClient::process_instructions 2019-03-27 08:51:10 -06:00
55115d0eeb Add process_message() to BankClient 2019-03-27 08:51:10 -06:00
16ff4ac1a8 Simplify storage interface in blocktree (#3522) 2019-03-27 01:36:39 -05:00
5ce31168ef Remove Transaction::new_signed 2019-03-26 19:51:16 -07:00
b9ff70c8ab pub Transaction::new_unsigned
Offer an incremental path off Transaction::new_unsigned_instructions().
2019-03-26 20:06:05 -06:00
77498c6efe Expose Message via the new default Transaction constructor 2019-03-26 20:06:05 -06:00
8c69c40834 Make space for a new Transaction::new 2019-03-26 20:06:05 -06:00
d497b99abb use solana_entrypoint directly (#3518) 2019-03-26 16:40:34 -07:00
ca2ac1e5ea Remove a mostly unused Transaction constructor 2019-03-26 15:46:58 -07:00
c09e0eb536 propagate TESTNET_DB_HOST env variable to next step in buildkite 2019-03-26 14:40:18 -07:00
0d90dfae1a Add provisions to specify a database server in testnet manager buildkite 2019-03-26 14:40:18 -07:00
bf61321cab fix metrics 2019-03-26 14:30:25 -07:00
591653981b fix metrics 2019-03-26 14:30:25 -07:00
e651510805 Instructions on how to boot metrics server 2019-03-26 14:02:41 -07:00
9d73fbb84a also check the delegate_id 2019-03-26 12:03:22 -07:00
215b07c1a9 remove status_cache.freeze (#3506) 2019-03-26 11:56:25 -07:00
420cbc45cd Record the current nodes locktower votes from the bank (#3502)
* observed_locktower_stats

* fixup! observed_locktower_stats
2019-03-26 11:06:31 -07:00
df333e8b6e Move new_move_many to SystemInstruction 2019-03-26 09:22:29 -07:00
9759ac2961 Mark book's javascript library as binary
highlight.js has a big dictionary of words. When a git-grep matches
one of those words, it floods the screen with the whole dictionary.

Mark it as binary so that it'll now just report one line:

     Binary file book/theme/highlight.js matches
2019-03-26 07:39:34 -07:00
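Presumably done with a `.gitattributes` entry; git's built-in `binary` attribute makes the file opaque to textual diff and grep, producing the one-line match shown above:

```
book/theme/highlight.js binary
```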
af9b173dfd fix link 2019-03-26 05:43:02 -07:00
b61aed7250 Minor cleanup 2019-03-25 20:31:13 -07:00
e1c0425c2b Remove rewards crate from publishing script 2019-03-25 20:19:58 -07:00
615472b52c Initialize locktower with heaviest bank (#3489) 2019-03-25 20:00:11 -07:00
4d34102d9c Move stragglers into the book
The stuff added between the time we switched to proposals/ and the git-revert
2019-03-25 19:39:22 -07:00
3e22ce4154 Added stats for locktower in testnet dashboard 2019-03-25 18:49:15 -07:00
215f33680b Fix the filename in testnet participation instructions 2019-03-25 17:33:41 -07:00
a5420f19da Update testnet-participation.md
add instructions for finding metrics
2019-03-25 17:12:00 -07:00
4bc3f70150 Boot VoteTransaction 2019-03-25 17:11:57 -07:00
e8814b1297 Add support for influx cloud 2019-03-25 17:10:38 -07:00
46ab0e6449 Update testnet-participation.md
multinode-demo/validator-x.sh uses cargo run unless USE_INSTALL==1
2019-03-25 16:30:56 -07:00
59b4f40f4e fixup! fixup! keep track of locktower slots and stakes 2019-03-25 16:05:28 -07:00
93c57934cb fixup! keep track of locktower slots and stakes 2019-03-25 16:05:28 -07:00
e8e1d6b8ce keep track of locktower slots and stakes 2019-03-25 16:05:28 -07:00
4916cd8da5 bench-tps in a cargo test 2019-03-25 15:05:56 -07:00
573dec63da Fix runtime benches 2019-03-25 14:32:01 -06:00
34c051f183 add hash_fromstr (#3476) 2019-03-25 12:23:19 -07:00
51004881f8 filter out banks that have an older epoch (#3472) 2019-03-25 11:09:39 -07:00
5c536e423c Inline InstructionCompiler
The object-oriented paradigm isn't helpful here; go functional.
2019-03-25 12:08:27 -06:00
4efa144916 Generate a Message instead of a Transaction 2019-03-25 12:08:27 -06:00
f3936c21a3 Add message 2019-03-25 12:08:27 -06:00
caff603497 Less code 2019-03-24 21:44:04 -07:00
aefa9891c0 Delete unused code 2019-03-24 21:44:04 -07:00
6286947697 Inline payment_plan
This module predates Accounts. That was a separate module because
it used to be part of Bank and those types could be sent to any
smart contract. Now each instruction processor defines for itself
what instructions it accepts.
2019-03-24 14:52:06 -06:00
33972ef89e Boot BudgetTransaction 2019-03-24 14:52:06 -06:00
b53cbdd9e6 Punt on the Script abstraction
Low ROI
2019-03-24 14:52:06 -06:00
c49e84c75b Boot StorageTransaction 2019-03-24 13:51:02 -07:00
dcf2337e58 Add StorageInstruction constructors 2019-03-24 13:51:02 -07:00
5a65c3f72e Test-drive StorageContract 2019-03-24 13:51:02 -07:00
8ff1987d2d Reorg Storage program to look more like the others 2019-03-24 13:51:02 -07:00
acedf4ca5a Move Instruction into its own module 2019-03-23 20:31:55 -07:00
68c35bfde6 Restart node test (#3459) (#3465)
* Restart node test (#3459)

* Add test to local_cluster for restarting a node

* fix so that we don't hit end of epoch (leader not found) before trying to transfer

* Do not look for confirmations, b/c nobody is voting on empty transmissions in this single node test
2019-03-23 19:19:55 -07:00
e1a3708844 Ensure accounts are unlocked (#3458) 2019-03-23 13:30:56 -07:00
46ecac3310 Update leader slot in poh recorder if we skipped it (#3452)
* reset poh recorder with the original start slot
2019-03-23 13:07:09 -07:00
028b9da0da Revert "Move the design proposals to a separate book"
This reverts commit 4ca18d6b9a.
2019-03-23 14:04:34 -06:00
74cea2748c Revert "Publish design proposals"
This reverts commit fb44e2bf48.
2019-03-23 14:04:34 -06:00
a478b2a05a Bump assert_cmd from 0.11.0 to 0.11.1
Bumps [assert_cmd](https://github.com/assert-rs/assert_cmd) from 0.11.0 to 0.11.1.
- [Release notes](https://github.com/assert-rs/assert_cmd/releases)
- [Changelog](https://github.com/assert-rs/assert_cmd/blob/master/CHANGELOG.md)
- [Commits](https://github.com/assert-rs/assert_cmd/compare/v0.11.0...v0.11.1)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-03-23 13:18:51 -06:00
41a52dbfea Document solana-install-init.sh 2019-03-23 09:01:08 -07:00
4923f889c4 Add trick to ensure the entire script is downloaded 2019-03-23 09:01:08 -07:00
31b8743052 delay freeze of status_cache until squash (#3453) 2019-03-22 22:14:56 -07:00
6505221629 Add exchange program (#3444) 2019-03-22 21:07:36 -07:00
de2b6bc9fc Bump tokio from 0.1.17 to 0.1.18
Bumps [tokio](https://github.com/tokio-rs/tokio) from 0.1.17 to 0.1.18.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-0.1.17...tokio-0.1.18)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-03-22 19:54:18 -06:00
f565292852 Update testnet-participation.md 2019-03-22 17:58:58 -07:00
90f17e8fd4 Update testnet-participation.md 2019-03-22 17:18:27 -07:00
d6da7dc1b6 Initial testnet participation doc 2019-03-22 17:11:47 -07:00
7e2aad2590 Refrain from trying to configure a staking account that was previously configured 2019-03-22 17:00:09 -07:00
f09b8d3921 Demote log level 2019-03-22 17:00:09 -07:00
52f6c33ff9 Make sure banking stage is recording with the same bank that it read (#3447)
* make sure banking stage is recording with the same bank that it read with
2019-03-22 14:17:39 -07:00
60dfb35924 Why do I need to see raw bytes from the drone? 2019-03-22 14:07:44 -07:00
5f41909098 Stop using VoteTransaction in Vote processor 2019-03-22 14:07:00 -06:00
a28f7db950 Retry more for a new blockhash 2019-03-22 11:14:03 -07:00
38fdbbba3f Reduce remaining program crates to boilerplate crates 2019-03-22 06:46:44 -07:00
0a5b6154e8 Use same gossip port for all testnet nodes 2019-03-22 00:16:58 -07:00
4542a7042a Add --poll-for-new-genesis-block flag 2019-03-22 00:15:19 -07:00
6113b64fee Include multinode-demo scripts in release tarball 2019-03-21 22:09:44 -07:00
f777ed76a3 Use installed binaries if not within the cargo workspace 2019-03-21 22:09:44 -07:00
e6b9babf53 Run a drone on blockstreamer nodes 2019-03-21 22:09:44 -07:00
ed8bada439 Kill all node processes (blockexplorer) 2019-03-21 22:09:44 -07:00
06b0c98c75 Remove accounts when the fork is removed (#3384)
* Fix test

* Cleanup accounts when the fork is removed

* Update test to check for deleted accounts
2019-03-21 17:36:10 -07:00
dbb145c266 Fixup ledger path 2019-03-21 17:06:57 -07:00
437481853b Ensure genesis ledger directory is populated on all validator nodes
This allows all nodes to serve the genesis ledger over rsync instead of
just the bootstrap leader
2019-03-21 16:35:40 -07:00
3b5a9f512c Get client-id.json out of the genesis ledger directory 2019-03-21 16:35:40 -07:00
045af04784 Reduce budget_program and config_program into boilerplate crates 2019-03-21 16:53:08 -06:00
d0761f57e8 Add _program suffix to directories of crates with _program suffix 2019-03-21 16:24:06 -06:00
4bb88619fd Move entrypoint boilerplate into a macro 2019-03-21 15:27:49 -06:00
412ebfcaf2 ReplayStage::new is too long
break into more functions
2019-03-21 14:08:11 -07:00
3a7647f611 Add curl-able install script 2019-03-21 13:45:54 -07:00
d4cc48f99d Check from account balance and exit cleanly if 0 2019-03-21 13:00:46 -07:00
852fcbd700 Automatically update PATH for the user 2019-03-21 13:00:46 -07:00
8ab4b8e6ac Support local networks for easy testing 2019-03-21 13:00:46 -07:00
a8095e204f Cleanup SystemTransaction 2019-03-21 12:41:39 -07:00
98979c7d53 Cargo.lock 2019-03-21 11:24:10 -07:00
c18fcde385 Move installer to the implemented section 2019-03-21 11:24:10 -07:00
f286bbac99 Initial commands implementation 2019-03-21 11:24:10 -07:00
4e029d81a2 Setup staking 2019-03-21 11:12:35 -07:00
2b00a42b06 Boot Rewards program 2019-03-21 12:07:20 -06:00
07d55d0092 Downgrade 'No next leader found' to warning 2019-03-21 11:18:49 -06:00
fb44e2bf48 Publish design proposals 2019-03-21 10:54:59 -06:00
9b0bf5ad66 Add tests for table mappers; Bugfix on-disk mapper (#3408)
add tests to `kvstore::mapper::{disk, memory}` modules

fix bug in disk mapper uncovered by tests

use `tempdir` crate for unit test-directories
2019-03-21 11:38:29 -05:00
4247fa946e Add solana-wallet balance <PUBKEY> 2019-03-21 09:51:27 -06:00
071b1d8b77 Cargo.lock 2019-03-21 08:47:58 -07:00
63aadc4905 Turn top-level Cargo.toml into a virtual manifest 2019-03-21 08:47:58 -07:00
d2415613de Migrate loader tests to BankClient 2019-03-21 09:19:24 -06:00
58f071b7a0 Migrate loader to high-level instructions 2019-03-21 09:19:24 -06:00
148e08a8a5 Enable cluster tests (#3372)
* Cluster tests

* stable!

* fixup! stable!

* fixup! fixup! stable!

* fixup! fixup! fixup! stable!

* fixup! fixup! fixup! fixup! stable!

* fixed space

* add getNumBlocksSinceSignatureConfirmation entry for the json rpc docs

* Check in upcoming epochs for potential leadership slots in next_leader_slot()
2019-03-21 07:43:21 -07:00
402a733cd7 Upload tarball as a github release asset 2019-03-20 21:39:35 -07:00
78be3652de Add script to upload github release assets 2019-03-20 21:39:35 -07:00
b03d9884a3 Ensure current crate versions match the tag before publishing to crates.io 2019-03-20 20:51:58 -07:00
799085a105 Remove dead code 2019-03-20 20:51:58 -07:00
7812b67471 deduplicate some test code (#3401) 2019-03-20 19:35:25 -05:00
4033fa031b Add convenience script for testnet deployments 2019-03-20 16:57:55 -07:00
b41737259a Add GITHUB_TOKEN 2019-03-20 16:57:55 -07:00
7c8a4bf6a4 use ticks per slot to check if the current tick is in the leader slot 2019-03-20 16:55:01 -07:00
71314d79a7 address review comments 2019-03-20 16:55:01 -07:00
d7ff6645a9 change pubkey to ref 2019-03-20 16:55:01 -07:00
1824e09d0a find next leader slot before resetting working bank in Poh recorder 2019-03-20 16:55:01 -07:00
205907d3d7 Check if poh recorder has over stepped the leader slot 2019-03-20 16:55:01 -07:00
d4bcc4d474 🐳 2019-03-20 16:21:47 -07:00
bcb190a12a Remove erroneous comment 2019-03-20 16:15:25 -07:00
63e8496473 Cleanup pubkey parsing copypasta 2019-03-20 16:08:03 -07:00
4107d70e93 Bump reqwest from 0.9.11 to 0.9.12
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.11 to 0.9.12.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-03-20 16:26:31 -06:00
4fb0782892 Rename blocktree SlotMeta::is_rooted to is_connected
is_rooted is now is_connected and (still) indicates the set of connected
completed slots. 'rooted' slot terminology is used for a different
meaning in bank_forks and replay_stage.
2019-03-20 14:43:39 -07:00
9b7c1d5650 Relocate *-help.sh to their respective packages 2019-03-20 14:34:57 -07:00
985592cf40 Fix cp args 2019-03-20 14:29:30 -07:00
2694654a98 Change fixed 8050 port to one from bind_in_range. 2019-03-20 14:17:21 -07:00
4126461f87 Fix dupe port on cluster_info
and remove unintended grow file
2019-03-20 14:17:21 -07:00
791ead6053 Include TARGET in release URL to make room for future targets 2019-03-20 13:54:32 -07:00
3048de18bb add doc that should have been copy-pasta'd from bench (#3389) 2019-03-20 11:10:42 -07:00
df9fd2bc0b stop copying Blooms (#3379)
* stop copying Blooms

* fixup

* clippy
2019-03-20 11:06:39 -07:00
13c9d3d4e1 Kvstore: use bincode serialization (#3385)
* use bincode for SSTable serialization; add tests

* Fix bug uncovered in merge algorithm by unit tests

* use bincode in write-ahead-log serialization

* Add helper `Fill` trait for zeroing buffers
2019-03-20 09:55:44 -05:00
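A round-trip with the bincode 1.x API, the kind of serialization the kvstore presumably adopted for SSTable and write-ahead-log records (the record struct here is invented):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct IndexEntry {
    key: u64,
    offset: u64,
    len: u32,
}

fn main() -> Result<(), bincode::Error> {
    let entry = IndexEntry { key: 7, offset: 4096, len: 512 };
    let bytes = bincode::serialize(&entry)?; // compact, fixed-layout encoding
    let back: IndexEntry = bincode::deserialize(&bytes)?;
    assert_eq!(entry, back);
    Ok(())
}
```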
0dc364c17a Relocate transaction reference verification to join the other validity checks 2019-03-20 07:46:01 -07:00
b3cdf58e4b Add WriteBatch to KvStore (#3364)
* implement write-batch in kvstore

* Add tests to writebatch, and in-memory table
2019-03-20 06:55:39 -05:00
61f950a60c Sign Gossip Vote Messages 2019-03-19 19:56:17 -07:00
da77789881 Revert "Drop 'unchecked' from get_subset_mut()"
This reverts commit 70b21b3795.
2019-03-19 17:52:02 -07:00
61af87972e allow empty ancestors 2019-03-19 17:51:01 -07:00
fe9e771b9b Clear progress map on squash (#3377) 2019-03-19 17:30:36 -07:00
94b5835738 Make AccountMeta a traditional struct instead of a tuple struct 2019-03-19 17:22:39 -06:00
a4652a9aaf Label tuple with AccountMeta 2019-03-19 17:22:39 -06:00
7246d72f03 fix is_locked_out logic 2019-03-19 16:21:46 -07:00
70b21b3795 Drop 'unchecked' from get_subset_mut() 2019-03-19 16:12:53 -07:00
682b1b89b3 Adjust for vector of entries in blobs. 2019-03-19 13:49:48 -07:00
f1802e592a Review comments: node creation functions for replicators
And rework download loop.
2019-03-19 13:49:48 -07:00
ee58c1f960 Add test for replicator ledger download
Add an interface to query the storage slot a
replicator is holding on the storage_addr port.
Fix logic to poll blocktree until all replicated
slots are filled.
Add test logic to ask the replicator which slot it
is replicating, then download an entry in that slot.
2019-03-19 13:49:48 -07:00
07f4dd385d Cleanup replicator sockets
Add optional UdpSocket for storage interface.
Add new_localhost_replicator to create a new replicator local node.
2019-03-19 13:49:48 -07:00
1be7ee51be Fix potential crash in banking stage 2019-03-19 12:06:42 -07:00
56fcc93ef5 Schedule node for consecutive slots as leader (#3353)
* Also tweak epoch and slot duration

* new test for leader schedule
2019-03-19 06:36:45 -07:00
c70412d7bb move core tests to core (#3355)
* move core tests to core

* remove window

* fix up flaky tests

* test_entryfication needs a singly-threaded banking_stage

* move core benches to core

* remove unnecessary dependencies

* remove core as a member for now, test it like runtime

* stop running tests twice

* remove duplicate runs of tests in perf
2019-03-18 22:08:21 -07:00
5e21268ca0 PR comments 2019-03-18 20:46:11 -07:00
b38e3bef01 Modify bank_forks to support squashing/filtering new root and also don't remove parents from bank_forks when inserting, otherwise we lose potential fork points when querying blocktree for child slots 2019-03-18 20:46:11 -07:00
89cc82c71b Update cli interface 2019-03-18 18:34:08 -07:00
1d0f6a5d85 Add scripts/install-help.sh 2019-03-18 18:34:08 -07:00
d0292b1cf1 store transaction no longer takes the transaction fee from the config account 2019-03-18 18:34:08 -07:00
15aed9f320 Self 2019-03-18 18:34:08 -07:00
5a67362b8e update passive staking proposal (#3335)
* update passive staking proposal

* fixup
2019-03-18 18:25:29 -07:00
5d73ab299b Clean up locks in KvStore (#3358)
* Lift all shared mutable state into Kvstore

commit is now an AtomicUsize

In-memory table and write-log are now struct members behind individual RwLocks
2019-03-18 19:04:31 -05:00
ef111dcbe1 Decendent is not a word 2019-03-18 15:58:27 -07:00
da7e49c880 Fix broken build
- build breaks if Cuda feature is used
2019-03-18 15:29:51 -07:00
f16f88873d Add multiple signer support to BankClient 2019-03-18 16:07:45 -06:00
211c81f2a2 bank fork rpc (#3351) 2019-03-18 14:18:43 -07:00
efc39ffdde Report how many grace ticks were afforded to previous leader (#3350) 2019-03-18 13:24:07 -07:00
61a4b998fa Implement locktower voting (#3251)
* locktower components and tests

* integrate locktower into replay stage

* track locktower duration

* make sure threshold is checked after simulating the vote

* check vote lockouts using the VoteState program

* duplicate vote test

* epoch stakes

* disable impossible to verify tests
2019-03-18 12:12:33 -07:00
cedff2fca1 Cleanup sockets test 2019-03-18 11:56:18 -07:00
8d032aba9d Merge InstructionError and ProgramError
From the user's perspective, it's just an instruction error.
For program-specific errors, we still have
InstructionError::CustomError.
2019-03-18 10:39:20 -06:00
607b368fe3 Add back in test to check the account program id 2019-03-18 08:22:54 -07:00
a54854abc7 Do Budget verification in BudgetScript 2019-03-18 08:22:54 -07:00
ce6257a069 Delete misplaced unit-tests
These tests were from back in the day when Bank (then called Accountant)
would call `verify_plan()` on all transactions. Nowadays `verify_plan`
is only useful to the client. It can be used to ensure a transaction
won't trigger runtime errors.
2019-03-18 08:22:54 -07:00
7b28d3a231 Move Budget's verify_plan() into tests
This functionality is supposed to be in the interpreter
2019-03-18 08:22:54 -07:00
ea01ff2aab Add pubkey to BudgetExpr::new_cancelable_future_payment for wallet 2019-03-18 08:22:54 -07:00
3369019943 Add BudgetExpr::new_cancelable_authorized_payment 2019-03-18 08:22:54 -07:00
dbd4176b97 Move script constructors into a separate module 2019-03-18 08:22:54 -07:00
122c7bc2ef Rename TransactionCompiler to Script and use it to replace the type alias 2019-03-18 08:22:54 -07:00
99671472d1 Migrate config tests to Bank 2019-03-18 08:22:54 -07:00
0c0716abfb Move Bank-based tests into unit-tests 2019-03-18 08:22:54 -07:00
c09accb685 Rename StaticEntrypoint to ProcessInstruction 2019-03-18 08:22:54 -07:00
ae4d14a2ad Introducing Scripts
A sequence of instructions. A client compiles the script and then uses
the compiled script to construct a transaction. Then it adds a
blockhash, signs the transaction, and sends it off for
processing.
2019-03-18 08:22:54 -07:00
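A sketch of that flow with entirely hypothetical types, just to show the compile-then-sign order:

```rust
// Every type here is invented; the point is the ordering of steps.
struct Instruction;
struct Script(Vec<Instruction>);
struct Message;
struct Transaction {
    message: Message,
    blockhash: [u8; 32],
    signatures: Vec<[u8; 64]>,
}

impl Script {
    fn compile(self) -> Message {
        Message // 1. compile the instruction sequence into a message
    }
}

fn build_transaction(instructions: Vec<Instruction>, recent_blockhash: [u8; 32]) -> Transaction {
    let message = Script(instructions).compile();
    Transaction {
        message,
        blockhash: recent_blockhash, // 2. add a blockhash
        signatures: vec![[0u8; 64]], // 3. placeholder where real signing happens
    }
}

fn main() {
    let _tx = build_transaction(vec![Instruction], [0u8; 32]);
}
```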
55cdbedb52 Allow tests to add instruction processors
Make runtime a private module again.
2019-03-18 08:22:54 -07:00
ee39f31d81 Add Runtime object. Allow any number of static loaders. 2019-03-18 08:22:54 -07:00
70b45de012 Get access to runtime errors in Budget unit-tests 2019-03-18 08:22:54 -07:00
60437a8dcb Multiple entries per blob (#3337)
* Pack multiple entries into blob

* fix tests

* Add test for deserializing multi-entry blobs in blocktree

* more test fixes
2019-03-17 18:48:23 -07:00
a35ebe1186 Avoid RpcRequest 2019-03-17 01:34:58 -07:00
c498775a3d Move generic rpc_client functions from wallet/ to client/ 2019-03-17 01:34:58 -07:00
3ad019a176 Increment stable timeout 2019-03-16 23:56:35 -07:00
9632136cda Clean up stray retry_get_balance() function 2019-03-16 23:56:35 -07:00
42cea7a785 Remove metrics dependency 2019-03-16 23:56:35 -07:00
4c9d852b08 Remove ThinClient::transfer() 2019-03-16 23:56:35 -07:00
9566a5cc68 Organize accounts on a per fork basis (#3336)
* Organize accounts by fork

* Keep track of vote accounts in account info

* update comments
2019-03-16 23:42:32 -07:00
ac03c59b41 client/: get_transaction_count() now returns a Result 2019-03-16 23:27:23 -07:00
73ceaf07b1 client/: move RpcClient from rpc_request.rs to rpc_client.rs 2019-03-16 23:27:23 -07:00
7b314f47f7 Factor RPC request mechanism out of RpcClient into *RpcClientRequest 2019-03-16 23:27:23 -07:00
23337e08eb client/: Merge client.rs into thin_client.rs 2019-03-16 22:48:26 -07:00
97e73311c5 Move mock request_airdrop_transaction into drone/, closer to the real impl 2019-03-16 22:14:09 -07:00
e2c24481e4 wallet/ now only dev-depends on core/ 2019-03-16 21:40:39 -07:00
ad252fe4c5 Remove unnecessary Option from get_account_data 2019-03-16 11:32:01 -07:00
4b04bc8612 Move thin_client RPC requests into rpc_request; de-mut thin_client 2019-03-16 11:32:01 -07:00
bcc34b906c Relieve the caller of having to care about the rpc request id 2019-03-16 11:32:01 -07:00
c2b1010f18 Clarify url vs addr 2019-03-16 11:32:01 -07:00
e3ef4f25d3 Update Dockerfile
install mscgen (for book art)
2019-03-15 20:44:35 -07:00
ad12b0efce Bump kvstore version to 0.13.0 to match all other solana crates (#3334) 2019-03-15 19:05:24 -05:00
00f005af25 Fix leader rotation counter 2019-03-15 17:01:18 -07:00
656fb173f9 Extract kvstore into separate crate (#3327)
* extract kvstore into new crate

* add kvstore crate to CI publishing list
2019-03-15 18:42:47 -05:00
5f58e9cd6e Config program - useful for storing/updating simple config items on chain 2019-03-15 16:39:45 -07:00
1d876df8b3 Add command plumbing 2019-03-15 16:30:31 -07:00
c8bbca08f8 Install the install program 2019-03-15 16:30:31 -07:00
971da7325d Reduce log level for periodic debug messages 2019-03-15 15:41:26 -07:00
ca4f874f52 Remove ci/run-local.sh 2019-03-15 15:09:25 -07:00
a88b36d718 Rename TransactionBuilder to TransactionCompiler 2019-03-15 14:46:44 -06:00
24d9138067 Abandon Builder pattern 2019-03-15 14:46:44 -06:00
aca739b800 Boot fees from TransactionBuilder 2019-03-15 14:46:44 -06:00
e091aa87ea More precise constructor names 2019-03-15 14:46:44 -06:00
968022a1b0 Instruction name swap
* Instruction -> GenericInstruction
* Instruction<u8, u8> -> CompiledInstruction
* Instruction<Pubkey, (Pubkey, bool)> -> Instruction
2019-03-15 14:46:44 -06:00
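Read literally, the swap above describes one generic instruction type with two specializations; a sketch (the field layout is a guess):

```rust
type Pubkey = [u8; 32]; // stand-in for the real type

// One generic shape, specialized twice.
pub struct GenericInstruction<P, A> {
    pub program_id: P,    // a full pubkey, or an index into a key table
    pub accounts: Vec<A>, // account references, possibly with a signer flag
    pub data: Vec<u8>,
}

// Compiled form: u8 indexes into the transaction's key tables.
pub type CompiledInstruction = GenericInstruction<u8, u8>;
// User-facing form: full pubkeys, with (key, is_signer) account metadata.
pub type Instruction = GenericInstruction<Pubkey, (Pubkey, bool)>;

fn main() {
    let ix: Instruction = GenericInstruction {
        program_id: [0u8; 32],
        accounts: vec![([1u8; 32], true)],
        data: vec![],
    };
    let _compiled: CompiledInstruction = GenericInstruction {
        program_id: 0,
        accounts: vec![0],
        data: ix.data,
    };
}
```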
66fb1bbb2e Give last leader some grace ticks to catch up (#3299)
* Wait for last leader for some ticks

* New tests and fixed existing tests
2019-03-15 13:22:16 -07:00
fa3e1fa7c9 Add error correction to write-log (#3323) 2019-03-15 15:04:34 -05:00
36763d0802 Cleanup entry.rs packing code (#3303) 2019-03-15 12:48:32 -07:00
be5f800390 Use the Mining Proof's Signature as storage keys (#3321) 2019-03-15 11:44:10 -07:00
ca69b7b75b Add CRC Reader and Writer I/O wrappers (#3322)
* add CRC Reader and Writer I/O wrappers

* typo fix and variable rename
2019-03-15 13:17:49 -05:00
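The commit doesn't say which CRC; as an illustration, a checksumming `Write` wrapper built on the crc32fast crate could look like this:

```rust
use std::io::{self, Write};

/// A Write adapter that checksums everything passing through it.
struct CrcWriter<W: Write> {
    inner: W,
    hasher: crc32fast::Hasher,
}

impl<W: Write> CrcWriter<W> {
    fn new(inner: W) -> Self {
        Self { inner, hasher: crc32fast::Hasher::new() }
    }
    fn crc(self) -> u32 {
        self.hasher.finalize()
    }
}

impl<W: Write> Write for CrcWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.inner.write(buf)?;
        self.hasher.update(&buf[..n]); // only hash bytes actually written
        Ok(n)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

fn main() -> io::Result<()> {
    let mut w = CrcWriter::new(Vec::new());
    w.write_all(b"write-ahead-log record")?;
    println!("crc32 = {:#010x}", w.crc());
    Ok(())
}
```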
a15927f8d0 make KvStore Send+Sync (#3317) 2019-03-15 13:01:34 -05:00
4ba4ad9878 Update README.md 2019-03-15 11:53:37 -06:00
be1511a7ff delete accidental file (#3316) 2019-03-15 11:28:08 -05:00
41b98c603b Upgrade rust stable to 1.33.0 2019-03-15 09:25:28 -07:00
5430dd28b6 Update docker-rust to 1.33 2019-03-15 09:25:28 -07:00
e9d687329b Only push newly built container 2019-03-15 09:25:28 -07:00
d72cac6e97 Fix chacha test 2019-03-15 09:06:54 -06:00
4e51a444f4 Simplify TransactionBuilder::new_with_instructions 2019-03-15 09:06:54 -06:00
48d86683e2 Abuse KeypairUtil 2019-03-15 09:06:54 -06:00
42d5dde5b1 new_singleton -> new_with_instruction 2019-03-15 09:06:54 -06:00
142eeffe5d Add BankClient to minimize copypasta 2019-03-15 09:06:54 -06:00
6a68df3ebd Don't resign airdrop requests with the keypair asking for an airdrop
The correct thing to do here is retry until you get a
BlockhashNotFound error, and then send another request
to the drone for a new signed transaction.

Also, this test is an integration test masquerading as a unit-test.
2019-03-15 09:06:54 -06:00
8306c1841c Fix build 2019-03-15 09:06:54 -06:00
73bd396dfb Rewrite system integration test
Create Client helpers instead of Bank helpers.
2019-03-15 09:06:54 -06:00
36fb0a0aef Add new preferred transaction constructors 2019-03-15 09:06:54 -06:00
4d53be8350 Make it unappealing to build and sign transactions at the same time
Use a client to sign transactions. It'll need that keypair anyway
to resign new blockhashes on retries.
2019-03-15 09:06:54 -06:00
f8bf9ca218 Make safe transaction signing the default 2019-03-15 09:06:54 -06:00
7b4568b9bf Migrate to sign_checked() 2019-03-15 09:06:54 -06:00
bd8502e87e Implement Transaction::new_unsigned with TransactionBuilder 2019-03-15 09:06:54 -06:00
21815f26d5 Implement signed transaction using unsigned transaction 2019-03-15 09:06:54 -06:00
8ef5195037 Don't test a transaction with a duplicate key 2019-03-15 09:06:54 -06:00
57606c6bf8 Bump log level for better CI logs 2019-03-15 07:48:23 -07:00
0465abf75b move (#2738) 2019-03-15 06:58:45 -07:00
8a142966be Avoid stray '' when rust version is not specified 2019-03-14 21:30:24 -07:00
7498488f5f cloud_DeleteInstances() now waits for the instances to be terminated 2019-03-14 21:15:00 -07:00
ede99d5913 Revert "Block until instances are confirmed to be deleted"
This reverts commit 47ddbbe53b.
2019-03-14 20:53:10 -07:00
3ced91319f Upgrade nightly rust version 2019-03-14 20:22:46 -07:00
3d1413e619 Preserve original nightly name 2019-03-14 20:22:46 -07:00
8f25548781 Overhaul cargo/rustc version management 2019-03-14 20:22:46 -07:00
47ddbbe53b Block until instances are confirmed to be deleted 2019-03-14 16:20:18 -07:00
5741400713 add support for finding the next slot a node will be leader (#3298) 2019-03-14 16:06:56 -07:00
9f02a8d3d0 remove ticks_per_slot from blocktree (#3297) 2019-03-14 15:18:37 -07:00
c208f4dbb5 Add option of replicators to local cluster test 2019-03-14 13:55:11 -07:00
a17843c8f6 solana-install design proposal 2019-03-14 13:21:00 -07:00
3f2fc21bb3 Rename hash_queue and fix boundary condition (#3289) 2019-03-14 11:56:36 -07:00
9fac3b26ee Move the design proposals to a separate book
Fixes #3262
2019-03-14 10:08:43 -07:00
c1eec0290e Rename userdata to data (#3282)
* Rename userdata to data

Instead of saying "userdata", which is ambiguous and imprecise,
say "instruction data" or "account data".

Also, add `ProgramError::InvalidInstructionData`

Fixes #2761
2019-03-14 10:48:27 -06:00
de13082347 add economic design mvp to summary and overview 2019-03-14 09:42:19 -06:00
48b5d666d0 some economic mvp features 2019-03-14 09:42:19 -06:00
70bb49a46d some economic mvp features 2019-03-14 09:42:19 -06:00
105fc7029e create mvp section 2019-03-14 09:42:19 -06:00
77a7ffe543 Bump hex-literal from 0.1.3 to 0.1.4
Bumps [hex-literal](https://github.com/RustCrypto/utils) from 0.1.3 to 0.1.4.
- [Release notes](https://github.com/RustCrypto/utils/releases)
- [Commits](https://github.com/RustCrypto/utils/compare/hex-literal-v0.1.3...hex-literal-v0.1.4)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-03-14 07:21:32 -06:00
7d593e6c61 Exit gracefully when no subcommand is specified 2019-03-14 00:35:34 -05:00
bb420cb995 Use crate_description and crate_name Clap macros 2019-03-14 00:35:34 -05:00
e58220282a Move TransactionError into the SDK 2019-03-13 21:26:57 -06:00
4ca4038d54 Rename BankError to TransactionError 2019-03-13 21:26:57 -06:00
150cd31ec0 Blur the line between Bank and Runtime 2019-03-13 21:26:57 -06:00
6fd0d4dcf5 Boot error piggybacking on BankError 2019-03-13 21:26:57 -06:00
296415945a Generalize error codes 2019-03-13 21:26:57 -06:00
1de5ae1ef0 Remove SystemError from ProgramError 2019-03-13 21:26:57 -06:00
6a89c68a1d Add utility function to help get System error out of ProgramError 2019-03-13 21:26:57 -06:00
c14cce4c85 Add InstructionError for runtime instruction errors 2019-03-13 21:26:57 -06:00
959961b596 Modified test 2019-03-13 18:18:27 -07:00
6f76c2da6c Fix confirmation test 2019-03-13 17:50:53 -07:00
8d2bd2b30f Reduce ticks per second
- It improves TPS. Temporary fix for the beacons timeframe
2019-03-13 17:50:53 -07:00
34a8d591fa Switch version file from .txt to .yaml; add target tuple to version.yml 2019-03-13 16:30:07 -07:00
d94ff4bf4a Bump tokio from 0.1.15 to 0.1.17
Bumps [tokio](https://github.com/tokio-rs/tokio) from 0.1.15 to 0.1.17.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Changelog](https://github.com/tokio-rs/tokio/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-0.1.15...tokio-0.1.17)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2019-03-13 15:45:33 -06:00
af03df38b9 Don't vote for empty leader transmissions (#3248)
* Don't vote for empty leader transmissions

* Add is_delta flag to bank to detect empty leader transmissions

* Plumb new is_votable flag through replay stage

* Fix PohRecorder tests

* Change is_delta to AtomicBool to avoid making Bank references mutable

* Reset start slot in poh_recorder when working bank is cleared, so that consecutive TPUs will start from the correct place

* Use proper max tick height calculation

* Test for not voting on empty transmission

* tests for is_votable
2019-03-13 14:06:12 -07:00
242bcf44db Replace stale --no-signer usage with --no-voting 2019-03-13 13:50:30 -07:00
ebd540972d Remove duplicate --rpc-drone-address 2019-03-13 13:24:02 -07:00
a17be9f8bd Revert "Add case for --rpc-drone-address"
This reverts commit 42ad297778.
2019-03-13 13:23:54 -07:00
42ad297778 Add case for --rpc-drone-address 2019-03-13 13:04:44 -07:00
0568d7238e Add implemented design proposals section 2019-03-13 13:20:37 -06:00
9bc05313a2 Update TPU ASCII art 2019-03-13 13:14:15 -06:00
fedbae6f8c Enable rpc for all testnet nodes 2019-03-13 10:49:40 -07:00
64de639817 Fixes to replicator
Move functionality into more functions.
Break down the current test and just test creation/joining the network.
2019-03-13 10:15:03 -07:00
ec9e13d1f4 Add repair slot range
Use default impl RepairSlotRange
2019-03-13 10:15:03 -07:00
5d27f221f7 Drop socat for iptables 2019-03-13 12:03:56 -05:00
61db74d98e Run socat in the background 2019-03-13 08:15:58 -07:00
1d689e84f1 Move and rename cluster_client 2019-03-12 22:05:38 -06:00
b7f420412b Update publish script 2019-03-12 22:05:38 -06:00
e3ac9e9679 Move thin client tests to integration test suite 2019-03-12 22:05:38 -06:00
12fde77ecd Update crate references 2019-03-12 22:05:38 -06:00
3fc96c4a18 Add solana-client crate 2019-03-12 22:05:38 -06:00
cb3eeace56 Replay Stage start_leader() can use wrong parent fork() (#3238)
* Make sure start_leader starts on the last voted block, not necessarily the biggest indexed bank in frozen_slots()

* Fix tvu test
2019-03-12 17:42:53 -07:00
76feb2098e Use same VM type for validators as leader, if CUDA is enabled (#3253)
- Since all nodes are created equal
2019-03-12 17:42:47 -07:00
06cb266cfe remove unused code (#3252) 2019-03-12 16:46:41 -07:00
866d3f467f Fix flag to disable leader-rotation (#3243) 2019-03-12 16:35:13 -07:00
c1e726da87 Remove comment 2019-03-12 15:32:41 -07:00
7d7528eb18 Fix test_bank_storage 2019-03-12 15:32:41 -07:00
9f916f9d47 remove Option<> wrapper for accounts 2019-03-12 15:03:26 -07:00
a7d8bfdf8b Adjust crate list 2019-03-12 14:02:51 -07:00
abdd4f371b Adjust readme path 2019-03-12 14:02:51 -07:00
13adee332e Add retry transfer logic to kill_entry_and_spend_and_verify_rest to account for dead forks (#3239) 2019-03-12 13:48:02 -07:00
a799f8f4b1 tell blockexplorer to run on port 8080 (#3237)
* tell blockexplorer to run on port 8080

* forward port 80 to 5000 for a blockexplorer node
2019-03-12 13:39:09 -07:00
1ee43a7633 Remove non-essential programs from runtime/ 2019-03-12 15:11:59 -05:00
3d2b7dd1ef Move programs/system into runtime/ 2019-03-12 11:30:58 -05:00
7b35114c0f Filter vote accounts with no delegate from being selected in Rotation (#3224) 2019-03-11 17:58:21 -07:00
b418525464 Update current leader information in metrics and dashboard 2019-03-11 17:43:59 -07:00
8bba11367e Provide drone's host address while setting up staking account 2019-03-11 17:11:34 -07:00
9eb7e63819 Add staking rewards design proposal for delegation (#3148)
* proposal for single vote many delegators
2019-03-11 16:35:42 -07:00
092501039c Cargo.lock 2019-03-11 16:27:22 -07:00
6899bd7099 0.13.0 2019-03-11 16:21:19 -07:00
5a0416b925 Keep stable dashboard on stable channel at all times 2019-03-11 16:19:16 -07:00
ba2cdd0bf6 Move testnet/testnet-perf to the stable channel 2019-03-11 16:14:16 -07:00
366 changed files with 29962 additions and 18691 deletions


@ -1,10 +1,12 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
"CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
"INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
"INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
"INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
"CODECOV_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:rHBSqXK7uSnveA4qwUxARZjTNZcA0hXU:ko8lLGwPECpVm19znWBRxKEpMF7xpTHBCEzVOxRar2wDThw4lNDAKqTS61vtkJLtdkHtug==]",
"CRATES_IO_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:NzN6y0ooXJBYvxB589khepthSxhKFkLB:ZTTFZh2A/kB2SAgjJJAMbwAfanRlzxOCNMVcA2MXBCpQHJeeZGULg+0MLACYswfS]",
"GITHUB_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:iy0Fnxeo0aslTCvgXc5Ddj2ly6ZsQ8gK:GNOOj/kZUJ2rYKxTbLyVKtajWNoGQ3PcChwfEB4HdN18qDHlB96Z7gx01Pcf0qeIHODOWRtxlH4=]",
"INFLUX_DATABASE": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:Ly/TpIRF0oCxmiBWv225S3mX8s6pfQR+:+tXGB2c9rRCVDcgNO1IDOo89]",
"INFLUX_PASSWORD": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:ycrq1uQLoSfI932czD+krUOaJeLWpeq6:2iS7ukp/C7wVD3IT0GvQVcwccWGyLr4UocStF/XiDi0OB/N3YKIKN8SQU4ob1b6StAPZ/XOHmag=]",
"INFLUX_USERNAME": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:35hBKofakZ4Db/u0TOW53RXoNWzJTIcl:HWREcMTrgZ8DGB0ZupgSzNWr/tVyE06P]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:kRz8CyJYKAg/AiwgLrcRNDJAmlRX2zvX:uV1XV6y2Fb+dN4Z9BIMPBRiNS3n+NL8GlJXyu1i7meIsph1DzfLg4Thcp5Mj9nUsFNLgqQgjnsa5C4XNY/h5AgMSzRrJxVj7RhVTRmDJ5/Vjq6v7wCMRfBOvF3rITsV4zTwWSV8yafFmS+ZQ+QJTRgtYsuoYAUNZ06IEebfDHcuNwws72hEGoD9w43hOLSpyEOmXbtZ9h1lIRxrgsrhYDpBlU5LkhDeTXAX5M5dwYxyquJFRwd5quGDV5DYsCh9bAkbjAyjWYymVJ78U9YJIQHT9izzQqTDlMQN49EbLo7MDIaC7O7HVtb7unDJs+DRejbHacoyWVulqVVwu3GRiZezu8zdjwzGHphMMxOtKQaidnqYgflNp/O01I8wZRgR1alsGcmIhEhI8YV/IvQ==]"
}
}


@ -46,10 +46,17 @@ and longer descriptions detailing what problem it solves and how it solves it.
Draft Pull Requests
---
If you want early feedback on your PR, use GitHub's "Draft Pull Request" mechanism. Draft
PRs are a convenient way to collaborate with the Solana maintainers without triggering
notifications as you make changes. When you feel your PR is ready for a broader audience,
you can transition your draft PR to a standard PR with the click of a button.
If you want early feedback on your PR, use GitHub's "Draft Pull Request"
mechanism. Draft PRs are a convenient way to collaborate with the Solana
maintainers without triggering notifications as you make changes. When you feel
your PR is ready for a broader audience, you can transition your draft PR to a
standard PR with the click of a button.
Do not add reviewers to draft PRs. GitHub doesn't automatically clear approvals
when you click "Ready for Review", so a review that meant "I approve of the
direction" suddenly has the appearance of "I approve of these changes." Instead,
add a comment that mentions the usernames that you would like a review from. Ask
explicitly what you would like feedback on.
Rust coding conventions
---
@ -89,24 +96,23 @@ understood. Avoid introducing new 3-letter terms, which can be confused with 3-l
[Terms currently in use](book/src/terminology.md)
Proposing architectural changes
Design Proposals
---
Solana's architecture is described by a book generated from markdown files in
the `book/src/` directory, maintained by an *editor* (currently @garious). To
change the architecture, you'll need to at least propose a change to the content
under the [Proposed
Changes](https://solana-labs.github.io/book-edge/proposals.html) chapter. Here's
the full process:
add a design proposal, you'll need to at least propose a change to the content
under the [Accepted Design
Proposals](https://solana-labs.github.io/book-edge/proposals.html) chapter.
Here's the full process:
1. Propose a change to the architecture by creating a PR that adds a
markdown document to the directory `book/src/` and references it from the
[table of contents](book/src/SUMMARY.md). Add the editor and any relevant
*maintainers* to the PR review.
1. Propose a design by creating a PR that adds a markdown document to the
directory `book/src/` and references it from the [table of
contents](book/src/SUMMARY.md). Add any relevant *maintainers* to the PR review.
2. The PR being merged indicates your proposed change was accepted and that the
editor and maintainers support your plan of attack.
maintainers support your plan of attack.
3. Submit PRs that implement the proposal. When the implementation reveals the
need for tweaks to the architecture, be sure to update the proposal and have
need for tweaks to the proposal, be sure to update the proposal and have
that change reviewed by the same people as in step 1.
4. Once the implementation is complete, the editor will then work to integrate
the document into the book.
4. Once the implementation is complete, submit a PR that moves the link from
the Accepted Proposals to the Implemented Proposals section.

Cargo.lock (generated)

File diff suppressed because it is too large


@ -1,86 +1,36 @@
[package]
name = "solana-workspace"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.0"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"
edition = "2018"
[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
chacha = ["solana/chacha"]
cuda = ["solana/cuda"]
erasure = ["solana/erasure"]
[dev-dependencies]
bincode = "1.1.2"
bs58 = "0.2.0"
hashbrown = "0.1.8"
log = "0.4.2"
rand = "0.6.5"
rayon = "1.0.0"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana = { path = "core", version = "0.12.0" }
solana-logger = { path = "logger", version = "0.12.0" }
solana-netutil = { path = "netutil", version = "0.12.0" }
solana-runtime = { path = "runtime", version = "0.12.0" }
solana-sdk = { path = "sdk", version = "0.12.0" }
sys-info = "0.5.6"
[[bench]]
name = "banking_stage"
[[bench]]
name = "blocktree"
[[bench]]
name = "ledger"
[[bench]]
name = "gen_keys"
[[bench]]
name = "sigverify"
[[bench]]
required-features = ["chacha"]
name = "chacha"
[workspace]
members = [
".",
"bench-streamer",
"bench-tps",
"core",
"drone",
"fullnode",
"genesis",
"gossip",
"install",
"keygen",
"kvstore",
"ledger-tool",
"logger",
"metrics",
"programs/bpf",
"programs/bpf_loader",
"programs/budget",
"programs/budget_api",
"programs/token",
"programs/budget_program",
"programs/config_api",
"programs/config_program",
"programs/exchange_api",
"programs/exchange_program",
"programs/token_api",
"programs/failure",
"programs/noop",
"programs/rewards",
"programs/rewards_api",
"programs/storage",
"programs/token_program",
"programs/failure_program",
"programs/noop_program",
"programs/stake_api",
"programs/stake_program",
"programs/storage_api",
"programs/system",
"programs/vote",
"programs/storage_program",
"programs/vote_api",
"programs/vote_program",
"replicator",
"sdk",
"upload-perf",


@ -44,7 +44,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```
If your rustc version is lower than 1.31.0, please update it:
If your rustc version is lower than 1.34.0, please update it:
```bash
$ rustup update
@ -83,12 +83,6 @@ Run the test suite:
$ cargo test --all
```
To emulate all the tests that will run on a Pull Request, run:
```bash
$ ./ci/run-local.sh
```
Local Testnet
---
@ -131,6 +125,47 @@ can run your own testnet using the scripts in the `net/` directory.
Edit `ci/testnet-manager.sh`
## Metrics Server Maintenance
Sometimes the dashboard becomes unresponsive. This happens due to a glitch in the metrics server.
The current solution is to reset the metrics server. Use the following steps.
1. The server is hosted in a GCP VM instance. Check if the VM instance is down by trying to SSH
into it from the GCP console. The name of the VM is ```metrics-solana-com```.
2. If the VM is inaccessible, reset it from the GCP console.
3. Once the VM is up (or was already up), the metrics services can be restarted from build automation.
1. Navigate to https://buildkite.com/solana-labs/metrics-dot-solana-dot-com in your web browser
2. Click on ```New Build```
3. This will show a pop-up dialog. Click on the ```options``` drop-down.
4. Type ```FORCE_START=true``` into the ```Environment Variables``` text box.
5. Click ```Create Build```
6. This will restart the metrics services, and the dashboards should be accessible afterwards.
## Debugging Testnet
Testnet may exhibit different symptoms of failure. Primary statistics to check are:
1. Rise in Confirmation Time
2. Nodes are not voting
3. Panics and OOM notifications
Check the following if there are any signs of failure.
1. Did the testnet deployment fail?
1. View buildkite logs for the last deployment: https://buildkite.com/solana-labs/testnet-management
2. Use the relevant branch
3. If the deployment failed, look at the build logs. The build artifacts for each remote node are uploaded.
It's a good first step to triage from these logs.
2. You may have to log into a remote node if the deployment succeeded but something failed during runtime.
1. Get the private key for the testnet deployment from the ```metrics-solana-com``` GCP instance.
2. SSH into ```metrics-solana-com``` using the GCP console and do the following.
```bash
sudo bash
cd ~buildkite-agent/.ssh
ls
```
3. Copy the relevant private key to your local machine
4. Find the public IP address of the AWS instance for the remote node using the AWS console
5. ```ssh -i <private key file> ubuntu@<ip address of remote node>```
6. The logs are in the ```~solana/solana``` folder
Benchmarking
---


@ -63,20 +63,25 @@ There are three release channels that map to branches as follows:
### Changing channels
When cutting a new channel branch these pre-steps are required:
#### Create the new branch
1. Pick your branch point for release on master.
1. Create the branch. The name should be "v" + the first 2 "version" fields
from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies
the next branch name is "v0.9".
1. Push the new branch to the solana repository
1. Update Cargo.toml on master to the next semantic version (e.g. 0.9.0 -> 0.10.0)
by running `./scripts/increment-cargo-version.sh`, then rebuild with a
`cargo build --all` to cause a refresh of `Cargo.lock`.
1. Note the Cargo.toml in the repo root directory does not contain a version. Look at any other Cargo.toml file.
1. Create a new branch and push this branch to the solana repository.
1. `git checkout -b <branchname>`
1. `git push -u origin <branchname>`
#### Update master with the next version
1. After the new branch has been created and pushed, update Cargo.toml on **master** to the next semantic version (e.g. 0.9.0 -> 0.10.0)
by running `./scripts/increment-cargo-version.sh`, then rebuild with
`cargo build` to cause a refresh of `Cargo.lock`.
1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
master branch
At this point, ci/channel-info.sh should show your freshly cut release branch as
At this point, `ci/channel-info.sh` should show your freshly cut release branch as
"BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
### Updating channels (i.e. "making a release")
@ -86,7 +91,8 @@ We use [github's Releases UI](https://github.com/solana-labs/solana/releases) fo
1. Go [there ;)](https://github.com/solana-labs/solana/releases).
1. Click "Draft new release". The release tag must exactly match the `version`
field in `/Cargo.toml` prefixed by `v` (i.e., `<branchname>.X`).
1. If the first major release on the branch (e.g. v0.8.0), paste in [this
1. If the Cargo.toml version field is **0.12.3**, then the release tag must be **v0.12.3**
1. If this is the first release on the branch (e.g. v0.13.**0**), paste in [this
template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md)
and fill it in.
1. Test the release by generating a tag using semver's rules. First try at a
@ -97,9 +103,9 @@ We use [github's Releases UI](https://github.com/solana-labs/solana/releases) fo
1. After testnet deployment, verify that testnets are running correct software.
http://metrics.solana.com should show testnet running on a hash from your
newly created branch.
1. Once the release has been made, update Cargo.toml on release to the next
1. Once the release has been made, update Cargo.toml on the release branch to the next
semantic version (e.g. 0.9.0 -> 0.9.1) by running
`./scripts/increment-cargo-version.sh patch`, then rebuild with a `cargo
build --all` to cause a refresh of `Cargo.lock`.
`./scripts/increment-cargo-version.sh patch`, then rebuild with `cargo
build` to cause a refresh of `Cargo.lock`.
1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
release branch
release branch.


@ -2,16 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.12.0"
version = "0.13.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
solana = { path = "../core", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.0" }
solana-netutil = { path = "../netutil", version = "0.12.0" }
clap = "2.33.0"
solana = { path = "../core", version = "0.13.0" }
solana-logger = { path = "../logger", version = "0.13.0" }
solana-netutil = { path = "../netutil", version = "0.13.0" }
[features]
cuda = ["solana/cuda"]
erasure = []


@ -1,4 +1,4 @@
use clap::{App, Arg};
use clap::{crate_description, crate_name, crate_version, App, Arg};
use solana::packet::{Packet, SharedPackets, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
@ -51,7 +51,9 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> Join
fn main() -> Result<()> {
let mut num_sockets = 1usize;
let matches = App::new("solana-bench-streamer")
let matches = App::new(crate_name!())
.about(crate_description!())
.version(crate_version!())
.arg(
Arg::with_name("num-recv-sockets")
.long("num-recv-sockets")


@ -2,20 +2,23 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.12.0"
version = "0.13.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
clap = "2.33.0"
rayon = "1.0.3"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" }
solana-drone = { path = "../drone", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.0" }
solana-metrics = { path = "../metrics", version = "0.12.0" }
solana-sdk = { path = "../sdk", version = "0.12.0" }
solana = { path = "../core", version = "0.13.0" }
solana-client = { path = "../client", version = "0.13.0" }
solana-drone = { path = "../drone", version = "0.13.0" }
solana-logger = { path = "../logger", version = "0.13.0" }
solana-metrics = { path = "../metrics", version = "0.13.0" }
solana-netutil = { path = "../netutil", version = "0.13.0" }
solana-sdk = { path = "../sdk", version = "0.13.0" }
[features]
cuda = ["solana/cuda"]
erasure = []


@ -1,15 +1,21 @@
use solana_metrics;
use crate::cli::Config;
use rayon::prelude::*;
use solana::client::mk_client;
use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::contact_info::ContactInfo;
use solana::thin_client::ThinClient;
use solana::gen_keys::GenKeys;
use solana::gossip_service::discover_nodes;
use solana_client::thin_client::create_client;
use solana_client::thin_client::ThinClient;
use solana_drone::drone::request_airdrop_transaction;
use solana_metrics::influxdb;
use solana_sdk::client::{AsyncClient, SyncClient};
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::timestamp;
use solana_sdk::timing::{duration_as_ms, duration_as_s};
use solana_sdk::transaction::Transaction;
@ -20,6 +26,7 @@ use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
@ -34,7 +41,201 @@ pub const MAX_SPENDS_PER_TX: usize = 4;
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
pub fn metrics_submit_lamport_balance(lamport_balance: u64) {
pub fn do_bench_tps(config: Config) {
let Config {
network_addr: network,
drone_addr,
id,
threads,
thread_batch_sleep_ms,
num_nodes,
duration,
tx_count,
sustained,
} = config;
let nodes = discover_nodes(&network, num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
if nodes.len() < num_nodes {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
num_nodes
);
exit(1);
}
let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not?
let client = create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut barrier_client =
create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut seed = [0u8; 32];
seed.copy_from_slice(&id.public_key_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
println!("Creating {} keypairs...", tx_count * 2);
let mut total_keys = 0;
let mut target = tx_count * 2;
while target > 0 {
total_keys += target;
target /= MAX_SPENDS_PER_TX;
}
let gen_keypairs = rnd.gen_n_keypairs(total_keys as u64);
let barrier_source_keypair = Keypair::new();
let barrier_dest_id = Pubkey::new_rand();
println!("Get lamports...");
let num_lamports_per_account = 20;
// Sample the first keypair, see if it has lamports, if so then resume
// to avoid lamport loss
let keypair0_balance = client
.poll_get_balance(&gen_keypairs.last().unwrap().pubkey())
.unwrap_or(0);
if num_lamports_per_account > keypair0_balance {
let extra = num_lamports_per_account - keypair0_balance;
let total = extra * (gen_keypairs.len() as u64);
airdrop_lamports(&client, &drone_addr, &id, total);
println!("adding more lamports {}", extra);
fund_keys(&client, &id, &gen_keypairs, extra);
}
let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
airdrop_lamports(&barrier_client, &drone_addr, &barrier_source_keypair, 1);
println!("Get last ID...");
let mut blockhash = client.get_recent_blockhash().unwrap();
println!("Got last ID {:?}", blockhash);
let first_tx_count = client.get_transaction_count().expect("transaction count");
println!("Initial transaction count {}", first_tx_count);
let exit_signal = Arc::new(AtomicBool::new(false));
// Setup a thread per validator to sample every period
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
println!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = nodes
.into_iter()
.map(|v| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
})
.unwrap()
})
.collect();
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();
let shared_txs = shared_txs.clone();
let cluster_entrypoint = cluster_entrypoint.clone();
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
let total_tx_sent_count = total_tx_sent_count.clone();
Builder::new()
.name("solana-client-sender".to_string())
.spawn(move || {
do_tx_transfers(
&exit_signal,
&shared_txs,
&cluster_entrypoint,
&shared_tx_active_thread_count,
&total_tx_sent_count,
thread_batch_sleep_ms,
);
})
.unwrap()
})
.collect();
// generate and send transactions for the specified duration
let start = Instant::now();
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
while start.elapsed() < duration {
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
generate_txs(
&shared_txs,
&keypairs[..len],
&keypairs[len..],
threads,
reclaim_lamports_back_to_source_account,
&cluster_entrypoint,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(100));
}
}
// It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
// transactions sent by `generate_txs()` so instead send and confirm a single transaction
// to validate the network is still functional.
send_barrier_transaction(
&mut barrier_client,
&mut blockhash,
&barrier_source_keypair,
&barrier_dest_id,
);
i += 1;
if should_switch_directions(num_lamports_per_account, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
// Stop the sampling threads so it will collect the stats
exit_signal.store(true, Ordering::Relaxed);
println!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
// join the tx send threads
println!("Waiting for transmit threads...");
for t in s_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
compute_and_report_stats(
&maxes,
sample_period,
&start.elapsed(),
total_tx_sent_count.load(Ordering::Relaxed),
);
}
fn metrics_submit_lamport_balance(lamport_balance: u64) {
println!("Token balance: {}", lamport_balance);
solana_metrics::submit(
influxdb::Point::new("bench-tps")
@ -44,23 +245,23 @@ pub fn metrics_submit_lamport_balance(lamport_balance: u64) {
);
}
pub fn sample_tx_count(
fn sample_tx_count(
exit_signal: &Arc<AtomicBool>,
maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
first_tx_count: u64,
v: &ContactInfo,
sample_period: u64,
) {
let mut client = mk_client(&v);
let client = create_client(v.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut initial_tx_count = client.get_transaction_count().expect("transaction count");
let mut max_tps = 0.0;
let mut total;
let log_prefix = format!("{:21}:", v.tpu.to_string());
loop {
let tx_count = client.transaction_count();
let tx_count = client.get_transaction_count().expect("transaction count");
assert!(
tx_count >= initial_tx_count,
"expected tx_count({}) >= initial_tx_count({})",
@ -101,7 +302,7 @@ pub fn sample_tx_count(
}
/// Send loopback payment of 0 lamports and confirm the network processed it
pub fn send_barrier_transaction(
fn send_barrier_transaction(
barrier_client: &mut ThinClient,
blockhash: &mut Hash,
source_keypair: &Keypair,
@ -118,9 +319,12 @@ pub fn send_barrier_transaction(
);
}
*blockhash = barrier_client.get_recent_blockhash();
*blockhash = barrier_client.get_recent_blockhash().unwrap();
let transaction =
system_transaction::create_user_account(&source_keypair, dest_id, 0, *blockhash, 0);
let signature = barrier_client
.transfer(0, &source_keypair, dest_id, blockhash)
.async_send_transaction(transaction)
.expect("Unable to send barrier transaction");
let confirmatiom = barrier_client.poll_for_signature(&signature);
@ -160,7 +364,7 @@ pub fn send_barrier_transaction(
exit(1);
}
let new_blockhash = barrier_client.get_recent_blockhash();
let new_blockhash = barrier_client.get_recent_blockhash().unwrap();
if new_blockhash == *blockhash {
if poll_count > 0 && poll_count % 8 == 0 {
println!("blockhash is not advancing, still at {:?}", *blockhash);
@ -173,7 +377,7 @@ pub fn send_barrier_transaction(
}
}
pub fn generate_txs(
fn generate_txs(
shared_txs: &SharedTransactions,
source: &[Keypair],
dest: &[Keypair],
@ -181,8 +385,8 @@ pub fn generate_txs(
reclaim: bool,
contact_info: &ContactInfo,
) {
let mut client = mk_client(contact_info);
let blockhash = client.get_recent_blockhash();
let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
let blockhash = client.get_recent_blockhash().unwrap();
let tx_count = source.len();
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
let signing_start = Instant::now();
@ -196,7 +400,7 @@ pub fn generate_txs(
.par_iter()
.map(|(id, keypair)| {
(
SystemTransaction::new_account(id, &keypair.pubkey(), 1, blockhash, 0),
system_transaction::create_user_account(id, &keypair.pubkey(), 1, blockhash, 0),
timestamp(),
)
})
@ -233,7 +437,7 @@ pub fn generate_txs(
}
}
pub fn do_tx_transfers(
fn do_tx_transfers(
exit_signal: &Arc<AtomicBool>,
shared_txs: &SharedTransactions,
contact_info: &ContactInfo,
@ -241,7 +445,7 @@ pub fn do_tx_transfers(
total_tx_sent_count: &Arc<AtomicUsize>,
thread_batch_sleep_ms: usize,
) {
let client = mk_client(&contact_info);
let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
loop {
if thread_batch_sleep_ms > 0 {
sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
@ -265,7 +469,7 @@ pub fn do_tx_transfers(
if now > tx.1 && now - tx.1 > 1000 * 30 {
continue;
}
client.transfer_signed(&tx.0).unwrap();
client.async_send_transaction(tx.0).unwrap();
}
shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed);
@ -291,8 +495,8 @@ pub fn do_tx_transfers(
}
}
pub fn verify_funding_transfer(client: &mut ThinClient, tx: &Transaction, amount: u64) -> bool {
for a in &tx.account_keys[1..] {
fn verify_funding_transfer(client: &ThinClient, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
if client.get_balance(a).unwrap_or(0) >= amount {
return true;
}
@ -304,7 +508,7 @@ pub fn verify_funding_transfer(client: &mut ThinClient, tx: &Transaction, amount
/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
/// on every iteration. This allows us to replay the transfers because the source is either empty,
/// or full
pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], lamports: u64) {
fn fund_keys(client: &ThinClient, source: &Keypair, dests: &[Keypair], lamports: u64) {
let total = lamports * dests.len() as u64;
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
@ -348,7 +552,10 @@ pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], l
.map(|(k, m)| {
(
k.clone(),
SystemTransaction::new_move_many(k, &m, Hash::default(), 0),
Transaction::new_unsigned_instructions(system_instruction::transfer_many(
&k.pubkey(),
&m,
)),
)
})
.collect();
@ -358,7 +565,7 @@ pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], l
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.instructions.len());
.fold(0, |len, (_, tx)| len + tx.message().instructions.len());
println!(
"{} {} to {} in {} txs",
@ -372,7 +579,7 @@ pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], l
to_fund_txs.len(),
);
let blockhash = client.get_recent_blockhash();
let blockhash = client.get_recent_blockhash().unwrap();
// re-sign retained to_fund_txes with updated blockhash
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
@ -380,13 +587,19 @@ pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], l
});
to_fund_txs.iter().for_each(|(_, tx)| {
client.transfer_signed(&tx).expect("transfer");
client.async_send_transaction(tx.clone()).expect("transfer");
});
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
for _ in 0..10 {
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
if to_fund_txs.is_empty() {
break;
}
sleep(Duration::from_millis(100));
}
tries += 1;
}
@ -397,12 +610,7 @@ pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], l
}
}
pub fn airdrop_lamports(
client: &mut ThinClient,
drone_addr: &SocketAddr,
id: &Keypair,
tx_count: u64,
) {
fn airdrop_lamports(client: &ThinClient, drone_addr: &SocketAddr, id: &Keypair, tx_count: u64) {
let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(starting_balance);
println!("starting balance {}", starting_balance);
@ -416,10 +624,10 @@ pub fn airdrop_lamports(
id.pubkey(),
);
let blockhash = client.get_recent_blockhash();
let blockhash = client.get_recent_blockhash().unwrap();
match request_airdrop_transaction(&drone_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let signature = client.transfer_signed(&transaction).unwrap();
let signature = client.async_send_transaction(transaction).unwrap();
client.poll_for_signature(&signature).unwrap();
}
Err(err) => {
@ -449,7 +657,7 @@ pub fn airdrop_lamports(
}
}
pub fn compute_and_report_stats(
fn compute_and_report_stats(
maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
sample_period: u64,
tx_send_elapsed: &Duration,
@ -516,13 +724,18 @@ pub fn compute_and_report_stats(
// First transfer 3/4 of the lamports to the dest accounts
// then ping-pong 1/4 of the lamports back to the other account
// this leaves 1/4 lamport buffer in each account
pub fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}
#[cfg(test)]
mod tests {
use super::*;
use solana::fullnode::FullnodeConfig;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana_drone::drone::run_local_drone;
use std::sync::mpsc::channel;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
@ -537,4 +750,33 @@ mod tests {
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
}
#[test]
#[ignore]
fn test_bench_tps() {
let fullnode_config = FullnodeConfig::default();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 2_000_000,
fullnode_config,
..ClusterConfig::default()
});
let drone_keypair = Keypair::new();
cluster.transfer(&cluster.funding_keypair, &drone_keypair.pubkey(), 1_000_000);
let (addr_sender, addr_receiver) = channel();
run_local_drone(drone_keypair, addr_sender, None);
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let mut cfg = Config::default();
cfg.network_addr = cluster.entry_point_info.gossip;
cfg.drone_addr = drone_addr;
cfg.tx_count = 100;
cfg.duration = Duration::from_secs(5);
cfg.num_nodes = NUM_NODES;
do_bench_tps(cfg);
}
}


@ -2,7 +2,7 @@ use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;
use clap::{crate_version, App, Arg, ArgMatches};
use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
use solana_drone::drone::DRONE_PORT;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
@ -17,8 +17,6 @@ pub struct Config {
pub tx_count: usize,
pub thread_batch_sleep_ms: usize,
pub sustained: bool,
pub reject_extra_nodes: bool,
pub converge_only: bool,
}
impl Default for Config {
@ -33,15 +31,13 @@ impl Default for Config {
tx_count: 500_000,
thread_batch_sleep_ms: 0,
sustained: false,
reject_extra_nodes: false,
converge_only: false,
}
}
}
/// Defines and builds the CLI args for a run of the benchmark
pub fn build_args<'a, 'b>() -> App<'a, 'b> {
App::new("solana-bench-tps")
App::new(crate_name!()).about(crate_description!())
.version(crate_version!())
.arg(
Arg::with_name("network")
@ -75,11 +71,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
.takes_value(true)
.help("Wait for NUM nodes to converge"),
)
.arg(
Arg::with_name("reject-extra-nodes")
.long("reject-extra-nodes")
.help("Require exactly `num-nodes` on convergence. Appropriate only for internal networks"),
)
.arg(
Arg::with_name("threads")
.short("t")
@ -95,11 +86,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
.takes_value(true)
.help("Seconds to run benchmark, then exit; default is forever"),
)
.arg(
Arg::with_name("converge-only")
.long("converge-only")
.help("Exit immediately after converging"),
)
.arg(
Arg::with_name("sustained")
.long("sustained")
@ -131,14 +117,14 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
let mut args = Config::default();
if let Some(addr) = matches.value_of("network") {
args.network_addr = addr.parse().unwrap_or_else(|e| {
eprintln!("failed to parse network: {}", e);
args.network_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse network address: {}", e);
exit(1)
});
}
if let Some(addr) = matches.value_of("drone") {
args.drone_addr = addr.parse().unwrap_or_else(|e| {
args.drone_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse drone address: {}", e);
exit(1)
});
@ -176,8 +162,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
}
args.sustained = matches.is_present("sustained");
args.converge_only = matches.is_present("converge-only");
args.reject_extra_nodes = matches.is_present("reject-extra-nodes");
args
}


@ -1,20 +1,7 @@
mod bench;
mod cli;
use crate::bench::*;
use solana::client::mk_client;
use solana::gen_keys::GenKeys;
use solana::gossip_service::discover;
use solana_metrics;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::VecDeque;
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
use crate::bench::do_bench_tps;
fn main() {
solana_logger::setup();
@ -24,226 +11,5 @@ fn main() {
let cfg = cli::extract_args(&matches);
let cli::Config {
network_addr: network,
drone_addr,
id,
threads,
thread_batch_sleep_ms,
num_nodes,
duration,
tx_count,
sustained,
reject_extra_nodes,
converge_only,
} = cfg;
let nodes = discover(&network, num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
if nodes.len() < num_nodes {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
num_nodes
);
exit(1);
}
if reject_extra_nodes && nodes.len() > num_nodes {
eprintln!(
"Error: Extra nodes discovered. Expecting exactly {}",
num_nodes
);
exit(1);
}
if converge_only {
return;
}
let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not?
let mut client = mk_client(&cluster_entrypoint);
let mut barrier_client = mk_client(&cluster_entrypoint);
let mut seed = [0u8; 32];
seed.copy_from_slice(&id.public_key_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
println!("Creating {} keypairs...", tx_count * 2);
let mut total_keys = 0;
let mut target = tx_count * 2;
while target > 0 {
total_keys += target;
target /= MAX_SPENDS_PER_TX;
}
let gen_keypairs = rnd.gen_n_keypairs(total_keys as u64);
let barrier_source_keypair = Keypair::new();
let barrier_dest_id = Keypair::new().pubkey();
println!("Get lamports...");
let num_lamports_per_account = 20;
// Sample the first keypair, see if it has lamports, if so then resume
// to avoid lamport loss
let keypair0_balance = client
.poll_get_balance(&gen_keypairs.last().unwrap().pubkey())
.unwrap_or(0);
if num_lamports_per_account > keypair0_balance {
let extra = num_lamports_per_account - keypair0_balance;
let total = extra * (gen_keypairs.len() as u64);
airdrop_lamports(&mut client, &drone_addr, &id, total);
println!("adding more lamports {}", extra);
fund_keys(&mut client, &id, &gen_keypairs, extra);
}
let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
airdrop_lamports(&mut barrier_client, &drone_addr, &barrier_source_keypair, 1);
println!("Get last ID...");
let mut blockhash = client.get_recent_blockhash();
println!("Got last ID {:?}", blockhash);
let first_tx_count = client.transaction_count();
println!("Initial transaction count {}", first_tx_count);
let exit_signal = Arc::new(AtomicBool::new(false));
// Setup a thread per validator to sample every period
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
println!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = nodes
.into_iter()
.map(|v| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
})
.unwrap()
})
.collect();
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();
let shared_txs = shared_txs.clone();
let cluster_entrypoint = cluster_entrypoint.clone();
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
let total_tx_sent_count = total_tx_sent_count.clone();
Builder::new()
.name("solana-client-sender".to_string())
.spawn(move || {
do_tx_transfers(
&exit_signal,
&shared_txs,
&cluster_entrypoint,
&shared_tx_active_thread_count,
&total_tx_sent_count,
thread_batch_sleep_ms,
);
})
.unwrap()
})
.collect();
// generate and send transactions for the specified duration
let start = Instant::now();
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
while start.elapsed() < duration {
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
generate_txs(
&shared_txs,
&keypairs[..len],
&keypairs[len..],
threads,
reclaim_lamports_back_to_source_account,
&cluster_entrypoint,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(100));
}
}
// It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
// transactions sent by `generate_txs()` so instead send and confirm a single transaction
// to validate the network is still functional.
send_barrier_transaction(
&mut barrier_client,
&mut blockhash,
&barrier_source_keypair,
&barrier_dest_id,
);
i += 1;
if should_switch_directions(num_lamports_per_account, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
// Stop the sampling threads so it will collect the stats
exit_signal.store(true, Ordering::Relaxed);
println!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
// join the tx send threads
println!("Waiting for transmit threads...");
for t in s_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
compute_and_report_stats(
&maxes,
sample_period,
&start.elapsed(),
total_tx_sent_count.load(Ordering::Relaxed),
);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
}
do_bench_tps(cfg);
}


@ -1,248 +0,0 @@
#![feature(test)]
extern crate rand;
extern crate test;
use bincode::{deserialize, serialize_into, serialized_size};
use rand::{thread_rng, Rng};
use solana_runtime::append_vec::{
deserialize_account, get_serialized_size, serialize_account, AppendVec,
};
use solana_sdk::account::Account;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::env;
use std::io::Cursor;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::spawn;
use test::Bencher;
const START_SIZE: u64 = 4 * 1024 * 1024;
const INC_SIZE: u64 = 1 * 1024 * 1024;
macro_rules! align_up {
($addr: expr, $align: expr) => {
($addr + ($align - 1)) & !($align - 1)
};
}
fn get_append_vec_bench_path(path: &str) -> PathBuf {
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let mut buf = PathBuf::new();
buf.push(&format!("{}/{}", out_dir, path));
buf
}
#[bench]
fn append_vec_atomic_append(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_append");
let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
bencher.iter(|| {
if vec.append(AtomicUsize::new(0)).is_none() {
assert!(vec.grow_file().is_ok());
assert!(vec.append(AtomicUsize::new(0)).is_some());
}
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn append_vec_atomic_random_access(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_ra");
let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
let size = 1_000_000;
for _ in 0..size {
if vec.append(AtomicUsize::new(0)).is_none() {
assert!(vec.grow_file().is_ok());
assert!(vec.append(AtomicUsize::new(0)).is_some());
}
}
bencher.iter(|| {
let index = thread_rng().gen_range(0, size as u64);
vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn append_vec_atomic_random_change(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_rax");
let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
let size = 1_000_000;
for k in 0..size {
if vec.append(AtomicUsize::new(k)).is_none() {
assert!(vec.grow_file().is_ok());
assert!(vec.append(AtomicUsize::new(k)).is_some());
}
}
bencher.iter(|| {
let index = thread_rng().gen_range(0, size as u64);
let atomic1 = vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
let current1 = atomic1.load(Ordering::Relaxed);
assert_eq!(current1, index as usize);
let next = current1 + 1;
let mut index = vec.append(AtomicUsize::new(next));
if index.is_none() {
assert!(vec.grow_file().is_ok());
index = vec.append(AtomicUsize::new(next));
}
let atomic2 = vec.get(index.unwrap());
let current2 = atomic2.load(Ordering::Relaxed);
assert_eq!(current2, next);
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn append_vec_atomic_random_read(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_read");
let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
let size = 1_000_000;
for _ in 0..size {
if vec.append(AtomicUsize::new(0)).is_none() {
assert!(vec.grow_file().is_ok());
assert!(vec.append(AtomicUsize::new(0)).is_some());
}
}
bencher.iter(|| {
let index = thread_rng().gen_range(0, size);
let atomic1 = vec.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
let current1 = atomic1.load(Ordering::Relaxed);
assert_eq!(current1, 0);
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn append_vec_concurrent_lock_append(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_lock_append");
let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
&path, true, START_SIZE, INC_SIZE,
)));
let vec1 = vec.clone();
let size = 1_000_000;
let count = Arc::new(AtomicUsize::new(0));
let count1 = count.clone();
spawn(move || loop {
let mut len = count.load(Ordering::Relaxed);
{
let rlock = vec1.read().unwrap();
loop {
if rlock.append(AtomicUsize::new(0)).is_none() {
break;
}
len = count.fetch_add(1, Ordering::Relaxed);
}
if len >= size {
break;
}
}
{
let mut wlock = vec1.write().unwrap();
if len >= size {
break;
}
assert!(wlock.grow_file().is_ok());
}
});
bencher.iter(|| {
let _rlock = vec.read().unwrap();
let len = count1.load(Ordering::Relaxed);
assert!(len < size * 2);
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn append_vec_concurrent_get_append(bencher: &mut Bencher) {
let path = get_append_vec_bench_path("bench_get_append");
let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
&path, true, START_SIZE, INC_SIZE,
)));
let vec1 = vec.clone();
let size = 1_000_000;
let count = Arc::new(AtomicUsize::new(0));
let count1 = count.clone();
spawn(move || loop {
let mut len = count.load(Ordering::Relaxed);
{
let rlock = vec1.read().unwrap();
loop {
if rlock.append(AtomicUsize::new(0)).is_none() {
break;
}
len = count.fetch_add(1, Ordering::Relaxed);
}
if len >= size {
break;
}
}
{
let mut wlock = vec1.write().unwrap();
if len >= size {
break;
}
assert!(wlock.grow_file().is_ok());
}
});
bencher.iter(|| {
let rlock = vec.read().unwrap();
let len = count1.load(Ordering::Relaxed);
if len > 0 {
let index = thread_rng().gen_range(0, len);
rlock.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
}
});
std::fs::remove_file(path).unwrap();
}
#[bench]
fn bench_account_serialize(bencher: &mut Bencher) {
let num: usize = 1000;
let account = Account::new(2, 100, &Keypair::new().pubkey());
let len = get_serialized_size(&account);
let ser_len = align_up!(len + std::mem::size_of::<u64>(), std::mem::size_of::<u64>());
let mut memory = vec![0; num * ser_len];
bencher.iter(|| {
for i in 0..num {
let start = i * ser_len;
serialize_account(&mut memory[start..start + ser_len], &account, len);
}
});
// make sure compiler doesn't delete the code.
let index = thread_rng().gen_range(0, num);
if memory[index] != 0 {
println!("memory: {}", memory[index]);
}
let start = index * ser_len;
let new_account = deserialize_account(&memory[start..start + ser_len], 0, num * len).unwrap();
assert_eq!(new_account, account);
}
#[bench]
fn bench_account_serialize_bincode(bencher: &mut Bencher) {
let num: usize = 1000;
let account = Account::new(2, 100, &Keypair::new().pubkey());
let len = serialized_size(&account).unwrap() as usize;
let mut memory = vec![0u8; num * len];
bencher.iter(|| {
for i in 0..num {
let start = i * len;
let cursor = Cursor::new(&mut memory[start..start + len]);
serialize_into(cursor, &account).unwrap();
}
});
// make sure compiler doesn't delete the code.
let index = thread_rng().gen_range(0, len);
if memory[index] != 0 {
println!("memory: {}", memory[index]);
}
let start = index * len;
let new_account: Account = deserialize(&memory[start..start + len]).unwrap();
assert_eq!(new_account, account);
}

book/.gitattributes (new file)

@ -0,0 +1 @@
theme/highlight.js binary


@ -25,6 +25,6 @@
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |
`-------->| TPU +---->| Broadcast +--------------->| | |
| `-----` | Service | | | `------------` |
| `-----` | Stage | | | `------------` |
| `-----------` | `------------------`
`--------------------------------------`


@ -0,0 +1,30 @@
msc {
hscale="2.2";
VoteSigner,
Validator,
Cluster,
StakerX,
StakerY;
|||;
Validator box Validator [label="boot.."];
VoteSigner <:> Validator [label="register\n\n(optional)"];
Validator => Cluster [label="VoteState::Initialize(VoteSigner)"];
StakerX => Cluster [label="StakeState::Delegate(Validator)"];
StakerY => Cluster [label="StakeState::Delegate(Validator)"];
|||;
Validator box Cluster [label="\nvalidate\n"];
Validator => VoteSigner [label="sign(vote)"];
VoteSigner >> Validator [label="signed vote"];
Validator => Cluster [label="gossip(vote)"];
...;
... ;
Validator abox Validator [label="\nmax\nlockout\n"];
|||;
StakerX => Cluster [label="StakeState::RedeemCredits()"];
StakerY => Cluster [label="StakeState::RedeemCredits()"] ;
}


@ -1,16 +1,17 @@
.-------------------------------------------.
| TPU .-------------. |
| | PoH Service | |
| `--------+----` |
| ^ | |
| | v |
| .-------. .-----------. .-+-------. | .------------.
.---------. | | Fetch | | SigVerify | | Banking | | | Broadcast |
| Clients |--->| Stage |->| Stage |->| Stage |------>| Service |
`---------` | | | | | | | | | |
| `-------` `-----------` `----+----` | `------------`
| | |
`---------------------------------|---------`
.-------------.
| PoH Service |
`--------+----`
^ |
.------------------------------|----|--------------------.
| TPU | v |
| .-------. .-----------. .-+-------. .-----------. | .------------.
.---------. | | Fetch | | SigVerify | | Banking | | Broadcast | | | Downstream |
| Clients |--->| Stage |->| Stage |->| Stage |->| Stage |---->| Validators |
`---------` | | | | | | | | | | | |
| `-------` `-----------` `----+----` `-----------` | `------------`
| | |
`---------------------------------|----------------------`
|
v
.------.


@ -34,14 +34,13 @@
- [JavaScript API](javascript-api.md)
- [solana-wallet CLI](wallet.md)
- [Proposed Architectural Changes](proposals.md)
- [Accepted Design Proposals](proposals.md)
- [Ledger Replication](ledger-replication-to-implement.md)
- [Secure Vote Signing](vote-signing-to-implement.md)
- [Staking Rewards](staking-rewards.md)
- [Fork Selection](fork-selection.md)
- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
- [Reliable Vote Transmission](reliable-vote-transmission.md)
- [Persistent Account Storage](persistent-account-storage.md)
- [Leader to Leader Transition](leader-leader-transition.md)
- [Cluster Economics](ed_overview.md)
- [Validation-client Economics](ed_validation_client_economics.md)
- [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md)
@ -53,7 +52,16 @@
- [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md)
- [Economic Sustainability](ed_economic_sustainability.md)
- [Attack Vectors](ed_attack_vectors.md)
- [Economic Design MVP](ed_mvp.md)
- [References](ed_references.md)
- [Leader-to-Validator Transition](leader-validator-transition.md)
- [Cluster Test Framework](cluster-test-framework.md)
- [Testing Programs](testing-programs.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Cluster Software Installation and Updates](installer.md)
- [Deterministic Transaction Fees](transaction-fees.md)
- [Implemented Design Proposals](implemented-proposals.md)
- [Fork Selection](fork-selection.md)
- [Leader-to-Leader Transition](leader-leader-transition.md)
- [Leader-to-Validator Transition](leader-validator-transition.md)
- [Testnet Participation](testnet-participation.md)


@ -51,10 +51,10 @@ At test start, the cluster has already been established and is fully connected.
The test can discover most of the available nodes over a few seconds.
```rust,ignore
use crate::gossip_service::discover;
use crate::gossip_service::discover_nodes;
// Discover the cluster over a few seconds.
let cluster_nodes = discover(&entry_point_info, num_nodes);
let cluster_nodes = discover_nodes(&entry_point_info, num_nodes);
```
## Cluster Configuration
@ -99,10 +99,10 @@ pub fn test_large_invalid_gossip_nodes(
funding_keypair: &Keypair,
num_nodes: usize,
) {
let cluster = discover(&entry_point_info, num_nodes);
let cluster = discover_nodes(&entry_point_info, num_nodes);
// Poison the cluster.
let mut client = mk_client(&entry_point_info);
let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
for _ in 0..(num_nodes * 100) {
client.gossip_push(
cluster_info::invalid_contact_info()
@ -112,7 +112,7 @@ pub fn test_large_invalid_gossip_nodes(
// Force refresh of the active set.
for node in &cluster {
let mut client = mk_client(&node);
let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
client.gossip_refresh_active_set();
}


@ -0,0 +1,140 @@
# Credit-Only Accounts
This design covers the handling of credit-only and credit-debit accounts in the
[runtime](runtime.md). Accounts already distinguish themselves as credit-only or
credit-debit based on the program ID specified by the transaction's instruction.
Programs must treat accounts that are not owned by them as credit-only.
To identify credit-only accounts by program id would require the account to be
fetched and loaded from disk. This operation is expensive, and while it is
occurring, the runtime would have to reject any transactions referencing the same
account.
The proposal introduces a `num_readonly_accounts` field to the transaction
structure, and removes the `program_ids` dedicated vector for program accounts.
This design doesn't change the runtime transaction processing rules.
Programs still can't write to or spend from accounts that they do not own, but
the new field allows the runtime to optimistically take the correct lock for
each account specified in the transaction before loading the accounts from storage.
Accounts selected as credit-debit by the transaction can still be treated as
credit-only by the instructions.
## Runtime handling
Credit-only accounts have the following properties:
* Can be deposited into: deposits can be implemented as a simple `atomic_add`,
as sketched below.
* Read-only access to account data: instructions that debit or modify the
credit-only account data will fail.
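As a rough illustration of the first property, a deposit into a cached
credit-only account could be a lock-free atomic add. The struct and method
names below are assumptions for the sketch, not actual runtime APIs:
```rust,ignore
use std::sync::atomic::{AtomicU64, Ordering};

/// Hypothetical cached credit-only account balance, shared by all
/// executor threads.
pub struct CachedCreditOnlyAccount {
    lamports: AtomicU64,
}

impl CachedCreditOnlyAccount {
    /// Deposits never conflict with each other, so no lock is needed:
    /// many threads may credit the same account concurrently.
    pub fn deposit(&self, lamports: u64) {
        self.lamports.fetch_add(lamports, Ordering::Relaxed);
    }
}
```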
## Account Lock Optimizations
The Accounts module keeps track of the currently locked accounts in the runtime,
separating credit-only accounts from credit-debit accounts. The credit-only
accounts can be cached in memory and shared between all the threads executing
transactions.
The current runtime can't predict whether an account is credit-only or credit-debit when
the transaction account keys are locked at the start of the transaction
processing pipeline. Accounts referenced by the transaction have not been
loaded from the disk yet.
An ideal design would cache the credit-only accounts while they are referenced by
any transaction moving through the runtime, and release the cache when the last
transaction exits the runtime.
## Credit-only accounts and read-only account data
Credit-only account data can be treated as read-only. Credit-debit
account data is treated as read-write.
## Transaction changes
To enable the possibility of caching accounts only while they are in the
runtime, the Transaction structure should be changed in the following way:
* `program_ids: Vec<Pubkey>` - This vector is removed. Program keys can be
placed at the end of the `account_keys` vector, with the `num_readonly_accounts`
count set to include the number of programs.
* `num_readonly_accounts: u8` - The number of keys from the **end** of the
transaction's `account_keys` array that are credit-only.
The following kinds of accounts may be present in a transaction:
* paying account
* RW accounts
* R accounts
* Program IDs
The paying account must be credit-debit, and program IDs must be credit-only. The
first account in the `account_keys` array is always the account that pays for
the transaction fee, therefore it cannot be credit-only. For these reasons the
credit-only accounts are all grouped together at the end of the `account_keys`
vector. Counting credit-only accounts from the end allows the default `0`
value to still be functionally correct, since a transaction will succeed with
all credit-debit accounts.
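A minimal sketch of the resulting layout follows. Only `num_readonly_accounts`
comes from this proposal; the other field names and the helper function are
illustrative assumptions:
```rust,ignore
use solana_sdk::pubkey::Pubkey;

pub struct Transaction {
    /// account_keys[0] is always the fee-paying account (credit-debit).
    /// Credit-only keys, including program ids, are grouped at the end.
    pub account_keys: Vec<Pubkey>,
    /// Number of keys, counted from the END of `account_keys`, that are
    /// credit-only. The default of 0 means all accounts are credit-debit.
    pub num_readonly_accounts: u8,
    // ... signatures, recent_blockhash, instructions, etc.
}

/// A key is credit-only iff it falls within the trailing
/// `num_readonly_accounts` entries of `account_keys`.
fn is_credit_only(tx: &Transaction, index: usize) -> bool {
    index >= tx.account_keys.len().saturating_sub(tx.num_readonly_accounts as usize)
}
```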
Since accounts can only appear once in the transaction's `account_keys` array,
an account can only be credit-only or credit-debit in a single transaction, not
both. The runtime treats a transaction as one atomic unit of execution. If any
instruction needs credit-debit access to an account, a copy needs to be made. The
write lock is held for the entire time the transaction is being processed by
the runtime.
## Starvation
Read locks for credit-only accounts can keep the runtime from executing
transactions requesting a write lock to a credit-debit account.
When a request for a write lock is made while a read lock is open, the
transaction requesting the write lock should be cached. Upon closing the read
lock, the pending transactions can be pushed through the runtime.
While a pending write transaction exists, any additional read lock requests for
that account should fail. It follows that any other write lock requests will also
fail. Currently, clients must retransmit when a transaction fails because of
a pending transaction. This approach would mimic that behavior as closely as
possible while preventing write starvation.
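A sketch of the per-account lock policy just described, under the assumption
that lock state is tracked per pubkey (the names are illustrative, not the
actual runtime types):
```rust,ignore
/// Hypothetical per-account lock state.
enum AccountLock {
    /// Held by one or more credit-only transactions; `write_pending` is
    /// set once a credit-debit transaction is queued behind them.
    Read { holders: u64, write_pending: bool },
    /// Held exclusively by one credit-debit transaction.
    Write,
}

impl AccountLock {
    /// New read locks are refused once a writer is pending, so a steady
    /// stream of credit-only transactions cannot starve the writer.
    fn try_read(&mut self) -> bool {
        match self {
            AccountLock::Read { holders, write_pending: false } => {
                *holders += 1;
                true
            }
            _ => false,
        }
    }
}
```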
## Program execution with credit-only accounts
Before handing off the accounts to program execution, the runtime can mark each
account in each instruction as a credit-only account. The credit-only accounts can
be passed as references without an extra copy. The transaction will abort on a
write to a credit-only account.
An alternative is to detect writes to credit-only accounts and fail the
transactions before commit.
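A sketch of that alternative check (a hypothetical helper, not an existing
runtime API), assuming the runtime snapshots each credit-only account before
execution:
```rust,ignore
use solana_sdk::account::Account;

/// A credit-only account may gain lamports (deposits) but must not be
/// debited, and its data must be byte-for-byte unchanged.
fn verify_credit_only(pre: &Account, post: &Account) -> bool {
    post.lamports >= pre.lamports && post.data == pre.data
}
```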
## Alternative design
This design attempts to cache a credit-only account after loading without the use
of a transaction-specified credit-only accounts list. Instead, the credit-only
accounts are held in a reference-counted table inside the runtime as the
transactions are processed.
1. Transaction accounts are locked.
a. If the account is present in the `credit-only` table, the TX does not fail.
The pending state for this TX is marked NeedReadLock.
2. Transaction accounts are loaded.
a. Transaction accounts that are credit-only increase their reference
count in the `credit-only` table (see the sketch below).
b. Transaction accounts that need a write lock and are present in the
`credit-only` table fail.
3. Transaction accounts are unlocked.
a. Decrement the `credit-only` table's reference count; remove the entry when
the count reaches 0.
b. Remove from the `lock` set if the account is not in the `credit-only`
table.
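A sketch of that reference-counted table (the type and method names are
assumptions for illustration):
```rust,ignore
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;

/// Hypothetical reference-counted `credit-only` lock table.
#[derive(Default)]
struct CreditOnlyLocks {
    refs: HashMap<Pubkey, u64>,
}

impl CreditOnlyLocks {
    /// Step 2a: bump the reference count when a credit-only account loads.
    fn lock(&mut self, key: &Pubkey) {
        *self.refs.entry(*key).or_insert(0) += 1;
    }

    /// Step 3a: decrement on unlock and drop the entry at zero.
    fn unlock(&mut self, key: &Pubkey) {
        if let Some(count) = self.refs.get_mut(key) {
            *count -= 1;
            if *count == 0 {
                self.refs.remove(key);
            }
        }
    }

    /// Step 2b: a write lock fails while the account is in the table.
    fn can_write(&self, key: &Pubkey) -> bool {
        !self.refs.contains_key(key)
    }
}
```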
The downside with this approach is that if the `lock` set mutex is released
between lock and load to allow better pipelining of transactions, a request for
a credit-only account may fail. Therefore, this approach is not suitable for
treating programs as credit-only accounts.
Holding the accounts lock mutex while fetching the account from disk would
potentially have a significant performance hit on the runtime. Fetching from
disk is expected to be slow, but can be parallelized between multiple disks.

12
book/src/ed_mvp.md Normal file

@ -0,0 +1,12 @@
## Proposed MVP of Economic Design
The preceding sections, outlined in the [Economic Design Overview](ed_overview.md), describe a long-term vision of a sustainable Solana economy. Of course, we don't expect the final implementation to perfectly match what has been described above. We intend to fully engage with network stakeholders throughout the implementation phases (i.e. pre-testnet, testnet, mainnet) to ensure the system supports, and is representative of, the various network participants' interests. The first step toward this goal, however, is outlining some desired MVP economic features to be available for early pre-testnet and testnet participants. Below is a rough sketch outlining basic economic functionality from which a more complete and functional system can be developed.
### MVP Economic Features
* Faucet to deliver testnet SOLs to validators for staking and dapp development.
* Mechanism by which validators are rewarded in proportion to their stake. Interest rate mechanism (i.e. to be determined by total % staked) to come later.
* Ability to delegate tokens to validator nodes.
* Replicators to receive a fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism (i.e. PoRep reward as a function of total ledger redundancy) to come later.
* Pooling of replicator PoRep transaction fees and weighted distribution to validators based on PoRep verification (see [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md)). It will be useful to test this protection against attacks on testnet.
* Nice-to-have: auto-delegation of replicator rewards to validator.


@ -8,7 +8,7 @@ These protocol-based rewards, to be distributed to participating validation and
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction (be it a state execution or proof-of-replication verification). A mechanism for continuous and long-term funding of the mining pool through a pre-dedicated portion of transaction fees is also discussed below.
A high-level schematic of Solana's crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics.md), [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. The [Replication-client Economics](ed_replication_client_economics.md) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics ([Storage-replication rewards](ed_rce_storage_replication_rewards.md)) along with a replicator-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md). The [Economic Sustainability](ed_economic_sustainability.md) section dives deeper into Solana's design for long-term economic sustainability and outlines the constraints and conditions for a self-sustaining economy. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
A high-level schematic of Solana's crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics.md), [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. The [Replication-client Economics](ed_replication_client_economics.md) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics ([Storage-replication rewards](ed_rce_storage_replication_rewards.md)) along with a replicator-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md). The [Economic Sustainability](ed_economic_sustainability.md) section dives deeper into Solana's design for long-term economic sustainability and outlines the constraints and conditions for a self-sustaining economy. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
<!-- ![img alt text](solana_economic_design.png) -->
<p style="text-align:center;"><img src="img/solana_economic_design.png" alt="== Solana Economic Design Diagram ==" width="800"/></p>


@ -161,7 +161,7 @@ This will dump all the threads stack traces into gdb.txt
In this example the client connects to our public testnet. To run validators on the testnet you would need to open UDP ports `8000-10000`.
```bash
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --duration 60
$ ./multinode-demo/client.sh --network testnet.solana.com:8001 --duration 60
```
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)


@ -0,0 +1,3 @@
# Implemented Design Proposals
The following design proposals are fully implemented.

213
book/src/installer.md Normal file

@ -0,0 +1,213 @@
## Cluster Software Installation and Updates
Currently users are required to build the solana cluster software themselves
from the git repository and manually update it, which is error-prone and
inconvenient.
This document proposes an easy-to-use software installer and updater that can be
used to deploy pre-built binaries for supported platforms. Users may elect to
use binaries supplied by Solana or any other party they trust. Deployment of
updates is managed using an on-chain update manifest program.
### Motivating Examples
#### Fetch and run a pre-built installer using a bootstrap curl/shell script
The easiest install method for supported platforms:
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh
```
This script checks GitHub for the latest tagged release, then downloads and runs the
`solana-install` binary from there.
If additional arguments need to be specified during the installation, the
following shell syntax is used:
```bash
$ init_args=.... # arguments for `solana-install init ...`
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh -s - ${init_args}
```
#### Fetch and run a pre-built installer from a Github release
With a well-known release URL, a pre-built binary can be obtained for supported
platforms:
```bash
$ curl -o solana-install https://github.com/solana-labs/solana/releases/download/v0.13.0/solana-install-x86_64-apple-darwin
$ chmod +x ./solana-install
$ ./solana-install --help
```
#### Build and run the installer from source
If a pre-built binary is not available for a given platform, building the
installer from source is always an option:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana/install
$ cargo run -- --help
```
#### Deploy a new update to a cluster
Given a solana release tarball (as created by `ci/publish-tarball.sh`) that has already been uploaded to a publicly accessible URL,
the following commands will deploy the update:
```bash
$ solana-keygen -o update-manifest.json # <-- only generated once, the public key is shared with users
$ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json
```
#### Run a validator node that auto updates itself
```bash
$ solana-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates
$ export PATH=~/.local/share/solana-install/bin:$PATH
$ solana-keygen ... # <-- runs the latest solana-keygen
$ solana-install run solana-fullnode ... # <-- runs a fullnode, restarting it as necessary when an update is applied
```
### On-chain Update Manifest
An update manifest is used to advertise the deployment of new release tarballs
on a solana cluster. The update manifest is stored using the `config` program,
and each update manifest account describes a logical update channel for a given
target triple (eg, `x86_64-apple-darwin`). The account public key is well-known
to both the entity deploying new updates and the users consuming those updates.
The update tarball itself is hosted elsewhere, off-chain, and can be fetched from
the specified `download_url`.
```rust,ignore
use solana_sdk::signature::Signature;
/// Information required to download and apply a given update
pub struct UpdateManifest {
pub timestamp_secs: u64, // When the release was deployed in seconds since UNIX EPOCH
pub download_url: String, // Download URL to the release tar.bz2
pub download_sha256: String, // SHA256 digest of the release tar.bz2 file
}
/// Userdata of an Update Manifest program Account.
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
pub struct SignedUpdateManifest {
pub manifest: UpdateManifest,
pub manifest_signature: Signature,
}
```
Note that the `manifest` field itself contains a corresponding signature
(`manifest_signature`) to guard against man-in-the-middle attacks between the
`solana-install` tool and the solana cluster RPC API.
To guard against rollback attacks, `solana-install` will refuse to install an
update with an older `timestamp_secs` than what is currently installed.
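A sketch of that rollback guard (a hypothetical helper, not the actual
`solana-install` code):
```rust,ignore
/// Refuse any candidate update that is not strictly newer than the
/// currently installed release.
fn check_for_rollback(installed: &UpdateManifest, candidate: &UpdateManifest) -> Result<(), String> {
    if candidate.timestamp_secs <= installed.timestamp_secs {
        return Err(format!(
            "refusing rollback: candidate deployed at {} is not newer than installed {}",
            candidate.timestamp_secs, installed.timestamp_secs,
        ));
    }
    Ok(())
}
```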
### Release Archive Contents
A release archive is expected to be a tar file compressed with
bzip2 with the following internal structure:
* `/version.yml` - a simple YAML file containing the field `"target"` - the
target triple. Any additional fields are ignored.
* `/bin/` -- directory containing available programs in the release.
`solana-install` will symlink this directory to
`~/.local/share/solana-install/bin` for use by the `PATH` environment
variable.
* `...` -- any additional files and directories are permitted
### solana-install Tool
The `solana-install` tool is used by the user to install and update their cluster software.
It manages the following files and directories in the user's home directory:
* `~/.config/solana/install/config.yml` - user configuration and information about currently installed software version
* `~/.local/share/solana/install/bin` - a symlink to the current release. eg, `~/.local/share/solana-update/<update-pubkey>-<manifest_signature>/bin`
* `~/.local/share/solana/install/releases/<download_sha256>/` - contents of a release
#### Command-line Interface
```manpage
solana-install 0.13.0
The solana cluster software installer
USAGE:
solana-install [OPTIONS] <SUBCOMMAND>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
-c, --config <PATH> Configuration file to use [default: /Users/mvines/Library/Preferences/solana/install.yml]
SUBCOMMANDS:
deploy deploys a new update
help Prints this message or the help of the given subcommand(s)
info displays information about the current installation
init initializes a new installation
run Runs a program while periodically checking and applying software updates
update checks for an update, and if available downloads and applies it
```
```manpage
solana-install-init
initializes a new installation
USAGE:
solana-install init [OPTIONS]
FLAGS:
-h, --help Prints help information
OPTIONS:
-d, --data_dir <PATH> Directory to store install data [default: /Users/mvines/Library/Application Support/solana]
-u, --url <URL> JSON RPC URL for the solana cluster [default: https://api.testnet.solana.com/]
-p, --pubkey <PUBKEY> Public key of the update manifest [default: 9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp]
```
```manpage
solana-install-info
displays information about the current installation
USAGE:
solana-install info [FLAGS]
FLAGS:
-h, --help Prints help information
-l, --local only display local information, don't check the cluster for new updates
```
```manpage
solana-install-deploy
deploys a new update
USAGE:
solana-install deploy <download_url> <update_manifest_keypair>
FLAGS:
-h, --help Prints help information
ARGS:
<download_url> URL to the solana release archive
<update_manifest_keypair> Keypair file for the update manifest (/path/to/keypair.json)
```
```manpage
solana-install-update
checks for an update, and if available downloads and applies it
USAGE:
solana-install update
FLAGS:
-h, --help Prints help information
```
```manpage
solana-install-run
Runs a program while periodically checking and applying software updates
USAGE:
solana-install run <program_name> [program_arguments]...
FLAGS:
-h, --help Prints help information
ARGS:
<program_name> program to run
<program_arguments>... arguments to supply to the program
The program will be restarted upon a successful software update
```


@ -26,6 +26,7 @@ Methods
* [getBalance](#getbalance)
* [getRecentBlockhash](#getrecentblockhash)
* [getSignatureStatus](#getsignaturestatus)
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
* [getTransactionCount](#gettransactioncount)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
@ -124,7 +125,7 @@ The result field will be a JSON object with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all
@ -134,7 +135,7 @@ The result field will be a JSON object with the following sub fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
---
@ -168,12 +169,10 @@ events.
* `string` - Signature of Transaction to confirm, as base-58 encoded string
##### Results:
* `string` - Transaction status:
* `Confirmed` - Transaction was successful
* `SignatureNotFound` - Unknown transaction
* `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
* `AccountInUse` - Another Transaction had a write lock one of the Accounts specified in this Transaction. The Transaction may succeed if retried
* `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`
* `null` - Unknown transaction
* `object` - Transaction status:
* `"Ok": null` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError <ERR> [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
##### Example:
```bash
@ -185,6 +184,27 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
```
---
### getNumBlocksSinceSignatureConfirmation
Returns the current number of blocks since the signature was confirmed.
##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string
##### Results:
* `integer` - count, as unsigned 64-bit integer
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getNumBlocksSinceSignatureConfirmation", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":8,"id":1}
```
---
### getTransactionCount
Returns the current Transaction count from the ledger
@ -254,7 +274,7 @@ After connect to the RPC PubSub websocket at `ws://<ADDRESS>/`:
---
### accountSubscribe
Subscribe to an account to receive notifications when the lamports or userdata
Subscribe to an account to receive notifications when the lamports or data
for a given account public key changes
##### Parameters:
@ -274,7 +294,7 @@ for a given account public key changes
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
---
@ -300,7 +320,7 @@ Unsubscribe from account change notifications
---
### programSubscribe
Subscribe to a program to receive notifications when the lamports or userdata
Subscribe to a program to receive notifications when the lamports or data
for a given account owned by the program changes
##### Parameters:
@ -322,7 +342,7 @@ for a given account owned by the program changes
* `string` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object (see [getAccountInfo](#getaccountinfo) for field details)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```
---


@ -0,0 +1,212 @@
# Stake Delegation and Reward
This design proposal focuses on the software architecture for the on-chain
voting and staking programs. Incentives for staking are covered in [staking
rewards](staking-rewards.md).
The current architecture requires a vote for each delegated stake from the
validator, and therefore does not scale to allow replicator clients to
automatically delegate their rewards.
The design proposes a new set of programs for voting and stake delegation. The
proposed programs allow many stake accounts to passively earn rewards with a
single validator vote, without permission or active involvement from the
validator.
## Current Design Problems
In the current design each staker creates their own VoteState, and assigns a
**delegate** in the VoteState that can submit votes. Since the validator has to
actively vote for each stake delegated to it, validators can censor stakes by
not voting for them.
The number of votes is equal to the number of stakers, and not the number of
validators. Replicator clients are expected to delegate their replication
rewards as they are earned, and therefore the number of stakes is expected to be
large compared to the number of validators in a long running cluster.
## Proposed Changes to the Current Design
The general idea is that instead of the staker, the validator will own the
VoteState program. In this proposal the VoteState program is there to track
validator votes, count validator generated credits and to provide any
additional validator specific state. The VoteState program is not aware of any
stakes delegated to it, and has no staking weight.
The rewards generated are proportional to the amount of lamports staked. In
this proposal stake state is stored as part of the StakeState program. This
program is owned by the staker only. Lamports stored in this program are the
stake. Unlike the current design, this program contains a new field to indicate
which VoteState program the stake is delegated to.
### VoteState
VoteState is the current state of all the votes the **delegate** has submitted
to the bank. VoteState contains the following state information:
* votes - The submitted votes data structure.
* credits - The total number of rewards this vote program has generated over its
lifetime.
* root\_slot - The last slot to reach the full lockout commitment necessary for
rewards.
* commission - The commission taken by this VoteState for any rewards claimed by
staker's StakeState accounts. This is the percentage ceiling of the reward.
* Account::lamports - The accumulated lamports from the commission. These do not
count as stakes.
* `authorized_vote_signer` - Only this identity is authorized to submit votes, and
this field can only be modified by this entity.
### VoteInstruction::Initialize
* `account[0]` - RW - The VoteState
`VoteState::authorized_vote_signer` is initialized to `account[0]`; other
VoteState members are defaulted.
### VoteInstruction::AuthorizeVoteSigner(Pubkey)
* `account[0]` - RW - The VoteState
`VoteState::authorized_vote_signer` is set to `Pubkey`; the instruction must be
signed by `Pubkey`.
### StakeState
A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.
### StakeState::Delegate
StakeState::Delegate is the current delegation preference of the **staker**. It
contains the following state information (both states are sketched in code below):
* Account::lamports - The staked lamports.
* `voter_id` - The pubkey of the VoteState instance the lamports are
delegated to.
* `credits_observed` - The total credits claimed over the lifetime of the
program.
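Taken together, the fields above suggest state structs roughly like the
following sketch (the field types are assumptions drawn from the prose, and the
`votes` structure is elided):
```rust,ignore
use solana_sdk::pubkey::Pubkey;

pub struct VoteState {
    pub votes: Vec<Vote>,               // submitted votes data structure
    pub credits: u64,                   // lifetime rewards generated
    pub root_slot: Option<u64>,         // last slot to reach full lockout
    pub commission: u8,                 // percentage ceiling of the reward
    pub authorized_vote_signer: Pubkey, // only identity allowed to vote
}

pub struct StakeStateDelegate {
    pub voter_id: Pubkey,      // VoteState this stake is delegated to
    pub credits_observed: u64, // credits already claimed over its lifetime
}
```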
### StakeState::MiningPool
There are two approaches to the mining pool. The bank could allow the
StakeState program to bypass the token balance check, or a program representing
the mining pool could run on the network. To avoid a single network-wide lock,
the pool can be split into several mining pools. This design focuses on using
StakeState::MiningPool accounts as the cluster-wide mining pools.
* 256 StakeState::MiningPool accounts are initialized, each holding 1/256 of the
mining pool tokens stored as `Account::lamports`.
The stakes and the MiningPool are accounts that are owned by the same `Stake`
program.
### StakeInstruction::Initialize
* `account[0]` - RW - The StakeState::Delegate instance.
`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
`StakeState::Delegate::voter_id` is initialized to `account[1]`
* `account[1]` - R - The VoteState instance.
### StakeInstruction::RedeemVoteCredits
The VoteState program and the StakeState programs maintain a lifetime counter
of total rewards generated and claimed. Therefore an explicit `Clear`
instruction is not necessary. When claiming rewards, the total lamports
deposited into the StakeState and as validator commission is proportional to
`VoteState::credits - StakeState::credits_observed`.
* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
reward.
* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
credits.
* `account[2]` - R - The VoteState instance, must be the same as
`StakeState::voter_id`
The reward is paid out for the difference between `VoteState::credits` and
`StakeState::Delegate::credits_observed`, and `credits_observed` is updated to
`VoteState::credits`. The commission is deposited into the `VoteState` token
balance, and the reward is deposited into the `StakeState::Delegate` token balance. The
reward and the commission are weighted by the `StakeState::lamports` divided by the total lamports staked.
The Staker or the owner of the Stake program sends a transaction with this
instruction to claim the reward.
Any random MiningPool can be used to redeem the credits.
```rust,ignore
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
stake_state.credits_observed = vote_state.credits;
```
`credits_to_claim` is used to compute the reward and commission, and
`StakeState::Delegate::credits_observed` is updated to the latest
`VoteState::credits` value.
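A sketch of the payout arithmetic described above (the helper name, parameters,
and the lamports-per-credit conversion are assumptions for illustration):
```rust,ignore
/// Split the claimable reward between the staker and the validator's
/// commission, weighted by this stake's share of the total stake.
fn redeem(
    credits_to_claim: u64,
    stake_lamports: u64,
    total_stake_lamports: u64,
    commission_pct: u64, // VoteState::commission, a percentage ceiling
    lamports_per_credit: u64,
) -> (u64, u64) {
    let gross = (credits_to_claim as u128
        * lamports_per_credit as u128
        * stake_lamports as u128
        / total_stake_lamports as u128) as u64;
    let commission = gross * commission_pct / 100; // to the VoteState balance
    let reward = gross - commission;               // to the StakeState::Delegate balance
    (reward, commission)
}
```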
### Collecting network fees into the MiningPool
At the end of the block, before the bank is frozen, but after it processed all
the transactions for the block, a virtual instruction is executed to collect
the transaction fees.
* A portion of the fees are deposited into the leader's account.
* A portion of the fees are deposited into the smallest StakeState::MiningPool
account.
### Benefits
* Single vote for all the stakers.
* Clearing of the credit variable is not necessary for claiming rewards.
* Each delegated stake can claim its rewards independently.
* Commission for the work is deposited when a reward is claimed by the delegated
stake.
This proposal would benefit from the `read-only` accounts proposal to allow for
many rewards to be claimed concurrently.
## Passive Delegation
Any number of instances of StakeState::Delegate programs can delegate to a single
VoteState program without an interactive action from the identity controlling
the VoteState program or submitting votes to the program.
The total stake allocated to a VoteState program can be calculated as the sum of
all the StakeState programs that have the VoteState pubkey as their
`StakeState::Delegate::voter_id`, as in the sketch below.
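A sketch of that calculation, assuming the set of stakes is available as
`(voter_id, lamports)` pairs:
```rust,ignore
use solana_sdk::pubkey::Pubkey;

/// Sum the lamports of every stake delegated to `vote_pubkey`.
fn total_stake(stakes: &[(Pubkey, u64)], vote_pubkey: &Pubkey) -> u64 {
    stakes
        .iter()
        .filter(|(voter_id, _)| voter_id == vote_pubkey)
        .map(|(_, lamports)| *lamports)
        .sum()
}
```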
## Example Callflow
<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>
## Future work
Validators may want to split the stake delegated to them amongst many validator
nodes since stake is used as weight in the network control and data planes. One
way to implement this would be for the StakeState to delegate to a pool of
validators instead of a single one.
Instead of a single `voter_id` and `credits_observed` entry in the StakeState
program, the program can be initialized with a vector of tuples.
```rust,ignore
Voter {
voter_id: Pubkey,
credits_observed: u64,
weight: u8,
}
```
* voters: Vec<Voter> - Array of VoteState accounts that this stake is delegated
to and earns voting rewards with.
A StakeState program would claim a fraction of the reward from each voter in
the `voters` array, and each voter would be delegated a fraction of the stake.


@ -40,7 +40,7 @@ retransmitted twice around the network.
4. CrdsValue for vote should look like this ``` Votes(Vec<Transaction>) ```
Each vote transaction should maintain a `wallclock` in its userdata. The merge
Each vote transaction should maintain a `wallclock` in its data. The merge
strategy for Votes will keep the last N set of votes as configured by the local
client. For push/pull the vector is traversed recursively and each Transaction
is treated as an individual CrdsValue with its own local wallclock and


@ -6,7 +6,7 @@ separating program code from the state it operates on, the runtime is able to
choreograph concurrent access. Transactions accessing only credit-only
accounts are executed in parallel whereas transactions accessing writable
accounts are serialized. The runtime interacts with the program through an
entrypoint with a well-defined interface. The userdata stored in an account is
entrypoint with a well-defined interface. The data stored in an account is
an opaque type, an array of bytes. The program has full control over its
contents.
@ -42,7 +42,7 @@ programs can be executed in parallel.
The runtime enforces the following rules:
1. Only the *owner* program may modify the contents of an account. This means
that upon assignment userdata vector is guaranteed to be zero.
that upon assignment data vector is guaranteed to be zero.
2. Total balances on all the accounts is equal before and after execution of a
transaction.
@ -59,11 +59,11 @@ accounts.
## SystemProgram Interface
The interface is best described by the `Instruction::userdata` that the user
The interface is best described by the `Instruction::data` that the user
encodes.
* `CreateAccount` - This allows the user to create an account with an allocated
userdata array and assign it to a Program.
data array and assign it to a Program.
* `Assign` - Allows the user to assign an existing account to a program.
@ -73,10 +73,10 @@ userdata array and assign it to a Program.
For blockchain to function correctly, the program code must be resilient to user
inputs. That is why in this design the program specific code is the only code
that can change the state of the userdata byte array in the Accounts that are
that can change the state of the data byte array in the Accounts that are
assigned to it. It is also the reason why `Assign` or `CreateAccount` must zero
out the userdata. Otherwise there would be no possible way for the program to
distinguish the recently assigned account userdata from a natively generated
out the data. Otherwise there would be no possible way for the program to
distinguish the recently assigned account data from a natively generated
state transition without some additional metadata from the runtime to indicate
that this memory is assigned instead of natively generated.
@ -94,12 +94,12 @@ instruction can be composed into a single transaction with the call to the
program itself.
* `CreateAccount` and `Assign` guarantee that when account is assigned to the
program, the Account's userdata is zero initialized.
program, the Account's data is zero initialized.
* Once assigned to program an Account cannot be reassigned.
* Runtime guarantees that a program's code is the only code that can modify
Account userdata that the Account is assigned to.
Account data that the Account is assigned to.
* Runtime guarantees that the program can only spend lamports that are in
accounts that are assigned to it.


@ -25,7 +25,7 @@ When Futures 0.3.0 is released, the Transact trait may look like this:
```rust,ignore
trait Transact {
async fn send_transactions(txs: &[Transaction]) -> Vec<Result<(), BankError>>;
async fn send_transactions(txs: &[Transaction]) -> Vec<Result<(), TransactionError>>;
}
```


@ -0,0 +1,136 @@
## Testnet Participation
This document describes how to participate in the beta testnet as a
validator node.
Please note some of the information and instructions described here may change
in future releases.
### Beta Testnet Overview
The beta testnet features a validator running at beta.testnet.solana.com, which
serves as the entrypoint to the cluster for your validator.
Additionally, there is a block explorer available at http://beta.testnet.solana.com/.
The beta testnet is configured to reset the ledger every 24 hours, or sooner
should an hourly automated sanity test fail.
### Machine Requirements
Since the beta testnet is not intended for stress testing of max transaction
throughput, a higher-end machine with a GPU is not necessary to participate.
However, ensure the machine used is not behind a residential NAT to avoid NAT
traversal issues. A cloud-hosted machine works best. Ensure that IP ports
8000 through 10000 are not blocked for Internet traffic.
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
MacOS or WSL users may build from source.
### Validator Setup
#### Obtaining The Software
##### Bootstrap with `solana-install`
The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 systems.
Install the latest release with a single shell command:
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | \
sh -s - --url https://api.beta.testnet.solana.com
```
Alternatively build the `solana-install` program from source and run the
following command to obtain the same result:
```bash
$ solana-install init --url https://api.beta.testnet.solana.com
```
After a successful install, `solana-install update` may be used to easily update the cluster
software to a newer version.
##### Download Prebuilt Binaries
Binaries are available for Linux x86_64 systems.
To download the binaries, navigate to https://github.com/solana-labs/solana/releases/latest, download
**solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
```bash
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### Build From Source
If you are unable to use the prebuilt binaries or prefer to build from source, navigate to:
> https://github.com/solana-labs/solana/releases/latest
Download the source code tarball (solana-*[release]*.tar.gz) from our latest release tag. Extract the code and build the binaries with:
```bash
$ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH
```
#### Confirm The Testnet Is Reachable
Before attaching a validator node, sanity check that the cluster is accessible
to your machine by running some simple wallet commands. If any of these
commands fail, please retry 5-10 minutes later to confirm the testnet is not
just restarting itself, before debugging further.
Receive an airdrop of lamports from the testnet drone:
```bash
$ solana-wallet -n beta.testnet.solana.com airdrop 123
$ solana-wallet -n beta.testnet.solana.com balance
```
Fetch the current testnet transaction count over JSON RPC:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
```
Inspect the block explorer at http://beta.testnet.solana.com/ for activity.
Run the following command to join the gossip network and view all the other nodes in the cluster:
```bash
$ solana-gossip --network beta.testnet.solana.com:8001
```
### Starting The Validator
The following command will start a new validator node.
If this is a `solana-install` installation:
```bash
$ fullnode-x.sh --public-address --no-stake --poll-for-new-genesis-block beta.testnet.solana.com:8001
```
Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates:
```bash
$ solana-install run fullnode-x.sh --public-address --no-stake --poll-for-new-genesis-block beta.testnet.solana.com:8001
```
When not using `solana-install`:
```bash
$ USE_INSTALL=1 ./multinode-demo/fullnode-x.sh --public-address --no-stake --poll-for-new-genesis-block beta.testnet.solana.com:8001
```
Then from another console, confirm the IP address of your node is now visible in
the gossip network by running:
```bash
$ solana-gossip --network beta.testnet.solana.com:8001
```
Congratulations, you're now participating in the testnet cluster!
#### Controlling local network port allocation
By default the validator will dynamically select available network ports in the
8000-10000 range, and this may be overridden with `--dynamic-port-range`. For
example, `fullnode-x.sh --dynamic-port-range 11000-11010 ...` will restrict the
validator to ports 11000-11010.
### Sharing Metrics From Your Validator
If you'd like to share metrics, perform the following steps before starting the
validator node:
```bash
export u="username obtained from the Solana maintainers"
export p="password obtained from the Solana maintainers"
export SOLANA_METRICS_CONFIG="db=testnet-beta,u=${u:?},p=${p:?}"
source scripts/configure-metrics.sh
```
Then inspect our [metrics dashboard](https://metrics.solana.com:3000/d/U9-26Cqmk/testnet-monitor-cloud?refresh=60s&orgId=2&var-hostid=All) for your contributions.


@ -0,0 +1,59 @@
# Deterministic Transaction Fees
Transactions currently include a fee field that indicates the maximum fee
a slot leader is permitted to charge to process a transaction. The cluster, on
the other hand, agrees on a minimum fee. If the network is congested, the slot
leader may prioritize the transactions offering higher fees. That means the
client won't know how much was collected until the transaction is confirmed by
the cluster and the remaining balance is checked. It smells of exactly what we
dislike about Ethereum's "gas": non-determinism.
## Implementation Status
This design is not yet implemented, but is written as though it has been. Once
implemented, delete this comment.
### Congestion-driven fees
Each validator uses *signatures per slot* (SPS) to estimate network congestion
and *SPS target* to estimate the desired processing capacity of the cluster.
The validator learns the SPS target from the genesis block, whereas it
calculates SPS from the ledger data in the previous epoch.
### Calculating fees
The client uses the JSON RPC API to query the cluster for the current fee
parameters. Those parameters are tagged with a blockhash and remain valid
until that blockhash is old enough to be rejected by the slot leader.
Before sending a transaction to the cluster, a client may submit the
transaction and fee account data to an SDK module called the *fee calculator*.
So long as the client's SDK version matches the slot leader's version, the
client is assured that its account balance will change by exactly the number of
lamports returned by the fee calculator.
### Fee Parameters
In the first implementation of this design, the only fee parameter is
`lamports_per_signature`. The more signatures the cluster needs to verify, the
higher the fee. The exact number of lamports is determined by the ratio of SPS
to the SPS target. The cluster lowers `lamports_per_signature` when SPS is
below the target and raises it when at or above the target.
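A sketch of the adjustment and the resulting deterministic fee (the step size
and bounds are assumptions; only the direction of the adjustment comes from the
text above):
```rust,ignore
/// Nudge `lamports_per_signature` toward the congestion target.
fn adjust_lamports_per_signature(current: u64, sps: u64, sps_target: u64) -> u64 {
    if sps < sps_target {
        current.saturating_sub(1) // below target: lower fees to invite load
    } else {
        current + 1               // at or above target: raise fees
    }
}

/// With `lamports_per_signature` as the only parameter, the fee a client
/// computes is fully deterministic.
fn calculate_fee(lamports_per_signature: u64, num_signatures: u64) -> u64 {
    lamports_per_signature * num_signatures
}
```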
Future parameters might include:
* `lamports_per_pubkey` - cost to load an account
* `lamports_per_slot_distance` - higher cost to load very old accounts
* `lamports_per_byte` - cost per size of account loaded
* `lamports_per_bpf_instruction` - cost to run a program
### Attacks
#### Hijacking the SPS Target
A group of validators can centralize the cluster if they can convince it to
raise the SPS Target above a point where the rest of the validators can keep
up. Raising the target will cause fees to drop, presumably creating more demand
and therefore higher TPS. If the validator doesn't have hardware that can
process that many transactions that fast, its confirmation votes will
eventually lag so far behind that the cluster will be forced to boot it.


@ -41,7 +41,7 @@ $ solana-wallet balance
$ solana-wallet confirm <TX_SIGNATURE>
// Return
"Confirmed" / "Not found"
"Confirmed" / "Not found" / "Transaction failed with error <ERR>"
```
#### Deploy program
@ -352,4 +352,3 @@ ARGS:
<PUBKEY> The pubkey of recipient
<PROCESS_ID> The process id of the transfer to unlock
```


@ -2,22 +2,24 @@ steps:
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5
- command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-checks.sh"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 15
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-stable.sh"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust-nightly:2019-01-31 ci/test-coverage.sh"
timeout_in_minutes: 25
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 20
# TODO: Fix and re-enable test-large-network.sh


@ -1,17 +1,26 @@
#!/usr/bin/env bash
#
# Outputs the current crate version
# Outputs the current crate version from a given Cargo.toml
#
set -e
cd "$(dirname "$0")"/..
Cargo_toml=$1
[[ -n $Cargo_toml ]] || {
echo "Usage: $0 path/to/Cargo.toml"
exit 0
}
[[ -r $Cargo_toml ]] || {
echo "Error: unable to read $Cargo_toml"
exit 1
}
while read -r name equals value _; do
if [[ $name = version && $equals = = ]]; then
echo "${value//\"/}"
exit 0
fi
done < <(cat Cargo.toml)
done < <(cat "$Cargo_toml")
echo Unable to locate version in Cargo.toml 1>&2
exit 1


@ -4,11 +4,9 @@ ARG date
RUN set -x \
&& rustup install nightly-$date \
&& rustup show \
&& mv /usr/local/rustup/toolchains/nightly-$date-* \
/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu \
&& rustup show \
&& rustc --version \
&& cargo --version \
&& rustc +nightly --version \
&& cargo +nightly --version
&& rustc +nightly-$date --version \
&& cargo +nightly-$date --version


@ -19,7 +19,7 @@ To update the pinned version:
to confirm the new nightly image builds. Fix any issues as needed
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/buildkite.yml` from the previous to
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/rust-version.sh` from the previous to
new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.
## Troubleshooting


@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.32.0
FROM rust:1.34.0
RUN set -x \
&& apt update \
@ -17,6 +17,7 @@ RUN set -x \
lcov \
libclang-common-7-dev \
llvm-7 \
mscgen \
rsync \
sudo \
\


@ -8,5 +8,4 @@ docker build -t solanalabs/rust .
read -r rustc version _ < <(docker run solanalabs/rust rustc --version)
[[ $rustc = rustc ]]
docker tag solanalabs/rust:latest solanalabs/rust:"$version"
docker push solanalabs/rust
docker push solanalabs/rust:"$version"


@ -24,9 +24,10 @@ fi
build() {
$genPipeline && return
ci/version-check-with-upgrade.sh stable
source ci/rust-version.sh stable
_ scripts/ulimit-n.sh
_ cargo build --all
_ cargo +$rust_stable build --all
}
runTest() {


@ -55,7 +55,7 @@ while getopts "ch?i:k:brxR" opt; do
restartInterval=$OPTARG
;;
b)
maybeNoLeaderRotation="--no-leader-rotation"
maybeNoLeaderRotation="--no-stake"
;;
x)
extraNodes=$((extraNodes + 1))
@ -78,7 +78,6 @@ source scripts/configure-metrics.sh
nodes=(
"multinode-demo/drone.sh"
"multinode-demo/bootstrap-leader.sh \
$maybeNoLeaderRotation \
--enable-rpc-exit \
--init-complete-file init-complete-node1.log"
"multinode-demo/fullnode.sh \
@ -307,11 +306,8 @@ while [[ $iteration -le $iterations ]]; do
set -x
client_id=/tmp/client-id.json-$$
$solana_keygen -o $client_id || exit $?
$solana_bench_tps \
--identity $client_id \
--num-nodes $numNodes \
--reject-extra-nodes \
--converge-only || exit $?
$solana_gossip \
--num-nodes-exactly $numNodes || exit $?
rm -rf $client_id
) || flag_error


@ -2,15 +2,7 @@
set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
echo --- Creating tarball
(


@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh
# List of internal crates to publish
#
@ -11,19 +11,25 @@ cd "$(dirname "$0")/.."
# here. (TODO: figure the crate ordering dynamically)
#
CRATES=(
kvstore
logger
netutil
sdk
keygen
metrics
client
drone
programs/{budget,bpf_loader,native_loader,noop,system,vote}
programs/{budget_api,config_api,storage_api,token_api,vote_api}
runtime
programs/{budget,bpf_loader,config,vote,storage,token,vote}
vote-signer
core
fullnode
genesis
gossip
ledger-tool
wallet
runtime
install
)
@ -33,6 +39,9 @@ CRATES=(
exit 0
}
semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
[[ -n "$CRATES_IO_TOKEN" ]] || {
echo CRATES_IO_TOKEN undefined
exit 1
@ -46,13 +55,17 @@ for crate in "${CRATES[@]}"; do
exit 1
fi
echo "-- $crate"
# TODO: Ensure the published version matches the contents of
# TRIGGERED_BUILDKITE_TAG
grep -q "^version = \"$expectedCrateVersion\"$" Cargo.toml || {
echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
exit 1
}
(
set -x
# TODO: the rocksdb package does not build with the stock rust docker image,
# so use the solana rust docker image until this is resolved upstream
ci/docker-run.sh solanalabs/rust:1.31.0 bash -exc "cd $crate; $cargoCommand"
source ci/rust-version.sh
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
#ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
)
done
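The new version guard parses the triggering tag with `semver_bash` and then requires each crate's `Cargo.toml` to declare exactly that version; a sketch of what the check evaluates, assuming a tag of `v0.13.0`:

```bash
source ci/semver_bash/semver.sh
semverParseInto "v0.13.0" MAJOR MINOR PATCH SPECIAL   # MAJOR=0 MINOR=13 PATCH=0
expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"   # -> 0.13.0
# Passes only if Cargo.toml contains the exact line: version = "0.13.0"
grep -q "^version = \"$expectedCrateVersion\"$" Cargo.toml || echo "version mismatch"
```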

View File

@ -45,11 +45,7 @@ beta)
CHANNEL_BRANCH=$BETA_CHANNEL
;;
stable)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_BRANCH=$BETA_CHANNEL
else
CHANNEL_BRANCH=$STABLE_CHANNEL
fi
CHANNEL_BRANCH=$STABLE_CHANNEL
;;
*)
echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"

View File

@ -11,10 +11,13 @@ fi
eval "$(ci/channel-info.sh)"
TAG=
if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$BUILDKITE_TAG
TAG="$BUILDKITE_TAG"
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
TAG="$TRIGGERED_BUILDKITE_TAG"
else
CHANNEL_OR_TAG=$CHANNEL
fi
@ -24,18 +27,34 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
exit 1
fi
case "$(uname)" in
Darwin)
TARGET=x86_64-apple-darwin
;;
Linux)
TARGET=x86_64-unknown-linux-gnu
;;
*)
TARGET=unknown-unknown-unknown
;;
esac
echo --- Creating tarball
(
set -x
rm -rf solana-release/
mkdir solana-release/
(
echo "$CHANNEL_OR_TAG"
git rev-parse HEAD
) > solana-release/version.txt
scripts/cargo-install-all.sh solana-release
COMMIT="$(git rev-parse HEAD)"
(
echo "channel: $CHANNEL"
echo "commit: $COMMIT"
echo "target: $TARGET"
) > solana-release/version.yml
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release
./fetch-perf-libs.sh
# shellcheck source=/dev/null
@ -45,33 +64,62 @@ echo --- Creating tarball
cargo install --path . --features=cuda --root ../solana-release-cuda
)
cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
cp -a scripts multinode-demo solana-release/
tar jvcf solana-release.tar.bz2 solana-release/
# Add a wrapper script for fullnode.sh
# TODO: Remove multinode/... from tarball
cat > solana-release/bin/fullnode.sh <<'EOF'
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
exec multinode-demo/fullnode.sh "$@"
EOF
chmod +x solana-release/bin/fullnode.sh
# Add a wrapper script for fullnode-x.sh
# TODO: Remove multinode/... from tarball
cat > solana-release/bin/fullnode-x.sh <<'EOF'
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
exec multinode-demo/fullnode-x.sh "$@"
EOF
chmod +x solana-release/bin/fullnode-x.sh
tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
cp solana-release/bin/solana-install solana-install-$TARGET
)
echo --- Saving build artifacts
source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release.tar.bz2
upload-ci-artifact solana-release-$TARGET.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi
echo --- AWS S3 Store
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
for file in solana-release-$TARGET.tar.bz2 solana-install-$TARGET; do
echo --- AWS S3 Store: $file
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://solana-release/"$CHANNEL_OR_TAG"/"$file"
echo Published to:
$DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
)
echo Published to:
$DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/"$file"
)
if [[ -n $TAG ]]; then
ci/upload-github-release-asset.sh $file
fi
done
echo --- ok
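Several commands above are prefixed with `$DRYRUN`, a common CI idiom (the assumption here is that the variable is either empty or set to `echo` elsewhere in the script): empty means the command runs normally, `echo` means it is printed instead of executed. A minimal sketch:

```bash
DRYRUN=echo
$DRYRUN docker run --rm busybox true   # prints the docker command, runs nothing
DRYRUN=
$DRYRUN docker run --rm busybox true   # empty prefix: the command executes
```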

View File

@ -1,20 +0,0 @@
#!/usr/bin/env bash
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#
set -e
cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun
if [[ ! -x $BKRUN ]]; then
(
set -x
cd ci/
npm install bkrun
)
fi
set -x
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml

ci/rust-version.sh (new file, 45 lines)
View File

@ -0,0 +1,45 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
# $ source ci/rust-version.sh stable
# $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
# $ source ci/rust-version.sh nightly
# $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
# $ source ci/rust-version.sh
#
export rust_stable=1.34.0
export rust_stable_docker_image=solanalabs/rust:1.34.0
export rust_nightly=nightly-2019-03-14
export rust_nightly_docker_image=solanalabs/rust-nightly:2019-03-14
[[ -z $1 ]] || (
rustup_install() {
declare toolchain=$1
if ! cargo +"$toolchain" -V; then
rustup install "$toolchain"
cargo +"$toolchain" -V
fi
}
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"
case $1 in
stable)
rustup_install "$rust_stable"
;;
nightly)
rustup_install "$rust_nightly"
;;
*)
echo "Note: ignoring unknown argument: $1"
;;
esac
)
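Downstream CI scripts consume this file exactly as its header comments describe; a sketch of the typical call sites seen elsewhere in this diff:

```bash
source ci/rust-version.sh stable     # installs the pinned toolchain if missing
cargo +"$rust_stable" build --all    # pin cargo to that exact toolchain
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cargo build"  # containerized variant
```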

View File

@ -24,7 +24,7 @@ source ci/_
source ci/upload-ci-artifact.sh
eval "$(ci/channel-info.sh)"
ci/version-check-with-upgrade.sh nightly
source ci/rust-version.sh nightly
set -o pipefail
export RUST_BACKTRACE=1
@ -39,19 +39,28 @@ fi
BENCH_FILE=bench_output.log
BENCH_ARTIFACT=current_bench_results.log
_ cargo +nightly bench ${V:+--verbose} \
-- -Z unstable-options --format=json | tee "$BENCH_FILE"
# First remove "$BENCH_FILE", if it exists, so that the following commands can append
rm -f "$BENCH_FILE"
# Run sdk benches
_ cargo +$rust_nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run runtime benches
_ cargo +$rust_nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run core benches
_ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run bpf benches
echo --- program/bpf
(
set -x
cd programs/bpf
cargo +nightly bench ${V:+--verbose} --features=bpf_c \
-- -Z unstable-options --format=json --nocapture | tee -a ../../../"$BENCH_FILE"
)
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
_ cargo +nightly run --release --package solana-upload-perf \
_ cargo +$rust_nightly run --release --package solana-upload-perf \
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT"
upload-ci-artifact "$BENCH_ARTIFACT"
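The rework above replaces one monolithic `cargo bench` with per-crate runs that all append to a single JSON log, which `solana-upload-perf` then consumes; the accumulation pattern in miniature (crate list is illustrative):

```bash
rm -f "$BENCH_FILE"                    # start clean; every run below appends
for manifest in sdk runtime core; do
  cargo +"$rust_nightly" bench --manifest-path "$manifest"/Cargo.toml \
    -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
done
```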

View File

@ -4,14 +4,14 @@ set -e
cd "$(dirname "$0")/.."
source ci/_
ci/version-check.sh stable
source ci/rust-version.sh stable
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
_ cargo fmt --all -- --check
_ cargo clippy --all -- --version
_ cargo clippy --all -- --deny=warnings
_ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_stable" clippy --all -- --version
_ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ ci/audit.sh
_ ci/nits.sh
_ book/build.sh

View File

@ -21,7 +21,6 @@ ci/affects-files.sh \
}
source ci/upload-ci-artifact.sh
ci/version-check-with-upgrade.sh nightly
source scripts/ulimit-n.sh
scripts/coverage.sh

View File

@ -4,9 +4,7 @@ set -e
here=$(dirname "$0")
cd "$here"/..
# This job doesn't run within a container, try once to upgrade tooling on a
# version check failure
ci/version-check-with-upgrade.sh stable
source ci/rust-version.sh stable
export RUST_BACKTRACE=1
@ -39,4 +37,4 @@ fi
set -x
export SOLANA_DYNAMIC_NODES=120
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored
exec cargo +"$rust_stable" test --release --features=erasure test_multi_node_dynamic_network -- --ignored

View File

@ -10,7 +10,8 @@ annotate() {
}
}
ci/version-check-with-upgrade.sh stable
source ci/rust-version.sh stable
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh
@ -24,9 +25,8 @@ case $testName in
test-stable)
echo "Executing $testName"
_ cargo build --all ${V:+--verbose}
_ cargo test --all ${V:+--verbose} -- --nocapture --test-threads=1
_ cargo test --manifest-path programs/system/Cargo.toml
_ cargo +"$rust_stable" build --all ${V:+--verbose}
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
;;
test-stable-perf)
echo "Executing $testName"
@ -48,7 +48,9 @@ test-stable-perf)
# BPF program tests
_ make -C programs/bpf/c tests
_ programs/bpf/rust/noop/build.sh # Must be built out of band
_ cargo test --manifest-path programs/bpf/Cargo.toml --no-default-features --features=bpf_c,bpf_rust
_ cargo +"$rust_stable" test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust
# Run root package tests with these features
ROOT_FEATURES=erasure,chacha
@ -67,20 +69,8 @@ test-stable-perf)
fi
# Run root package library tests
_ cargo build --all ${V:+--verbose} --features="$ROOT_FEATURES"
_ cargo test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
_ cargo test --manifest-path programs/system/Cargo.toml
# Run root package integration tests
for test in tests/*.rs; do
test=${test##*/} # basename x
test=${test%.rs} # basename x .rs
(
export RUST_LOG="$test"=trace,$RUST_LOG
_ cargo test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \
-- --test-threads=1 --nocapture
)
done
_ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
_ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
;;
*)
echo "Error: Unknown test: $testName"

View File

@ -17,18 +17,22 @@ source ci/upload-ci-artifact.sh
LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
[[ -n $TESTNET_ZONE ]] || TESTNET_ZONE=us-west1-b
[[ -n $TESTNET_ZONES ]] || TESTNET_ZONES="us-west1-b"
[[ -n $CHANNEL ]] || CHANNEL=beta
[[ -n $ADDITIONAL_FLAGS ]] || ADDITIONAL_FLAGS=""
TESTNET_CLOUD_ZONES=(); while read -r -d, ; do TESTNET_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TESTNET_ZONES},"
launchTestnet() {
declare nodeCount=$1
echo --- setup "$nodeCount" node test
# shellcheck disable=SC2068
net/gce.sh create \
-b \
-d pd-ssd \
-n "$nodeCount" -c "$CLIENT_COUNT" \
-G "$LEADER_CPU_MACHINE_TYPE" \
-p "$TESTNET_TAG" -z "$TESTNET_ZONE"
-p "$TESTNET_TAG" ${TESTNET_CLOUD_ZONES[@]/#/-z } "$ADDITIONAL_FLAGS"
echo --- configure database
net/init-metrics.sh -e
@ -43,37 +47,39 @@ launchTestnet() {
echo --- wait "$ITERATION_WAIT" seconds to complete test
sleep "$ITERATION_WAIT"
set -x
declare q_mean_tps='
SELECT round(mean("sum_count")) AS "mean_tps" FROM (
SELECT sum("count") AS "sum_count"
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
FROM "testnet-automation"."autogen"."counter-bank-process_transactions-txs"
WHERE time > now() - 300s GROUP BY time(1s)
)'
declare q_max_tps='
SELECT round(max("sum_count")) AS "max_tps" FROM (
SELECT sum("count") AS "sum_count"
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
FROM "testnet-automation"."autogen"."counter-bank-process_transactions-txs"
WHERE time > now() - 300s GROUP BY time(1s)
)'
declare q_mean_confirmation='
SELECT round(mean("duration_ms")) as "mean_confirmation"
FROM "testnet-automation"."autogen"."leader-confirmation"
FROM "testnet-automation"."autogen"."validator-confirmation"
WHERE time > now() - 300s'
declare q_max_confirmation='
SELECT round(max("duration_ms")) as "max_confirmation"
FROM "testnet-automation"."autogen"."leader-confirmation"
FROM "testnet-automation"."autogen"."validator-confirmation"
WHERE time > now() - 300s'
declare q_99th_confirmation='
SELECT round(percentile("duration_ms", 99)) as "99th_confirmation"
FROM "testnet-automation"."autogen"."leader-confirmation"
FROM "testnet-automation"."autogen"."validator-confirmation"
WHERE time > now() - 300s'
curl -G "https://metrics.solana.com:8086/query?u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
--data-urlencode "db=$INFLUX_DATABASE" \
curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \
--data-urlencode "db=testnet-automation" \
--data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_confirmation;$q_max_confirmation;$q_99th_confirmation" |
python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log
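The multi-zone support above leans on two bash idioms: a `read -d,` loop that splits the comma-separated `TESTNET_ZONES` into an array (the trailing comma ensures the last element is read), and the `${array[@]/#/-z }` expansion that prefixes every element with `-z `. A sketch with illustrative zones:

```bash
TESTNET_ZONES="us-west1-b,us-east1-c"
TESTNET_CLOUD_ZONES=()
while read -r -d, ; do TESTNET_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TESTNET_ZONES},"
# Intentionally unquoted so each "-z zone" pair becomes separate arguments
echo ${TESTNET_CLOUD_ZONES[@]/#/-z }   # -> -z us-west1-b -z us-east1-c
```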

View File

@ -11,6 +11,8 @@ clientNodeCount=0
additionalFullNodeCount=10
publicNetwork=false
skipSetup=false
skipStart=false
externalNode=false
tarChannelOrTag=edge
delete=false
enableGpu=false
@ -25,13 +27,14 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name] [cloud] [zone] [options...]
usage: $0 -p network-name -C cloud -z zone1 [-z zone2] ... [-z zoneN] [options...]
Deploys a CD testnet
name - name of the network
cloud - cloud provider to use (gce, ec2)
zone - cloud provider zone to deploy the network into
mandatory arguments:
-p [network-name] - name of the network
-C [cloud] - cloud provider to use (gce, ec2)
-z [zone] - cloud provider zone to deploy the network into. Must specify at least one zone
options:
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
@ -57,19 +60,22 @@ EOF
exit $exitcode
}
netName=$1
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
shift 3
zone=()
while getopts "h?p:Pn:c:t:gG:a:Dbd:ru" opt; do
while getopts "h?p:Pn:c:t:gG:a:Dbd:rusxz:p:C:" opt; do
case $opt in
h | \?)
usage
;;
p)
netName=$OPTARG
;;
C)
cloudProvider=$OPTARG
;;
z)
zone+=("$OPTARG")
;;
P)
publicNetwork=true
;;
@ -111,6 +117,12 @@ while getopts "h?p:Pn:c:t:gG:a:Dbd:ru" opt; do
r)
skipSetup=true
;;
s)
skipStart=true
;;
x)
externalNode=true
;;
u)
blockstreamer=true
;;
@ -120,6 +132,10 @@ while getopts "h?p:Pn:c:t:gG:a:Dbd:ru" opt; do
esac
done
[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n ${zone[*]} ]] || usage "At least one zone must be specified"
shutdown() {
exitcode=$?
@ -140,9 +156,16 @@ trap shutdown EXIT INT
set -x
# Build a string to pass zone opts to $cloudProvider.sh: "-z zone1 -z zone2 ..."
zone_args=()
for val in "${zone[@]}"; do
zone_args+=("-z $val")
done
if ! $skipSetup; then
echo "--- $cloudProvider.sh delete"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
# shellcheck disable=SC2068
time net/"$cloudProvider".sh delete ${zone_args[@]} -p "$netName" ${externalNode:+-x}
if $delete; then
exit 0
fi
@ -150,11 +173,12 @@ if ! $skipSetup; then
echo "--- $cloudProvider.sh create"
create_args=(
-p "$netName"
-z "$zone"
-a "$bootstrapFullNodeAddress"
-c "$clientNodeCount"
-n "$additionalFullNodeCount"
)
# shellcheck disable=SC2206
create_args+=(${zone_args[@]})
if $blockstreamer; then
create_args+=(-u)
@ -180,13 +204,18 @@ if ! $skipSetup; then
create_args+=(-P)
fi
if $externalNode; then
create_args+=(-x)
fi
time net/"$cloudProvider".sh create "${create_args[@]}"
else
echo "--- $cloudProvider.sh config"
config_args=(
-p "$netName"
-z "$zone"
)
# shellcheck disable=SC2206
config_args+=(${zone_args[@]})
if $publicNetwork; then
config_args+=(-P)
fi
@ -218,19 +247,33 @@ if $skipSetup; then
fi
ok=true
(
if $skipSetup; then
# TODO: Enable rolling updates
#op=update
op=restart
else
op=start
fi
if ! $skipStart; then
(
if $skipSetup; then
# TODO: Enable rolling updates
#op=update
op=restart
else
op=start
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh $op -t "$tarChannelOrTag" \
$maybeSkipSetup $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
) || ok=false
maybeUpdateManifestKeypairFile=
# shellcheck disable=SC2154 # SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu comes from .buildkite/env/
if [[ -n $SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu ]]; then
echo "$SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu" > update_manifest_keypair.json
maybeUpdateManifestKeypairFile="-i update_manifest_keypair.json"
fi
# shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
time net/net.sh $op -t "$tarChannelOrTag" \
$maybeUpdateManifestKeypairFile \
$maybeSkipSetup \
$maybeRejectExtraNodes \
$maybeNoValidatorSanity \
$maybeNoLedgerVerify
) || ok=false
net/net.sh logs
fi
net/net.sh logs
$ok
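The `${maybeReuseLedger:+-r}`-style expansions scattered through these deploy scripts rely on bash's `:+` operator, which yields its word only when the variable is set and non-empty; a minimal sketch:

```bash
maybeReuseLedger=
echo deploy ${maybeReuseLedger:+-r}    # -> deploy       (flag omitted)
maybeReuseLedger=1
echo deploy ${maybeReuseLedger:+-r}    # -> deploy -r    (flag injected)
```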

View File

@ -64,6 +64,10 @@ EOF
exit 0
fi
if [[ -n $TESTNET_DB_HOST ]]; then
SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
fi
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh
@ -71,33 +75,31 @@ source scripts/configure-metrics.sh
ci/channel-info.sh
eval "$(ci/channel-info.sh)"
case $TESTNET in
testnet-edge|testnet-edge-perf)
CHANNEL_OR_TAG=edge
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
testnet-beta|testnet-beta-perf)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
testnet|testnet-perf)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
if [[ -n $TESTNET_TAG ]]; then
CHANNEL_OR_TAG=$TESTNET_TAG
else
case $TESTNET in
testnet-edge|testnet-edge-perf)
CHANNEL_OR_TAG=edge
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
testnet-beta|testnet-beta-perf)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
else
;;
testnet|testnet-perf)
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$STABLE_CHANNEL
fi
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
@ -107,12 +109,15 @@ steps:
env:
TESTNET: "$TESTNET"
TESTNET_OP: "$TESTNET_OP"
TESTNET_DB_HOST: "$TESTNET_DB_HOST"
EC2_NODE_COUNT: "$EC2_NODE_COUNT"
GCE_NODE_COUNT: "$GCE_NODE_COUNT"
EOF
) | buildkite-agent pipeline upload
exit 0
) | buildkite-agent pipeline upload
exit 0
fi
fi
sanity() {
echo "--- sanity $TESTNET"
case $TESTNET in
@ -134,7 +139,23 @@ sanity() {
testnet-beta)
(
set -x
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
EC2_ZONES=(us-west-1a sa-east-1a ap-northeast-2a eu-central-1a ca-central-1a)
ok=true
for zone in "${EC2_ZONES[@]}"; do
if ! $ok; then
break
fi
ci/testnet-sanity.sh beta-testnet-solana-com ec2 "$zone" || ok=false
done
GCE_ZONES=(us-west1-b asia-east2-a europe-west4-a southamerica-east1-b us-east4-c)
for zone in "${GCE_ZONES[@]}"; do
if ! $ok; then
break
fi
ci/testnet-sanity.sh beta-testnet-solana-com gce "$zone" || ok=false
done
$ok
)
;;
testnet-beta-perf)
@ -184,12 +205,10 @@ start() {
testnet-edge)
(
set -x
NO_VALIDATOR_SANITY=1 \
RUST_LOG=solana=info \
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
)
;;
testnet-edge-perf)
@ -197,7 +216,8 @@ start() {
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
RUST_LOG=solana=warn \
ci/testnet-deploy.sh -p edge-perf-testnet-solana-com -C ec2 -z us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
-b \
${maybeReuseLedger:+-r} \
@ -207,13 +227,33 @@ start() {
testnet-beta)
(
set -x
NO_VALIDATOR_SANITY=1 \
RUST_LOG=solana=info \
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
-b \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
EC2_ZONES=(us-west-1a sa-east-1a ap-northeast-2a eu-central-1a ca-central-1a)
GCE_ZONES=(us-west1-b asia-east2-a europe-west4-a southamerica-east1-b us-east4-c)
# Build an array to pass as opts to testnet-deploy.sh: "-z zone1 -z zone2 ..."
GCE_ZONE_ARGS=()
for val in "${GCE_ZONES[@]}"; do
GCE_ZONE_ARGS+=("-z $val")
done
EC2_ZONE_ARGS=()
for val in "${EC2_ZONES[@]}"; do
EC2_ZONE_ARGS+=("-z $val")
done
# shellcheck disable=SC2068
[[ -z $EC2_NODE_COUNT ]] || ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D} \
${GCE_NODE_COUNT:+-s}
# shellcheck disable=SC2068
[[ -z $GCE_NODE_COUNT ]] || ci/testnet-deploy.sh -p beta-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D} \
${EC2_NODE_COUNT:+-x}
)
;;
testnet-beta-perf)
@ -221,7 +261,8 @@ start() {
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
RUST_LOG=solana=warn \
ci/testnet-deploy.sh -p beta-perf-testnet-solana-com -C ec2 -z us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
-b \
${maybeReuseLedger:+-r} \
@ -232,13 +273,12 @@ start() {
(
set -x
NO_VALIDATOR_SANITY=1 \
RUST_LOG=solana=info \
ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
ci/testnet-deploy.sh -p testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0fa502bf95f6f18b2 \
-b \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
#ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
#ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-east1-c \
# -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
# ${maybeReuseLedger:+-r} \
# ${maybeDelete:+-D}
@ -249,14 +289,15 @@ start() {
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
RUST_LOG=solana=warn \
ci/testnet-deploy.sh -p perf-testnet-solana-com -C gce -z us-west1-b \
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
-t "$CHANNEL_OR_TAG" -c 2 \
-b \
-d pd-ssd \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
#ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
#ci/testnet-deploy.sh -p perf-testnet-solana-com -C ec2 -z us-east-1a \
# -g \
# -t "$CHANNEL_OR_TAG" -c 2 \
# ${maybeReuseLedger:+-r} \
@ -299,11 +340,18 @@ sanity-or-restart)
else
echo "+++ Sanity failed, updating the network"
$metricsWriteDatapoint "testnet-manager sanity-failure=1"
if start "" update; then
echo Update successful
# TODO: Restore attempt to restart the cluster before recreating it
# See https://github.com/solana-labs/solana/issues/3774
if false; then
if start "" update; then
echo Update successful
else
echo "+++ Update failed, restarting the network"
$metricsWriteDatapoint "testnet-manager update-failure=1"
start
fi
else
echo "+++ Update failed, restarting the network"
$metricsWriteDatapoint "testnet-manager update-failure=1"
start
fi
fi

View File

@ -48,6 +48,7 @@ shutdown() {
exit $exitcode
}
rm -rf net/{log,-sanity}
rm -f net/config/config
trap shutdown EXIT INT
set -x

View File

@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# Uploads one or more files to a github release
#
# Prerequisites
# 1) GITHUB_TOKEN defined in the environment
# 2) TAG defined in the environment
#
set -e
REPO_SLUG=solana-labs/solana
if [[ -z $1 ]]; then
echo No files specified
exit 1
fi
if [[ -z $GITHUB_TOKEN ]]; then
echo Error: GITHUB_TOKEN not defined
exit 1
fi
if [[ -n $BUILDKITE_TAG ]]; then
TAG=$BUILDKITE_TAG
elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
TAG=$TRIGGERED_BUILDKITE_TAG
fi
if [[ -z $TAG ]]; then
echo Error: TAG not defined
exit 1
fi
releaseId=$( \
curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
| grep -m 1 \"id\": \
| sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
)
echo "Github release id for $TAG is $releaseId"
for file in "$@"; do
echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
curl \
--data-binary @"$file" \
-H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
"https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
echo
done
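A hypothetical invocation of the new upload script (the token and tag values are placeholders):

```bash
export GITHUB_TOKEN=xxxxxxxx   # placeholder personal access token
export BUILDKITE_TAG=v0.13.0
ci/upload-github-release-asset.sh solana-release-x86_64-unknown-linux-gnu.tar.bz2
```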

View File

@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
channel=${1:-stable}
if ! ./version-check.sh "$channel"; then
rustup install "$channel"
./version-check.sh "$channel"
fi

View File

@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -e
require() {
declare expectedProgram="$1"
declare expectedVersion="$2"
shift 2
read -r program version _ < <($expectedProgram "$@" -V)
declare ok=true
[[ $program = "$expectedProgram" ]] || ok=false
[[ $version =~ $expectedVersion ]] || ok=false
echo "Found $program $version"
if ! $ok; then
echo Error: expected "$expectedProgram $expectedVersion"
exit 1
fi
}
case ${1:-stable} in
nightly)
require rustc 1.34.[0-9]+-nightly +nightly
require cargo 1.34.[0-9]+-nightly +nightly
;;
stable)
require rustc 1.32.[0-9]+
require cargo 1.32.[0-9]+
;;
*)
echo Error: unknown argument: "$1"
exit 1
;;
esac
exit 0

client/Cargo.toml (new file, 26 lines)
View File

@ -0,0 +1,26 @@
[package]
name = "solana-client"
version = "0.13.0"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"
[dependencies]
bincode = "1.1.2"
bs58 = "0.2.0"
log = "0.4.2"
jsonrpc-core = "10.1.0"
reqwest = "0.9.11"
serde = "1.0.89"
serde_derive = "1.0.88"
serde_json = "1.0.39"
solana-netutil = { path = "../netutil", version = "0.13.0" }
solana-sdk = { path = "../sdk", version = "0.13.0" }
[dev-dependencies]
jsonrpc-core = "10.1.0"
jsonrpc-http-server = "10.1.0"
solana-logger = { path = "../logger", version = "0.13.0" }

client/src/generic_rpc_client_request.rs (new file, 10 lines)
View File

@ -0,0 +1,10 @@
use crate::rpc_request::RpcRequest;
pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
retries: usize,
) -> Result<serde_json::Value, Box<dyn std::error::Error>>;
}

client/src/lib.rs (new file, 6 lines)
View File

@ -0,0 +1,6 @@
mod generic_rpc_client_request;
pub mod mock_rpc_client_request;
pub mod rpc_client;
pub mod rpc_client_request;
pub mod rpc_request;
pub mod thin_client;

client/src/mock_rpc_client_request.rs (new file, 63 lines)
View File

@ -0,0 +1,63 @@
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::rpc_request::RpcRequest;
use serde_json::{Number, Value};
use solana_sdk::transaction::{self, TransactionError};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
pub const SIGNATURE: &str =
"43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";
pub struct MockRpcClientRequest {
url: String,
}
impl MockRpcClientRequest {
pub fn new(url: String) -> Self {
Self { url }
}
}
impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
_retries: usize,
) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
if self.url == "fails" {
return Ok(Value::Null);
}
let val = match request {
RpcRequest::ConfirmTransaction => {
if let Some(Value::Array(param_array)) = params {
if let Value::String(param_string) = &param_array[0] {
Value::Bool(param_string == SIGNATURE)
} else {
Value::Null
}
} else {
Value::Null
}
}
RpcRequest::GetBalance => {
let n = if self.url == "airdrop" { 0 } else { 50 };
Value::Number(Number::from(n))
}
RpcRequest::GetRecentBlockhash => Value::String(PUBKEY.to_string()),
RpcRequest::GetSignatureStatus => {
let response: Option<transaction::Result<()>> = if self.url == "account_in_use" {
Some(Err(TransactionError::AccountInUse))
} else if self.url == "sig_not_found" {
None
} else {
Some(Ok(()))
};
serde_json::to_value(response).unwrap()
}
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
_ => Value::Null,
};
Ok(val)
}
}

client/src/rpc_client.rs (new file, 746 lines)
View File

@ -0,0 +1,746 @@
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::mock_rpc_client_request::MockRpcClientRequest;
use crate::rpc_client_request::RpcClientRequest;
use crate::rpc_request::RpcRequest;
use bincode::serialize;
use bs58;
use log::*;
use serde_json::{json, Value};
use solana_sdk::account::Account;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
use solana_sdk::transaction::{self, Transaction, TransactionError};
use std::error;
use std::io;
use std::net::SocketAddr;
use std::thread::sleep;
use std::time::{Duration, Instant};
pub struct RpcClient {
client: Box<GenericRpcClientRequest>,
}
impl RpcClient {
pub fn new(url: String) -> Self {
Self {
client: Box::new(RpcClientRequest::new(url)),
}
}
pub fn new_mock(url: String) -> Self {
Self {
client: Box::new(MockRpcClientRequest::new(url)),
}
}
pub fn new_socket(addr: SocketAddr) -> Self {
Self::new(get_rpc_request_str(addr, false))
}
pub fn new_socket_with_timeout(addr: SocketAddr, timeout: Duration) -> Self {
let url = get_rpc_request_str(addr, false);
Self {
client: Box::new(RpcClientRequest::new_with_timeout(url, timeout)),
}
}
pub fn send_transaction(
&self,
transaction: &Transaction,
) -> Result<String, Box<dyn error::Error>> {
let serialized = serialize(transaction).unwrap();
let params = json!([serialized]);
let signature = self
.client
.send(&RpcRequest::SendTransaction, Some(params), 5)?;
if signature.as_str().is_none() {
Err(io::Error::new(
io::ErrorKind::Other,
"Received result of an unexpected type",
))?;
}
Ok(signature.as_str().unwrap().to_string())
}
pub fn get_signature_status(
&self,
signature: &str,
) -> Result<Option<transaction::Result<()>>, Box<dyn error::Error>> {
let params = json!([signature.to_string()]);
let signature_status =
self.client
.send(&RpcRequest::GetSignatureStatus, Some(params), 5)?;
let result: Option<transaction::Result<()>> =
serde_json::from_value(signature_status).unwrap();
Ok(result)
}
pub fn send_and_confirm_transaction<T: KeypairUtil>(
&self,
transaction: &mut Transaction,
signer: &T,
) -> Result<String, Box<dyn error::Error>> {
let mut send_retries = 5;
loop {
let mut status_retries = 4;
let signature_str = self.send_transaction(transaction)?;
let status = loop {
let status = self.get_signature_status(&signature_str)?;
if status.is_none() {
status_retries -= 1;
if status_retries == 0 {
break status;
}
} else {
break status;
}
if cfg!(not(test)) {
// Retry ~twice during a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / NUM_TICKS_PER_SECOND,
));
}
};
send_retries = if let Some(result) = status.clone() {
match result {
Ok(_) => return Ok(signature_str),
Err(TransactionError::AccountInUse) => {
// Fetch a new blockhash and re-sign the transaction before sending it again
self.resign_transaction(transaction, signer)?;
send_retries - 1
}
Err(_) => 0,
}
} else {
send_retries - 1
};
if send_retries == 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Transaction {:?} failed: {:?}", signature_str, status),
))?;
}
}
}
pub fn send_and_confirm_transactions(
&self,
mut transactions: Vec<Transaction>,
signer: &Keypair,
) -> Result<(), Box<dyn error::Error>> {
let mut send_retries = 5;
loop {
let mut status_retries = 4;
// Send all transactions
let mut transactions_signatures = vec![];
for transaction in transactions {
if cfg!(not(test)) {
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
// when all the write transactions modify the same program account (eg, deploying a
// new program)
sleep(Duration::from_millis(1000 / NUM_TICKS_PER_SECOND));
}
let signature = self.send_transaction(&transaction).ok();
transactions_signatures.push((transaction, signature))
}
// Collect statuses for all the transactions, drop those that are confirmed
while status_retries > 0 {
status_retries -= 1;
if cfg!(not(test)) {
// Retry ~twice during a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / NUM_TICKS_PER_SECOND,
));
}
transactions_signatures = transactions_signatures
.into_iter()
.filter(|(_transaction, signature)| {
if let Some(signature) = signature {
if let Ok(status) = self.get_signature_status(&signature) {
if status.is_none() {
return false;
}
return status.unwrap().is_err();
}
}
true
})
.collect();
if transactions_signatures.is_empty() {
return Ok(());
}
}
if send_retries == 0 {
Err(io::Error::new(io::ErrorKind::Other, "Transactions failed"))?;
}
send_retries -= 1;
// Re-sign any failed transactions with a new blockhash and retry
let blockhash =
self.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
transactions = transactions_signatures
.into_iter()
.map(|(mut transaction, _)| {
transaction.sign(&[signer], blockhash);
transaction
})
.collect();
}
}
pub fn resign_transaction<T: KeypairUtil>(
&self,
tx: &mut Transaction,
signer_key: &T,
) -> Result<(), Box<dyn error::Error>> {
let blockhash = self.get_new_blockhash(&tx.message().recent_blockhash)?;
tx.sign(&[signer_key], blockhash);
Ok(())
}
pub fn retry_get_balance(
&self,
pubkey: &Pubkey,
retries: usize,
) -> Result<Option<u64>, Box<dyn error::Error>> {
let params = json!([format!("{}", pubkey)]);
let res = self
.client
.send(&RpcRequest::GetBalance, Some(params), retries)?
.as_u64();
Ok(res)
}
pub fn get_account_data(&self, pubkey: &Pubkey) -> io::Result<Vec<u8>> {
let params = json!([format!("{}", pubkey)]);
let response = self
.client
.send(&RpcRequest::GetAccountInfo, Some(params), 0);
match response {
Ok(account_json) => {
let account: Account =
serde_json::from_value(account_json).expect("deserialize account");
Ok(account.data)
}
Err(error) => {
debug!("get_account_data failed: {:?}", error);
Err(io::Error::new(
io::ErrorKind::Other,
"get_account_data failed",
))
}
}
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
let params = json!([format!("{}", pubkey)]);
let response = self
.client
.send(&RpcRequest::GetAccountInfo, Some(params), 0);
response
.and_then(|account_json| {
let account: Account =
serde_json::from_value(account_json).expect("deserialize account");
trace!("Response account {:?} {:?}", pubkey, account);
trace!("get_balance {:?}", account.lamports);
Ok(account.lamports)
})
.map_err(|error| {
debug!("Response account {}: None (error: {:?})", pubkey, error);
io::Error::new(io::ErrorKind::Other, "AccountNotFound")
})
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will try again 5 times.
pub fn get_transaction_count(&self) -> io::Result<u64> {
debug!("get_transaction_count");
let mut num_retries = 5;
while num_retries > 0 {
let response = self.client.send(&RpcRequest::GetTransactionCount, None, 0);
match response {
Ok(value) => {
debug!("transaction_count response: {:?}", value);
if let Some(transaction_count) = value.as_u64() {
return Ok(transaction_count);
}
}
Err(err) => {
debug!("transaction_count failed: {:?}", err);
}
}
num_retries -= 1;
}
Err(io::Error::new(
io::ErrorKind::Other,
"Unable to get transaction count, too many retries",
))?
}
pub fn get_recent_blockhash(&self) -> io::Result<Hash> {
let mut num_retries = 5;
while num_retries > 0 {
match self.client.send(&RpcRequest::GetRecentBlockhash, None, 0) {
Ok(value) => {
if let Some(blockhash_str) = value.as_str() {
let blockhash_vec = bs58::decode(blockhash_str)
.into_vec()
.expect("bs58::decode");
return Ok(Hash::new(&blockhash_vec));
}
}
Err(err) => {
debug!("retry_get_recent_blockhash failed: {:?}", err);
}
}
num_retries -= 1;
}
Err(io::Error::new(
io::ErrorKind::Other,
"Unable to get recent blockhash, too many retries",
))
}
pub fn get_new_blockhash(&self, blockhash: &Hash) -> io::Result<Hash> {
let mut num_retries = 10;
while num_retries > 0 {
if let Ok(new_blockhash) = self.get_recent_blockhash() {
if new_blockhash != *blockhash {
return Ok(new_blockhash);
}
}
debug!("Got same blockhash ({:?}), will retry...", blockhash);
// Retry ~twice during a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / NUM_TICKS_PER_SECOND,
));
num_retries -= 1;
}
Err(io::Error::new(
io::ErrorKind::Other,
"Unable to get new blockhash, too many retries",
))
}
pub fn poll_balance_with_timeout(
&self,
pubkey: &Pubkey,
polling_frequency: &Duration,
timeout: &Duration,
) -> io::Result<u64> {
let now = Instant::now();
loop {
match self.get_balance(&pubkey) {
Ok(bal) => {
return Ok(bal);
}
Err(e) => {
sleep(*polling_frequency);
if now.elapsed() > *timeout {
return Err(e);
}
}
};
}
}
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
self.poll_balance_with_timeout(pubkey, &Duration::from_millis(100), &Duration::from_secs(1))
}
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
const LAST: usize = 30;
for run in 0..LAST {
let balance_result = self.poll_get_balance(pubkey);
if expected_balance.is_none() {
return balance_result.ok();
}
trace!(
"retry_get_balance[{}] {:?} {:?}",
run,
balance_result,
expected_balance
);
if let (Some(expected_balance), Ok(balance_result)) = (expected_balance, balance_result)
{
if expected_balance == balance_result {
return Some(balance_result);
}
}
}
None
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature(&self, signature: &Signature) -> io::Result<()> {
let now = Instant::now();
while !self.check_signature(signature) {
if now.elapsed().as_secs() > 15 {
// TODO: Return a better error.
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
}
sleep(Duration::from_millis(250));
}
Ok(())
}
/// Check a signature in the bank.
pub fn check_signature(&self, signature: &Signature) -> bool {
trace!("check_signature: {:?}", signature);
let params = json!([format!("{}", signature)]);
for _ in 0..30 {
let response =
self.client
.send(&RpcRequest::ConfirmTransaction, Some(params.clone()), 0);
match response {
Ok(confirmation) => {
let signature_status = confirmation.as_bool().unwrap();
if signature_status {
trace!("Response found signature");
} else {
trace!("Response signature not found");
}
return signature_status;
}
Err(err) => {
debug!("check_signature request failed: {:?}", err);
}
};
sleep(Duration::from_millis(250));
}
panic!("Couldn't check signature of {}", signature);
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> io::Result<()> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = self.get_num_blocks_since_signature_confirmation(signature);
match response {
Ok(count) => {
if confirmed_blocks != count {
info!(
"signature {} confirmed {} out of {}",
signature, count, min_confirmed_blocks
);
now = Instant::now();
confirmed_blocks = count;
}
if count >= min_confirmed_blocks {
break;
}
}
Err(err) => {
debug!("check_confirmations request failed: {:?}", err);
}
};
if now.elapsed().as_secs() > 15 {
// TODO: Return a better error.
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
}
sleep(Duration::from_millis(250));
}
Ok(())
}
pub fn get_num_blocks_since_signature_confirmation(
&self,
sig: &Signature,
) -> io::Result<usize> {
let params = json!([format!("{}", sig)]);
let response = self
.client
.send(
&RpcRequest::GetNumBlocksSinceSignatureConfirmation,
Some(params.clone()),
1,
)
.map_err(|error| {
debug!(
"Response get_num_blocks_since_signature_confirmation: {}",
error
);
io::Error::new(
io::ErrorKind::Other,
"GetNumBlocksSinceSignatureConfirmation request failure",
)
})?;
serde_json::from_value(response).map_err(|error| {
debug!(
"ParseError: get_num_blocks_since_signature_confirmation: {}",
error
);
io::Error::new(
io::ErrorKind::Other,
"GetNumBlocksSinceSignatureConfirmation parse failure",
)
})
}
pub fn fullnode_exit(&self) -> io::Result<bool> {
let response = self
.client
.send(&RpcRequest::FullnodeExit, None, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("FullnodeExit request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("FullnodeExit parse failure: {:?}", err),
)
})
}
// TODO: Remove
pub fn retry_make_rpc_request(
&self,
request: &RpcRequest,
params: Option<Value>,
retries: usize,
) -> Result<Value, Box<dyn error::Error>> {
self.client.send(request, params, retries)
}
}
pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
if tls {
format!("https://{}", rpc_addr)
} else {
format!("http://{}", rpc_addr)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mock_rpc_client_request::{PUBKEY, SIGNATURE};
use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
use serde_json::Number;
use solana_logger;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use solana_sdk::transaction::TransactionError;
use std::sync::mpsc::channel;
use std::thread;
#[test]
fn test_make_rpc_request() {
let (sender, receiver) = channel();
thread::spawn(move || {
let rpc_addr = "0.0.0.0:0".parse().unwrap();
let mut io = IoHandler::default();
// Successful request
io.add_method("getBalance", |_params: Params| {
Ok(Value::Number(Number::from(50)))
});
// Failed request
io.add_method("getRecentBlockhash", |params: Params| {
if params != Params::None {
Err(Error::invalid_request())
} else {
Ok(Value::String(
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx".to_string(),
))
}
});
let server = ServerBuilder::new(io)
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
sender.send(*server.address()).unwrap();
server.wait();
});
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance = rpc_client.retry_make_rpc_request(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"])),
0,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 50);
let blockhash = rpc_client.retry_make_rpc_request(&RpcRequest::GetRecentBlockhash, None, 0);
assert_eq!(
blockhash.unwrap().as_str().unwrap(),
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"
);
// Send erroneous parameter
let blockhash = rpc_client.retry_make_rpc_request(
&RpcRequest::GetRecentBlockhash,
Some(json!("paramter")),
0,
);
assert_eq!(blockhash.is_err(), true);
}
#[test]
fn test_retry_make_rpc_request() {
solana_logger::setup();
let (sender, receiver) = channel();
thread::spawn(move || {
// 1. Pick a random port
// 2. Tell the client to start using it
// 3. Delay for 1.5 seconds before starting the server to ensure the client will fail
// and need to retry
let rpc_addr: SocketAddr = "0.0.0.0:4242".parse().unwrap();
sender.send(rpc_addr.clone()).unwrap();
sleep(Duration::from_millis(1500));
let mut io = IoHandler::default();
io.add_method("getBalance", move |_params: Params| {
Ok(Value::Number(Number::from(5)))
});
let server = ServerBuilder::new(io)
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
server.wait();
});
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance = rpc_client.retry_make_rpc_request(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"])),
10,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 5);
}
#[test]
fn test_send_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::create_user_account(&key, &to, 50, blockhash, 0);
let signature = rpc_client.send_transaction(&tx);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let rpc_client = RpcClient::new_mock("fails".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
}
#[test]
fn test_get_recent_blockhash() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let vec = bs58::decode(PUBKEY).into_vec().unwrap();
let expected_blockhash = Hash::new(&vec);
let blockhash = dbg!(rpc_client.get_recent_blockhash()).expect("blockhash ok");
assert_eq!(blockhash, expected_blockhash);
let rpc_client = RpcClient::new_mock("fails".to_string());
let blockhash = dbg!(rpc_client.get_recent_blockhash());
assert!(blockhash.is_err());
}
#[test]
fn test_get_signature_status() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let signature = "good_signature";
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Ok(())));
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let signature = "sig_not_found";
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, None);
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let signature = "account_in_use";
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Err(TransactionError::AccountInUse)));
}
#[test]
fn test_send_and_confirm_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let blockhash = Hash::default();
let mut tx = system_transaction::create_user_account(&key, &to, 50, blockhash, 0);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &key);
result.unwrap();
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let result = rpc_client.send_and_confirm_transaction(&mut tx, &key);
assert!(result.is_err());
let rpc_client = RpcClient::new_mock("fails".to_string());
let result = rpc_client.send_and_confirm_transaction(&mut tx, &key);
assert!(result.is_err());
}
#[test]
fn test_resign_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let vec = bs58::decode("HUu3LwEzGRsUkuJS121jzkPJW39Kq62pXCTmTa1F9jDL")
.into_vec()
.unwrap();
let blockhash = Hash::new(&vec);
let prev_tx = system_transaction::create_user_account(&key, &to, 50, blockhash, 0);
let mut tx = system_transaction::create_user_account(&key, &to, 50, blockhash, 0);
rpc_client.resign_transaction(&mut tx, &key).unwrap();
assert_ne!(prev_tx, tx);
assert_ne!(prev_tx.signatures, tx.signatures);
assert_ne!(
prev_tx.message().recent_blockhash,
tx.message().recent_blockhash
);
}
}

client/src/rpc_client_request.rs (new file, 81 lines)
View File

@ -0,0 +1,81 @@
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::rpc_request::{RpcError, RpcRequest};
use log::*;
use reqwest;
use reqwest::header::CONTENT_TYPE;
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
use std::thread::sleep;
use std::time::Duration;
pub struct RpcClientRequest {
client: reqwest::Client,
url: String,
}
impl RpcClientRequest {
pub fn new(url: String) -> Self {
Self {
client: reqwest::Client::new(),
url,
}
}
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
let client = reqwest::Client::builder()
.timeout(timeout)
.build()
.expect("build rpc client");
Self { client, url }
}
}
impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
mut retries: usize,
) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;
let request_json = request.build_request_json(request_id, params);
loop {
match self
.client
.post(&self.url)
.header(CONTENT_TYPE, "application/json")
.body(request_json.to_string())
.send()
{
Ok(mut response) => {
let json: serde_json::Value = serde_json::from_str(&response.text()?)?;
if json["error"].is_object() {
Err(RpcError::RpcRequestError(format!(
"RPC Error response: {}",
serde_json::to_string(&json["error"]).unwrap()
)))?
}
return Ok(json["result"].clone());
}
Err(e) => {
info!(
"make_rpc_request() failed, {} retries left: {:?}",
retries, e
);
if retries == 0 {
Err(e)?;
}
retries -= 1;
// Sleep for approximately half a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / NUM_TICKS_PER_SECOND,
));
}
}
}
}
}

client/src/rpc_request.rs (new file, 113 lines)
View File

@ -0,0 +1,113 @@
use serde_json::{json, Value};
use std::{error, fmt};
#[derive(Debug, PartialEq)]
pub enum RpcRequest {
ConfirmTransaction,
GetAccountInfo,
GetBalance,
GetRecentBlockhash,
GetSignatureStatus,
GetTransactionCount,
RequestAirdrop,
SendTransaction,
RegisterNode,
SignVote,
DeregisterNode,
GetStorageBlockhash,
GetStorageEntryHeight,
GetStoragePubkeysForEntryHeight,
FullnodeExit,
GetNumBlocksSinceSignatureConfirmation,
}
impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Option<Value>) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
RpcRequest::GetAccountInfo => "getAccountInfo",
RpcRequest::GetBalance => "getBalance",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::RegisterNode => "registerNode",
RpcRequest::SignVote => "signVote",
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
RpcRequest::GetStorageEntryHeight => "getStorageEntryHeight",
RpcRequest::GetStoragePubkeysForEntryHeight => "getStoragePubkeysForEntryHeight",
RpcRequest::FullnodeExit => "fullnodeExit",
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
};
let mut request = json!({
"jsonrpc": jsonrpc,
"id": id,
"method": method,
});
if let Some(param_string) = params {
request["params"] = param_string;
}
request
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum RpcError {
RpcRequestError(String),
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "invalid")
}
}
impl error::Error for RpcError {
fn description(&self) -> &str {
"invalid"
}
fn cause(&self) -> Option<&dyn error::Error> {
// Generic error, underlying cause isn't tracked.
None
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_build_request_json() {
let test_request = RpcRequest::GetAccountInfo;
let addr = json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]);
let request = test_request.build_request_json(1, Some(addr.clone()));
assert_eq!(request["method"], "getAccountInfo");
assert_eq!(request["params"], addr,);
let test_request = RpcRequest::GetBalance;
let request = test_request.build_request_json(1, Some(addr));
assert_eq!(request["method"], "getBalance");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getRecentBlockhash");
let test_request = RpcRequest::GetTransactionCount;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getTransactionCount");
let test_request = RpcRequest::RequestAirdrop;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "requestAirdrop");
let test_request = RpcRequest::SendTransaction;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "sendTransaction");
}
}
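The JSON built by `build_request_json` is ordinary JSON-RPC 2.0, so a request can be reproduced by hand; a sketch against a local node (the endpoint and pubkey are illustrative):

```bash
curl -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]}' \
  http://127.0.0.1:8899
```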

client/src/thin_client.rs (new file, 291 lines)
View File

@ -0,0 +1,291 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
use crate::rpc_client::RpcClient;
use bincode::{serialize_into, serialized_size};
use log::*;
use solana_sdk::client::{AsyncClient, Client, SyncClient};
use solana_sdk::hash::Hash;
use solana_sdk::instruction::Instruction;
use solana_sdk::message::Message;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_instruction;
use solana_sdk::transaction::{self, Transaction};
use solana_sdk::transport::Result as TransportResult;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use std::time::Duration;
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
rpc_client: RpcClient,
}
impl ThinClient {
/// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP
/// and the Tpu at `transactions_addr` over `transactions_socket` using UDP.
pub fn new(
rpc_addr: SocketAddr,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
) -> Self {
Self::new_from_client(
transactions_addr,
transactions_socket,
RpcClient::new_socket(rpc_addr),
)
}
pub fn new_socket_with_timeout(
rpc_addr: SocketAddr,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
timeout: Duration,
) -> Self {
let rpc_client = RpcClient::new_socket_with_timeout(rpc_addr, timeout);
Self::new_from_client(transactions_addr, transactions_socket, rpc_client)
}
fn new_from_client(
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
rpc_client: RpcClient,
) -> Self {
Self {
rpc_client,
transactions_addr,
transactions_socket,
}
}
/// Retry sending a signed Transaction to the server for processing.
pub fn retry_transfer_until_confirmed(
&self,
keypair: &Keypair,
transaction: &mut Transaction,
tries: usize,
min_confirmed_blocks: usize,
) -> io::Result<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, min_confirmed_blocks)
}
/// Retry sending a signed Transaction with one signing Keypair to the server for processing.
pub fn retry_transfer(
&self,
keypair: &Keypair,
transaction: &mut Transaction,
tries: usize,
) -> io::Result<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, 0)
}
/// Retry sending a signed Transaction to the server for processing
pub fn send_and_confirm_transaction(
&self,
keypairs: &[&Keypair],
transaction: &mut Transaction,
tries: usize,
min_confirmed_blocks: usize,
) -> io::Result<Signature> {
for x in 0..tries {
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr)?;
if self
.poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks)
.is_ok()
{
return Ok(transaction.signatures[0]);
}
info!("{} tries failed transfer to {}", x, self.transactions_addr);
transaction.sign(keypairs, self.rpc_client.get_recent_blockhash()?);
}
Err(io::Error::new(
io::ErrorKind::Other,
format!("retry_transfer failed in {} retries", tries),
))
}
pub fn get_new_blockhash(&self, blockhash: &Hash) -> io::Result<Hash> {
self.rpc_client.get_new_blockhash(blockhash)
}
pub fn poll_balance_with_timeout(
&self,
pubkey: &Pubkey,
polling_frequency: &Duration,
timeout: &Duration,
) -> io::Result<u64> {
self.rpc_client
.poll_balance_with_timeout(pubkey, polling_frequency, timeout)
}
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
self.rpc_client.poll_get_balance(pubkey)
}
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
self.rpc_client.wait_for_balance(pubkey, expected_balance)
}
pub fn poll_for_signature(&self, signature: &Signature) -> io::Result<()> {
self.rpc_client.poll_for_signature(signature)
}
/// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
pub fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> io::Result<()> {
self.rpc_client
.poll_for_signature_confirmation(signature, min_confirmed_blocks)
}
/// Check a signature in the bank. This method blocks
/// until the server sends a response.
pub fn check_signature(&self, signature: &Signature) -> bool {
self.rpc_client.check_signature(signature)
}
pub fn fullnode_exit(&self) -> io::Result<bool> {
self.rpc_client.fullnode_exit()
}
pub fn get_num_blocks_since_signature_confirmation(
&mut self,
sig: &Signature,
) -> io::Result<usize> {
self.rpc_client
.get_num_blocks_since_signature_confirmation(sig)
}
}
impl Client for ThinClient {}
impl SyncClient for ThinClient {
fn send_message(&self, keypairs: &[&Keypair], message: Message) -> TransportResult<Signature> {
let blockhash = self.get_recent_blockhash()?;
let mut transaction = Transaction::new(&keypairs, message, blockhash);
let signature = self.send_and_confirm_transaction(keypairs, &mut transaction, 5, 0)?;
Ok(signature)
}
fn send_instruction(
&self,
keypair: &Keypair,
instruction: Instruction,
) -> TransportResult<Signature> {
let message = Message::new(vec![instruction]);
self.send_message(&[keypair], message)
}
fn transfer(
&self,
lamports: u64,
keypair: &Keypair,
pubkey: &Pubkey,
) -> TransportResult<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.send_instruction(keypair, transfer_instruction)
}
fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> {
Ok(self.rpc_client.get_account_data(pubkey).ok())
}
fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
let balance = self.rpc_client.get_balance(pubkey)?;
Ok(balance)
}
fn get_signature_status(
&self,
signature: &Signature,
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client
.get_signature_status(&signature.to_string())
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("send_transaction failed with error {}", err),
)
})?;
Ok(status)
}
fn get_recent_blockhash(&self) -> TransportResult<Hash> {
let recent_blockhash = self.rpc_client.get_recent_blockhash()?;
Ok(recent_blockhash)
}
fn get_transaction_count(&self) -> TransportResult<u64> {
let transaction_count = self.rpc_client.get_transaction_count()?;
Ok(transaction_count)
}
}
impl AsyncClient for ThinClient {
fn async_send_transaction(&self, transaction: Transaction) -> io::Result<Signature> {
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn async_send_transaction");
assert!(buf.len() < PACKET_DATA_SIZE);
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr)?;
Ok(transaction.signatures[0])
}
fn async_send_message(
&self,
keypairs: &[&Keypair],
message: Message,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let transaction = Transaction::new(&keypairs, message, recent_blockhash);
self.async_send_transaction(transaction)
}
fn async_send_instruction(
&self,
keypair: &Keypair,
instruction: Instruction,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let message = Message::new(vec![instruction]);
self.async_send_message(&[keypair], message, recent_blockhash)
}
fn async_transfer(
&self,
lamports: u64,
keypair: &Keypair,
pubkey: &Pubkey,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.async_send_instruction(keypair, transfer_instruction, recent_blockhash)
}
}
pub fn create_client((rpc, tpu): (SocketAddr, SocketAddr), range: (u16, u16)) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
ThinClient::new(rpc, tpu, transactions_socket)
}
pub fn create_client_with_timeout(
(rpc, tpu): (SocketAddr, SocketAddr),
range: (u16, u16),
timeout: Duration,
) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
ThinClient::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout)
}
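
A minimal usage sketch of the helpers above, assuming the surrounding imports (`Keypair`, `Pubkey`, `system_transaction`) are in scope; addresses, ports, and retry counts are placeholders, not part of the diff:

// Sketch: build a ThinClient from placeholder RPC/TPU addresses, then
// retry a 1-lamport transfer until it lands.
let rpc: std::net::SocketAddr = "127.0.0.1:8899".parse().unwrap();
let tpu: std::net::SocketAddr = "127.0.0.1:8001".parse().unwrap();
let client = create_client((rpc, tpu), (8000, 10_000));

let alice = Keypair::new();
let bob = Pubkey::new_rand();
let blockhash = client.get_recent_blockhash().unwrap(); // via the SyncClient trait
let mut tx = system_transaction::transfer(&alice, &bob, 1, blockhash, 0);
// Up to ten sends; each failed attempt re-signs with a fresh blockhash.
let signature = client.retry_transfer(&alice, &mut tx, 10).unwrap();
assert!(client.poll_for_signature(&signature).is_ok());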

View File

@ -1,10 +1,10 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.0"
version = "0.13.0"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
readme = "../README.md"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"
@ -17,21 +17,22 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
chacha = []
cuda = []
erasure = []
kvstore = ["memmap"]
kvstore = ["solana-kvstore"]
[dependencies]
bincode = "1.1.2"
bs58 = "0.2.0"
byteorder = "1.3.1"
chrono = { version = "0.4.0", features = ["serde"] }
hashbrown = "0.1.8"
crc = { version = "1.8.1", optional = true }
hashbrown = "0.2.0"
indexmap = "1.0"
itertools = "0.8.0"
jsonrpc-core = "10.1.0"
jsonrpc-derive = "10.1.0"
jsonrpc-http-server = "10.1.0"
jsonrpc-pubsub = "10.1.0"
jsonrpc-ws-server = "10.1.0"
jsonrpc-core = "11.0.0"
jsonrpc-derive = "11.0.0"
jsonrpc-http-server = "11.0.0"
jsonrpc-pubsub = "11.0.0"
jsonrpc-ws-server = "11.0.0"
libc = "0.2.50"
log = "0.4.2"
memmap = { version = "0.7.0", optional = true }
@ -45,24 +46,44 @@ rocksdb = "0.11.0"
serde = "1.0.89"
serde_derive = "1.0.88"
serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.12.0" }
solana-drone = { path = "../drone", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.0" }
solana-metrics = { path = "../metrics", version = "0.12.0" }
solana-netutil = { path = "../netutil", version = "0.12.0" }
solana-runtime = { path = "../runtime", version = "0.12.0" }
solana-sdk = { path = "../sdk", version = "0.12.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.13.0" }
solana-client = { path = "../client", version = "0.13.0" }
solana-drone = { path = "../drone", version = "0.13.0" }
solana-kvstore = { path = "../kvstore", version = "0.13.0", optional = true }
solana-logger = { path = "../logger", version = "0.13.0" }
solana-metrics = { path = "../metrics", version = "0.13.0" }
solana-netutil = { path = "../netutil", version = "0.13.0" }
solana-runtime = { path = "../runtime", version = "0.13.0" }
solana-sdk = { path = "../sdk", version = "0.13.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.13.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.13.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.13.0" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
[dev-dependencies]
hex-literal = "0.1.3"
hex-literal = "0.1.4"
matches = "0.1.6"
solana-vote-program = { path = "../programs/vote", version = "0.12.0" }
solana-budget-program = { path = "../programs/budget", version = "0.12.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.13.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.13.0" }
[[bench]]
name = "banking_stage"
[[bench]]
name = "blocktree"
[[bench]]
name = "ledger"
[[bench]]
name = "gen_keys"
[[bench]]
name = "sigverify"
[[bench]]
required-features = ["chacha"]
name = "chacha"

View File

@ -1,10 +1,13 @@
#![feature(test)]
extern crate test;
#[macro_use]
extern crate solana;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::banking_stage::{create_test_recorder, BankingStage};
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
use solana::cluster_info::ClusterInfo;
use solana::cluster_info::Node;
use solana::packet::to_packets_chunked;
@ -15,7 +18,7 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{KeypairUtil, Signature};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES};
use std::iter;
use std::sync::atomic::Ordering;
@ -53,7 +56,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let (verified_sender, verified_receiver) = channel();
let bank = Arc::new(Bank::new(&genesis_block));
let dummy = SystemTransaction::new_move(
let dummy = system_transaction::transfer(
&mint_keypair,
&mint_keypair.pubkey(),
1,
@ -67,17 +70,17 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = SystemTransaction::new_move(
let fund = system_transaction::transfer(
&mint_keypair,
&tx.account_keys[0],
&tx.message.account_keys[0],
mint_total / txes as u64,
genesis_block.hash(),
0,
@ -104,33 +107,41 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[bench]
@ -145,7 +156,7 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let (verified_sender, verified_receiver) = channel();
let bank = Arc::new(Bank::new(&genesis_block));
let dummy = SystemTransaction::new_move(
let dummy = system_transaction::transfer(
&mint_keypair,
&mint_keypair.pubkey(),
1,
@ -159,33 +170,33 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.instructions[0].clone();
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.message.instructions[0].clone();
for i in 1..progs {
//generate programs that spend to random keys
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let to_key = Pubkey::new(&to[0..32]);
new.account_keys.push(to_key);
assert_eq!(new.account_keys.len(), i + 2);
new.instructions.push(prog.clone());
assert_eq!(new.instructions.len(), i + 1);
new.instructions[i].accounts[1] = 1 + i as u8;
new.message.account_keys.push(to_key);
assert_eq!(new.message.account_keys.len(), i + 2);
new.message.instructions.push(prog.clone());
assert_eq!(new.message.instructions.len(), i + 1);
new.message.instructions[i].accounts[1] = 1 + i as u8;
assert_eq!(new.key(i, 1), Some(&to_key));
assert_eq!(
new.account_keys[new.instructions[i].accounts[1] as usize],
new.message.account_keys[new.message.instructions[i].accounts[1] as usize],
to_key
);
}
assert_eq!(new.instructions.len(), progs);
assert_eq!(new.message.instructions.len(), progs);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
transactions.iter().for_each(|tx| {
let fund = SystemTransaction::new_move(
let fund = system_transaction::transfer(
&mint_keypair,
&tx.account_keys[0],
&tx.message.account_keys[0],
mint_total / txes as u64,
genesis_block.hash(),
0,
@ -211,31 +222,40 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
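
The bench changes above track two API moves in this series: `SystemTransaction::new_move` became the free function `system_transaction::transfer`, and `account_keys`/`instructions` now hang off the transaction's `message`. A minimal sketch of the new shape (the blockhash is a placeholder):

use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

let from = Keypair::new();
let to = Pubkey::new_rand();
let mut tx = system_transaction::transfer(&from, &to, 1, Hash::default(), 0);
// Fields that used to live on Transaction moved under Transaction::message.
assert_eq!(tx.message.account_keys[0], from.pubkey());
tx.message.account_keys[1] = to; // the slot the benches rewrite per iteration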

View File

@ -5,7 +5,7 @@ extern crate test;
use solana::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use test::Bencher;
#[bench]
@ -13,7 +13,7 @@ fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let tx0 = SystemTransaction::new_move(&keypair, &keypair.pubkey(), 1, one, 0);
let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, one, 0);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);

View File

@ -1,9 +1,12 @@
//! The `bank_forks` module implements BankForks, a DAG of checkpointed Banks
use hashbrown::{HashMap, HashSet};
use solana_metrics::counter::Counter;
use solana_runtime::bank::Bank;
use std::collections::HashMap;
use solana_sdk::timing;
use std::ops::Index;
use std::sync::Arc;
use std::time::Instant;
pub struct BankForks {
banks: HashMap<u64, Arc<Bank>>,
@ -27,17 +30,40 @@ impl BankForks {
working_bank,
}
}
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
let mut frozen_banks: Vec<Arc<Bank>> = vec![];
frozen_banks.extend(self.banks.values().filter(|v| v.is_frozen()).cloned());
frozen_banks.extend(
self.banks
.iter()
.flat_map(|(_, v)| v.parents())
.filter(|v| v.is_frozen()),
);
frozen_banks.into_iter().map(|b| (b.slot(), b)).collect()
/// Create a map of bank slot id to the set of ancestors for the bank slot.
pub fn ancestors(&self) -> HashMap<u64, HashSet<u64>> {
let mut ancestors = HashMap::new();
for bank in self.banks.values() {
let set = bank.parents().into_iter().map(|b| b.slot()).collect();
ancestors.insert(bank.slot(), set);
}
ancestors
}
/// Create a map of bank slot id to the set of all of its descendants
pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
let mut descendants = HashMap::new();
for bank in self.banks.values() {
let _ = descendants.entry(bank.slot()).or_insert(HashSet::new());
for parent in bank.parents() {
descendants
.entry(parent.slot())
.or_insert(HashSet::new())
.insert(bank.slot());
}
}
descendants
}
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
self.banks
.iter()
.filter(|(_, b)| b.is_frozen())
.map(|(k, b)| (*k, b.clone()))
.collect()
}
pub fn active_banks(&self) -> Vec<u64> {
self.banks
.iter()
@ -45,6 +71,7 @@ impl BankForks {
.map(|(k, _v)| *k)
.collect()
}
pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> {
self.banks.get(&bank_slot)
}
@ -61,30 +88,37 @@ impl BankForks {
}
}
// TODO: use the bank's own ID instead of receiving a parameter?
pub fn insert(&mut self, bank_slot: u64, bank: Bank) {
let mut bank = Arc::new(bank);
assert_eq!(bank_slot, bank.slot());
let prev = self.banks.insert(bank_slot, bank.clone());
pub fn insert(&mut self, bank: Bank) {
let bank = Arc::new(bank);
let prev = self.banks.insert(bank.slot(), bank.clone());
assert!(prev.is_none());
self.working_bank = bank.clone();
// TODO: this really only needs to look at the first
// parent if we're always calling insert()
// when we construct a child bank
while let Some(parent) = bank.parent() {
if let Some(prev) = self.banks.remove(&parent.slot()) {
assert!(Arc::ptr_eq(&prev, &parent));
}
bank = parent;
}
}
// TODO: really want to kill this...
pub fn working_bank(&self) -> Arc<Bank> {
self.working_bank.clone()
}
pub fn set_root(&mut self, root: u64) {
let set_root_start = Instant::now();
let root_bank = self
.banks
.get(&root)
.expect("root bank didn't exist in bank_forks");
root_bank.squash();
self.prune_non_root(root);
inc_new_counter_info!(
"bank-forks_set_root_ms",
timing::duration_as_ms(&set_root_start.elapsed()) as usize
);
}
fn prune_non_root(&mut self, root: u64) {
self.banks
.retain(|slot, bank| *slot >= root || bank.is_in_subtree_of(root))
}
}
#[cfg(test)]
@ -101,18 +135,53 @@ mod tests {
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
child_bank.register_tick(&Hash::default());
bank_forks.insert(1, child_bank);
bank_forks.insert(child_bank);
assert_eq!(bank_forks[1u64].tick_height(), 1);
assert_eq!(bank_forks.working_bank().tick_height(), 1);
}
#[test]
fn test_bank_forks_descendants() {
let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let descendants = bank_forks.descendants();
let children: Vec<u64> = descendants[&0].iter().cloned().collect();
assert_eq!(children, vec![1, 2]);
assert!(descendants[&1].is_empty());
assert!(descendants[&2].is_empty());
}
#[test]
fn test_bank_forks_ancestors() {
let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let ancestors = bank_forks.ancestors();
assert!(ancestors[&0].is_empty());
let parents: Vec<u64> = ancestors[&1].iter().cloned().collect();
assert_eq!(parents, vec![0]);
let parents: Vec<u64> = ancestors[&2].iter().cloned().collect();
assert_eq!(parents, vec![0]);
}
#[test]
fn test_bank_forks_frozen_banks() {
let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(1, child_bank);
bank_forks.insert(child_bank);
assert!(bank_forks.frozen_banks().get(&0).is_some());
assert!(bank_forks.frozen_banks().get(&1).is_none());
}
@ -123,7 +192,7 @@ mod tests {
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(1, child_bank);
bank_forks.insert(child_bank);
assert_eq!(bank_forks.active_banks(), vec![1]);
}
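
A short sketch of the reworked fork management in the style of the tests above: `insert` now takes just the `Bank`, and `set_root` squashes the root bank and prunes banks that are neither at or above the root slot nor in its subtree (a sketch, assuming `Bank::is_in_subtree_of` behaves as `prune_non_root` uses it):

let (genesis_block, _) = GenesisBlock::new(10_000);
let mut bank_forks = BankForks::new(0, Bank::new(&genesis_block));
let bank0 = bank_forks[0].clone();
bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 2));

bank_forks.set_root(1);
// Slot 0 sits below the root and outside its subtree, so it is pruned;
// slot 2 survives because prune_non_root keeps every slot >= root.
assert!(bank_forks.get(0).is_none());
assert!(bank_forks.get(2).is_some());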

File diff suppressed because it is too large

View File

@ -4,7 +4,9 @@
use crate::entry::Entry;
use crate::result::Result;
use bincode::serialize;
use chrono::{SecondsFormat, Utc};
use serde_json::json;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cell::RefCell;
@ -91,7 +93,13 @@ where
leader_id: &Pubkey,
entry: &Entry,
) -> Result<()> {
let json_entry = serde_json::to_string(&entry)?;
let transactions: Vec<Vec<u8>> = serialize_transactions(entry);
let stream_entry = json!({
"num_hashes": entry.num_hashes,
"hash": entry.hash,
"transactions": transactions
});
let json_entry = serde_json::to_string(&stream_entry)?;
let payload = format!(
r#"{{"dt":"{}","t":"entry","s":{},"h":{},"l":"{:?}","entry":{}}}"#,
Utc::now().to_rfc3339_opts(SecondsFormat::Nanos, true),
@ -148,6 +156,14 @@ impl MockBlockstream {
}
}
fn serialize_transactions(entry: &Entry) -> Vec<Vec<u8>> {
entry
.transactions
.iter()
.map(|tx| serialize(&tx).unwrap())
.collect()
}
#[cfg(test)]
mod test {
use super::*;
@ -156,8 +172,30 @@ mod test {
use serde_json::Value;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use std::collections::HashSet;
#[test]
fn test_serialize_transactions() {
let entry = Entry::new(&Hash::default(), 1, vec![]);
let empty_vec: Vec<Vec<u8>> = vec![];
assert_eq!(serialize_transactions(&entry), empty_vec);
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let tx0 =
system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default(), 0);
let tx1 =
system_transaction::transfer(&keypair1, &keypair0.pubkey(), 2, Hash::default(), 0);
let serialized_tx0 = serialize(&tx0).unwrap();
let serialized_tx1 = serialize(&tx1).unwrap();
let entry = Entry::new(&Hash::default(), 1, vec![tx0, tx1]);
assert_eq!(
serialize_transactions(&entry),
vec![serialized_tx0, serialized_tx1]
);
}
#[test]
fn test_blockstream() -> () {
let blockstream = MockBlockstream::new("test_stream".to_string());
@ -170,7 +208,7 @@ mod test {
let tick_height_initial = 0;
let tick_height_final = tick_height_initial + ticks_per_slot + 2;
let mut curr_slot = 0;
let leader_id = Keypair::new().pubkey();
let leader_id = Pubkey::new_rand();
for tick_height in tick_height_initial..=tick_height_final {
if tick_height == 5 {

View File

@ -109,25 +109,26 @@ mod test {
use super::*;
use crate::blocktree::create_new_tmp_ledger;
use crate::entry::{create_ticks, Entry};
use bincode::{deserialize, serialize};
use chrono::{DateTime, FixedOffset};
use serde_json::Value;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use std::sync::mpsc::channel;
#[test]
fn test_blockstream_service_process_entries() {
let ticks_per_slot = 5;
let leader_id = Keypair::new().pubkey();
let leader_id = Pubkey::new_rand();
// Set up genesis block and blocktree
let (mut genesis_block, _mint_keypair) = GenesisBlock::new(1000);
genesis_block.ticks_per_slot = ticks_per_slot;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot).unwrap();
let blocktree = Blocktree::open(&ledger_path).unwrap();
// Set up blockstream
let mut blockstream = Blockstream::new("test_stream".to_string());
@ -140,7 +141,13 @@ mod test {
let keypair = Keypair::new();
let mut blockhash = entries[3].hash;
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, Hash::default(), 0);
let tx = system_transaction::create_user_account(
&keypair,
&keypair.pubkey(),
1,
Hash::default(),
0,
);
let entry = Entry::new(&mut blockhash, 1, vec![tx]);
blockhash = entry.hash;
entries.push(entry);
@ -150,7 +157,9 @@ mod test {
let expected_entries = entries.clone();
let expected_tick_heights = [5, 6, 7, 8, 8, 9];
blocktree.write_entries(1, 0, 0, &entries).unwrap();
blocktree
.write_entries(1, 0, 0, ticks_per_slot, &entries)
.unwrap();
slot_full_sender.send((1, leader_id)).unwrap();
BlockstreamService::process_entries(
@ -180,13 +189,34 @@ mod test {
assert_eq!(height, expected_tick_heights[i]);
let entry_obj = json["entry"].clone();
let tx = entry_obj["transactions"].as_array().unwrap();
let entry: Entry;
if tx.len() == 0 {
// TODO: There is a bug in Transaction deserialize methods such that
// `serde_json::from_str` does not work for populated Entries.
// Remove this `if` when fixed.
let entry: Entry = serde_json::from_value(entry_obj).unwrap();
assert_eq!(entry, expected_entries[i]);
entry = serde_json::from_value(entry_obj).unwrap();
} else {
let entry_json = entry_obj.as_object().unwrap();
entry = Entry {
num_hashes: entry_json.get("num_hashes").unwrap().as_u64().unwrap(),
hash: serde_json::from_value(entry_json.get("hash").unwrap().clone()).unwrap(),
transactions: entry_json
.get("transactions")
.unwrap()
.as_array()
.unwrap()
.into_iter()
.enumerate()
.map(|(j, tx)| {
let tx_vec: Vec<u8> = serde_json::from_value(tx.clone()).unwrap();
// Check explicitly that transaction matches bincode-serialized format
assert_eq!(
tx_vec,
serialize(&expected_entries[i].transactions[j]).unwrap()
);
deserialize(&tx_vec).unwrap()
})
.collect(),
};
}
assert_eq!(entry, expected_entries[i]);
}
for json in block_events {
let slot = json["s"].as_u64().unwrap();

File diff suppressed because it is too large

View File

@ -1,4 +1,3 @@
use crate::entry::Entry;
use crate::result::{Error, Result};
use bincode::{deserialize, serialize};
@ -7,24 +6,58 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::Path;
use std::sync::Arc;
pub trait Database: Sized + Send + Sync {
type Error: Into<Error>;
type Key: Borrow<Self::KeyRef>;
type KeyRef: ?Sized;
type ColumnFamily;
type Cursor: Cursor<Self>;
type EntryIter: Iterator<Item = Entry>;
pub mod columns {
#[derive(Debug)]
/// SlotMeta Column
pub struct SlotMeta;
#[derive(Debug)]
/// Orphans Column
pub struct Orphans;
#[derive(Debug)]
/// Erasure Column
pub struct Coding;
#[derive(Debug)]
/// Data Column
pub struct Data;
#[cfg(feature = "erasure")]
#[derive(Debug)]
/// The erasure meta column
pub struct ErasureMeta;
}
pub trait Backend: Sized + Send + Sync {
type Key: ?Sized + ToOwned<Owned = Self::OwnedKey>;
type OwnedKey: Borrow<Self::Key>;
type ColumnFamily: Clone;
type Cursor: DbCursor<Self>;
type Iter: Iterator<Item = (Box<Self::Key>, Box<[u8]>)>;
type WriteBatch: IWriteBatch<Self>;
type Error: Into<Error>;
fn cf_handle(&self, cf: &str) -> Option<Self::ColumnFamily>;
fn open(path: &Path) -> Result<Self>;
fn get_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef) -> Result<Option<Vec<u8>>>;
fn columns(&self) -> Vec<&'static str>;
fn put_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef, data: &[u8]) -> Result<()>;
fn destroy(path: &Path) -> Result<()>;
fn delete_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef) -> Result<()>;
fn cf_handle(&self, cf: &str) -> Self::ColumnFamily;
fn get_cf(&self, cf: Self::ColumnFamily, key: &Self::Key) -> Result<Option<Vec<u8>>>;
fn put_cf(&self, cf: Self::ColumnFamily, key: &Self::Key, value: &[u8]) -> Result<()>;
fn delete_cf(&self, cf: Self::ColumnFamily, key: &Self::Key) -> Result<()>;
fn iterator_cf(&self, cf: Self::ColumnFamily) -> Result<Self::Iter>;
fn raw_iterator_cf(&self, cf: Self::ColumnFamily) -> Result<Self::Cursor>;
@ -33,163 +66,343 @@ pub trait Database: Sized + Send + Sync {
fn batch(&self) -> Result<Self::WriteBatch>;
}
pub trait Cursor<D: Database> {
pub trait Column<B>
where
B: Backend,
{
const NAME: &'static str;
type Index;
fn key(index: Self::Index) -> B::OwnedKey;
fn index(key: &B::Key) -> Self::Index;
}
pub trait DbCursor<B>
where
B: Backend,
{
fn valid(&self) -> bool;
fn seek(&mut self, key: &D::KeyRef);
fn seek(&mut self, key: &B::Key);
fn seek_to_first(&mut self);
fn next(&mut self);
fn key(&self) -> Option<D::Key>;
fn key(&self) -> Option<B::OwnedKey>;
fn value(&self) -> Option<Vec<u8>>;
}
pub trait IWriteBatch<D: Database> {
fn put_cf(&mut self, cf: D::ColumnFamily, key: &D::KeyRef, data: &[u8]) -> Result<()>;
pub trait IWriteBatch<B>
where
B: Backend,
{
fn put_cf(&mut self, cf: B::ColumnFamily, key: &B::Key, value: &[u8]) -> Result<()>;
fn delete_cf(&mut self, cf: B::ColumnFamily, key: &B::Key) -> Result<()>;
}
pub trait IDataCf<D: Database>: LedgerColumnFamilyRaw<D> {
fn new(db: Arc<D>) -> Self;
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
let key = Self::key(slot, index);
self.get(key.borrow())
}
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
let key = Self::key(slot, index);
self.delete(&key.borrow())
}
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
let key = Self::key(slot, index);
self.put(key.borrow(), serialized_value)
}
fn key(slot: u64, index: u64) -> D::Key;
fn slot_from_key(key: &D::KeyRef) -> Result<u64>;
fn index_from_key(key: &D::KeyRef) -> Result<u64>;
pub trait TypedColumn<B>: Column<B>
where
B: Backend,
{
type Type: Serialize + DeserializeOwned;
}
pub trait IErasureCf<D: Database>: LedgerColumnFamilyRaw<D> {
fn new(db: Arc<D>) -> Self;
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
let key = Self::key(slot, index);
self.delete(key.borrow())
}
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
let key = Self::key(slot, index);
self.get(key.borrow())
}
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
let key = Self::key(slot, index);
self.put(key.borrow(), serialized_value)
}
fn key(slot: u64, index: u64) -> D::Key;
fn slot_from_key(key: &D::KeyRef) -> Result<u64>;
fn index_from_key(key: &D::KeyRef) -> Result<u64>;
#[derive(Debug, Clone)]
pub struct Database<B>
where
B: Backend,
{
backend: B,
}
pub trait IMetaCf<D: Database>: LedgerColumnFamily<D, ValueType = super::SlotMeta> {
fn new(db: Arc<D>) -> Self;
fn key(slot: u64) -> D::Key;
fn get_slot_meta(&self, slot: u64) -> Result<Option<super::SlotMeta>> {
let key = Self::key(slot);
self.get(key.borrow())
}
fn put_slot_meta(&self, slot: u64, slot_meta: &super::SlotMeta) -> Result<()> {
let key = Self::key(slot);
self.put(key.borrow(), slot_meta)
}
fn index_from_key(key: &D::KeyRef) -> Result<u64>;
#[derive(Debug, Clone)]
pub struct Cursor<B, C>
where
B: Backend,
C: Column<B>,
{
db_cursor: B::Cursor,
column: PhantomData<C>,
backend: PhantomData<B>,
}
pub trait LedgerColumnFamily<D: Database> {
type ValueType: DeserializeOwned + Serialize;
#[derive(Debug, Clone)]
pub struct LedgerColumn<B, C>
where
B: Backend,
C: Column<B>,
{
pub db: Arc<Database<B>>,
column: PhantomData<C>,
}
fn get(&self, key: &D::KeyRef) -> Result<Option<Self::ValueType>> {
let db = self.db();
let data_bytes = db.get_cf(self.handle(), key)?;
#[derive(Debug)]
pub struct WriteBatch<B>
where
B: Backend,
{
write_batch: B::WriteBatch,
backend: PhantomData<B>,
map: HashMap<&'static str, B::ColumnFamily>,
}
if let Some(raw) = data_bytes {
let result: Self::ValueType = deserialize(&raw)?;
Ok(Some(result))
impl<B> Database<B>
where
B: Backend,
{
pub fn open(path: &Path) -> Result<Self> {
let backend = B::open(path)?;
Ok(Database { backend })
}
pub fn destroy(path: &Path) -> Result<()> {
B::destroy(path)?;
Ok(())
}
pub fn get_bytes<C>(&self, key: C::Index) -> Result<Option<Vec<u8>>>
where
C: Column<B>,
{
self.backend
.get_cf(self.cf_handle::<C>(), C::key(key).borrow())
}
pub fn put_bytes<C>(&self, key: C::Index, data: &[u8]) -> Result<()>
where
C: Column<B>,
{
self.backend
.put_cf(self.cf_handle::<C>(), C::key(key).borrow(), data)
}
pub fn delete<C>(&self, key: C::Index) -> Result<()>
where
C: Column<B>,
{
self.backend
.delete_cf(self.cf_handle::<C>(), C::key(key).borrow())
}
pub fn get<C>(&self, key: C::Index) -> Result<Option<C::Type>>
where
C: TypedColumn<B>,
{
if let Some(serialized_value) = self
.backend
.get_cf(self.cf_handle::<C>(), C::key(key).borrow())?
{
let value = deserialize(&serialized_value)?;
Ok(Some(value))
} else {
Ok(None)
}
}
fn get_bytes(&self, key: &D::KeyRef) -> Result<Option<Vec<u8>>> {
let db = self.db();
let data_bytes = db.get_cf(self.handle(), key)?;
Ok(data_bytes.map(|x| x.to_vec()))
pub fn put<C>(&self, key: C::Index, value: &C::Type) -> Result<()>
where
C: TypedColumn<B>,
{
let serialized_value = serialize(value)?;
self.backend.put_cf(
self.cf_handle::<C>(),
C::key(key).borrow(),
&serialized_value,
)
}
fn put_bytes(&self, key: &D::KeyRef, serialized_value: &[u8]) -> Result<()> {
let db = self.db();
db.put_cf(self.handle(), key, &serialized_value)?;
Ok(())
pub fn cursor<C>(&self) -> Result<Cursor<B, C>>
where
C: Column<B>,
{
let db_cursor = self.backend.raw_iterator_cf(self.cf_handle::<C>())?;
Ok(Cursor {
db_cursor,
column: PhantomData,
backend: PhantomData,
})
}
fn put(&self, key: &D::KeyRef, value: &Self::ValueType) -> Result<()> {
let db = self.db();
let serialized = serialize(value)?;
db.put_cf(self.handle(), key, &serialized)?;
Ok(())
pub fn iter<C>(&self) -> Result<impl Iterator<Item = (C::Index, Vec<u8>)>>
where
C: Column<B>,
{
let iter = self
.backend
.iterator_cf(self.cf_handle::<C>())?
.map(|(key, value)| (C::index(&key), value.into()));
Ok(iter)
}
fn delete(&self, key: &D::KeyRef) -> Result<()> {
let db = self.db();
db.delete_cf(self.handle(), key)?;
Ok(())
pub fn batch(&self) -> Result<WriteBatch<B>> {
let db_write_batch = self.backend.batch()?;
let map = self
.backend
.columns()
.into_iter()
.map(|desc| (desc, self.backend.cf_handle(desc)))
.collect();
Ok(WriteBatch {
write_batch: db_write_batch,
backend: PhantomData,
map,
})
}
fn db(&self) -> &Arc<D>;
pub fn write(&self, batch: WriteBatch<B>) -> Result<()> {
self.backend.write(batch.write_batch)
}
fn handle(&self) -> D::ColumnFamily;
#[inline]
pub fn cf_handle<C>(&self) -> B::ColumnFamily
where
C: Column<B>,
{
self.backend.cf_handle(C::NAME).clone()
}
}
pub trait LedgerColumnFamilyRaw<D: Database> {
fn get(&self, key: &D::KeyRef) -> Result<Option<Vec<u8>>> {
let db = self.db();
let data_bytes = db.get_cf(self.handle(), key)?;
Ok(data_bytes.map(|x| x.to_vec()))
impl<B, C> Cursor<B, C>
where
B: Backend,
C: Column<B>,
{
pub fn valid(&self) -> bool {
self.db_cursor.valid()
}
fn put(&self, key: &D::KeyRef, serialized_value: &[u8]) -> Result<()> {
let db = self.db();
db.put_cf(self.handle(), &key, &serialized_value)?;
Ok(())
pub fn seek(&mut self, key: C::Index) {
self.db_cursor.seek(C::key(key).borrow());
}
fn delete(&self, key: &D::KeyRef) -> Result<()> {
let db = self.db();
db.delete_cf(self.handle(), &key)?;
Ok(())
pub fn seek_to_first(&mut self) {
self.db_cursor.seek_to_first();
}
fn raw_iterator(&self) -> D::Cursor {
let db = self.db();
db.raw_iterator_cf(self.handle())
.expect("Expected to be able to open database iterator")
pub fn next(&mut self) {
self.db_cursor.next();
}
fn handle(&self) -> D::ColumnFamily;
pub fn key(&self) -> Option<C::Index> {
if let Some(key) = self.db_cursor.key() {
Some(C::index(key.borrow()))
} else {
None
}
}
fn db(&self) -> &Arc<D>;
pub fn value_bytes(&self) -> Option<Vec<u8>> {
self.db_cursor.value()
}
}
impl<B, C> Cursor<B, C>
where
B: Backend,
C: TypedColumn<B>,
{
pub fn value(&self) -> Option<C::Type> {
if let Some(bytes) = self.db_cursor.value() {
let value = deserialize(&bytes).ok()?;
Some(value)
} else {
None
}
}
}
impl<B, C> LedgerColumn<B, C>
where
B: Backend,
C: Column<B>,
{
pub fn new(db: &Arc<Database<B>>) -> Self {
LedgerColumn {
db: Arc::clone(db),
column: PhantomData,
}
}
pub fn put_bytes(&self, key: C::Index, value: &[u8]) -> Result<()> {
self.db
.backend
.put_cf(self.handle(), C::key(key).borrow(), value)
}
pub fn get_bytes(&self, key: C::Index) -> Result<Option<Vec<u8>>> {
self.db.backend.get_cf(self.handle(), C::key(key).borrow())
}
pub fn delete(&self, key: C::Index) -> Result<()> {
self.db
.backend
.delete_cf(self.handle(), C::key(key).borrow())
}
pub fn cursor(&self) -> Result<Cursor<B, C>> {
self.db.cursor()
}
pub fn iter(&self) -> Result<impl Iterator<Item = (C::Index, Vec<u8>)>> {
self.db.iter::<C>()
}
pub fn handle(&self) -> B::ColumnFamily {
self.db.cf_handle::<C>()
}
pub fn is_empty(&self) -> Result<bool> {
let mut cursor = self.cursor()?;
cursor.seek_to_first();
Ok(!cursor.valid())
}
}
impl<B, C> LedgerColumn<B, C>
where
B: Backend,
C: TypedColumn<B>,
{
pub fn put(&self, key: C::Index, value: &C::Type) -> Result<()> {
self.db.put::<C>(key, value)
}
pub fn get(&self, key: C::Index) -> Result<Option<C::Type>> {
self.db.get::<C>(key)
}
}
impl<B> WriteBatch<B>
where
B: Backend,
{
pub fn put_bytes<C: Column<B>>(&mut self, key: C::Index, bytes: &[u8]) -> Result<()> {
self.write_batch
.put_cf(self.get_cf::<C>(), C::key(key).borrow(), bytes)
}
pub fn delete<C: Column<B>>(&mut self, key: C::Index) -> Result<()> {
self.write_batch
.delete_cf(self.get_cf::<C>(), C::key(key).borrow())
}
pub fn put<C: TypedColumn<B>>(&mut self, key: C::Index, value: &C::Type) -> Result<()> {
let serialized_value = serialize(&value)?;
self.write_batch
.put_cf(self.get_cf::<C>(), C::key(key).borrow(), &serialized_value)
}
#[inline]
fn get_cf<C: Column<B>>(&self) -> B::ColumnFamily {
self.map[C::NAME].clone()
}
}
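
The `TypedColumn` accessors above are thin wrappers over bincode: `put` serializes `C::Type`, and `get` deserializes it. A self-contained illustration of that storage contract (`DemoMeta` is a stand-in, not one of the real columns):

use bincode::{deserialize, serialize};
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct DemoMeta {
    slot: u64,
    consumed: u64,
}

fn main() {
    let meta = DemoMeta { slot: 7, consumed: 3 };
    // What Database::put::<C>() writes for a TypedColumn...
    let bytes = serialize(&meta).unwrap();
    // ...and what Database::get::<C>() reconstructs on the way out.
    let back: DemoMeta = deserialize(&bytes).unwrap();
    assert_eq!(meta, back);
}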

View File

@ -1,80 +1,42 @@
use crate::entry::Entry;
use crate::kvstore::{self, Key};
use crate::packet::Blob;
use crate::blocktree::db::columns as cf;
use crate::blocktree::db::{Backend, Column, DbCursor, IWriteBatch, TypedColumn};
use crate::blocktree::BlocktreeError;
use crate::result::{Error, Result};
use byteorder::{BigEndian, ByteOrder};
use solana_kvstore::{self as kvstore, Key, KvStore};
use std::path::Path;
use std::sync::Arc;
use super::db::{
Cursor, Database, IDataCf, IErasureCf, IMetaCf, IWriteBatch, LedgerColumnFamily,
LedgerColumnFamilyRaw,
};
use super::{Blocktree, BlocktreeError};
type ColumnFamily = u64;
#[derive(Debug)]
pub struct Kvs(());
pub struct Kvs(KvStore);
/// The metadata column family
#[derive(Debug)]
pub struct MetaCf {
db: Arc<Kvs>,
}
/// Dummy struct for now
#[derive(Debug, Clone, Copy)]
pub struct Dummy;
/// The data column family
#[derive(Debug)]
pub struct DataCf {
db: Arc<Kvs>,
}
/// The erasure column family
#[derive(Debug)]
pub struct ErasureCf {
db: Arc<Kvs>,
}
/// Dummy struct to get things compiling
/// TODO: all this goes away with Blocktree
pub struct EntryIterator(i32);
/// Dummy struct to get things compiling
pub struct KvsCursor;
/// Dummy struct to get things compiling
pub struct ColumnFamily;
/// Dummy struct to get things compiling
pub struct KvsWriteBatch;
impl Blocktree {
/// Opens a Ledger in directory, provides "infinite" window of blobs
pub fn open(_ledger_path: &str) -> Result<Blocktree> {
unimplemented!()
}
#[allow(unreachable_code)]
pub fn read_ledger_blobs(&self) -> impl Iterator<Item = Blob> {
unimplemented!();
self.read_ledger().unwrap().map(|_| Blob::new(&[]))
}
/// Return an iterator for all the entries in the given file.
#[allow(unreachable_code)]
pub fn read_ledger(&self) -> Result<impl Iterator<Item = Entry>> {
Ok(EntryIterator(unimplemented!()))
}
pub fn destroy(_ledger_path: &str) -> Result<()> {
unimplemented!()
}
}
impl Database for Kvs {
type Error = kvstore::Error;
impl Backend for Kvs {
type Key = Key;
type KeyRef = Key;
type OwnedKey = Key;
type ColumnFamily = ColumnFamily;
type Cursor = KvsCursor;
type EntryIter = EntryIterator;
type WriteBatch = KvsWriteBatch;
type Cursor = Dummy;
type Iter = Dummy;
type WriteBatch = Dummy;
type Error = kvstore::Error;
fn cf_handle(&self, _cf: &str) -> Option<ColumnFamily> {
fn open(_path: &Path) -> Result<Kvs> {
unimplemented!()
}
fn columns(&self) -> Vec<&'static str> {
unimplemented!()
}
fn destroy(_path: &Path) -> Result<()> {
unimplemented!()
}
fn cf_handle(&self, _cf: &str) -> ColumnFamily {
unimplemented!()
}
@ -82,28 +44,125 @@ impl Database for Kvs {
unimplemented!()
}
fn put_cf(&self, _cf: ColumnFamily, _key: &Key, _data: &[u8]) -> Result<()> {
fn put_cf(&self, _cf: ColumnFamily, _key: &Key, _value: &[u8]) -> Result<()> {
unimplemented!()
}
fn delete_cf(&self, _cf: Self::ColumnFamily, _key: &Key) -> Result<()> {
fn delete_cf(&self, _cf: ColumnFamily, _key: &Key) -> Result<()> {
unimplemented!()
}
fn raw_iterator_cf(&self, _cf: Self::ColumnFamily) -> Result<Self::Cursor> {
fn iterator_cf(&self, _cf: ColumnFamily) -> Result<Dummy> {
unimplemented!()
}
fn write(&self, _batch: Self::WriteBatch) -> Result<()> {
fn raw_iterator_cf(&self, _cf: ColumnFamily) -> Result<Dummy> {
unimplemented!()
}
fn batch(&self) -> Result<Self::WriteBatch> {
fn batch(&self) -> Result<Dummy> {
unimplemented!()
}
fn write(&self, _batch: Dummy) -> Result<()> {
unimplemented!()
}
}
impl Cursor<Kvs> for KvsCursor {
impl Column<Kvs> for cf::Coding {
const NAME: &'static str = super::ERASURE_CF;
type Index = (u64, u64);
fn key(index: (u64, u64)) -> Key {
cf::Data::key(index)
}
fn index(key: &Key) -> (u64, u64) {
cf::Data::index(key)
}
}
impl Column<Kvs> for cf::Data {
const NAME: &'static str = super::DATA_CF;
type Index = (u64, u64);
fn key((slot, index): (u64, u64)) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
BigEndian::write_u64(&mut key.0[16..], index);
key
}
fn index(key: &Key) -> (u64, u64) {
let slot = BigEndian::read_u64(&key.0[8..16]);
let index = BigEndian::read_u64(&key.0[16..]);
(slot, index)
}
}
impl Column<Kvs> for cf::Orphans {
const NAME: &'static str = super::ORPHANS_CF;
type Index = u64;
fn key(slot: u64) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
key
}
fn index(key: &Key) -> u64 {
BigEndian::read_u64(&key.0[8..16])
}
}
impl TypedColumn<Kvs> for cf::Orphans {
type Type = bool;
}
impl Column<Kvs> for cf::SlotMeta {
const NAME: &'static str = super::META_CF;
type Index = u64;
fn key(slot: u64) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
key
}
fn index(key: &Key) -> u64 {
BigEndian::read_u64(&key.0[8..16])
}
}
impl TypedColumn<Kvs> for cf::SlotMeta {
type Type = super::SlotMeta;
}
#[cfg(feature = "erasure")]
impl Column<Kvs> for cf::ErasureMeta {
const NAME: &'static str = super::ERASURE_META_CF;
type Index = (u64, u64);
fn key((slot, set_index): (u64, u64)) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
BigEndian::write_u64(&mut key.0[16..], set_index);
key
}
fn index(key: &Key) -> (u64, u64) {
let slot = BigEndian::read_u64(&key.0[8..16]);
let set_index = BigEndian::read_u64(&key.0[16..]);
(slot, set_index)
}
}
#[cfg(feature = "erasure")]
impl TypedColumn<Kvs> for cf::ErasureMeta {
type Type = super::ErasureMeta;
}
impl DbCursor<Kvs> for Dummy {
fn valid(&self) -> bool {
unimplemented!()
}
@ -129,124 +188,22 @@ impl Cursor<Kvs> for KvsCursor {
}
}
impl IWriteBatch<Kvs> for KvsWriteBatch {
fn put_cf(&mut self, _cf: ColumnFamily, _key: &Key, _data: &[u8]) -> Result<()> {
impl IWriteBatch<Kvs> for Dummy {
fn put_cf(&mut self, _cf: ColumnFamily, _key: &Key, _value: &[u8]) -> Result<()> {
unimplemented!()
}
fn delete_cf(&mut self, _cf: ColumnFamily, _key: &Key) -> Result<()> {
unimplemented!()
}
}
impl IDataCf<Kvs> for DataCf {
fn new(db: Arc<Kvs>) -> Self {
DataCf { db }
}
impl Iterator for Dummy {
type Item = (Box<Key>, Box<[u8]>);
fn get_by_slot_index(&self, _slot: u64, _index: u64) -> Result<Option<Vec<u8>>> {
fn next(&mut self) -> Option<Self::Item> {
unimplemented!()
}
fn delete_by_slot_index(&self, _slot: u64, _index: u64) -> Result<()> {
unimplemented!()
}
fn put_by_slot_index(&self, _slot: u64, _index: u64, _serialized_value: &[u8]) -> Result<()> {
unimplemented!()
}
fn key(_slot: u64, _index: u64) -> Key {
unimplemented!()
}
fn slot_from_key(_key: &Key) -> Result<u64> {
unimplemented!()
}
fn index_from_key(_key: &Key) -> Result<u64> {
unimplemented!()
}
}
impl IErasureCf<Kvs> for ErasureCf {
fn new(db: Arc<Kvs>) -> Self {
ErasureCf { db }
}
fn delete_by_slot_index(&self, _slot: u64, _index: u64) -> Result<()> {
unimplemented!()
}
fn get_by_slot_index(&self, _slot: u64, _index: u64) -> Result<Option<Vec<u8>>> {
unimplemented!()
}
fn put_by_slot_index(&self, _slot: u64, _index: u64, _serialized_value: &[u8]) -> Result<()> {
unimplemented!()
}
fn key(slot: u64, index: u64) -> Key {
DataCf::key(slot, index)
}
fn slot_from_key(key: &Key) -> Result<u64> {
DataCf::slot_from_key(key)
}
fn index_from_key(key: &Key) -> Result<u64> {
DataCf::index_from_key(key)
}
}
impl IMetaCf<Kvs> for MetaCf {
fn new(db: Arc<Kvs>) -> Self {
MetaCf { db }
}
fn key(_slot: u64) -> Key {
unimplemented!()
}
fn get_slot_meta(&self, _slot: u64) -> Result<Option<super::SlotMeta>> {
unimplemented!()
}
fn put_slot_meta(&self, _slot: u64, _slot_meta: &super::SlotMeta) -> Result<()> {
unimplemented!()
}
fn index_from_key(_key: &Key) -> Result<u64> {
unimplemented!()
}
}
impl LedgerColumnFamilyRaw<Kvs> for DataCf {
fn db(&self) -> &Arc<Kvs> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::DATA_CF).unwrap()
}
}
impl LedgerColumnFamilyRaw<Kvs> for ErasureCf {
fn db(&self) -> &Arc<Kvs> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::ERASURE_CF).unwrap()
}
}
impl LedgerColumnFamily<Kvs> for MetaCf {
type ValueType = super::SlotMeta;
fn db(&self) -> &Arc<Kvs> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::META_CF).unwrap()
}
}
impl std::convert::From<kvstore::Error> for Error {
@ -254,12 +211,3 @@ impl std::convert::From<kvstore::Error> for Error {
Error::BlocktreeError(BlocktreeError::KvsDb(e))
}
}
/// TODO: all this goes away with Blocktree
impl Iterator for EntryIterator {
type Item = Entry;
fn next(&mut self) -> Option<Entry> {
unimplemented!()
}
}
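
The `Column<Kvs>` impls above pack slot and index big-endian at fixed offsets of a key. A standalone round-trip of that layout (`DemoKey` stands in for `solana_kvstore::Key`):

use byteorder::{BigEndian, ByteOrder};

// Stand-in for solana_kvstore::Key: 32 bytes, slot at [8..16], index at [16..24].
#[derive(Default)]
struct DemoKey([u8; 32]);

fn key(slot: u64, index: u64) -> DemoKey {
    let mut key = DemoKey::default();
    BigEndian::write_u64(&mut key.0[8..16], slot);
    BigEndian::write_u64(&mut key.0[16..24], index);
    key
}

fn index(key: &DemoKey) -> (u64, u64) {
    let slot = BigEndian::read_u64(&key.0[8..16]);
    let index = BigEndian::read_u64(&key.0[16..24]);
    (slot, index)
}

fn main() {
    let k = key(42, 7);
    assert_eq!(index(&k), (42, 7));
    // Big-endian encoding makes keys sort in (slot, index) order,
    // which is what cursor iteration over a column relies on.
}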

213 core/src/blocktree/meta.rs Normal file
View File

@ -0,0 +1,213 @@
#[cfg(feature = "erasure")]
use crate::erasure::{NUM_CODING, NUM_DATA};
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
// The Meta column family
pub struct SlotMeta {
// The number of slots above the root (the genesis block). The first
// slot has slot 0.
pub slot: u64,
// The total number of consecutive blobs starting from index 0
// we have received for this slot.
pub consumed: u64,
// The index *plus one* of the highest blob received for this slot. Useful
// for checking if the slot has received any blobs yet, and to calculate the
// range where there is one or more holes: `(consumed..received)`.
pub received: u64,
// The index of the blob that is flagged as the last blob for this slot.
pub last_index: u64,
// The slot height of the block this one derives from.
pub parent_slot: u64,
// The list of slot heights, each of which contains a block that derives
// from this one.
pub next_slots: Vec<u64>,
// True if this slot is full (consumed == last_index + 1) and if every
// slot that is a parent of this slot is also connected.
pub is_connected: bool,
// True if this slot is a root
pub is_root: bool,
}
impl SlotMeta {
pub fn is_full(&self) -> bool {
// last_index is std::u64::MAX when it has no information about how
// many blobs will fill this slot.
// Note: A full slot with zero blobs is not possible.
if self.last_index == std::u64::MAX {
return false;
}
assert!(self.consumed <= self.last_index + 1);
self.consumed == self.last_index + 1
}
pub fn is_parent_set(&self) -> bool {
self.parent_slot != std::u64::MAX
}
pub(in crate::blocktree) fn new(slot: u64, parent_slot: u64) -> Self {
SlotMeta {
slot,
consumed: 0,
received: 0,
parent_slot,
next_slots: vec![],
is_connected: slot == 0,
is_root: false,
last_index: std::u64::MAX,
}
}
}
#[cfg(feature = "erasure")]
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// Bitfield representing presence/absence of data blobs
pub data: u64,
/// Bitfield representing presence/absence of coding blobs
pub coding: u64,
}
#[cfg(feature = "erasure")]
impl ErasureMeta {
pub fn new(set_index: u64) -> ErasureMeta {
ErasureMeta {
set_index,
data: 0,
coding: 0,
}
}
pub fn can_recover(&self) -> bool {
let (data_missing, coding_missing) = (
NUM_DATA - self.data.count_ones() as usize,
NUM_CODING - self.coding.count_ones() as usize,
);
data_missing > 0 && data_missing + coding_missing <= NUM_CODING
}
pub fn is_coding_present(&self, index: u64) -> bool {
let set_index = Self::set_index_for(index);
let position = index - self.start_index();
set_index == self.set_index && self.coding & (1 << position) != 0
}
pub fn set_coding_present(&mut self, index: u64) {
let set_index = Self::set_index_for(index);
if set_index as u64 == self.set_index {
let position = index - self.start_index();
self.coding |= 1 << position;
}
}
pub fn is_data_present(&self, index: u64) -> bool {
let set_index = Self::set_index_for(index);
let position = index - self.start_index();
set_index == self.set_index && self.data & (1 << position) != 0
}
pub fn set_data_present(&mut self, index: u64) {
let set_index = Self::set_index_for(index);
if set_index as u64 == self.set_index {
let position = index - self.start_index();
self.data |= 1 << position;
}
}
pub fn set_index_for(index: u64) -> u64 {
index / NUM_DATA as u64
}
pub fn start_index(&self) -> u64 {
self.set_index * NUM_DATA as u64
}
/// returns a tuple of (data_end, coding_end)
pub fn end_indexes(&self) -> (u64, u64) {
let start = self.start_index();
(start + NUM_DATA as u64, start + NUM_CODING as u64)
}
}
#[cfg(feature = "erasure")]
#[test]
fn test_meta_coding_present() {
let set_index = 0;
let mut e_meta = ErasureMeta {
set_index,
data: 0,
coding: 0,
};
for i in 0..NUM_CODING as u64 {
e_meta.set_coding_present(i);
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in NUM_CODING as u64..NUM_DATA as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}
e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 17) as u64);
for i in (NUM_DATA * 17) as u64..((NUM_DATA * 17) + NUM_CODING) as u64 {
e_meta.set_coding_present(i);
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in (NUM_DATA * 17 + NUM_CODING) as u64..((NUM_DATA * 17) + NUM_DATA) as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}
}
#[cfg(feature = "erasure")]
#[test]
fn test_can_recover() {
let set_index = 0;
let mut e_meta = ErasureMeta {
set_index,
data: 0,
coding: 0,
};
assert!(!e_meta.can_recover());
e_meta.data = 0b1111_1111_1111_1111;
e_meta.coding = 0x00;
assert!(!e_meta.can_recover());
e_meta.coding = 0x0e;
assert_eq!(0x0fu8, 0b0000_1111u8);
assert!(!e_meta.can_recover());
e_meta.data = 0b0111_1111_1111_1111;
assert!(e_meta.can_recover());
e_meta.data = 0b0111_1111_1111_1110;
assert!(e_meta.can_recover());
e_meta.data = 0b0111_1111_1011_1110;
assert!(e_meta.can_recover());
e_meta.data = 0b0111_1011_1011_1110;
assert!(!e_meta.can_recover());
e_meta.data = 0b0111_1011_1011_1110;
assert!(!e_meta.can_recover());
e_meta.coding = 0b0000_1110;
e_meta.data = 0b1111_1111_1111_1100;
assert!(e_meta.can_recover());
e_meta.data = 0b1111_1111_1111_1000;
assert!(e_meta.can_recover());
}
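
The recovery predicate above is plain counting: a set is recoverable when at least one data blob is missing and the total number of missing blobs does not exceed the number of coding blobs. A standalone check of the arithmetic, using NUM_DATA = 16 and NUM_CODING = 8 as illustrative values consistent with the tests above (the real constants live in `crate::erasure`):

// Illustrative constants; see crate::erasure for the real ones.
const NUM_DATA: usize = 16;
const NUM_CODING: usize = 8;

fn can_recover(data: u64, coding: u64) -> bool {
    let data_missing = NUM_DATA - data.count_ones() as usize;
    let coding_missing = NUM_CODING - coding.count_ones() as usize;
    data_missing > 0 && data_missing + coding_missing <= NUM_CODING
}

fn main() {
    // All 16 data blobs present: nothing to recover.
    assert!(!can_recover(0b1111_1111_1111_1111, 0));
    // 1 missing data + 5 missing coding = 6 <= 8: recoverable.
    assert!(can_recover(0b0111_1111_1111_1111, 0b0000_1110));
    // 3 missing data + 5 missing coding = 8 <= 8: still recoverable.
    assert!(can_recover(0b1111_1111_1111_1000, 0b0000_1110));
    // 4 missing data + 5 missing coding = 9 > 8: unrecoverable.
    assert!(!can_recover(0b0111_1011_1011_1110, 0b0000_1110));
}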

View File

@ -1,29 +1,17 @@
use crate::entry::Entry;
use crate::packet::{Blob, BLOB_HEADER_SIZE};
use crate::blocktree::db::columns as cf;
use crate::blocktree::db::{Backend, Column, DbCursor, IWriteBatch, TypedColumn};
use crate::blocktree::BlocktreeError;
use crate::result::{Error, Result};
use bincode::deserialize;
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use byteorder::{BigEndian, ByteOrder};
use rocksdb::{
self, ColumnFamily, ColumnFamilyDescriptor, DBRawIterator, IteratorMode, Options,
self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator, IteratorMode, Options,
WriteBatch as RWriteBatch, DB,
};
use solana_sdk::hash::Hash;
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use std::fs;
use std::io;
use std::path::Path;
use std::sync::Arc;
use super::db::{
Cursor, Database, IDataCf, IErasureCf, IMetaCf, IWriteBatch, LedgerColumnFamily,
LedgerColumnFamilyRaw,
};
use super::{Blocktree, BlocktreeError};
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
@ -32,194 +20,222 @@ const MAX_WRITE_BUFFER_SIZE: usize = 512 * 1024 * 1024;
#[derive(Debug)]
pub struct Rocks(rocksdb::DB);
/// The metadata column family
#[derive(Debug)]
pub struct MetaCf {
db: Arc<Rocks>,
}
impl Backend for Rocks {
type Key = [u8];
type OwnedKey = Vec<u8>;
type ColumnFamily = ColumnFamily;
type Cursor = DBRawIterator;
type Iter = DBIterator;
type WriteBatch = RWriteBatch;
type Error = rocksdb::Error;
/// The data column family
#[derive(Debug)]
pub struct DataCf {
db: Arc<Rocks>,
}
fn open(path: &Path) -> Result<Rocks> {
#[cfg(feature = "erasure")]
use crate::blocktree::db::columns::ErasureMeta;
use crate::blocktree::db::columns::{Coding, Data, Orphans, SlotMeta};
/// The erasure column family
#[derive(Debug)]
pub struct ErasureCf {
db: Arc<Rocks>,
}
/// TODO: all this goes away with Blocktree
pub struct EntryIterator {
db_iterator: DBRawIterator,
// TODO: remove me when replay_stage is iterating by block (Blocktree)
// this verification is duplicating that of replay_stage, which
// can do this in parallel
blockhash: Option<Hash>,
// https://github.com/rust-rocksdb/rust-rocksdb/issues/234
// rocksdb issue: the _blocktree member must be lower in the struct to prevent a crash
// when the db_iterator member above is dropped.
// _blocktree is unused, but dropping _blocktree results in a broken db_iterator
// you have to hold the database open in order to iterate over it, and in order
// for db_iterator to be able to run Drop
// _blocktree: Blocktree,
}
impl Blocktree {
/// Opens a Ledger in directory, provides "infinite" window of blobs
pub fn open(ledger_path: &str) -> Result<Blocktree> {
fs::create_dir_all(&ledger_path)?;
let ledger_path = Path::new(ledger_path).join(super::BLOCKTREE_DIRECTORY);
fs::create_dir_all(&path)?;
// Use default database options
let db_options = Blocktree::get_db_options();
let db_options = get_db_options();
// Column family names
let meta_cf_descriptor =
ColumnFamilyDescriptor::new(super::META_CF, Blocktree::get_cf_options());
let data_cf_descriptor =
ColumnFamilyDescriptor::new(super::DATA_CF, Blocktree::get_cf_options());
let erasure_cf_descriptor =
ColumnFamilyDescriptor::new(super::ERASURE_CF, Blocktree::get_cf_options());
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
#[cfg(feature = "erasure")]
let erasure_meta_cf_descriptor =
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
let cfs = vec![
meta_cf_descriptor,
data_cf_descriptor,
erasure_cf_descriptor,
#[cfg(feature = "erasure")]
erasure_meta_cf_descriptor,
orphans_cf_descriptor,
];
// Open the database
let db = Arc::new(Rocks(DB::open_cf_descriptors(
&db_options,
ledger_path,
cfs,
)?));
let db = Rocks(DB::open_cf_descriptors(&db_options, path, cfs)?);
// Create the metadata column family
let meta_cf = MetaCf::new(db.clone());
// Create the data column family
let data_cf = DataCf::new(db.clone());
// Create the erasure column family
let erasure_cf = ErasureCf::new(db.clone());
let ticks_per_slot = DEFAULT_TICKS_PER_SLOT;
Ok(Blocktree {
db,
meta_cf,
data_cf,
erasure_cf,
new_blobs_signals: vec![],
ticks_per_slot,
})
Ok(db)
}
pub fn read_ledger_blobs(&self) -> impl Iterator<Item = Blob> {
self.db
.0
.iterator_cf(self.data_cf.handle(), IteratorMode::Start)
.unwrap()
.map(|(_, blob_data)| Blob::new(&blob_data))
fn columns(&self) -> Vec<&'static str> {
#[cfg(feature = "erasure")]
use crate::blocktree::db::columns::ErasureMeta;
use crate::blocktree::db::columns::{Coding, Data, Orphans, SlotMeta};
vec![
Coding::NAME,
#[cfg(feature = "erasure")]
ErasureMeta::NAME,
Data::NAME,
Orphans::NAME,
SlotMeta::NAME,
]
}
/// Return an iterator for all the entries in the given file.
pub fn read_ledger(&self) -> Result<impl Iterator<Item = Entry>> {
let mut db_iterator = self.db.raw_iterator_cf(self.data_cf.handle())?;
fn destroy(path: &Path) -> Result<()> {
DB::destroy(&Options::default(), path)?;
db_iterator.seek_to_first();
Ok(EntryIterator {
db_iterator,
blockhash: None,
})
}
pub fn destroy(ledger_path: &str) -> Result<()> {
// DB::destroy() fails if `ledger_path` doesn't exist
fs::create_dir_all(&ledger_path)?;
let ledger_path = Path::new(ledger_path).join(super::BLOCKTREE_DIRECTORY);
DB::destroy(&Options::default(), &ledger_path)?;
Ok(())
}
fn get_cf_options() -> Options {
let mut options = Options::default();
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}
fn get_db_options() -> Options {
let mut options = Options::default();
options.create_if_missing(true);
options.create_missing_column_families(true);
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}
}
impl Database for Rocks {
type Error = rocksdb::Error;
type Key = Vec<u8>;
type KeyRef = [u8];
type ColumnFamily = ColumnFamily;
type Cursor = DBRawIterator;
type EntryIter = EntryIterator;
type WriteBatch = RWriteBatch;
fn cf_handle(&self, cf: &str) -> Option<ColumnFamily> {
self.0.cf_handle(cf)
fn cf_handle(&self, cf: &str) -> ColumnFamily {
self.0
.cf_handle(cf)
.expect("should never get an unknown column")
}
fn get_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<Option<Vec<u8>>> {
let opt = self.0.get_cf(cf, key)?;
Ok(opt.map(|dbvec| dbvec.to_vec()))
let opt = self.0.get_cf(cf, key)?.map(|db_vec| db_vec.to_vec());
Ok(opt)
}
fn put_cf(&self, cf: ColumnFamily, key: &[u8], data: &[u8]) -> Result<()> {
self.0.put_cf(cf, key, data)?;
fn put_cf(&self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
self.0.put_cf(cf, key, value)?;
Ok(())
}
fn delete_cf(&self, cf: Self::ColumnFamily, key: &[u8]) -> Result<()> {
self.0.delete_cf(cf, key).map_err(From::from)
fn delete_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<()> {
self.0.delete_cf(cf, key)?;
Ok(())
}
fn raw_iterator_cf(&self, cf: Self::ColumnFamily) -> Result<Self::Cursor> {
Ok(self.0.raw_iterator_cf(cf)?)
fn iterator_cf(&self, cf: ColumnFamily) -> Result<DBIterator> {
let raw_iter = self.0.iterator_cf(cf, IteratorMode::Start)?;
Ok(raw_iter)
}
fn write(&self, batch: Self::WriteBatch) -> Result<()> {
self.0.write(batch).map_err(From::from)
fn raw_iterator_cf(&self, cf: ColumnFamily) -> Result<DBRawIterator> {
let raw_iter = self.0.raw_iterator_cf(cf)?;
Ok(raw_iter)
}
fn batch(&self) -> Result<Self::WriteBatch> {
fn batch(&self) -> Result<RWriteBatch> {
Ok(RWriteBatch::default())
}
fn write(&self, batch: RWriteBatch) -> Result<()> {
self.0.write(batch)?;
Ok(())
}
}
impl Cursor<Rocks> for DBRawIterator {
impl Column<Rocks> for cf::Coding {
const NAME: &'static str = super::ERASURE_CF;
type Index = (u64, u64);
fn key(index: (u64, u64)) -> Vec<u8> {
cf::Data::key(index)
}
fn index(key: &[u8]) -> (u64, u64) {
cf::Data::index(key)
}
}
impl Column<Rocks> for cf::Data {
const NAME: &'static str = super::DATA_CF;
type Index = (u64, u64);
fn key((slot, index): (u64, u64)) -> Vec<u8> {
let mut key = vec![0; 16];
BigEndian::write_u64(&mut key[..8], slot);
BigEndian::write_u64(&mut key[8..16], index);
key
}
fn index(key: &[u8]) -> (u64, u64) {
let slot = BigEndian::read_u64(&key[..8]);
let index = BigEndian::read_u64(&key[8..16]);
(slot, index)
}
}
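The big-endian layout used by `key`/`index` above is what makes RocksDB's default lexicographic key comparator agree with numeric `(slot, index)` order; a little-endian encoding would not sort correctly. A small self-contained sketch, using the same `byteorder` calls as the code above, demonstrates the round-trip and ordering properties:
use byteorder::{BigEndian, ByteOrder};
fn key((slot, index): (u64, u64)) -> Vec<u8> {
    let mut key = vec![0; 16];
    BigEndian::write_u64(&mut key[..8], slot);
    BigEndian::write_u64(&mut key[8..16], index);
    key
}
fn index(key: &[u8]) -> (u64, u64) {
    (BigEndian::read_u64(&key[..8]), BigEndian::read_u64(&key[8..16]))
}
fn main() {
    // Round-trip is lossless.
    assert_eq!(index(&key((42, 7))), (42, 7));
    // Byte-wise (lexicographic) order matches numeric order:
    // (1, 2) < (1, 10) < (2, 0).
    assert!(key((1, 2)) < key((1, 10)));
    assert!(key((1, 10)) < key((2, 0)));
}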
impl Column<Rocks> for cf::Orphans {
const NAME: &'static str = super::ORPHANS_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0; 8];
BigEndian::write_u64(&mut key[..], slot);
key
}
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
}
impl TypedColumn<Rocks> for cf::Orphans {
type Type = bool;
}
impl Column<Rocks> for cf::SlotMeta {
const NAME: &'static str = super::META_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0; 8];
BigEndian::write_u64(&mut key[..], slot);
key
}
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
}
impl TypedColumn<Rocks> for cf::SlotMeta {
type Type = super::SlotMeta;
}
#[cfg(feature = "erasure")]
impl Column<Rocks> for cf::ErasureMeta {
const NAME: &'static str = super::ERASURE_META_CF;
type Index = (u64, u64);
fn index(key: &[u8]) -> (u64, u64) {
let slot = BigEndian::read_u64(&key[..8]);
let set_index = BigEndian::read_u64(&key[8..]);
(slot, set_index)
}
fn key((slot, set_index): (u64, u64)) -> Vec<u8> {
let mut key = vec![0; 16];
BigEndian::write_u64(&mut key[..8], slot);
BigEndian::write_u64(&mut key[8..], set_index);
key
}
}
#[cfg(feature = "erasure")]
impl TypedColumn<Rocks> for cf::ErasureMeta {
type Type = super::ErasureMeta;
}
impl DbCursor<Rocks> for DBRawIterator {
fn valid(&self) -> bool {
DBRawIterator::valid(self)
}
fn seek(&mut self, key: &[u8]) {
DBRawIterator::seek(self, key)
DBRawIterator::seek(self, key);
}
fn seek_to_first(&mut self) {
DBRawIterator::seek_to_first(self)
DBRawIterator::seek_to_first(self);
}
fn next(&mut self) {
DBRawIterator::next(self)
DBRawIterator::next(self);
}
fn key(&self) -> Option<Vec<u8>> {
@ -232,141 +248,14 @@ impl Cursor<Rocks> for DBRawIterator {
}
impl IWriteBatch<Rocks> for RWriteBatch {
fn put_cf(&mut self, cf: ColumnFamily, key: &[u8], data: &[u8]) -> Result<()> {
RWriteBatch::put_cf(self, cf, key, data)?;
fn put_cf(&mut self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
RWriteBatch::put_cf(self, cf, key, value)?;
Ok(())
}
}
impl IDataCf<Rocks> for DataCf {
fn new(db: Arc<Rocks>) -> Self {
DataCf { db }
}
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
let key = Self::key(slot, index);
self.get(&key)
}
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
let key = Self::key(slot, index);
self.delete(&key)
}
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
let key = Self::key(slot, index);
self.put(&key, serialized_value)
}
fn key(slot: u64, index: u64) -> Vec<u8> {
let mut key = vec![0u8; 16];
BigEndian::write_u64(&mut key[0..8], slot);
BigEndian::write_u64(&mut key[8..16], index);
key
}
fn slot_from_key(key: &[u8]) -> Result<u64> {
let mut rdr = io::Cursor::new(&key[0..8]);
let height = rdr.read_u64::<BigEndian>()?;
Ok(height)
}
fn index_from_key(key: &[u8]) -> Result<u64> {
let mut rdr = io::Cursor::new(&key[8..16]);
let index = rdr.read_u64::<BigEndian>()?;
Ok(index)
}
}
impl IErasureCf<Rocks> for ErasureCf {
fn new(db: Arc<Rocks>) -> Self {
ErasureCf { db }
}
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
let key = Self::key(slot, index);
self.delete(&key)
}
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
let key = Self::key(slot, index);
self.get(&key)
}
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
let key = Self::key(slot, index);
self.put(&key, serialized_value)
}
fn key(slot: u64, index: u64) -> Vec<u8> {
DataCf::key(slot, index)
}
fn slot_from_key(key: &[u8]) -> Result<u64> {
DataCf::slot_from_key(key)
}
fn index_from_key(key: &[u8]) -> Result<u64> {
DataCf::index_from_key(key)
}
}
impl IMetaCf<Rocks> for MetaCf {
fn new(db: Arc<Rocks>) -> Self {
MetaCf { db }
}
fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0u8; 8];
BigEndian::write_u64(&mut key[0..8], slot);
key
}
fn get_slot_meta(&self, slot: u64) -> Result<Option<super::SlotMeta>> {
let key = Self::key(slot);
self.get(&key)
}
fn put_slot_meta(&self, slot: u64, slot_meta: &super::SlotMeta) -> Result<()> {
let key = Self::key(slot);
self.put(&key, slot_meta)
}
fn index_from_key(key: &[u8]) -> Result<u64> {
let mut rdr = io::Cursor::new(&key[..]);
let index = rdr.read_u64::<BigEndian>()?;
Ok(index)
}
}
impl LedgerColumnFamilyRaw<Rocks> for DataCf {
fn db(&self) -> &Arc<Rocks> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::DATA_CF).unwrap()
}
}
impl LedgerColumnFamilyRaw<Rocks> for ErasureCf {
fn db(&self) -> &Arc<Rocks> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::ERASURE_CF).unwrap()
}
}
impl LedgerColumnFamily<Rocks> for MetaCf {
type ValueType = super::SlotMeta;
fn db(&self) -> &Arc<Rocks> {
&self.db
}
fn handle(&self) -> ColumnFamily {
self.db.cf_handle(super::META_CF).unwrap()
fn delete_cf(&mut self, cf: ColumnFamily, key: &[u8]) -> Result<()> {
RWriteBatch::delete_cf(self, cf, key)?;
Ok(())
}
}
@ -376,25 +265,23 @@ impl std::convert::From<rocksdb::Error> for Error {
}
}
/// TODO: all this goes away with Blocktree
impl Iterator for EntryIterator {
type Item = Entry;
fn next(&mut self) -> Option<Entry> {
if self.db_iterator.valid() {
if let Some(value) = self.db_iterator.value() {
if let Ok(entry) = deserialize::<Entry>(&value[BLOB_HEADER_SIZE..]) {
if let Some(blockhash) = self.blockhash {
if !entry.verify(&blockhash) {
return None;
}
}
self.db_iterator.next();
self.blockhash = Some(entry.hash);
return Some(entry);
}
}
}
None
}
fn get_cf_options() -> Options {
let mut options = Options::default();
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}
fn get_db_options() -> Options {
let mut options = Options::default();
options.create_if_missing(true);
options.create_missing_column_families(true);
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}
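As a rough sizing note implied by these options: with 32 write buffers of `MAX_WRITE_BUFFER_SIZE` (512 MiB) each, a single column family may accumulate on the order of 16 GiB of memtables before writes stall, an upper bound only, since RocksDB normally flushes much earlier. A toy calculation, assuming a 64-bit target:
const MAX_WRITE_BUFFER_SIZE: usize = 512 * 1024 * 1024; // 512 MiB, as above
const MAX_WRITE_BUFFER_NUMBER: usize = 32;
const WORST_CASE_MEMTABLE_BYTES: usize = MAX_WRITE_BUFFER_SIZE * MAX_WRITE_BUFFER_NUMBER;
fn main() {
    // 32 * 512 MiB = 16 GiB per column family
    println!("{} GiB", WORST_CASE_MEMTABLE_BYTES >> 30);
}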


@ -4,22 +4,16 @@ use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_utils;
use rayon::prelude::*;
use solana_metrics::counter::Counter;
use solana_runtime::bank::{Bank, BankError, Result};
use solana_runtime::bank::Bank;
use solana_runtime::locked_accounts_results::LockedAccountsResults;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
use solana_sdk::transaction::{Result, TransactionError};
use std::result;
use std::sync::Arc;
use std::time::Instant;
pub fn process_entry(bank: &Bank, entry: &Entry) -> Result<()> {
if !entry.is_tick() {
first_err(&bank.process_transactions(&entry.transactions))?;
} else {
bank.register_tick(&entry.hash);
}
Ok(())
}
fn first_err(results: &[Result<()>]) -> Result<()> {
for r in results {
r.clone()?;
@ -27,30 +21,55 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
Ok(())
}
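`first_err` simply propagates the first `Err` in slice order, the ordering guarantee the callers below rely on. A standalone sketch of its semantics, generalized over the error type purely for illustration:
fn first_err<E: Clone>(results: &[Result<(), E>]) -> Result<(), E> {
    for r in results {
        r.clone()?; // the `?` returns the first Err encountered
    }
    Ok(())
}
fn main() {
    assert_eq!(first_err::<&str>(&[Ok(()), Ok(())]), Ok(()));
    // Errors later in the slice never mask earlier ones.
    assert_eq!(first_err(&[Ok(()), Err("a"), Err("b")]), Err("a"));
}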
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, Vec<Result<()>>)]) -> Result<()> {
fn is_unexpected_validator_error(r: &Result<()>) -> bool {
match r {
Err(TransactionError::DuplicateSignature) => true,
_ => false,
}
}
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
let results: Vec<Result<()>> = entries
.into_par_iter()
.map(|(e, lock_results)| {
.map(|(e, locked_accounts)| {
let results = bank.load_execute_and_commit_transactions(
&e.transactions,
lock_results.to_vec(),
locked_accounts,
MAX_RECENT_BLOCKHASHES,
);
bank.unlock_accounts(&e.transactions, &results);
first_err(&results)
let mut first_err = None;
for r in results {
if let Err(ref e) = r {
if first_err.is_none() {
first_err = Some(r.clone());
}
if is_unexpected_validator_error(&r) {
warn!("Unexpected validator error: {:?}", e);
solana_metrics::submit(
solana_metrics::influxdb::Point::new("validator_process_entry_error")
.add_field(
"error",
solana_metrics::influxdb::Value::String(format!("{:?}", e)),
)
.to_owned(),
)
}
}
}
first_err.unwrap_or(Ok(()))
})
.collect();
first_err(&results)
}
/// process entries in parallel
/// Process an ordered list of entries in parallel
/// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
// accumulator for entries that can be processed in parallel
let mut mt_group = vec![];
for entry in entries {
@ -65,11 +84,12 @@ fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
let lock_results = bank.lock_accounts(&entry.transactions);
// if any of the locks error out
// execute the current group
if first_err(&lock_results).is_err() {
if first_err(lock_results.locked_accounts_results()).is_err() {
par_execute_entries(bank, &mt_group)?;
// Drop all the locks on accounts by clearing the LockedAccountsFinalizer's in the
// mt_group
mt_group = vec![];
//reset the lock and push the entry
bank.unlock_accounts(&entry.transactions, &lock_results);
drop(lock_results);
let lock_results = bank.lock_accounts(&entry.transactions);
mt_group.push((entry, lock_results));
} else {
@ -81,22 +101,22 @@ fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
Ok(())
}
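The four-step strategy in the doc comment above can be modeled without `Bank` at all. The following is a simplified sketch with hypothetical `accounts`/`execute_in_parallel` stand-ins, not the real locking API: entries accumulate while their account locks are disjoint, and a tick or lock conflict flushes the batch.
use std::collections::HashSet;
struct Entry {
    accounts: Vec<u64>, // account ids this entry's transactions touch
    is_tick: bool,
}
fn execute_in_parallel(batch: &[&Entry]) {
    // stand-in for load_execute_and_commit_transactions over the batch
    let _ = batch.len();
}
fn process_entries(entries: &[Entry]) {
    let mut locked: HashSet<u64> = HashSet::new();
    let mut batch: Vec<&Entry> = vec![];
    for entry in entries {
        let conflict = entry.accounts.iter().any(|a| locked.contains(a));
        if entry.is_tick || conflict {
            // step 2: process the locked group in parallel, then drop its locks
            execute_in_parallel(&batch);
            batch.clear();
            locked.clear();
            // (tick registration elided; step 3 happens here in the real code)
        }
        if !entry.is_tick {
            // step 1: take this entry's locks and accumulate it
            locked.extend(entry.accounts.iter().cloned());
            batch.push(entry);
        }
    }
    execute_in_parallel(&batch);
}
fn main() {
    let entries = vec![
        Entry { accounts: vec![1, 2], is_tick: false },
        Entry { accounts: vec![3], is_tick: false }, // disjoint: same batch
        Entry { accounts: vec![2], is_tick: false }, // conflict: flushes batch
        Entry { accounts: vec![], is_tick: true },   // tick: flushes batch
    ];
    process_entries(&entries);
}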
/// Process an ordered list of entries.
pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
par_process_entries(bank, entries)
}
#[derive(Debug, PartialEq)]
pub struct BankForksInfo {
pub bank_slot: u64,
pub entry_height: u64,
}
#[derive(Debug)]
pub enum BlocktreeProcessorError {
LedgerVerificationFailed,
}
pub fn process_blocktree(
genesis_block: &GenesisBlock,
blocktree: &Blocktree,
account_paths: Option<String>,
) -> Result<(BankForks, Vec<BankForksInfo>)> {
) -> result::Result<(BankForks, Vec<BankForksInfo>), BlocktreeProcessorError> {
let now = Instant::now();
info!("processing ledger...");
@ -112,7 +132,7 @@ pub fn process_blocktree(
.meta(slot)
.map_err(|err| {
warn!("Failed to load meta for slot {}: {:?}", slot, err);
BankError::LedgerVerificationFailed
BlocktreeProcessorError::LedgerVerificationFailed
})?
.unwrap();
@ -127,7 +147,7 @@ pub fn process_blocktree(
// Fetch all entries for this slot
let mut entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
warn!("Failed to load entries for slot {}: {:?}", slot, err);
BankError::LedgerVerificationFailed
BlocktreeProcessorError::LedgerVerificationFailed
})?;
if slot == 0 {
@ -136,16 +156,15 @@ pub fn process_blocktree(
// processed by the bank, skip over it.
if entries.is_empty() {
warn!("entry0 not present");
return Err(BankError::LedgerVerificationFailed);
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
}
let entry0 = &entries[0];
let entry0 = entries.remove(0);
if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
warn!("Ledger proof of history failed at entry0");
return Err(BankError::LedgerVerificationFailed);
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
}
last_entry_hash = entry0.hash;
entry_height += 1;
entries = entries.drain(1..).collect();
}
if !entries.is_empty() {
@ -154,20 +173,23 @@ pub fn process_blocktree(
"Ledger proof of history failed at slot: {}, entry: {}",
slot, entry_height
);
return Err(BankError::LedgerVerificationFailed);
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
}
process_entries(&bank, &entries).map_err(|err| {
warn!("Failed to process entries for slot {}: {:?}", slot, err);
BankError::LedgerVerificationFailed
BlocktreeProcessorError::LedgerVerificationFailed
})?;
last_entry_hash = entries.last().unwrap().hash;
entry_height += entries.len() as u64;
}
// TODO merge with locktower, voting, bank.vote_accounts()...
bank.squash();
bank.freeze(); // all banks handled by this routine are created from complete slots
if blocktree.is_root(slot) {
bank.squash();
}
if meta.next_slots.is_empty() {
// Reached the end of this fork. Record the final entry height and last entry.hash
@ -185,7 +207,7 @@ pub fn process_blocktree(
.meta(next_slot)
.map_err(|err| {
warn!("Failed to load meta for slot {}: {:?}", slot, err);
BankError::LedgerVerificationFailed
BlocktreeProcessorError::LedgerVerificationFailed
})?
.unwrap();
@ -239,8 +261,11 @@ mod tests {
use crate::entry::{create_ticks, next_entry, Entry};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::instruction::InstructionError;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use solana_sdk::transaction::TransactionError;
fn fill_blocktree_slot_with_ticks(
blocktree: &Blocktree,
@ -281,8 +306,8 @@ mod tests {
let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_block);
debug!("ledger_path: {:?}", ledger_path);
let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot)
.expect("Expected to successfully open database ledger");
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
// Write slot 1
// slot 1, points at slot 0. Missing one tick
@ -332,7 +357,7 @@ mod tests {
slot 0
|
slot 1
slot 1 <-- set_root(true)
/ \
slot 2 |
/ |
@ -341,8 +366,8 @@ mod tests {
slot 4
*/
let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot)
.expect("Expected to successfully open database ledger");
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
// Fork 1, ending at slot 3
let last_slot1_entry_hash =
@ -359,6 +384,9 @@ mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_root(0).unwrap();
blocktree.set_root(1).unwrap();
let (bank_forks, bank_forks_info) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
@ -370,6 +398,14 @@ mod tests {
entry_height: ticks_per_slot * 4,
}
);
assert_eq!(
&bank_forks[3]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[2, 1]
);
assert_eq!(
bank_forks_info[1],
BankForksInfo {
@ -377,9 +413,16 @@ mod tests {
entry_height: ticks_per_slot * 3,
}
);
assert_eq!(
&bank_forks[4]
.parents()
.iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>(),
&[1]
);
// Ensure bank_forks holds the right banks, and that everything's
// frozen
// Ensure bank_forks holds the right banks
for info in bank_forks_info {
assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
assert!(bank_forks[info.bank_slot].is_frozen());
@ -390,32 +433,32 @@ mod tests {
fn test_first_err() {
assert_eq!(first_err(&[Ok(())]), Ok(()));
assert_eq!(
first_err(&[Ok(()), Err(BankError::DuplicateSignature)]),
Err(BankError::DuplicateSignature)
first_err(&[Ok(()), Err(TransactionError::DuplicateSignature)]),
Err(TransactionError::DuplicateSignature)
);
assert_eq!(
first_err(&[
Ok(()),
Err(BankError::DuplicateSignature),
Err(BankError::AccountInUse)
Err(TransactionError::DuplicateSignature),
Err(TransactionError::AccountInUse)
]),
Err(BankError::DuplicateSignature)
Err(TransactionError::DuplicateSignature)
);
assert_eq!(
first_err(&[
Ok(()),
Err(BankError::AccountInUse),
Err(BankError::DuplicateSignature)
Err(TransactionError::AccountInUse),
Err(TransactionError::DuplicateSignature)
]),
Err(BankError::AccountInUse)
Err(TransactionError::AccountInUse)
);
assert_eq!(
first_err(&[
Err(BankError::AccountInUse),
Err(TransactionError::AccountInUse),
Ok(()),
Err(BankError::DuplicateSignature)
Err(TransactionError::DuplicateSignature)
]),
Err(BankError::AccountInUse)
Err(TransactionError::AccountInUse)
);
}
@ -427,7 +470,7 @@ mod tests {
let bank = Bank::new(&genesis_block);
let keypair = Keypair::new();
let slot_entries = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair.pubkey(),
1,
@ -438,18 +481,18 @@ mod tests {
// First, ensure the TX is rejected because of the unregistered last ID
assert_eq!(
bank.process_transaction(&tx),
Err(BankError::BlockhashNotFound)
Err(TransactionError::BlockhashNotFound)
);
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
par_process_entries(&bank, &slot_entries).unwrap();
process_entries(&bank, &slot_entries).unwrap();
assert_eq!(bank.process_transaction(&tx), Ok(()));
}
#[test]
fn test_process_ledger_simple() {
solana_logger::setup();
let leader_pubkey = Keypair::new().pubkey();
let leader_pubkey = Pubkey::new_rand();
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(100, &leader_pubkey, 50);
let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger!(&genesis_block);
debug!("ledger_path: {:?}", ledger_path);
@ -459,16 +502,27 @@ mod tests {
for _ in 0..3 {
// Transfer one token from the mint to a random account
let keypair = Keypair::new();
let tx =
SystemTransaction::new_account(&mint_keypair, &keypair.pubkey(), 1, blockhash, 0);
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair.pubkey(),
1,
blockhash,
0,
);
let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
last_entry_hash = entry.hash;
entries.push(entry);
// Add a second Transaction that will produce a
// ProgramError<0, ResultWithNegativeLamports> error when processed
// InstructionError<0, ResultWithNegativeLamports> error when processed
let keypair2 = Keypair::new();
let tx = SystemTransaction::new_account(&keypair, &keypair2.pubkey(), 42, blockhash, 0);
let tx = system_transaction::create_user_account(
&keypair,
&keypair2.pubkey(),
42,
blockhash,
0,
);
let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
last_entry_hash = entry.hash;
entries.push(entry);
@ -479,7 +533,9 @@ mod tests {
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
blocktree.write_entries(1, 0, 0, &entries).unwrap();
blocktree
.write_entries(1, 0, 0, genesis_block.ticks_per_slot, &entries)
.unwrap();
let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
let (bank_forks, bank_forks_info) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
@ -522,19 +578,19 @@ mod tests {
}
#[test]
fn test_par_process_entries_tick() {
fn test_process_entries_tick() {
let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
// ensure bank can process a tick
assert_eq!(bank.tick_height(), 0);
let tick = next_entry(&genesis_block.hash(), 1, vec![]);
assert_eq!(par_process_entries(&bank, &[tick.clone()]), Ok(()));
assert_eq!(process_entries(&bank, &[tick.clone()]), Ok(()));
assert_eq!(bank.tick_height(), 1);
}
#[test]
fn test_par_process_entries_2_entries_collision() {
fn test_process_entries_2_entries_collision() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
@ -543,7 +599,7 @@ mod tests {
let blockhash = bank.last_blockhash();
// ensure bank can process 2 entries that have a common account and no tick is registered
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair1.pubkey(),
2,
@ -551,7 +607,7 @@ mod tests {
0,
);
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair2.pubkey(),
2,
@ -559,14 +615,14 @@ mod tests {
0,
);
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
assert_eq!(par_process_entries(&bank, &[entry_1, entry_2]), Ok(()));
assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));
assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
assert_eq!(bank.last_blockhash(), blockhash);
}
#[test]
fn test_par_process_entries_2_txes_collision() {
fn test_process_entries_2_txes_collision() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
@ -574,20 +630,14 @@ mod tests {
let keypair3 = Keypair::new();
// fund: put 4 in each of 1 and 2
assert_matches!(
bank.transfer(4, &mint_keypair, &keypair1.pubkey(), bank.last_blockhash()),
Ok(_)
);
assert_matches!(
bank.transfer(4, &mint_keypair, &keypair2.pubkey(), bank.last_blockhash()),
Ok(_)
);
assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
// construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![SystemTransaction::new_account(
vec![system_transaction::create_user_account(
&keypair1,
&mint_keypair.pubkey(),
1,
@ -600,14 +650,14 @@ mod tests {
&entry_1_to_mint.hash,
1,
vec![
SystemTransaction::new_account(
system_transaction::create_user_account(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
0,
), // should be fine
SystemTransaction::new_account(
system_transaction::create_user_account(
&keypair1,
&mint_keypair.pubkey(),
2,
@ -618,7 +668,7 @@ mod tests {
);
assert_eq!(
par_process_entries(&bank, &[entry_1_to_mint, entry_2_to_3_mint_to_1]),
process_entries(&bank, &[entry_1_to_mint, entry_2_to_3_mint_to_1]),
Ok(())
);
@ -628,7 +678,89 @@ mod tests {
}
#[test]
fn test_par_process_entries_2_entries_par() {
fn test_process_entries_2_txes_collision_and_error() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let keypair4 = Keypair::new();
// fund: put 4 in each of keypairs 1, 2, and 4
assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));
// construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![
system_transaction::create_user_account(
&keypair1,
&mint_keypair.pubkey(),
1,
bank.last_blockhash(),
0,
),
system_transaction::transfer(
&keypair4,
&keypair4.pubkey(),
1,
Hash::default(), // Should cause a transaction failure with BlockhashNotFound
0,
),
],
);
let entry_2_to_3_mint_to_1 = next_entry(
&entry_1_to_mint.hash,
1,
vec![
system_transaction::create_user_account(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
0,
), // should be fine
system_transaction::create_user_account(
&keypair1,
&mint_keypair.pubkey(),
2,
bank.last_blockhash(),
0,
), // will collide
],
);
assert!(process_entries(
&bank,
&[entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()]
)
.is_err());
// First transaction in first entry succeeded, so keypair1 lost 1 lamport
assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);
// Check all accounts are unlocked
let txs1 = &entry_1_to_mint.transactions[..];
let txs2 = &entry_2_to_3_mint_to_1.transactions[..];
let locked_accounts1 = bank.lock_accounts(txs1);
for result in locked_accounts1.locked_accounts_results() {
assert!(result.is_ok());
}
// txs1 and txs2 have accounts that conflict, so we must drop txs1 first
drop(locked_accounts1);
let locked_accounts2 = bank.lock_accounts(txs2);
for result in locked_accounts2.locked_accounts_results() {
assert!(result.is_ok());
}
}
#[test]
fn test_process_entries_2_entries_par() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
@ -637,7 +769,7 @@ mod tests {
let keypair4 = Keypair::new();
//load accounts
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair1.pubkey(),
1,
@ -645,7 +777,7 @@ mod tests {
0,
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair2.pubkey(),
1,
@ -656,7 +788,7 @@ mod tests {
// ensure bank can process 2 entries that do not have a common account and no tick is registered
let blockhash = bank.last_blockhash();
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&keypair1,
&keypair3.pubkey(),
1,
@ -664,7 +796,7 @@ mod tests {
0,
);
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&keypair2,
&keypair4.pubkey(),
1,
@ -672,14 +804,14 @@ mod tests {
0,
);
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
assert_eq!(par_process_entries(&bank, &[entry_1, entry_2]), Ok(()));
assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));
assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
assert_eq!(bank.last_blockhash(), blockhash);
}
#[test]
fn test_par_process_entries_2_entries_tick() {
fn test_process_entries_2_entries_tick() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
@ -688,7 +820,7 @@ mod tests {
let keypair4 = Keypair::new();
//load accounts
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair1.pubkey(),
1,
@ -696,7 +828,7 @@ mod tests {
0,
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&mint_keypair,
&keypair2.pubkey(),
1,
@ -711,10 +843,11 @@ mod tests {
}
// ensure bank can process 2 entries that do not have a common account and tick is registered
let tx = SystemTransaction::new_account(&keypair2, &keypair3.pubkey(), 1, blockhash, 0);
let tx =
system_transaction::create_user_account(&keypair2, &keypair3.pubkey(), 1, blockhash, 0);
let entry_1 = next_entry(&blockhash, 1, vec![tx]);
let tick = next_entry(&entry_1.hash, 1, vec![]);
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&keypair1,
&keypair4.pubkey(),
1,
@ -723,14 +856,14 @@ mod tests {
);
let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
assert_eq!(
par_process_entries(&bank, &[entry_1.clone(), tick.clone(), entry_2.clone()]),
process_entries(&bank, &[entry_1.clone(), tick.clone(), entry_2.clone()]),
Ok(())
);
assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
// ensure that an error is returned for an empty account (keypair2)
let tx = SystemTransaction::new_account(
let tx = system_transaction::create_user_account(
&keypair2,
&keypair3.pubkey(),
1,
@ -739,8 +872,91 @@ mod tests {
);
let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
assert_eq!(
par_process_entries(&bank, &[entry_3]),
Err(BankError::AccountNotFound)
process_entries(&bank, &[entry_3]),
Err(TransactionError::AccountNotFound)
);
}
#[test]
fn test_update_transaction_statuses() {
// Make sure instruction errors still update the signature cache
let (genesis_block, mint_keypair) = GenesisBlock::new(11_000);
let bank = Bank::new(&genesis_block);
let pubkey = Pubkey::new_rand();
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(bank.get_balance(&pubkey), 1_000);
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
Err(TransactionError::InstructionError(
0,
InstructionError::new_result_with_negative_lamports(),
))
);
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
Err(TransactionError::DuplicateSignature)
);
// Make sure other errors don't update the signature cache
let tx = system_transaction::create_user_account(
&mint_keypair,
&pubkey,
1000,
Hash::default(),
0,
);
let signature = tx.signatures[0];
// Should fail with blockhash not found
assert_eq!(
bank.process_transaction(&tx).map(|_| signature),
Err(TransactionError::BlockhashNotFound)
);
// Should fail again with blockhash not found
assert_eq!(
bank.process_transaction(&tx).map(|_| signature),
Err(TransactionError::BlockhashNotFound)
);
}
#[test]
fn test_update_transaction_statuses_fail() {
let (genesis_block, mint_keypair) = GenesisBlock::new(11_000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let success_tx = system_transaction::create_user_account(
&mint_keypair,
&keypair1.pubkey(),
1,
bank.last_blockhash(),
0,
);
let fail_tx = system_transaction::create_user_account(
&mint_keypair,
&keypair2.pubkey(),
2,
bank.last_blockhash(),
0,
);
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![
success_tx,
fail_tx.clone(), // will collide
],
);
assert_eq!(
process_entries(&bank, &[entry_1_to_mint]),
Err(TransactionError::AccountInUse)
);
// Should not see duplicate signature error
assert_eq!(bank.process_transaction(&fail_tx), Ok(()));
}
}


@ -2,7 +2,7 @@
//!
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT};
use crate::entry::EntrySlice;
use crate::entry::{EntrySender, EntrySlice};
#[cfg(feature = "erasure")]
use crate::erasure::CodingGenerator;
use crate::packet::index_blobs;
@ -41,10 +41,11 @@ impl Broadcast {
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
storage_entry_sender: &EntrySender,
) -> Result<()> {
let timer = Duration::new(1, 0);
let (mut bank, entries) = receiver.recv_timeout(timer)?;
let mut max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
let mut max_tick_height = bank.max_tick_height();
let now = Instant::now();
let mut num_entries = entries.len();
@ -52,7 +53,7 @@ impl Broadcast {
let mut last_tick = entries.last().map(|v| v.1).unwrap_or(0);
ventries.push(entries);
assert!(last_tick <= max_tick_height,);
assert!(last_tick <= max_tick_height);
if last_tick != max_tick_height {
while let Ok((same_bank, entries)) = receiver.try_recv() {
// If the bank changed, that implies the previous slot was interrupted and we do not have to
@ -61,7 +62,7 @@ impl Broadcast {
num_entries = 0;
ventries.clear();
bank = same_bank.clone();
max_tick_height = (bank.slot() + 1) * bank.ticks_per_slot() - 1;
max_tick_height = bank.max_tick_height();
}
num_entries += entries.len();
last_tick = entries.last().map(|v| v.1).unwrap_or(0);
@ -87,10 +88,13 @@ impl Broadcast {
let blobs: Vec<_> = ventries
.into_par_iter()
.flat_map(|p| {
.map_with(storage_entry_sender.clone(), |s, p| {
let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
entries.to_shared_blobs()
let blobs = entries.to_shared_blobs();
let _ignored = s.send(entries);
blobs
})
.flatten()
.collect();
let blob_index = blocktree
@ -115,6 +119,9 @@ impl Broadcast {
blocktree.write_shared_blobs(&blobs)?;
#[cfg(feature = "erasure")]
let coding = self.coding_generator.next(&blobs);
let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
let broadcast_start = Instant::now();
@ -122,16 +129,14 @@ impl Broadcast {
// Send out data
ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
#[cfg(feature = "erasure")]
ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
inc_new_counter_info!("streamer-broadcast-sent", blobs.len());
// Fill in the coding blob data from the window data blobs
#[cfg(feature = "erasure")]
{
let coding = self.coding_generator.next(&blobs)?;
// send out erasures
ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
}
// generate and transmit any erasure coding blobs. if erasure isn't supported, just send everything again
#[cfg(not(feature = "erasure"))]
ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
@ -186,6 +191,7 @@ impl BroadcastStage {
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
blocktree: &Arc<Blocktree>,
storage_entry_sender: EntrySender,
) -> BroadcastStageReturnType {
let me = cluster_info.read().unwrap().my_data().clone();
@ -196,7 +202,13 @@ impl BroadcastStage {
};
loop {
if let Err(e) = broadcast.run(&cluster_info, receiver, sock, blocktree) {
if let Err(e) = broadcast.run(
&cluster_info,
receiver,
sock,
blocktree,
&storage_entry_sender,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => {
return BroadcastStageReturnType::ChannelDisconnected;
@ -234,6 +246,7 @@ impl BroadcastStage {
receiver: Receiver<WorkingBankEntries>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
storage_entry_sender: EntrySender,
) -> Self {
let blocktree = blocktree.clone();
let exit_sender = exit_sender.clone();
@ -241,7 +254,13 @@ impl BroadcastStage {
.name("solana-broadcaster".to_string())
.spawn(move || {
let _finalizer = Finalizer::new(exit_sender);
Self::run(&sock, &cluster_info, &receiver, &blocktree)
Self::run(
&sock,
&cluster_info,
&receiver,
&blocktree,
storage_entry_sender,
)
})
.unwrap();
@ -301,6 +320,7 @@ mod test {
let cluster_info = Arc::new(RwLock::new(cluster_info));
let exit_sender = Arc::new(AtomicBool::new(false));
let (storage_sender, _receiver) = channel();
let bank = Arc::new(Bank::default());
// Start up the broadcast stage
@ -310,6 +330,7 @@ mod test {
entry_receiver,
&exit_sender,
&blocktree,
storage_sender,
);
MockBroadcastStage {


@ -97,7 +97,7 @@ mod tests {
use ring::signature::Ed25519KeyPair;
use solana_sdk::hash::{hash, Hash, Hasher};
use solana_sdk::signature::KeypairUtil;
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use std::fs::remove_file;
use std::fs::File;
use std::io::Read;
@ -124,7 +124,7 @@ mod tests {
Entry::new_mut(
&mut id,
&mut num_hashes,
vec![SystemTransaction::new_account(
vec![system_transaction::create_user_account(
&keypair,
&keypair.pubkey(),
1,
@ -142,11 +142,13 @@ mod tests {
let ledger_dir = "chacha_test_encrypt_file";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open_config(&ledger_path, ticks_per_slot).unwrap());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");
let entries = make_tiny_deterministic_test_entries(32);
blocktree.write_entries(0, 0, 0, &entries).unwrap();
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
let mut key = hex!(
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
@ -162,7 +164,7 @@ mod tests {
use bs58;
// golden needs to be updated if blob stuff changes....
let golden = Hash::new(
&bs58::decode("B33zQ8Kc3Wr3vZAbB6GcWaB3sSGeG98nvm4QB9URpJhR")
&bs58::decode("5NBn4cBZmNZRftkjxj3um8W1eyYPzn2RgUJSA3SVbHaJ")
.into_vec()
.unwrap(),
);


@ -127,9 +127,11 @@ mod tests {
let ledger_dir = "test_encrypt_file_many_keys_single";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open_config(&ledger_path, ticks_per_slot).unwrap());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree.write_entries(0, 0, 0, &entries).unwrap();
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
let out_path = Path::new("test_chacha_encrypt_file_many_keys_single_output.txt.enc");
@ -161,8 +163,10 @@ mod tests {
let ledger_dir = "test_encrypt_file_many_keys_multiple";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open_config(&ledger_path, ticks_per_slot).unwrap());
blocktree.write_entries(0, 0, 0, &entries).unwrap();
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
let out_path = Path::new("test_chacha_encrypt_file_many_keys_multiple_output.txt.enc");


@ -1,14 +0,0 @@
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo;
use crate::thin_client::ThinClient;
use std::time::Duration;
pub fn mk_client(r: &ContactInfo) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
ThinClient::new(r.rpc, r.tpu, transactions_socket)
}
pub fn mk_client_with_timeout(r: &ContactInfo, timeout: Duration) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
ThinClient::new_with_timeout(r.rpc, r.tpu, transactions_socket, timeout)
}

core/src/cluster.rs (new file)

@ -0,0 +1,6 @@
use solana_sdk::pubkey::Pubkey;
pub trait Cluster {
fn get_node_ids(&self) -> Vec<Pubkey>;
fn restart_node(&mut self, pubkey: Pubkey);
}


@ -20,8 +20,8 @@ use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use crate::crds_value::{CrdsValue, CrdsValueLabel, Vote};
use crate::packet::{to_shared_blob, Blob, SharedBlob, BLOB_SIZE};
use crate::repair_service::RepairType;
use crate::result::Result;
use crate::rpc_service::RPC_PORT;
use crate::staking_utils;
use crate::streamer::{BlobReceiver, BlobSender};
use bincode::{deserialize, serialize};
@ -31,7 +31,9 @@ use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_metrics::counter::Counter;
use solana_metrics::{influxdb, submit};
use solana_netutil::{bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range};
use solana_netutil::{
bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range, PortRange,
};
use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
@ -47,7 +49,7 @@ use std::sync::{Arc, RwLock};
use std::thread::{sleep, Builder, JoinHandle};
use std::time::{Duration, Instant};
pub const FULLNODE_PORT_RANGE: (u16, u16) = (8000, 10_000);
pub const FULLNODE_PORT_RANGE: PortRange = (8000, 10_000);
/// The fanout for Ledger Replication
pub const DATA_PLANE_FANOUT: usize = 200;
@ -58,6 +60,9 @@ pub const GROW_LAYER_CAPACITY: bool = false;
/// milliseconds we sleep for between gossip requests
pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
#[derive(Debug, PartialEq, Eq)]
pub enum ClusterInfoError {
NoPeers,
@ -161,6 +166,7 @@ enum Protocol {
/// TODO: move this message to a different module
RequestWindowIndex(ContactInfo, u64, u64),
RequestHighestWindowIndex(ContactInfo, u64, u64),
RequestOrphan(ContactInfo, u64),
}
impl ClusterInfo {
@ -221,7 +227,7 @@ impl ClusterInfo {
self.gossip
.crds
.lookup(&entry)
.and_then(|x| x.contact_info())
.and_then(CrdsValue::contact_info)
}
pub fn my_data(&self) -> ContactInfo {
@ -240,7 +246,7 @@ impl ClusterInfo {
pub fn contact_info_trace(&self) -> String {
let leader_id = self.gossip_leader_id;
let nodes: Vec<_> = self
.rpc_peers()
.tvu_peers()
.into_iter()
.map(|node| {
let mut annotation = String::new();
@ -256,7 +262,11 @@ impl ClusterInfo {
node.id,
annotation,
node.tpu.to_string(),
node.rpc.to_string()
if ContactInfo::is_valid_address(&node.rpc) {
node.rpc.to_string()
} else {
"none".to_string()
}
)
})
.collect();
@ -282,28 +292,28 @@ impl ClusterInfo {
pub fn push_vote(&mut self, vote: Transaction) {
let now = timestamp();
let vote = Vote::new(vote, now);
let vote = Vote::new(&self.id(), vote, now);
let mut entry = CrdsValue::Vote(vote);
entry.sign(&self.keypair);
self.gossip.process_push_message(&[entry], now);
}
/// Get votes in the crds
/// * since - The local timestamp when the vote was updated or inserted must be greater then
/// * since - The timestamp of when the vote was inserted must be greater than
/// since. This allows the bank to query for new votes only.
///
/// * return - The votes, and the max local timestamp from the new set.
/// * return - The votes, and the max timestamp from the new set.
pub fn get_votes(&self, since: u64) -> (Vec<Transaction>, u64) {
let votes: Vec<_> = self
.gossip
.crds
.table
.values()
.filter(|x| x.local_timestamp > since)
.filter(|x| x.insert_timestamp > since)
.filter_map(|x| {
x.value
.vote()
.map(|v| (x.local_timestamp, v.transaction.clone()))
.map(|v| (x.insert_timestamp, v.transaction.clone()))
})
.collect();
let max_ts = votes.iter().map(|x| x.0).max().unwrap_or(since);
@ -746,12 +756,13 @@ impl ClusterInfo {
Ok(out)
}
pub fn window_index_request(
&self,
slot: u64,
blob_index: u64,
get_highest: bool,
) -> Result<(SocketAddr, Vec<u8>)> {
fn orphan_bytes(&self, slot: u64) -> Result<Vec<u8>> {
let req = Protocol::RequestOrphan(self.my_data().clone(), slot);
let out = serialize(&req)?;
Ok(out)
}
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication, as indicated
// by a valid tvu port location
let valid: Vec<_> = self.repair_peers();
@ -761,19 +772,42 @@ impl ClusterInfo {
let n = thread_rng().gen::<usize>() % valid.len();
let addr = valid[n].gossip; // send the request to the peer's gossip port
let out = {
if get_highest {
self.window_highest_index_request_bytes(slot, blob_index)?
} else {
self.window_index_request_bytes(slot, blob_index)?
match repair_request {
RepairType::Blob(slot, blob_index) => {
submit(
influxdb::Point::new("cluster_info-repair")
.add_field("repair-slot", influxdb::Value::Integer(*slot as i64))
.add_field("repair-ix", influxdb::Value::Integer(*blob_index as i64))
.to_owned(),
);
self.window_index_request_bytes(*slot, *blob_index)?
}
RepairType::HighestBlob(slot, blob_index) => {
submit(
influxdb::Point::new("cluster_info-repair_highest")
.add_field(
"repair-highest-slot",
influxdb::Value::Integer(*slot as i64),
)
.add_field(
"repair-highest-ix",
influxdb::Value::Integer(*blob_index as i64),
)
.to_owned(),
);
self.window_highest_index_request_bytes(*slot, *blob_index)?
}
RepairType::Orphan(slot) => {
submit(
influxdb::Point::new("cluster_info-repair_orphan")
.add_field("repair-orphan", influxdb::Value::Integer(*slot as i64))
.to_owned(),
);
self.orphan_bytes(*slot)?
}
}
};
submit(
influxdb::Point::new("cluster-info")
.add_field("repair-ix", influxdb::Value::Integer(blob_index as i64))
.to_owned(),
);
Ok((addr, out))
}
// If the network entrypoint hasn't been discovered yet, add it to the crds table
@ -813,7 +847,7 @@ impl ClusterInfo {
self.gossip
.crds
.lookup(&peer_label)
.and_then(|v| v.contact_info())
.and_then(CrdsValue::contact_info)
.map(|peer_info| (peer, filter, peer_info.gossip, self_info))
})
.collect();
@ -839,7 +873,7 @@ impl ClusterInfo {
self.gossip
.crds
.lookup(&peer_label)
.and_then(|v| v.contact_info())
.and_then(CrdsValue::contact_info)
.map(|p| p.gossip)
})
.map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
@ -966,6 +1000,35 @@ impl ClusterInfo {
vec![]
}
fn run_orphan(
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
mut slot: u64,
max_responses: usize,
) -> Vec<SharedBlob> {
let mut res = vec![];
if let Some(blocktree) = blocktree {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blocktree.meta(slot) {
if meta.received == 0 {
break;
}
let blob = blocktree.get_data_blob(slot, meta.received - 1);
if let Ok(Some(mut blob)) = blob {
blob.meta.set_addr(from_addr);
res.push(Arc::new(RwLock::new(blob)));
}
if meta.is_parent_set() && res.len() <= max_responses {
slot = meta.parent_slot;
} else {
break;
}
}
}
res
}
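The walk above services an orphan request by following `parent_slot` links upward from the requested slot, returning the highest received blob of each ancestor until `max_responses` blobs are collected or the chain breaks. A toy model of the same traversal (hypothetical `Meta` type, slots keyed in a `HashMap`):
use std::collections::HashMap;
struct Meta {
    received: u64,       // count of blobs received for the slot
    parent: Option<u64>, // parent_slot, if known
}
fn orphan_walk(metas: &HashMap<u64, Meta>, mut slot: u64, max_responses: usize) -> Vec<(u64, u64)> {
    let mut res = vec![];
    while let Some(meta) = metas.get(&slot) {
        if meta.received == 0 {
            break;
        }
        res.push((slot, meta.received - 1)); // highest blob index seen in `slot`
        match meta.parent {
            Some(parent) if res.len() <= max_responses => slot = parent,
            _ => break,
        }
    }
    res
}
fn main() {
    let mut metas = HashMap::new();
    metas.insert(3, Meta { received: 5, parent: Some(2) });
    metas.insert(2, Meta { received: 5, parent: Some(1) });
    metas.insert(1, Meta { received: 5, parent: None });
    // mirrors the run_orphan test below: highest blobs of slots 3, 2, 1
    assert_eq!(orphan_walk(&metas, 3, 5), vec![(3, 4), (2, 4), (1, 4)]);
}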
//TODO we should first coalesce all the requests
fn handle_blob(
obj: &Arc<RwLock<Self>>,
@ -1082,14 +1145,21 @@ impl ClusterInfo {
vec![]
}
}
fn handle_request_window_index(
fn get_repair_sender(request: &Protocol) -> &ContactInfo {
match request {
Protocol::RequestWindowIndex(ref from, _, _) => from,
Protocol::RequestHighestWindowIndex(ref from, _, _) => from,
Protocol::RequestOrphan(ref from, _) => from,
_ => panic!("Not a repair request"),
}
}
fn handle_repair(
me: &Arc<RwLock<Self>>,
from: &ContactInfo,
blocktree: Option<&Arc<Blocktree>>,
slot: u64,
blob_index: u64,
from_addr: &SocketAddr,
is_get_highest: bool,
blocktree: Option<&Arc<Blocktree>>,
request: Protocol,
) -> Vec<SharedBlob> {
let now = Instant::now();
@ -1098,12 +1168,13 @@ impl ClusterInfo {
//TODO verify from is signed
let self_id = me.read().unwrap().gossip.id;
let from = Self::get_repair_sender(&request);
if from.id == me.read().unwrap().gossip.id {
warn!(
"{}: Ignored received RequestWindowIndex from ME {} {} {} ",
self_id, from.id, slot, blob_index,
"{}: Ignored received repair request from ME {}",
self_id, from.id,
);
inc_new_counter_info!("cluster_info-window-request-address-eq", 1);
inc_new_counter_info!("cluster_info-handle-repair--eq", 1);
return vec![];
}
@ -1113,26 +1184,49 @@ impl ClusterInfo {
.crds
.update_record_timestamp(&from.id, timestamp());
let my_info = me.read().unwrap().my_data().clone();
inc_new_counter_info!("cluster_info-window-request-recv", 1);
trace!(
"{}: received RequestWindowIndex from: {} slot: {}, blob_index: {}",
self_id,
from.id,
slot,
blob_index,
);
let res = {
if is_get_highest {
Self::run_highest_window_request(&from_addr, blocktree, slot, blob_index)
} else {
Self::run_window_request(&from, &from_addr, blocktree, &my_info, slot, blob_index)
let (res, label) = {
match &request {
Protocol::RequestWindowIndex(from, slot, blob_index) => {
inc_new_counter_info!("cluster_info-request-window-index", 1);
(
Self::run_window_request(
from,
&from_addr,
blocktree,
&my_info,
*slot,
*blob_index,
),
"RequestWindowIndex",
)
}
Protocol::RequestHighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_info!("cluster_info-request-highest-window-index", 1);
(
Self::run_highest_window_request(
&from_addr,
blocktree,
*slot,
*highest_index,
),
"RequestHighestWindowIndex",
)
}
Protocol::RequestOrphan(_, slot) => {
inc_new_counter_info!("cluster_info-request-orphan", 1);
(
Self::run_orphan(&from_addr, blocktree, *slot, MAX_ORPHAN_REPAIR_RESPONSES),
"RequestOrphan",
)
}
_ => panic!("Not a repair request"),
}
};
report_time_spent(
"RequestWindowIndex",
&now.elapsed(),
&format!("slot {}, blob_index: {}", slot, blob_index),
);
trace!("{}: received repair request: {:?}", self_id, request);
report_time_spent(label, &now.elapsed(), "");
res
}
@ -1198,22 +1292,7 @@ impl ClusterInfo {
}
vec![]
}
Protocol::RequestWindowIndex(from, slot, blob_index) => {
Self::handle_request_window_index(
me, &from, blocktree, slot, blob_index, from_addr, false,
)
}
Protocol::RequestHighestWindowIndex(from, slot, highest_index) => {
Self::handle_request_window_index(
me,
&from,
blocktree,
slot,
highest_index,
from_addr,
true,
)
}
_ => Self::handle_repair(me, from_addr, blocktree, request),
}
}
@ -1347,6 +1426,7 @@ pub struct Sockets {
pub broadcast: UdpSocket,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
pub storage: Option<UdpSocket>,
}
#[derive(Debug)]
@ -1357,9 +1437,44 @@ pub struct Node {
impl Node {
pub fn new_localhost() -> Self {
let pubkey = Keypair::new().pubkey();
let pubkey = Pubkey::new_rand();
Self::new_localhost_with_pubkey(&pubkey)
}
pub fn new_localhost_replicator(pubkey: &Pubkey) -> Self {
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let tvu = UdpSocket::bind("127.0.0.1:0").unwrap();
let storage = UdpSocket::bind("127.0.0.1:0").unwrap();
let empty = "0.0.0.0:0".parse().unwrap();
let repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
let info = ContactInfo::new(
pubkey,
gossip.local_addr().unwrap(),
tvu.local_addr().unwrap(),
empty,
empty,
storage.local_addr().unwrap(),
empty,
empty,
timestamp(),
);
Node {
info,
sockets: Sockets {
gossip,
tvu: vec![tvu],
tpu: vec![],
tpu_via_blobs: vec![],
broadcast,
repair,
retransmit,
storage: Some(storage),
},
}
}
pub fn new_localhost_with_pubkey(pubkey: &Pubkey) -> Self {
let tpu = UdpSocket::bind("127.0.0.1:0").unwrap();
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
@ -1396,15 +1511,12 @@ impl Node {
broadcast,
repair,
retransmit,
storage: None,
},
}
}
pub fn new_with_external_ip(pubkey: &Pubkey, gossip_addr: &SocketAddr) -> Node {
fn bind() -> (u16, UdpSocket) {
bind_in_range(FULLNODE_PORT_RANGE).expect("Failed to bind")
};
let (gossip_port, gossip) = if gossip_addr.port() != 0 {
fn get_gossip_port(gossip_addr: &SocketAddr, port_range: PortRange) -> (u16, UdpSocket) {
if gossip_addr.port() != 0 {
(
gossip_addr.port(),
bind_to(gossip_addr.port(), false).unwrap_or_else(|e| {
@ -1412,22 +1524,29 @@ impl Node {
}),
)
} else {
bind()
};
Self::bind(port_range)
}
}
fn bind(port_range: PortRange) -> (u16, UdpSocket) {
bind_in_range(port_range).expect("Failed to bind")
}
pub fn new_with_external_ip(
pubkey: &Pubkey,
gossip_addr: &SocketAddr,
port_range: PortRange,
) -> Node {
let (gossip_port, gossip) = Self::get_gossip_port(gossip_addr, port_range);
let (tvu_port, tvu_sockets) =
multi_bind_in_range(FULLNODE_PORT_RANGE, 8).expect("tvu multi_bind");
let (tvu_port, tvu_sockets) = multi_bind_in_range(port_range, 8).expect("tvu multi_bind");
let (tpu_port, tpu_sockets) =
multi_bind_in_range(FULLNODE_PORT_RANGE, 32).expect("tpu multi_bind");
let (tpu_port, tpu_sockets) = multi_bind_in_range(port_range, 32).expect("tpu multi_bind");
let (tpu_via_blobs_port, tpu_via_blobs_sockets) =
multi_bind_in_range(FULLNODE_PORT_RANGE, 8).expect("tpu multi_bind");
multi_bind_in_range(port_range, 8).expect("tpu multi_bind");
let (_, repair) = bind();
let (_, broadcast) = bind();
let (_, retransmit) = bind();
let (storage_port, _) = bind();
let (_, repair) = Self::bind(port_range);
let (_, broadcast) = Self::bind(port_range);
let (_, retransmit) = Self::bind(port_range);
let info = ContactInfo::new(
pubkey,
@ -1435,9 +1554,9 @@ impl Node {
SocketAddr::new(gossip_addr.ip(), tvu_port),
SocketAddr::new(gossip_addr.ip(), tpu_port),
SocketAddr::new(gossip_addr.ip(), tpu_via_blobs_port),
SocketAddr::new(gossip_addr.ip(), storage_port),
SocketAddr::new(gossip_addr.ip(), RPC_PORT),
SocketAddr::new(gossip_addr.ip(), RPC_PORT + 1),
socketaddr_any!(),
socketaddr_any!(),
socketaddr_any!(),
0,
);
trace!("new ContactInfo: {:?}", info);
@ -1452,9 +1571,29 @@ impl Node {
broadcast,
repair,
retransmit,
storage: None,
},
}
}
pub fn new_replicator_with_external_ip(
pubkey: &Pubkey,
gossip_addr: &SocketAddr,
port_range: PortRange,
) -> Node {
let mut new = Self::new_with_external_ip(pubkey, gossip_addr, port_range);
let (storage_port, storage_socket) = Self::bind(port_range);
new.info.storage_addr = SocketAddr::new(gossip_addr.ip(), storage_port);
new.sockets.storage = Some(storage_socket);
let empty = socketaddr_any!();
new.info.tpu = empty;
new.info.tpu_via_blobs = empty;
new.sockets.tpu = vec![];
new.sockets.tpu_via_blobs = vec![];
new
}
}
fn report_time_spent(label: &str, time: &Duration, extra: &str) {
@ -1468,9 +1607,11 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_many_slot_entries;
use crate::blocktree::Blocktree;
use crate::crds_value::CrdsValueLabel;
use crate::packet::BLOB_HEADER_SIZE;
use crate::repair_service::RepairType;
use crate::result::Error;
use crate::test_tx::test_tx;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -1482,7 +1623,7 @@ mod tests {
fn test_cluster_spy_gossip() {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _) = ClusterInfo::spy_node(&Keypair::new().pubkey());
let (spy, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
node.info,
)));
@ -1506,43 +1647,43 @@ mod tests {
#[test]
fn test_cluster_info_new() {
let d = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let d = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
assert_eq!(d.id, cluster_info.my_data().id);
}
#[test]
fn insert_info_test() {
let d = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let d = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(d);
let d = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let d = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let label = CrdsValueLabel::ContactInfo(d.id);
cluster_info.insert_info(d);
assert!(cluster_info.gossip.crds.lookup(&label).is_some());
}
#[test]
fn test_insert_self() {
let d = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let d = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
let entry_label = CrdsValueLabel::ContactInfo(cluster_info.id());
assert!(cluster_info.gossip.crds.lookup(&entry_label).is_some());
// inserting something else shouldn't work
let d = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let d = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
cluster_info.insert_self(d.clone());
let label = CrdsValueLabel::ContactInfo(d.id);
assert!(cluster_info.gossip.crds.lookup(&label).is_none());
}
#[test]
fn window_index_request() {
let me = ContactInfo::new_localhost(&Keypair::new().pubkey(), timestamp());
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
let rv = cluster_info.window_index_request(0, 0, false);
let rv = cluster_info.repair_request(&RepairType::Blob(0, 0));
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
let nxt = ContactInfo::new(
&Keypair::new().pubkey(),
&Pubkey::new_rand(),
gossip_addr,
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
@@ -1553,13 +1694,15 @@ mod tests {
0,
);
cluster_info.insert_info(nxt.clone());
let rv = cluster_info.window_index_request(0, 0, false).unwrap();
let rv = cluster_info
.repair_request(&RepairType::Blob(0, 0))
.unwrap();
assert_eq!(nxt.gossip, gossip_addr);
assert_eq!(rv.0, nxt.gossip);
let gossip_addr2 = socketaddr!([127, 0, 0, 2], 1234);
let nxt = ContactInfo::new(
&Keypair::new().pubkey(),
&Pubkey::new_rand(),
gossip_addr2,
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
@@ -1574,7 +1717,9 @@
let mut two = false;
while !one || !two {
//this randomly picks an option, so eventually it should pick both
let rv = cluster_info.window_index_request(0, 0, false).unwrap();
let rv = cluster_info
.repair_request(&RepairType::Blob(0, 0))
.unwrap();
if rv.0 == gossip_addr {
one = true;
}
@@ -1593,7 +1738,7 @@
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let me = ContactInfo::new(
&Keypair::new().pubkey(),
&Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"),
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
@@ -1692,10 +1837,46 @@
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn run_orphan() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_orphan(&socketaddr_any!(), Some(&blocktree), 2, 0);
assert!(rv.is_empty());
// Create slots 1, 2, 3 with 5 blobs apiece
let (blobs, _) = make_many_slot_entries(1, 3, 5);
blocktree
.write_blobs(&blobs)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv = ClusterInfo::run_orphan(&socketaddr_any!(), Some(&blocktree), 4, 5);
assert!(rv.is_empty());
// For slot 3, we should return the highest blobs from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> = ClusterInfo::run_orphan(&socketaddr_any!(), Some(&blocktree), 3, 5)
.iter()
.map(|b| b.read().unwrap().clone())
.collect();
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| blocktree.get_data_blob(slot, 4).unwrap().unwrap())
.collect();
assert_eq!(rv, expected)
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
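The `run_orphan` test above expects the responder to walk backwards from the requested orphan slot, returning the highest blob of each known slot and stopping at the first unknown one. A self-contained sketch of that walk, assuming linear parentage and a HashMap in place of the blocktree (the real code follows each slot's recorded parent):

```rust
use std::collections::HashMap;

// Blocktree stand-in: slot -> highest data-blob index present in that slot.
fn run_orphan_sketch(
    highest_index: &HashMap<u64, u64>,
    orphan_slot: u64,
    max_responses: usize,
) -> Vec<(u64, u64)> {
    let mut responses = Vec::new();
    let mut slot = orphan_slot;
    while responses.len() < max_responses {
        match highest_index.get(&slot) {
            // Return the highest blob of each known slot, walking backwards.
            Some(&index) => responses.push((slot, index)),
            // Unknown slot: stop, mirroring the empty result for slot 4 above.
            None => break,
        }
        if slot == 0 {
            break;
        }
        slot -= 1;
    }
    responses
}

fn main() {
    let mut blocktree = HashMap::new();
    for slot in 1..=3u64 {
        blocktree.insert(slot, 4); // slots 1..=3 with 5 blobs (indexes 0..=4)
    }
    assert!(run_orphan_sketch(&blocktree, 4, 5).is_empty());
    assert_eq!(
        run_orphan_sketch(&blocktree, 3, 5),
        vec![(3, 4), (2, 4), (1, 4)]
    );
}
```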
#[test]
fn test_default_leader() {
solana_logger::setup();
let contact_info = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let contact_info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let network_entry_point =
ContactInfo::new_gossip_entry_point(&socketaddr!("127.0.0.1:1239"));
@@ -1703,69 +1884,80 @@
assert!(cluster_info.leader_data().is_none());
}
fn assert_in_range(x: u16, range: (u16, u16)) {
assert!(x >= range.0);
assert!(x < range.1);
}
fn check_sockets(sockets: &Vec<UdpSocket>, ip: IpAddr, range: (u16, u16)) {
assert!(sockets.len() > 1);
let port = sockets[0].local_addr().unwrap().port();
for socket in sockets.iter() {
check_socket(socket, ip, range);
assert_eq!(socket.local_addr().unwrap().port(), port);
}
}
fn check_socket(socket: &UdpSocket, ip: IpAddr, range: (u16, u16)) {
let local_addr = socket.local_addr().unwrap();
assert_eq!(local_addr.ip(), ip);
assert_in_range(local_addr.port(), range);
}
fn check_node_sockets(node: &Node, ip: IpAddr, range: (u16, u16)) {
check_socket(&node.sockets.gossip, ip, range);
check_socket(&node.sockets.repair, ip, range);
check_sockets(&node.sockets.tvu, ip, range);
check_sockets(&node.sockets.tpu, ip, range);
}
#[test]
fn new_with_external_ip_test_random() {
let ip = Ipv4Addr::from(0);
let node = Node::new_with_external_ip(&Keypair::new().pubkey(), &socketaddr!(ip, 0));
assert_eq!(node.sockets.gossip.local_addr().unwrap().ip(), ip);
assert!(node.sockets.tvu.len() > 1);
for tx_socket in node.sockets.tvu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert!(node.sockets.tpu.len() > 1);
for tx_socket in node.sockets.tpu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert_eq!(node.sockets.repair.local_addr().unwrap().ip(), ip);
let node = Node::new_with_external_ip(
&Pubkey::new_rand(),
&socketaddr!(ip, 0),
FULLNODE_PORT_RANGE,
);
assert!(node.sockets.gossip.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.gossip.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
let tx_port = node.sockets.tvu[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.tvu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
let tx_port = node.sockets.tpu[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.tpu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
assert!(node.sockets.repair.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.repair.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
check_node_sockets(&node, IpAddr::V4(ip), FULLNODE_PORT_RANGE);
}
#[test]
fn new_with_external_ip_test_gossip() {
let ip = IpAddr::V4(Ipv4Addr::from(0));
let node = Node::new_with_external_ip(&Keypair::new().pubkey(), &socketaddr!(0, 8050));
assert_eq!(node.sockets.gossip.local_addr().unwrap().ip(), ip);
assert!(node.sockets.tvu.len() > 1);
for tx_socket in node.sockets.tvu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert!(node.sockets.tpu.len() > 1);
for tx_socket in node.sockets.tpu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert_eq!(node.sockets.repair.local_addr().unwrap().ip(), ip);
let port = {
bind_in_range(FULLNODE_PORT_RANGE)
.expect("Failed to bind")
.0
};
let node = Node::new_with_external_ip(
&Pubkey::new_rand(),
&socketaddr!(0, port),
FULLNODE_PORT_RANGE,
);
assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), 8050);
let tx_port = node.sockets.tvu[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.tvu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
let tx_port = node.sockets.tpu[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.tpu.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
assert!(node.sockets.repair.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.repair.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
check_node_sockets(&node, ip, FULLNODE_PORT_RANGE);
assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port);
}
#[test]
fn new_replicator_external_ip_test() {
let ip = Ipv4Addr::from(0);
let node = Node::new_replicator_with_external_ip(
&Pubkey::new_rand(),
&socketaddr!(ip, 0),
FULLNODE_PORT_RANGE,
);
let ip = IpAddr::V4(ip);
check_socket(&node.sockets.storage.unwrap(), ip, FULLNODE_PORT_RANGE);
check_socket(&node.sockets.gossip, ip, FULLNODE_PORT_RANGE);
check_socket(&node.sockets.repair, ip, FULLNODE_PORT_RANGE);
check_sockets(&node.sockets.tvu, ip, FULLNODE_PORT_RANGE);
}
//test that all cluster_info objects only generate signed messages
@@ -1980,7 +2172,7 @@ fn test_add_entrypoint() {
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
);
let entrypoint_id = Keypair::new().pubkey();
let entrypoint_id = Pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_id, timestamp());
cluster_info.set_entrypoint(entrypoint.clone());
let pulls = cluster_info.new_pull_requests(&HashMap::new());


@@ -1,11 +1,13 @@
use crate::cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS};
use crate::packet;
use crate::poh_recorder::PohRecorder;
use crate::result::Result;
use crate::service::Service;
use crate::streamer::PacketSender;
use crate::sigverify_stage::VerifiedPackets;
use crate::{packet, sigverify};
use solana_metrics::counter::Counter;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
@@ -17,13 +19,22 @@ impl ClusterInfoVoteListener {
pub fn new(
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
sender: PacketSender,
sigverify_disabled: bool,
sender: Sender<VerifiedPackets>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> Self {
let exit = exit.clone();
let poh_recorder = poh_recorder.clone();
let thread = Builder::new()
.name("solana-cluster_info_vote_listener".to_string())
.spawn(move || {
let _ = Self::recv_loop(exit, &cluster_info, &sender);
let _ = Self::recv_loop(
exit,
&cluster_info,
sigverify_disabled,
&sender,
poh_recorder,
);
})
.unwrap();
Self {
@@ -33,7 +44,9 @@ impl ClusterInfoVoteListener {
fn recv_loop(
exit: Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sender: &PacketSender,
sigverify_disabled: bool,
sender: &Sender<VerifiedPackets>,
poh_recorder: Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let mut last_ts = 0;
loop {
@@ -41,11 +54,16 @@ impl ClusterInfoVoteListener {
return Ok(());
}
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
last_ts = new_ts;
inc_new_counter_info!("cluster_info_vote_listener-recv_count", votes.len());
let msgs = packet::to_packets(&votes);
for m in msgs {
sender.send(m)?;
if poh_recorder.lock().unwrap().bank().is_some() {
last_ts = new_ts;
inc_new_counter_info!("cluster_info_vote_listener-recv_count", votes.len());
let msgs = packet::to_packets(&votes);
let r = if sigverify_disabled {
sigverify::ed25519_verify_disabled(&msgs)
} else {
sigverify::ed25519_verify_cpu(&msgs)
};
sender.send(msgs.into_iter().zip(r).collect())?;
}
sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
}
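The reworked `recv_loop` above only forwards votes while the PohRecorder has an active bank, and pairs each packet batch with its signature-verification results. A simplified, std-only sketch of that gating; the `Packet` type and verify functions here are illustrative stand-ins, not the real `sigverify` API:

```rust
#[derive(Clone, Debug)]
struct Packet {
    valid_sig: bool,
}

// Stand-in for sigverify::ed25519_verify_cpu: 1 = signature passed.
fn verify_cpu(batch: &[Packet]) -> Vec<u8> {
    batch.iter().map(|p| p.valid_sig as u8).collect()
}

// Stand-in for sigverify::ed25519_verify_disabled: everything passes.
fn verify_disabled(batch: &[Packet]) -> Vec<u8> {
    vec![1; batch.len()]
}

fn process(
    batch: Vec<Packet>,
    bank_active: bool,
    sigverify_disabled: bool,
) -> Option<(Vec<Packet>, Vec<u8>)> {
    if !bank_active {
        return None; // no bank: drop the votes rather than queue stale work
    }
    let r = if sigverify_disabled {
        verify_disabled(&batch)
    } else {
        verify_cpu(&batch)
    };
    Some((batch, r))
}

fn main() {
    let batch = vec![Packet { valid_sig: true }, Packet { valid_sig: false }];
    assert!(process(batch.clone(), false, false).is_none());
    assert_eq!(process(batch, true, false).unwrap().1, vec![1, 0]);
}
```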


@@ -3,14 +3,21 @@ use crate::blocktree::Blocktree;
///
/// All tests must start from an entry point and a funding keypair and
/// discover the rest of the network.
use crate::client::mk_client;
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo;
use crate::entry::{Entry, EntrySlice};
use crate::gossip_service::discover;
use crate::gossip_service::discover_nodes;
use crate::locktower::VOTE_THRESHOLD_DEPTH;
use crate::poh_service::PohServiceConfig;
use solana_client::thin_client::create_client;
use solana_sdk::client::SyncClient;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_transaction;
use solana_sdk::timing::{
duration_as_ms, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS, NUM_TICKS_PER_SECOND,
};
use std::io;
use std::thread::sleep;
use std::time::Duration;
@@ -22,45 +29,46 @@
funding_keypair: &Keypair,
nodes: usize,
) {
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes = discover_nodes(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes);
for ingress_node in &cluster_nodes {
let random_keypair = Keypair::new();
let mut client = mk_client(&ingress_node);
let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
let bal = client
.poll_get_balance(&funding_keypair.pubkey())
.expect("balance in source");
assert!(bal > 0);
let mut transaction = SystemTransaction::new_move(
let mut transaction = system_transaction::transfer(
&funding_keypair,
&random_keypair.pubkey(),
1,
client.get_recent_blockhash(),
client.get_recent_blockhash().unwrap(),
0,
);
let confs = VOTE_THRESHOLD_DEPTH + 1;
let sig = client
.retry_transfer(&funding_keypair, &mut transaction, 5)
.retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 5, confs)
.unwrap();
for validator in &cluster_nodes {
let mut client = mk_client(&validator);
client.poll_for_signature(&sig).unwrap();
let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
client.poll_for_signature_confirmation(&sig, confs).unwrap();
}
}
}
pub fn send_many_transactions(node: &ContactInfo, funding_keypair: &Keypair, num_txs: u64) {
let mut client = mk_client(node);
let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
for _ in 0..num_txs {
let random_keypair = Keypair::new();
let bal = client
.poll_get_balance(&funding_keypair.pubkey())
.expect("balance in source");
assert!(bal > 0);
let mut transaction = SystemTransaction::new_move(
let mut transaction = system_transaction::transfer(
&funding_keypair,
&random_keypair.pubkey(),
1,
client.get_recent_blockhash(),
client.get_recent_blockhash().unwrap(),
0,
);
client
@@ -70,15 +78,15 @@ pub fn send_many_transactions(node: &ContactInfo, funding_keypair: &Keypair, num
}
pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) {
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes = discover_nodes(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes);
for node in &cluster_nodes {
let mut client = mk_client(&node);
let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
assert!(client.fullnode_exit().unwrap());
}
sleep(Duration::from_millis(SLOT_MILLIS));
for node in &cluster_nodes {
let mut client = mk_client(&node);
let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
assert!(client.fullnode_exit().is_err());
}
}
@@ -116,53 +124,121 @@ pub fn verify_ledger_ticks(ledger_path: &str, ticks_per_slot: usize) {
}
}
pub fn sleep_n_epochs(
num_epochs: f64,
config: &PohServiceConfig,
ticks_per_slot: u64,
slots_per_epoch: u64,
) {
let num_ticks_per_second = {
match config {
PohServiceConfig::Sleep(d) => (1000 / duration_as_ms(d)) as f64,
_ => panic!("Unsupported tick config for testing"),
}
};
let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
sleep(Duration::from_secs(
((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64,
));
}
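`sleep_n_epochs` converts an epoch count to whole seconds with a ceiling division over the tick rate. A worked example of that arithmetic with made-up numbers:

```rust
// Same ceiling math as sleep_n_epochs, isolated for illustration.
fn seconds_to_sleep(
    num_epochs: f64,
    ticks_per_slot: u64,
    slots_per_epoch: u64,
    ticks_per_second: f64,
) -> u64 {
    let ticks = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
    // Round up so we never sleep for less than the requested number of ticks.
    ((ticks + ticks_per_second - 1.0) / ticks_per_second) as u64
}

fn main() {
    // 1 epoch of 8 slots * 8 ticks at 10 ticks/sec = 64 ticks -> ceil(64/10) = 7s.
    assert_eq!(seconds_to_sleep(1.0, 8, 8, 10.0), 7);
}
```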
pub fn kill_entry_and_spend_and_verify_rest(
entry_point_info: &ContactInfo,
funding_keypair: &Keypair,
nodes: usize,
) {
solana_logger::setup();
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes = discover_nodes(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes);
let mut client = mk_client(&entry_point_info);
info!("sleeping for an epoch");
sleep(Duration::from_millis(SLOT_MILLIS * DEFAULT_SLOTS_PER_EPOCH));
info!("done sleeping for an epoch");
let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
info!("sleeping for 2 leader fortnights");
sleep(Duration::from_millis(
SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS * 2,
));
info!("done sleeping for 2 fortnights");
info!("killing entry point");
assert!(client.fullnode_exit().unwrap());
info!("sleeping for a slot");
sleep(Duration::from_millis(SLOT_MILLIS));
info!("done sleeping for a slot");
info!("sleeping for 2 leader fortnights");
sleep(Duration::from_millis(
SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS,
));
info!("done sleeping for 2 fortnights");
for ingress_node in &cluster_nodes {
if ingress_node.id == entry_point_info.id {
continue;
}
let random_keypair = Keypair::new();
let mut client = mk_client(&ingress_node);
let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
let bal = client
.poll_get_balance(&funding_keypair.pubkey())
.expect("balance in source");
assert!(bal > 0);
let mut transaction = SystemTransaction::new_move(
&funding_keypair,
&random_keypair.pubkey(),
1,
client.get_recent_blockhash(),
0,
);
let sig = client
.retry_transfer(&funding_keypair, &mut transaction, 5)
.unwrap();
for validator in &cluster_nodes {
if validator.id == entry_point_info.id {
continue;
let mut result = Ok(());
let mut retries = 0;
loop {
retries += 1;
if retries > 5 {
result.unwrap();
}
let random_keypair = Keypair::new();
let mut transaction = system_transaction::transfer(
&funding_keypair,
&random_keypair.pubkey(),
1,
client.get_recent_blockhash().unwrap(),
0,
);
let confs = VOTE_THRESHOLD_DEPTH + 1;
let sig = {
let sig = client.retry_transfer_until_confirmed(
&funding_keypair,
&mut transaction,
5,
confs,
);
match sig {
Err(e) => {
result = Err(e);
continue;
}
Ok(sig) => sig,
}
};
match poll_all_nodes_for_signature(&entry_point_info, &cluster_nodes, &sig, confs) {
Err(e) => {
result = Err(e);
}
Ok(()) => {
break;
}
}
let mut client = mk_client(&validator);
client.poll_for_signature(&sig).unwrap();
}
}
}
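The loop above bounds the retries at five, remembering the most recent error and surfacing it with `unwrap()` once the budget is exhausted. A tiny standalone sketch of the same pattern, with a hypothetical `flaky` operation in place of the transfer:

```rust
// Fails the first two attempts, then succeeds (illustrative only).
fn flaky(attempt: u32) -> Result<u32, String> {
    if attempt < 3 {
        Err(format!("transient failure {}", attempt))
    } else {
        Ok(attempt)
    }
}

fn main() {
    let mut result: Result<u32, String> = Err("not yet run".to_string());
    let mut retries = 0;
    let value = loop {
        retries += 1;
        if retries > 5 {
            result.unwrap(); // panics with the last error, as in the code above
        }
        match flaky(retries) {
            Err(e) => {
                result = Err(e); // remember the most recent failure
                continue;
            }
            Ok(v) => break v,
        }
    };
    assert_eq!(value, 3);
}
```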
fn poll_all_nodes_for_signature(
entry_point_info: &ContactInfo,
cluster_nodes: &[ContactInfo],
sig: &Signature,
confs: usize,
) -> io::Result<()> {
for validator in cluster_nodes {
if validator.id == entry_point_info.id {
continue;
}
let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
client.poll_for_signature_confirmation(&sig, confs)?;
}
Ok(())
}
fn get_and_verify_slot_entries(blocktree: &Blocktree, slot: u64, last_entry: &Hash) -> Vec<Entry> {
let entries = blocktree.get_slot_entries(slot, 0, None).unwrap();
assert!(entries.verify(last_entry));


@ -1,7 +1,10 @@
use crate::rpc_service::RPC_PORT;
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signable, Signature};
#[cfg(test)]
use solana_sdk::rpc_port;
#[cfg(test)]
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::signature::{Signable, Signature};
use solana_sdk::timing::timestamp;
use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
@@ -130,7 +133,7 @@ impl ContactInfo {
let addr = socketaddr!("224.0.1.255:1000");
assert!(addr.ip().is_multicast());
Self::new(
&Keypair::new().pubkey(),
&Pubkey::new_rand(),
addr,
addr,
addr,
@@ -141,18 +144,21 @@
0,
)
}
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
nxt_addr.set_port(addr.port() + nxt);
nxt_addr
}
#[cfg(test)]
fn new_with_pubkey_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
nxt_addr.set_port(addr.port() + nxt);
nxt_addr
}
let tpu_addr = *bind_addr;
let gossip_addr = Self::next_port(&bind_addr, 1);
let tvu_addr = Self::next_port(&bind_addr, 2);
let tpu_via_blobs_addr = Self::next_port(&bind_addr, 3);
let rpc_addr = SocketAddr::new(bind_addr.ip(), RPC_PORT);
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), RPC_PORT + 1);
let gossip_addr = next_port(&bind_addr, 1);
let tvu_addr = next_port(&bind_addr, 2);
let tpu_via_blobs_addr = next_port(&bind_addr, 3);
let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
Self::new(
pubkey,
gossip_addr,
@@ -165,7 +171,9 @@ impl ContactInfo {
timestamp(),
)
}
pub fn new_with_socketaddr(bind_addr: &SocketAddr) -> Self {
#[cfg(test)]
pub(crate) fn new_with_socketaddr(bind_addr: &SocketAddr) -> Self {
let keypair = Keypair::new();
Self::new_with_pubkey_socketaddr(&keypair.pubkey(), bind_addr)
}
@@ -185,17 +193,23 @@ impl ContactInfo {
timestamp(),
)
}
fn is_valid_ip(addr: IpAddr) -> bool {
!(addr.is_unspecified() || addr.is_multicast())
// || (addr.is_loopback() && !cfg_test))
// TODO: boot loopback in production networks
}
/// port must not be 0
/// ip must be specified and not multicast
/// loopback ip is only allowed in tests
pub fn is_valid_address(addr: &SocketAddr) -> bool {
(addr.port() != 0) && Self::is_valid_ip(addr.ip())
}
pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) {
(self.rpc, self.tpu)
}
}
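A compact, std-only sketch of the `is_valid_address` rule documented above — same logic, written as a free-standing function rather than an associated one:

```rust
use std::net::SocketAddr;

// A usable address has a non-zero port and an IP that is neither
// unspecified (0.0.0.0) nor multicast.
fn is_valid_address(addr: &SocketAddr) -> bool {
    addr.port() != 0 && !(addr.ip().is_unspecified() || addr.ip().is_multicast())
}

fn main() {
    let good: SocketAddr = "10.0.0.1:8000".parse().unwrap();
    let zero_port: SocketAddr = "10.0.0.1:0".parse().unwrap();
    let unspecified: SocketAddr = "0.0.0.0:8000".parse().unwrap();
    let multicast: SocketAddr = "224.0.1.255:1000".parse().unwrap();
    assert!(is_valid_address(&good));
    assert!(!is_valid_address(&zero_port));
    assert!(!is_valid_address(&unspecified));
    assert!(!is_valid_address(&multicast));
}
```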
impl Signable for ContactInfo {
@@ -210,6 +224,7 @@ impl Signable for ContactInfo {
gossip: SocketAddr,
tvu: SocketAddr,
tpu: SocketAddr,
tpu_via_blobs: SocketAddr,
storage_addr: SocketAddr,
rpc: SocketAddr,
rpc_pubsub: SocketAddr,
@@ -223,6 +238,7 @@ impl Signable for ContactInfo {
tvu: me.tvu,
tpu: me.tpu,
storage_addr: me.storage_addr,
tpu_via_blobs: me.tpu_via_blobs,
rpc: me.rpc,
rpc_pubsub: me.rpc_pubsub,
wallclock: me.wallclock,


@@ -165,7 +165,6 @@ impl Crds {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_insert() {
@@ -302,11 +301,11 @@ mod test {
fn test_label_order() {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0)),
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
);
let v2 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0)),
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
);
assert_ne!(v1, v2);
assert!(!(v1 == v2));


@@ -209,18 +209,16 @@ impl CrdsGossipPull {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_new_pull_with_stakes() {
let mut crds = Crds::default();
let mut stakes = HashMap::new();
let node = CrdsGossipPull::default();
let me = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let me = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
crds.insert(me.clone(), 0).unwrap();
for i in 1..=30 {
let entry =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let id = entry.label().pubkey();
crds.insert(entry.clone(), 0).unwrap();
stakes.insert(id, i * 100);
@@ -239,7 +237,7 @@ mod test {
#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
@@ -253,7 +251,7 @@
Err(CrdsGossipError::NoPeers)
);
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new());
let (to, _, self_info) = req.unwrap();
@@ -264,13 +262,13 @@
#[test]
fn test_new_mark_creation_time() {
let mut crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let node_id = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
crds.insert(old.clone(), 0).unwrap();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
crds.insert(new.clone(), 0).unwrap();
// set request creation time to max_value
@@ -288,11 +286,11 @@
#[test]
fn test_process_pull_request() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let node_id = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
node_crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&node_crds, &node_id, 0, &HashMap::new());
@@ -320,17 +318,17 @@
#[test]
fn test_process_pull_request_response() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let node_id = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
node_crds.insert(new.clone(), 0).unwrap();
let mut dest = CrdsGossipPull::default();
let mut dest_crds = Crds::default();
let new_id = Keypair::new().pubkey();
let new_id = Pubkey::new_rand();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 1));
dest_crds.insert(new.clone(), 0).unwrap();
@@ -384,12 +382,12 @@
#[test]
fn test_gossip_purge() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let node_label = entry.label();
let node_id = node_label.pubkey();
let mut node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
node_crds.insert(old.clone(), 0).unwrap();
let value_hash = node_crds.lookup_versioned(&old.label()).unwrap().value_hash;


@@ -266,13 +266,12 @@ impl CrdsGossipPush {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_process_push() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let label = value.label();
// push a new message
assert_eq!(
@@ -291,7 +290,7 @@ mod test {
fn test_process_push_old_version() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;
let value = CrdsValue::ContactInfo(ci.clone());
@@ -311,7 +310,7 @@
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let timeout = push.msg_timeout;
let mut ci = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
// push a version too far in the future
ci.wallclock = timeout + 1;
@@ -333,7 +332,7 @@
fn test_process_push_update() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 0;
let value_old = CrdsValue::ContactInfo(ci.clone());
@@ -366,15 +365,13 @@
solana_logger::setup();
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value1 =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let value1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
for _ in 0..30 {
@@ -386,8 +383,7 @@
assert!(push.active_set.get(&value2.label().pubkey()).is_some());
for _ in 0..push.num_active {
let value2 =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
@@ -401,7 +397,7 @@
let mut stakes = HashMap::new();
for i in 1..=100 {
let peer =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), time));
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), time));
let id = peer.label().pubkey();
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
@@ -419,12 +415,11 @@
fn test_new_push_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
let new_msg =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(
push.process_push_message(&mut crds, new_msg.clone(), 0),
Ok(None)
@@ -439,12 +434,11 @@
fn test_process_prune() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
let new_msg =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(
push.process_push_message(&mut crds, new_msg.clone(), 0),
Ok(None)
@@ -459,11 +453,11 @@
fn test_purge_old_pending_push_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Keypair::new().pubkey(), 0));
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
let mut ci = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;
let new_msg = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
@@ -481,7 +475,7 @@
fn test_purge_old_pushed_once_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Keypair::new().pubkey(), 0);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
let label = value.label();

View File

@@ -1,4 +1,5 @@
use crate::contact_info::ContactInfo;
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signable, Signature};
use solana_sdk::transaction::Transaction;
@@ -15,30 +16,37 @@
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote {
pub from: Pubkey,
pub transaction: Transaction,
pub signature: Signature,
pub wallclock: u64,
}
impl Signable for Vote {
fn sign(&mut self, _keypair: &Keypair) {}
fn verify(&self) -> bool {
self.transaction.verify_signature()
}
fn pubkey(&self) -> Pubkey {
self.transaction.account_keys[0]
self.from
}
fn signable_data(&self) -> Vec<u8> {
vec![]
#[derive(Serialize)]
struct SignData {
transaction: Transaction,
wallclock: u64,
}
let data = SignData {
transaction: self.transaction.clone(),
wallclock: self.wallclock,
};
serialize(&data).expect("unable to serialize Vote")
}
fn get_signature(&self) -> Signature {
Signature::default()
self.signature
}
fn set_signature(&mut self, _signature: Signature) {}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}
}
/// Type of the replicated value
@@ -68,10 +76,11 @@ impl CrdsValueLabel {
}
impl Vote {
// TODO: it might make sense for the transaction to encode the wallclock in the userdata
pub fn new(transaction: Transaction, wallclock: u64) -> Self {
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
Vote {
from: *from,
transaction,
signature: Signature::default(),
wallclock,
}
}
@@ -177,21 +186,31 @@ mod test {
let key = v.clone().contact_info().unwrap().id;
assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
let v = CrdsValue::Vote(Vote::new(test_tx(), 0));
let v = CrdsValue::Vote(Vote::new(&Pubkey::default(), test_tx(), 0));
assert_eq!(v.wallclock(), 0);
let key = v.clone().vote().unwrap().transaction.account_keys[0];
let key = v.clone().vote().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::Vote(key));
}
#[test]
fn test_signature() {
let keypair = Keypair::new();
let fake_keypair = Keypair::new();
let wrong_keypair = Keypair::new();
let mut v =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&keypair.pubkey(), timestamp()));
v.sign(&keypair);
assert!(v.verify());
v.sign(&fake_keypair);
assert!(!v.verify());
verify_signatures(&mut v, &keypair, &wrong_keypair);
v = CrdsValue::Vote(Vote::new(&keypair.pubkey(), test_tx(), timestamp()));
verify_signatures(&mut v, &keypair, &wrong_keypair);
}
fn verify_signatures(
value: &mut CrdsValue,
correct_keypair: &Keypair,
wrong_keypair: &Keypair,
) {
assert!(!value.verify());
value.sign(&correct_keypair);
assert!(value.verify());
value.sign(&wrong_keypair);
assert!(!value.verify());
}
}
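The point of the new `Vote` fields is that the signature covers a serialization of (transaction, wallclock) while excluding the signature field itself, which is exactly what `verify_signatures` exercises. An illustrative sketch of that pattern with a keyed u64 hash standing in for ed25519 signing (not the real `Signable` trait):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Vote {
    from: u64,
    transaction: Vec<u8>,
    wallclock: u64,
    signature: u64,
}

impl Vote {
    // Serialize only the fields the signature is supposed to cover.
    fn signable_data(&self) -> Vec<u8> {
        let mut data = self.transaction.clone();
        data.extend_from_slice(&self.wallclock.to_le_bytes());
        data
    }

    fn sign(&mut self, key: u64) {
        self.signature = Self::fake_sign(key, &self.signable_data());
    }

    fn verify(&self, key: u64) -> bool {
        self.signature == Self::fake_sign(key, &self.signable_data())
    }

    // Keyed hash as a toy signature; real code uses ed25519.
    fn fake_sign(key: u64, data: &[u8]) -> u64 {
        let mut h = DefaultHasher::new();
        (key, data).hash(&mut h);
        h.finish()
    }
}

fn main() {
    let mut v = Vote { from: 1, transaction: vec![1, 2, 3], wallclock: 42, signature: 0 };
    v.sign(7);
    assert!(v.verify(7));
    assert!(!v.verify(8)); // wrong key fails, as in verify_signatures above
    assert_eq!(v.from, 1);
}
```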


@@ -1,474 +0,0 @@
//! Set of functions for emulating windowing functions from a database ledger implementation
use crate::blocktree::*;
#[cfg(feature = "erasure")]
use crate::erasure;
use crate::packet::{SharedBlob, BLOB_HEADER_SIZE};
use crate::result::Result;
use crate::streamer::BlobSender;
use solana_metrics::counter::Counter;
use solana_sdk::pubkey::Pubkey;
use std::borrow::Borrow;
use std::sync::Arc;
pub const MAX_REPAIR_LENGTH: usize = 128;
pub fn retransmit_blobs(dq: &[SharedBlob], retransmit: &BlobSender, id: &Pubkey) -> Result<()> {
let mut retransmit_queue: Vec<SharedBlob> = Vec::new();
for b in dq {
// Don't add blobs generated by this node to the retransmit queue
if b.read().unwrap().id() != *id {
retransmit_queue.push(b.clone());
}
}
if !retransmit_queue.is_empty() {
inc_new_counter_info!("streamer-recv_window-retransmit", retransmit_queue.len());
retransmit.send(retransmit_queue)?;
}
Ok(())
}
/// Process a blob: Add blob to the ledger window.
pub fn process_blob(blocktree: &Arc<Blocktree>, blob: &SharedBlob) -> Result<()> {
let is_coding = blob.read().unwrap().is_coding();
// Check if the blob is in the range of our known leaders. If not, we return.
let (slot, pix) = {
let r_blob = blob.read().unwrap();
(r_blob.slot(), r_blob.index())
};
// TODO: Once the original leader signature is added to the blob, make sure that
// the blob was originally generated by the expected leader for this slot
// Insert the new blob into block tree
if is_coding {
let blob = &blob.read().unwrap();
blocktree.put_coding_blob_bytes(slot, pix, &blob.data[..BLOB_HEADER_SIZE + blob.size()])?;
} else {
blocktree.insert_data_blobs(vec![(*blob.read().unwrap()).borrow()])?;
}
#[cfg(feature = "erasure")]
{
// TODO: Support per-slot erasure. Issue: https://github.com/solana-labs/solana/issues/2441
if let Err(e) = try_erasure(blocktree, 0) {
trace!(
"erasure::recover failed to write recovered coding blobs. Err: {:?}",
e
);
}
}
Ok(())
}
#[cfg(feature = "erasure")]
fn try_erasure(blocktree: &Arc<Blocktree>, slot_index: u64) -> Result<()> {
let meta = blocktree.meta(slot_index)?;
if let Some(meta) = meta {
let (data, coding) = erasure::recover(blocktree, slot_index, meta.consumed)?;
for c in coding {
let c = c.read().unwrap();
blocktree.put_coding_blob_bytes(
0,
c.index(),
&c.data[..BLOB_HEADER_SIZE + c.size()],
)?;
}
blocktree.write_shared_blobs(data)
} else {
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
#[cfg(all(feature = "erasure", test))]
use crate::entry::reconstruct_entries_from_blobs;
use crate::entry::{make_tiny_test_entries, EntrySlice};
#[cfg(all(feature = "erasure", test))]
use crate::erasure::test::{generate_blocktree_from_window, setup_window_ledger};
#[cfg(all(feature = "erasure", test))]
use crate::erasure::{NUM_CODING, NUM_DATA};
use crate::packet::{index_blobs, Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
use crate::streamer::{receiver, responder, PacketReceiver};
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::time::Duration;
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
e => info!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn streamer_debug() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn streamer_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let (s_reader, r_reader) = channel();
let t_receiver = receiver(Arc::new(read), &exit, s_reader, "window-streamer-test");
let t_responder = {
let (s_responder, r_responder) = channel();
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
let mut msgs = Vec::new();
for i in 0..10 {
let b = SharedBlob::default();
{
let mut w = b.write().unwrap();
w.data[0] = i as u8;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
msgs.push(b);
}
s_responder.send(msgs).expect("send");
t_responder
};
let mut num = 0;
get_msgs(r_reader, &mut num);
assert_eq!(num, 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
#[test]
pub fn test_find_missing_data_indexes_sanity() {
let slot = 0;
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Early exit conditions
let empty: Vec<u64> = vec![];
assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 0, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 5, 5, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty);
let mut blobs = make_tiny_test_entries(2).to_blobs();
const ONE: u64 = 1;
const OTHER: u64 = 4;
blobs[0].set_index(ONE);
blobs[1].set_index(OTHER);
// Insert one blob at index = first_index
blocktree.write_blobs(&blobs).unwrap();
const STARTS: u64 = OTHER * 2;
const END: u64 = OTHER * 3;
const MAX: usize = 10;
// The first blob has index = first_index. Thus, for i < first_index,
// given the input range of [i, first_index], the missing indexes should be
// [i, first_index - 1]
for start in 0..STARTS {
let result = blocktree.find_missing_data_indexes(
slot, start, // start
END, //end
MAX, //max
);
let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect();
assert_eq!(result, expected);
}
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_find_missing_data_indexes() {
let slot = 0;
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let gap = 10;
assert!(gap > 3);
let num_entries = 10;
let mut blobs = make_tiny_test_entries(num_entries).to_blobs();
for (i, b) in blobs.iter_mut().enumerate() {
b.set_index(i as u64 * gap);
b.set_slot(slot);
}
blocktree.write_blobs(&blobs).unwrap();
// Index of the first blob is 0
// Index of the second blob is "gap"
// Thus, the missing indexes should then be [1, gap - 1] for the input index
// range of [0, gap)
let expected: Vec<u64> = (1..gap).collect();
assert_eq!(
blocktree.find_missing_data_indexes(slot, 0, gap, gap as usize),
expected
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, 1, gap, (gap - 1) as usize),
expected,
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, 0, gap - 1, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, gap - 2, gap, gap as usize),
vec![gap - 2, gap - 1],
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, gap - 2, gap, 1),
vec![gap - 2],
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, 0, gap, 1),
vec![1],
);
// Test with end indexes that are greater than the last item in the ledger
let mut expected: Vec<u64> = (1..gap).collect();
expected.push(gap + 1);
assert_eq!(
blocktree.find_missing_data_indexes(slot, 0, gap + 2, (gap + 2) as usize),
expected,
);
assert_eq!(
blocktree.find_missing_data_indexes(slot, 0, gap + 2, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
for i in 0..num_entries as u64 {
for j in 0..i {
let expected: Vec<u64> = (j..i)
.flat_map(|k| {
let begin = k * gap + 1;
let end = (k + 1) * gap;
(begin..end)
})
.collect();
assert_eq!(
blocktree.find_missing_data_indexes(
slot,
j * gap,
i * gap,
((i - j) * gap) as usize
),
expected,
);
}
}
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_find_missing_data_indexes_slots() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_entries_per_slot = 10;
let num_slots = 2;
let mut blobs = make_tiny_test_entries(num_slots * num_entries_per_slot).to_blobs();
// Insert every nth entry for each slot
let nth = 3;
for (i, b) in blobs.iter_mut().enumerate() {
b.set_index(((i % num_entries_per_slot) * nth) as u64);
b.set_slot((i / num_entries_per_slot) as u64);
}
blocktree.write_blobs(&blobs).unwrap();
let mut expected: Vec<u64> = (0..num_entries_per_slot)
.flat_map(|x| ((nth * x + 1) as u64..(nth * x + nth) as u64))
.collect();
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
for slot in 0..num_slots {
assert_eq!(
blocktree.find_missing_data_indexes(
slot as u64,
0,
(num_entries_per_slot * nth) as u64,
num_entries_per_slot * nth as usize
),
expected,
);
}
// Test with a limit on the number of returned entries
for slot in 0..num_slots {
assert_eq!(
blocktree.find_missing_data_indexes(
slot as u64,
0,
(num_entries_per_slot * nth) as u64,
num_entries_per_slot * (nth - 1)
)[..],
expected[..num_entries_per_slot * (nth - 1)],
);
}
// Try to find entries in the range [num_entries_per_slot * nth..num_entries_per_slot * (nth + 1))
// that don't exist in the ledger.
let extra_entries =
(num_entries_per_slot * nth) as u64..(num_entries_per_slot * (nth + 1)) as u64;
expected.extend(extra_entries);
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
for slot in 0..num_slots {
assert_eq!(
blocktree.find_missing_data_indexes(
slot as u64,
0,
(num_entries_per_slot * (nth + 1)) as u64,
num_entries_per_slot * (nth + 1),
),
expected,
);
}
}
#[test]
pub fn test_no_missing_blob_indexes() {
let slot = 0;
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_entries = 10;
let shared_blobs = make_tiny_test_entries(num_entries).to_shared_blobs();
index_blobs(&shared_blobs, &Keypair::new().pubkey(), 0, slot, 0);
let blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.read().unwrap()).collect();
let blobs: Vec<&Blob> = blob_locks.iter().map(|b| &**b).collect();
blocktree.write_blobs(blobs).unwrap();
let empty: Vec<u64> = vec![];
for i in 0..num_entries as u64 {
for j in 0..i {
assert_eq!(
blocktree.find_missing_data_indexes(slot, j, i, (i - j) as usize),
empty
);
}
}
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[cfg(all(feature = "erasure", test))]
#[test]
pub fn test_try_erasure() {
// Setup the window
let offset = 0;
let num_blobs = NUM_DATA + 2;
let slot = 0;
let mut window = setup_window_ledger(offset, num_blobs, false, slot);
let end_index = (offset + num_blobs) % window.len();
// Test erasing a data block and an erasure block
let coding_start = offset - (offset % NUM_DATA) + (NUM_DATA - NUM_CODING);
let erased_index = coding_start % window.len();
// Create a hole in the window
let erased_data = window[erased_index].data.clone();
let erased_coding = window[erased_index].coding.clone().unwrap();
window[erased_index].data = None;
window[erased_index].coding = None;
// Generate the blocktree from the window
let ledger_path = get_tmp_ledger_path!();
let blocktree = Arc::new(generate_blocktree_from_window(&ledger_path, &window, false));
try_erasure(&blocktree, 0).expect("Expected successful erasure attempt");
window[erased_index].data = erased_data;
{
let data_blobs: Vec<_> = window[erased_index..end_index]
.iter()
.map(|entry| entry.data.clone().unwrap())
.collect();
let locks: Vec<_> = data_blobs.iter().map(|blob| blob.read().unwrap()).collect();
let locked_data: Vec<&Blob> = locks.iter().map(|lock| &**lock).collect();
let (expected, _) = reconstruct_entries_from_blobs(locked_data).unwrap();
assert_eq!(
blocktree
.get_slot_entries(
0,
erased_index as u64,
Some((end_index - erased_index) as u64)
)
.unwrap(),
expected
);
}
let erased_coding_l = erased_coding.read().unwrap();
assert_eq!(
&blocktree
.get_coding_blob_bytes(slot, erased_index as u64)
.unwrap()
.unwrap()[BLOB_HEADER_SIZE..],
&erased_coding_l.data()[..erased_coding_l.size() as usize],
);
}
#[test]
fn test_process_blob() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let num_entries = 10;
let original_entries = make_tiny_test_entries(num_entries);
let shared_blobs = original_entries.clone().to_shared_blobs();
index_blobs(&shared_blobs, &Keypair::new().pubkey(), 0, 0, 0);
for blob in shared_blobs.iter().rev() {
process_blob(&blocktree, blob).expect("Expect successful processing of blob");
}
assert_eq!(
blocktree.get_slot_entries(0, 0, None).unwrap(),
original_entries
);
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}


@@ -5,18 +5,14 @@
use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE};
use crate::poh::Poh;
use crate::result::Result;
use bincode::{deserialize, serialize_into, serialized_size};
use bincode::{deserialize, serialized_size};
use chrono::prelude::Utc;
use rayon::prelude::*;
use solana_budget_api::budget_transaction::BudgetTransaction;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_budget_api::budget_instruction;
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::transaction::Transaction;
use solana_vote_api::vote_instruction::Vote;
use solana_vote_api::vote_transaction::VoteTransaction;
use std::borrow::Borrow;
use std::io::Cursor;
use std::mem::size_of;
use std::sync::mpsc::{Receiver, Sender};
use std::sync::{Arc, RwLock};
@@ -102,62 +98,19 @@ impl Entry {
}
pub fn to_blob(&self) -> Blob {
let mut blob = Blob::default();
let pos = {
let mut out = Cursor::new(blob.data_mut());
serialize_into(&mut out, &self).expect("failed to serialize output");
out.position() as usize
};
blob.set_size(pos);
blob
Blob::from_serializable(&vec![&self])
}
/// Estimate serialized_size of Entry without creating an Entry.
pub fn serialized_size(transactions: &[Transaction]) -> u64 {
let txs_size: u64 = transactions
.iter()
.map(|tx| tx.serialized_size().unwrap())
.map(|tx| serialized_size(tx).unwrap())
.sum();
// num_hashes + hash + txs
(2 * size_of::<u64>() + size_of::<Hash>()) as u64 + txs_size
}
pub fn num_will_fit(transactions: &[Transaction]) -> usize {
if transactions.is_empty() {
return 0;
}
let mut num = transactions.len();
let mut upper = transactions.len();
let mut lower = 1; // if one won't fit, we have a lot of TODOs
let mut next = transactions.len(); // optimistic
loop {
debug!(
"num {}, upper {} lower {} next {} transactions.len() {}",
num,
upper,
lower,
next,
transactions.len()
);
if Self::serialized_size(&transactions[..num]) <= BLOB_DATA_SIZE as u64 {
next = (upper + num) / 2;
lower = num;
debug!("num {} fits, maybe too well? trying {}", num, next);
} else {
next = (lower + num) / 2;
upper = num;
debug!("num {} doesn't fit! trying {}", num, next);
}
// same as last time
if next == num {
debug!("converged on num {}", num);
break;
}
num = next;
}
num
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(
start_hash: &mut Hash,
@@ -202,6 +155,17 @@
}
}
pub fn hash_transactions(transactions: &[Transaction]) -> Hash {
// a hash of a slice of transactions only needs to hash the signatures
let mut hasher = Hasher::default();
transactions.iter().for_each(|tx| {
if !tx.signatures.is_empty() {
hasher.hash(&tx.signatures[0].as_ref());
}
});
hasher.result()
}
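`hash_transactions` relies on each ed25519 signature already committing to its transaction's content, so hashing only the first signature per transaction is enough to commit to the whole slice. A toy demonstration of that property using `DefaultHasher` and integer "signatures":

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in: each transaction is represented by its list of u64 "signatures".
fn hash_first_signatures(txs: &[Vec<u64>]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for sigs in txs {
        if let Some(first) = sigs.first() {
            first.hash(&mut hasher); // unsigned transactions are skipped, as above
        }
    }
    hasher.finish()
}

fn main() {
    let a = vec![vec![11, 12], vec![21]];
    let b = vec![vec![11, 99], vec![21]]; // differs only in a non-first signature
    assert_eq!(hash_first_signatures(&a), hash_first_signatures(&b));
}
```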
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature. If num_hashes is zero and there's no transaction data,
@@ -220,7 +184,7 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -
if transactions.is_empty() {
poh.tick().hash
} else {
poh.record(Transaction::hash(transactions)).hash
poh.record(hash_transactions(transactions)).hash
}
}
@@ -233,15 +197,14 @@
let mut num_ticks = 0;
for blob in blobs.into_iter() {
let entry: Entry = {
let new_entries: Vec<Entry> = {
let msg_size = blob.borrow().size();
deserialize(&blob.borrow().data()[..msg_size])?
};
if entry.is_tick() {
num_ticks += 1
}
entries.push(entry)
let num_new_ticks: u64 = new_entries.iter().map(|entry| entry.is_tick() as u64).sum();
num_ticks += num_new_ticks;
entries.extend(new_entries)
}
Ok((entries, num_ticks))
}
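The reconstruction change above means each blob now deserializes to a `Vec<Entry>` rather than a single entry, with tick counting summed per batch. A self-contained sketch of the new shape, with a plain nested Vec standing in for the deserialized blobs:

```rust
#[derive(Clone)]
struct Entry {
    is_tick: bool,
}

// Each "blob" now carries a Vec of entries; ticks are summed per batch.
fn reconstruct(blobs: Vec<Vec<Entry>>) -> (Vec<Entry>, u64) {
    let mut entries = Vec::new();
    let mut num_ticks = 0u64;
    for new_entries in blobs {
        num_ticks += new_entries.iter().map(|e| e.is_tick as u64).sum::<u64>();
        entries.extend(new_entries);
    }
    (entries, num_ticks)
}

fn main() {
    let tick = Entry { is_tick: true };
    let tx = Entry { is_tick: false };
    let (all, ticks) = reconstruct(vec![vec![tick.clone(), tx], vec![tick]]);
    assert_eq!(all.len(), 3);
    assert_eq!(ticks, 2);
}
```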
@@ -252,7 +215,8 @@ pub trait EntrySlice {
fn verify(&self, start_hash: &Hash) -> bool;
fn to_shared_blobs(&self) -> Vec<SharedBlob>;
fn to_blobs(&self) -> Vec<Blob>;
fn votes(&self) -> Vec<(Pubkey, Vote, Hash)>;
fn to_single_entry_blobs(&self) -> Vec<Blob>;
fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob>;
}
impl EntrySlice for [Entry] {
@@ -278,23 +242,31 @@ impl EntrySlice for [Entry] {
}
fn to_blobs(&self) -> Vec<Blob> {
self.iter().map(|entry| entry.to_blob()).collect()
split_serializable_chunks(
&self,
BLOB_DATA_SIZE as u64,
&|s| bincode::serialized_size(&s).unwrap(),
&mut |entries: &[Entry]| Blob::from_serializable(entries),
)
}
fn to_shared_blobs(&self) -> Vec<SharedBlob> {
self.iter().map(|entry| entry.to_shared_blob()).collect()
self.to_blobs()
.into_iter()
.map(|b| Arc::new(RwLock::new(b)))
.collect()
}
fn votes(&self) -> Vec<(Pubkey, Vote, Hash)> {
self.iter()
.flat_map(|entry| {
entry
.transactions
.iter()
.flat_map(VoteTransaction::get_votes)
})
fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob> {
self.to_single_entry_blobs()
.into_iter()
.map(|b| Arc::new(RwLock::new(b)))
.collect()
}
fn to_single_entry_blobs(&self) -> Vec<Blob> {
self.iter().map(Entry::to_blob).collect()
}
}
pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
@@ -303,6 +275,67 @@ pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Trans
entry
}
pub fn num_will_fit<T, F>(serializables: &[T], max_size: u64, serialized_size: &F) -> usize
where
F: Fn(&[T]) -> u64,
{
if serializables.is_empty() {
return 0;
}
let mut num = serializables.len();
let mut upper = serializables.len();
let mut lower = 1; // if one won't fit, we have a lot of TODOs
let mut next = serializables.len(); // optimistic
loop {
debug!(
"num {}, upper {} lower {} next {} serializables.len() {}",
num,
upper,
lower,
next,
serializables.len()
);
if serialized_size(&serializables[..num]) <= max_size {
next = (upper + num) / 2;
lower = num;
debug!("num {} fits, maybe too well? trying {}", num, next);
} else {
next = (lower + num) / 2;
upper = num;
debug!("num {} doesn't fit! trying {}", num, next);
}
// same as last time
if next == num {
debug!("converged on num {}", num);
break;
}
num = next;
}
num
}
pub fn split_serializable_chunks<T, R, F1, F2>(
serializables: &[T],
max_size: u64,
serialized_size: &F1,
converter: &mut F2,
) -> Vec<R>
where
F1: Fn(&[T]) -> u64,
F2: FnMut(&[T]) -> R,
{
let mut result = vec![];
let mut chunk_start = 0;
while chunk_start < serializables.len() {
let chunk_end =
chunk_start + num_will_fit(&serializables[chunk_start..], max_size, serialized_size);
result.push(converter(&serializables[chunk_start..chunk_end]));
chunk_start = chunk_end;
}
result
}
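Usage sketch for the two helpers above, assuming `num_will_fit` and `split_serializable_chunks` from this file are in scope and using byte counts in place of `bincode::serialized_size`:

```rust
fn main() {
    let items: Vec<u32> = (0..10).collect();
    let size = |s: &[u32]| (s.len() * 4) as u64; // 4 bytes per u32
    // A 12-byte budget fits 3 items per chunk: [0,1,2], [3,4,5], [6,7,8], [9].
    let chunks = split_serializable_chunks(&items, 12, &size, &mut |c: &[u32]| c.to_vec());
    assert_eq!(chunks.len(), 4);
    assert_eq!(chunks[0], vec![0, 1, 2]);
    assert_eq!(chunks[3], vec![9]);
}
```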
/// Creates the next entries for given transactions, outputs
/// updates start_hash to hash of last Entry, sets num_hashes to 0
pub fn next_entries_mut(
@@ -310,61 +343,12 @@ pub fn next_entries_mut(
num_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
// TODO: ?? find a number that works better than |?
// V
if transactions.is_empty() || transactions.len() == 1 {
vec![Entry::new_mut(start_hash, num_hashes, transactions)]
} else {
let mut chunk_start = 0;
let mut entries = Vec::new();
while chunk_start < transactions.len() {
let mut chunk_end = transactions.len();
let mut upper = chunk_end;
let mut lower = chunk_start;
let mut next = chunk_end; // be optimistic that all will fit
// binary search for how many transactions will fit in an Entry (i.e. a BLOB)
loop {
debug!(
"chunk_end {}, upper {} lower {} next {} transactions.len() {}",
chunk_end,
upper,
lower,
next,
transactions.len()
);
if Entry::serialized_size(&transactions[chunk_start..chunk_end])
<= BLOB_DATA_SIZE as u64
{
next = (upper + chunk_end) / 2;
lower = chunk_end;
debug!(
"chunk_end {} fits, maybe too well? trying {}",
chunk_end, next
);
} else {
next = (lower + chunk_end) / 2;
upper = chunk_end;
debug!("chunk_end {} doesn't fit! trying {}", chunk_end, next);
}
// same as last time
if next == chunk_end {
debug!("converged on chunk_end {}", chunk_end);
break;
}
chunk_end = next;
}
entries.push(Entry::new_mut(
start_hash,
num_hashes,
transactions[chunk_start..chunk_end].to_vec(),
));
chunk_start = chunk_end;
}
entries
}
split_serializable_chunks(
&transactions[..],
BLOB_DATA_SIZE as u64,
&Entry::serialized_size,
&mut |txs: &[Transaction]| Entry::new_mut(start_hash, num_hashes, txs.to_vec()),
)
}
/// Creates the next Entries for given transactions
@@ -390,22 +374,15 @@ pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
pub fn make_tiny_test_entries_from_hash(start: &Hash, num: usize) -> Vec<Entry> {
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let mut hash = *start;
let mut num_hashes = 0;
(0..num)
.map(|_| {
Entry::new_mut(
&mut hash,
&mut num_hashes,
vec![BudgetTransaction::new_timestamp(
&keypair,
&keypair.pubkey(),
&keypair.pubkey(),
Utc::now(),
*start,
)],
)
let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], *start);
Entry::new_mut(&mut hash, &mut num_hashes, vec![tx])
})
.collect()
}
@@ -420,16 +397,12 @@ pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
let zero = Hash::default();
let one = solana_sdk::hash::hash(&zero.as_ref());
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let tx = BudgetTransaction::new_timestamp(
&keypair,
&keypair.pubkey(),
&keypair.pubkey(),
Utc::now(),
one,
);
let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], one);
let serialized_size = tx.serialized_size().unwrap();
let serialized_size = serialized_size(&tx).unwrap();
let num_txs = BLOB_DATA_SIZE / serialized_size as usize;
let txs = vec![tx; num_txs];
let entry = next_entries(&one, 1, txs)[0].clone();
@@ -438,7 +411,7 @@ pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
#[cfg(test)]
pub fn make_consecutive_blobs(
id: &Pubkey,
id: &solana_sdk::pubkey::Pubkey,
num_blobs_to_make: u64,
start_height: u64,
start_hash: Hash,
@@ -446,7 +419,7 @@
) -> Vec<SharedBlob> {
let entries = create_ticks(num_blobs_to_make, start_hash);
let blobs = entries.to_shared_blobs();
let blobs = entries.to_single_entry_shared_blobs();
let mut index = start_height;
for blob in &blobs {
let mut blob = blob.write().unwrap();
@@ -477,9 +450,35 @@ mod tests {
use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
use solana_sdk::hash::hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::system_transaction;
use solana_vote_api::vote_instruction;
use solana_vote_api::vote_state::Vote;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
let pubkey = keypair.pubkey();
let ixs = budget_instruction::payment(&pubkey, &pubkey, 1);
Transaction::new_signed_instructions(&[keypair], ixs, hash)
}
fn create_sample_timestamp(keypair: &Keypair, hash: Hash) -> Transaction {
let pubkey = keypair.pubkey();
let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
}
fn create_sample_signature(keypair: &Keypair, hash: Hash) -> Transaction {
let pubkey = keypair.pubkey();
let ix = budget_instruction::apply_signature(&pubkey, &pubkey, &pubkey);
Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
}
fn create_sample_vote(keypair: &Keypair, hash: Hash) -> Transaction {
let pubkey = keypair.pubkey();
let ix = vote_instruction::vote(&pubkey, vec![Vote::new(1)]);
Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
}
#[test]
fn test_entry_verify() {
let zero = Hash::default();
@@ -496,8 +495,8 @@
// First, verify entries
let keypair = Keypair::new();
let tx0 = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 0, zero, 0);
let tx1 = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, zero, 0);
let tx0 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero, 0);
let tx1 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 1, zero, 0);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
@@ -513,15 +512,8 @@
// First, verify entries
let keypair = Keypair::new();
let tx0 = BudgetTransaction::new_timestamp(
&keypair,
&keypair.pubkey(),
&keypair.pubkey(),
Utc::now(),
zero,
);
let tx1 =
BudgetTransaction::new_signature(&keypair, &keypair.pubkey(), &keypair.pubkey(), zero);
let tx0 = create_sample_timestamp(&keypair, zero);
let tx1 = create_sample_signature(&keypair, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
@@ -543,13 +535,7 @@
assert_eq!(tick.hash, zero);
let keypair = Keypair::new();
let tx0 = BudgetTransaction::new_timestamp(
&keypair,
&keypair.pubkey(),
&keypair.pubkey(),
Utc::now(),
zero,
);
let tx0 = create_sample_timestamp(&keypair, zero);
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
assert_eq!(entry0.num_hashes, 1);
assert_eq!(entry0.hash, next_hash(&zero, 1, &vec![tx0]));
@@ -560,7 +546,7 @@
fn test_next_entry_panic() {
let zero = Hash::default();
let keypair = Keypair::new();
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 0, zero, 0);
let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero, 0);
next_entry(&zero, 0, vec![tx]);
}
@@ -568,7 +554,7 @@
fn test_serialized_size() {
let zero = Hash::default();
let keypair = Keypair::new();
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 0, zero, 0);
let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero, 0);
let entry = next_entry(&zero, 1, vec![tx.clone()]);
assert_eq!(
Entry::serialized_size(&[tx]),
@@ -596,14 +582,8 @@
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx0 = VoteTransaction::new_vote(&vote_account.pubkey(), &vote_account, 1, one, 1);
let tx1 = BudgetTransaction::new_timestamp(
&keypair,
&keypair.pubkey(),
&keypair.pubkey(),
Utc::now(),
one,
);
let tx0 = create_sample_vote(&vote_account, one);
let tx1 = create_sample_timestamp(&keypair, one);
//
// TODO: this magic number and the mix of transaction types
// is designed to fill up a Blob more or less exactly,
@@ -620,7 +600,7 @@
}
#[test]
fn test_entries_to_shared_blobs() {
fn test_entries_to_blobs() {
solana_logger::setup();
let entries = make_test_entries();
@@ -629,6 +609,23 @@
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
}
#[test]
fn test_multiple_entries_to_blobs() {
solana_logger::setup();
let num_blobs = 10;
let serialized_size =
bincode::serialized_size(&make_tiny_test_entries_from_hash(&Hash::default(), 1))
.unwrap();
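        // Pick num_entries so the serialized entries need exactly num_blobs blobs.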
let num_entries = (num_blobs * BLOB_DATA_SIZE as u64) / serialized_size;
let entries = make_tiny_test_entries_from_hash(&Hash::default(), num_entries as usize);
let blob_q = entries.to_blobs();
assert_eq!(blob_q.len() as u64, num_blobs);
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
}
#[test]
fn test_bad_blobs_attack() {
solana_logger::setup();
@@ -644,12 +641,11 @@
let next_hash = solana_sdk::hash::hash(&hash.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx_small =
VoteTransaction::new_vote(&vote_account.pubkey(), &vote_account, 1, next_hash, 2);
let tx_large = BudgetTransaction::new_payment(&keypair, &keypair.pubkey(), 1, next_hash, 0);
let tx_small = create_sample_vote(&vote_account, next_hash);
let tx_large = create_sample_payment(&keypair, next_hash);
let tx_small_size = tx_small.serialized_size().unwrap() as usize;
let tx_large_size = tx_large.serialized_size().unwrap() as usize;
let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
let entry_size = serialized_size(&Entry {
num_hashes: 0,
hash: Hash::default(),
@@ -685,4 +681,40 @@
assert!(entries0.verify(&hash));
}
#[test]
fn test_num_will_fit_empty() {
let serializables: Vec<u32> = vec![];
let result = num_will_fit(&serializables[..], 8, &|_| 4);
assert_eq!(result, 0);
}
#[test]
fn test_num_fit() {
        let serializables_vec: Vec<u8> = (0..10).map(|_| 1).collect();
        let serializables = &serializables_vec[..];
        // `sum` sizes a slice of length n as 0 + 1 + ... + (n - 1) = n * (n - 1) / 2,
        // ignoring the element values themselves.
        let sum = |i: &[u8]| (0..i.len()).sum::<usize>() as u64;
        // The first 1 item sizes to 0 <= 0 and the first 2 to 1 > 0, so 1 item fits.
        let result = num_will_fit(serializables, 0, &sum);
        assert_eq!(result, 1);
        // The first 4 items size to 6 <= 8 and the first 5 to 10 > 8, so 4 items fit.
        let result = num_will_fit(serializables, 8, &sum);
        assert_eq!(result, 4);
        // The first 2 items size to 1 <= 1 and the first 3 to 3 > 1, so 2 items fit.
        let result = num_will_fit(serializables, 1, &sum);
        assert_eq!(result, 2);
        // All 10 items size to 45 <= 45, so everything fits.
        let result = num_will_fit(serializables, 45, &sum);
        assert_eq!(result, 10);
        // The first 9 items size to 36 <= 44 and all 10 to 45 > 44, so all but the last fit.
        let result = num_will_fit(serializables, 44, &sum);
        assert_eq!(result, 9);
        // All 10 items size to 45 <= 46, so everything still fits.
        let result = num_will_fit(serializables, 46, &sum);
        assert_eq!(result, 10);
}
}

Some files were not shown because too many files have changed in this diff.