Compare commits

...

318 Commits

Author SHA1 Message Date
Michael Vines
be5f2ef9b9 Consolidate CI jobs 2018-06-24 22:28:24 -07:00
Greg Fitzgerald
adfcb79387 Force install cargo-cov 2018-06-24 15:34:30 -06:00
Greg Fitzgerald
73c4c0ac5f Revert "cargo-cov installed by default in nightly?"
This reverts commit 6fc601f696.
2018-06-24 15:34:30 -06:00
Greg Fitzgerald
6fc601f696 cargo-cov installed by default in nightly? 2018-06-24 12:17:42 -06:00
Greg Fitzgerald
07111fb7bb Use llvm-cov instead of gcov
@marco-c called this a hack, but since grcov isn't working
out-of-the-box (panics on call to gcov), we'll take a stab at using
llvm-cov.
2018-06-24 12:17:42 -06:00
Greg Fitzgerald
a06d2170b0 No need for rustfmt on nightly 2018-06-24 12:17:42 -06:00
Greg Fitzgerald
7f53ea3bf3 Generate coverage with Rust nightly
Fixes #177

Thanks @marco-c!
2018-06-24 12:17:42 -06:00
Michael Vines
b2accd1c2a Run snap build sooner to better mask the delay 2018-06-24 10:24:32 -07:00
Anatoly Yakovenko
8ef8a8dea7 borrow checker 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
e929404676 comments 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
c2258bedae fixed! 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
215fdbb7ed nits 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
ee998f6882 fix docs 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
826e95afca fix logs 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
47583d48e7 get rid of dummy test 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
e759cdf061 tests 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
88503c2a09 generic array fail case 2018-06-24 11:17:55 -06:00
Tyera Eulberg
d5be23dffe fmt 2018-06-24 10:44:17 -06:00
Tyera Eulberg
80c01dc085 Use leader.json or ReplicatedData to get ports for drone 2018-06-24 10:44:17 -06:00
Tyera Eulberg
45b2549fa9 Reset bad TestNode edit 2018-06-24 10:44:17 -06:00
Greg Fitzgerald
c7ce454188 Use pnet_datalink instead of all of pnet
pnet_transport takes a long time to build. It's been especially
painful from within a docker container for reasons I don't care
to understand. pnet_datalink is the only part of pnet we're using
so booting the rest.
2018-06-24 10:39:59 -06:00
Anatoly Yakovenko
7059ea42d6 comments 2018-06-24 09:19:05 -06:00
Anatoly Yakovenko
8ea1c29c9b more notes 2018-06-24 09:19:05 -06:00
Michael Vines
33bbfdbc9b Retry flaky coverage/cuda builds on initial failure 2018-06-23 16:17:25 -07:00
Michael Vines
5de54f8853 Make cuda/erasure build logs public 2018-06-23 16:17:25 -07:00
Michael Vines
a1ac41218a Document CUDA version 2018-06-23 16:17:25 -07:00
Rob Walker
55fc647568 fix more shellcheck 2018-06-23 16:00:17 -07:00
Rob Walker
e83e898eed fix shellcheck's concerns 2018-06-23 16:00:17 -07:00
Rob Walker
eb07e4588b remove IPADDR, which was making Rob feel ill
IPADDR is simple, but not exactly what we need for testnet, where NAT'd
  folks need to join in, need to advertise themselves as on the interweb.

  myip() helps, but there's some TODOs: fullnode-config probably needs to
  be told where it lives in the real world (machine interfaces tell us dick),
  or incorporate something like the "ifconfig.co" code in myip.sh
2018-06-23 16:00:17 -07:00
Michael Vines
563f834c96 Document how to update the snap 2018-06-23 15:29:22 -07:00
Michael Vines
183178681d Simplify fetching perf libs 2018-06-23 12:54:38 -07:00
anatoly yakovenko
8dba53e494 debit undo (#423) 2018-06-23 06:14:52 -07:00
Michael Vines
e4782b19a3 Document GCP setup 2018-06-23 02:12:20 -07:00
Michael Vines
ec86b1dffa Adapt to GCP-based CI 2018-06-23 02:12:20 -07:00
anatoly yakovenko
6cb8266c7b cleanup (#419) 2018-06-22 23:26:42 -07:00
Greg Fitzgerald
9c50302a39 Update rfc-001-smart-contracts-engine.md 2018-06-22 22:45:22 -07:00
Anatoly Yakovenko
3313c69898 remove ccal 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
530c6ca7ec a bunch of updates 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
07ed2fb523 cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
d9ec380a15 cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
b60eb3a899 edits 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
b4df69791b cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
c21b8a22b9 update 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
475a76e656 wip 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
7ba5d5ef86 first! 2018-06-22 22:43:54 -07:00
Greg Fitzgerald
737dc1ddde Per rustc 1.27.0, we can ensure nested results are used 2018-06-22 22:42:47 -07:00
Greg Fitzgerald
164bf19b36 Update LICENSE 2018-06-22 22:41:04 -07:00
Greg Fitzgerald
25976771d9 Version bump 2018-06-22 22:38:18 -07:00
Greg Fitzgerald
f2198c2e9a cargo fmt
rustc 1.27.0

```
$ cargo fmt --version
rustfmt 0.6.1-stable (49279d71 2018-05-08)
```
2018-06-22 22:23:55 -07:00
Rob Walker
eec19c6d2c move genesis to new Entry generation 2018-06-22 17:46:45 -07:00
Michael Vines
30e03feb5f Add initial CI subsystem documentation 2018-06-22 15:30:29 -07:00
Michael Vines
58cd3bde9f Add drone to snap package 2018-06-22 15:27:25 -07:00
Tyera Eulberg
662bfb7b88 fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
5f3e3a17d3 Fix test_send_airdrop cap; add helpful panic msgs 2018-06-22 14:52:36 -07:00
Tyera Eulberg
feba2d9975 Set request cap to a reasonable number, based on 30min reset noted in issue #341 2018-06-22 14:52:36 -07:00
Tyera Eulberg
e3e3a1c457 Better drone request cap handling 2018-06-22 14:52:36 -07:00
Tyera Eulberg
90628f3c8d Edit TestNode port logic to be consistent with new_leader (fixes hanging test_send_airdrop) 2018-06-22 14:52:36 -07:00
Tyera Eulberg
f6bcadb79d Make airdrop amount variable 2018-06-22 14:52:36 -07:00
Tyera Eulberg
d4ac16773c fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
96f044d2bf Clean up; add new_from_server_addr routine 2018-06-22 14:52:36 -07:00
Tyera Eulberg
f31868b913 Rename drone bin; fix usage statement 2018-06-22 14:52:36 -07:00
Tyera Eulberg
73b0ff5b55 Add request-count check and tests; fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
64cf69045a Add request-count check; Clean up solana-drone and fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
e57dae0f31 Update config and dependencies for solana-drone 2018-06-22 14:52:36 -07:00
Tyera Eulberg
6386e7d5cf Leave some tokens in the mint for solana-drone 2018-06-22 14:52:36 -07:00
Tyera Eulberg
4bad103da9 Add solana-drone CLI 2018-06-22 14:52:36 -07:00
Tyera Eulberg
30a26adb7c Add solana-drone module to library 2018-06-22 14:52:36 -07:00
Stephen Akridge
8be4adfc0a Rename tr => tx and add back comments 2018-06-22 14:34:46 -07:00
Stephen Akridge
fed4cc3965 Remove commented code/imports 2018-06-22 14:34:46 -07:00
Stephen Akridge
7d1e074683 bump last_ids 2018-06-22 14:34:46 -07:00
Stephen Akridge
00516e50a1 last_ids opt 2018-06-22 14:34:46 -07:00
Stephen Akridge
e83d76fbd9 Remove mutexes 2018-06-22 14:34:46 -07:00
Stephen Akridge
304f152315 rwlock balances table 2018-06-22 14:34:46 -07:00
Stephen Akridge
3a82ebf7fd Add multiple source accounts for benchmark 2018-06-22 14:34:46 -07:00
Pankaj Garg
0253d34467 Address review comments 2018-06-22 14:18:45 -07:00
Pankaj Garg
9209f9acde Run multiple instances from same workspace
* Support running leader and validators from multiple machines
  using the same NFS mounted workspace.
* Changes to setup, leader and validator scripts
2018-06-22 14:18:45 -07:00
Rob Walker
3dbbb398df use next_entries() in recorder, recycle blobs in reconstruct_from_blobs 2018-06-22 14:17:36 -07:00
Michael Vines
17e8ad110f Temporarily disable failing CI to get back to green 2018-06-22 11:29:31 -07:00
Rob Walker
5e91d31ed3 issue 309 part 1
* limit the number of Entries per Blob to at most one
* limit the number of Transactions per Entry such that an Entry will
    always fit in a Blob

With a one-to-one map of Entries to Blobs, recovery of a validator
  is a simple fast-forward from the end of the initial genesis.log
  and tx-*.logs Entries.

TODO: initialize validators' blob index with initial # of Entries.
2018-06-22 09:58:51 -07:00
Greg Fitzgerald
fad9d20820 Add assertion for how next_entry must be called 2018-06-21 21:24:32 -07:00
Greg Fitzgerald
fe9a1c8580 Fix comment 2018-06-21 21:24:32 -07:00
Greg Fitzgerald
cd6d7d5198 Remove redundant clones 2018-06-21 21:24:32 -07:00
Michael Vines
771478bc68 Add simple CUDA version check, warn on mismatch 2018-06-21 13:42:06 -07:00
Michael Vines
c4a59896f8 Run test-erasure in a container 2018-06-21 13:00:40 -07:00
Michael Vines
3eb1608403 Skip --user if SOLANA_DOCKER_RUN_NOSETUID is set 2018-06-21 12:24:52 -07:00
Michael Vines
8fde70d4dc Erasure tests do not require a CUDA agent 2018-06-21 11:42:37 -07:00
Michael Vines
5a047833ed Run snap publishing directly on CUDA agent
This is necessary until we build a docker image that also contains a CUDA
installation
2018-06-21 11:42:37 -07:00
Michael Vines
f6c28e6be1 Update snapcraft docker image to contain snapcraft 2.42.1 2018-06-21 11:42:37 -07:00
Michael Vines
0ebf10d19d Snap cuda fullnode 2018-06-21 11:42:37 -07:00
Pankaj Garg
d3005d3ef3 Updated setup and leader scripts
* Setup will use -b to set validator ports
* Leader script fixed to append .log to the log file
* Updated readme file
2018-06-20 19:05:38 -07:00
Anatoly Yakovenko
effcef2184 fixed sleep bug 2018-06-20 16:58:10 -07:00
Michael Vines
89fc0ad7a9 Add convenience script to download performance libraries 2018-06-20 16:48:32 -07:00
Greg Fitzgerald
410272ee1d Update generic_array
Warning: this may have performance implications.
2018-06-20 11:41:54 -07:00
Greg Fitzgerald
1c97bf50b6 Fix nightly
No longer ignore failures in the nightly build.
2018-06-19 17:38:04 -07:00
Rob Walker
4ecd2c9d0b update demo scripts
* add setup to combine init steps, configurable initial mint
  * bash -e -> bash and be explicit about errors with || exit $?
  * feed transaction logs to validator, too
2018-06-19 17:04:44 -07:00
Michael Vines
e592243a09 De-double quote 2018-06-19 13:20:47 -07:00
Greg Fitzgerald
2f4a92e352 Cleanup test 2018-06-19 12:36:02 -07:00
OEM Configuration (temporary user)
ceafc29040 fix linting errors, add retransmission fix to repair requests 2018-06-19 12:36:02 -07:00
OEM Configuration (temporary user)
b20efabfd2 added retransmission of repair messages 2018-06-19 12:36:02 -07:00
Michael Vines
85b6e7293c Add cleanup script to manage build agent disk space 2018-06-19 12:22:45 -07:00
Rob Walker
6aced927ad improve ledger initialization for fullnode
* use a line iterator on stdin instead of a line iterator on a buffer
 * move some unwrap() to expect(), documenting failures
 * bind entry type earlier (for kicks)
2018-06-19 09:28:35 -07:00
Michael Vines
75997e6c08 Allow BUILDKITE_BRANCH in containers 2018-06-18 22:51:30 -07:00
Michael Vines
9040d00110 Package solana as a snap 2018-06-18 17:36:03 -07:00
Michael Vines
8ebc5c6b07 Suggest different validator port by default to coexist with leader port on the same machine 2018-06-18 17:36:03 -07:00
Michael Vines
d4807790ff Add snapcraft login credentials
This file was created as follows:
$ snapcraft export-login --snaps solana --channels beta,edge snapcraft.credentials
$ openssl aes-256-cbc -e -in snapcraft.credentials -out snapcraft.credentials.enc
2018-06-18 17:36:03 -07:00
Rob Walker
0de5e7a285 attempt to understand entry 2018-06-18 16:48:59 -07:00
Greg Fitzgerald
c40000aeda Fix compiler warning 2018-06-18 15:49:41 -07:00
Stephen Akridge
31198bc105 Fix cargo bench nightly 2018-06-18 13:20:39 -07:00
Michael Vines
92599acfca Abort when -l is not present or unreadable 2018-06-16 09:55:03 -07:00
Greg Fitzgerald
f6e70779fe Don't panic if sent a bad packet 2018-06-16 09:51:45 -06:00
Greg Fitzgerald
3017bde686 Update README.md 2018-06-16 09:43:23 -06:00
Greg Fitzgerald
9d84ec4bb3 Delete TODO
That comment predates the separation of RPU and TPU.
2018-06-16 08:59:30 -06:00
Anatoly Yakovenko
586141adb2 Cleanup TVU docs 2018-06-15 22:45:35 -06:00
Michael Vines
3f763f99e2 Fail fast in CI when |cargo fmt| says no 2018-06-15 17:10:00 -07:00
Michael Vines
15c7f36ea3 Improve error reporting 2018-06-15 17:10:00 -07:00
Michael Vines
04d1a083fa Skip |sudo sysctl ...| on macOS 2018-06-15 17:10:00 -07:00
Greg Fitzgerald
327ee1dae8 Apply feedback from @aeyakovenko 2018-06-15 17:01:38 -06:00
Greg Fitzgerald
22885c3e64 Add TVU ASCII art 2018-06-15 17:01:38 -06:00
Stephen Akridge
94ededb54c Add comments and limit digits for tps prints 2018-06-15 11:54:01 -06:00
Stephen Akridge
af6a07697a Change client-demo to run continuously for some amount of time
Also retry for get_last_id/transaction_count if dropped.
2018-06-15 11:54:01 -06:00
Stephen Akridge
5f1d8c95eb Fix blob data size 2018-06-15 11:54:01 -06:00
Anatoly Yakovenko
7d9e032407 make sure we test large tables 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
bc918a5ad5 purger 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
ee54ce4727 min table size before purge 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
e85bf2f2d5 tests pass 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
a7460ffbd1 purge validators we haven't seen for a long time 2018-06-15 06:56:35 -06:00
Rob Walker
7fe1fd2f95 clean up fullnode cmdline
* fix documentation, other opt parameters
 * add support for a named output file, remove hardcoded "leader.log"
 * resurrect stdout as the default output
2018-06-15 00:41:07 -07:00
Rob Walker
d30670e92e clean up demo bash scripts
* allow other level of RUST logging
 * avoid "echo" in favor of printf (builtin)
 * single quotes for literals, double quotes for variables
2018-06-14 23:12:11 -06:00
Greg Fitzgerald
9b202c6e1e No longer flood log with empty entries 2018-06-14 18:04:36 -06:00
Stephen Akridge
87946eafd5 Lower processing transaction message to debug by default 2018-06-14 17:08:11 -06:00
Greg Fitzgerald
7575d3c726 Add timestamp to log messages
Upgraded env_logger and now we have timestamps and colorful messages.

Fixes #318
2018-06-14 17:07:58 -06:00
Stephen Akridge
8b9713a934 Skip link_local v4 addresses and v6 address when v6 is not enabled 2018-06-14 16:10:31 -06:00
Stephen Akridge
ec713c18c4 Revert client.sh script to use cargo 2018-06-14 11:56:36 -06:00
anatoly yakovenko
c24b0a1a3f TVU rework (#352)
Refactored TVU, into stages
* blob fetch stage for blobs
* window stage for maintaining the blob window
* pulled out NCP out of the TVU so they can be separate units
TVU is now just the fetch -> window -> request and bank processing
2018-06-13 21:52:23 -07:00
Robert Kelly
34e0cb0092 cargo fmt 2018-06-13 19:17:21 -07:00
Robert Kelly
7b7c7cba21 changed atty library 2018-06-13 19:17:21 -07:00
Anatoly Yakovenko
c45343dd30 comments 2018-06-13 16:11:44 -06:00
Anatoly Yakovenko
b7f6603c1f fix coverage build 2018-06-13 16:11:44 -06:00
anatoly yakovenko
2d3b052dea allow for insertion of dummy entry points into the local table (#346)
* Needed for #341. Create a dummy entry with public key 0..., but with a valid gossip address that we can ask for updates. This will allow validators to discover the full network by just knowing a single node's gossip address without knowing anything else about their identity.
* once we start removing dead validators this entry should get purged since we will never see a message from public key 0, #344
2018-06-13 11:42:30 -07:00
Tyera Eulberg
dcb6234771 Fix relative link to client demo 2018-06-12 23:29:04 -06:00
Greg Fitzgerald
e44d423e83 Make version syntax consistent
Using no symbol implies its a symver caret requirement.

https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
2018-06-11 15:51:25 -06:00
Greg Fitzgerald
5435bb734c Upgrade rand 2018-06-11 15:51:25 -06:00
Michael Vines
13f59adf61 Update build status badge link to publicly available build log 2018-06-10 22:12:09 -07:00
Stephen Akridge
0fce3368d3 Fix json perf counter print and add script to generate a chart 2018-06-09 10:55:22 -07:00
Stephen Akridge
1ee5c81267 Fix benchmarking banking_stage 2018-06-08 15:50:36 -07:00
Stephen Akridge
3bb9d5eb50 Use timing::timestamp for counters 2018-06-08 15:50:36 -07:00
Grimes
efb23f7cf9 Ensure stuck builds eventually timeout 2018-06-07 19:08:03 -07:00
Grimes
013f4674de Target cuda agents 2018-06-07 19:08:03 -07:00
Greg Fitzgerald
6966b25d9c Don't mark a build as failed if line coverage drops
It's not always a problem if line coverage drops. For example,
coverage will drop if you make well-tested code more succinct.
It just means the uncovered code is just a larger percentage of
the codebase.
2018-06-07 19:09:25 -06:00
Greg Fitzgerald
d513f56c8c Readme version bump 2018-06-07 17:32:07 -06:00
Greg Fitzgerald
7aa05618a3 data_replicator -> ncp
Fixes #327
2018-06-07 17:11:17 -06:00
Greg Fitzgerald
cdfbbe5e60 Fix diagram typos 2018-06-07 17:11:17 -06:00
Greg Fitzgerald
fe7d1cb81c Race -> Or
Thanks for the suggestion @FishmanL!
2018-06-07 17:11:03 -06:00
Anatoly Yakovenko
c2a9395a4b perf counters 2018-06-07 14:59:21 -07:00
Greg Fitzgerald
586279bcfc Add server diagrams 2018-06-07 15:24:44 -06:00
Greg Fitzgerald
8bd10e7c4c Cleanup top-level lib doc 2018-06-07 15:24:44 -06:00
Greg Fitzgerald
928e6165bc Add TPU & RPU diagrams 2018-06-07 15:24:44 -06:00
anatoly yakovenko
77c9e801aa fixed client demo (#325)
* fixed client demo
2018-06-07 13:51:15 -07:00
Anatoly Yakovenko
c78132417f fix deadlock 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
849928887e undo 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
ba1163d49f fix logs 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
6f9c89af39 fix deadlock 2018-06-07 13:52:33 -06:00
Greg Fitzgerald
246b8b1242 No longer cat scripts
Because we keep changing those scripts and not updating the readme.

Also, this removes the "-b 9000" starting validators. Is that right?
Or should we be passing that to the validator config?
2018-06-07 12:17:43 -06:00
Stephen Akridge
f0db68cb75 Add note about validator.json and -d flag to config generating scripts 2018-06-07 11:15:41 -06:00
Greg Fitzgerald
f0d1fdfb46 Add missing module descriptions 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
3b8b2e030a Better docs for transaction 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
b4fee677a5 Better docs for payment_plan 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
fe706583f9 Better docs for sigverify_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
d0e0c17ece Better docs for rpu 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
5aaa38bcaf Better docs for write_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
6ff9b27f8e Better docs for entry 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
3f4e035506 Better docs for budget 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
57d9fbb927 Better docs for banking_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
ee44e51b30 Better docs for the bank 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
5011f24123 Move more interesting content into first header
The first header and its content is the only text displayed on
GitHub's mobile page. Reorder so that the disclaimer is the only
information people see.

Disclaimer: IANAL and assume reordering these doesn't matter. :)
2018-06-07 09:25:36 -06:00
Anatoly Yakovenko
d1eda334f3 gdb 2018-06-07 09:25:08 -06:00
Hleb Albau
2ae5ce9f2c Do not use cuda for multinode-demo validator component 2018-06-07 07:04:33 -06:00
Greg Fitzgerald
4f5ac78b7e Add readme to crates.io 2018-06-06 15:00:25 -06:00
Stephen Akridge
074c9af020 Shellcheck again 2018-06-05 15:32:25 -06:00
Stephen Akridge
2da2d4e365 More shellcheck 2018-06-05 15:32:25 -06:00
Stephen Akridge
8eb76ab2a5 Fix shellcheck 2018-06-05 15:32:25 -06:00
Stephen Akridge
a710d95243 Fix non-erasure blob nulling 2018-06-05 15:32:25 -06:00
Stephen Akridge
a06535d7ed cargo fmt 2018-06-05 15:32:25 -06:00
Stephen Akridge
f511ac9be7 Fixes for receiving old blobs and nulling the window with coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
e28ad2177e Receive fixes 2018-06-05 15:32:25 -06:00
Stephen Akridge
cb16fe84cd Rework to fix coding blob insertion 2018-06-05 15:32:25 -06:00
Stephen Akridge
ec3569aa39 Move receive_index to correct place 2018-06-05 15:32:25 -06:00
Stephen Akridge
246edecf53 Add receive_index for broadcast blobs and fix blobs_len position 2018-06-05 15:32:25 -06:00
Stephen Akridge
34834c5af9 Store another size in the data block so it is coded as well 2018-06-05 15:32:25 -06:00
Stephen Akridge
b845245614 Restore more of the blob window and add is_coding helper 2018-06-05 15:32:25 -06:00
Stephen Akridge
5711fb9969 Generate coding for the current blob set not just the first coding set 2018-06-05 15:32:25 -06:00
Stephen Akridge
d1eaecde9a Fix deadlock and only push to contq if it's not a coding blob 2018-06-05 15:32:25 -06:00
Stephen Akridge
00c8505d1e Handle set_flags error 2018-06-05 15:32:25 -06:00
Stephen Akridge
33f01efe69 Fixes for erasure coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
377d312c81 Revert log levels 2018-06-05 15:32:25 -06:00
Stephen Akridge
badf5d5412 Add window recovery 2018-06-05 15:32:25 -06:00
Stephen Akridge
0339f90b40 Fix gf-complete url and symlinks 2018-06-05 15:32:25 -06:00
Stephen Akridge
5455e8e6a9 Review comments 2018-06-05 15:32:25 -06:00
Stephen Akridge
6843b71a0d Debug erasure ci script 2018-06-05 15:32:25 -06:00
Stephen Akridge
634408b5e8 Add erasure build to ci 2018-06-05 15:32:25 -06:00
Stephen Akridge
d053f78b74 Erasure refinements, fix generating orders table 2018-06-05 15:32:25 -06:00
Stephen Akridge
93b6fceb2f generate coding after indexing 2018-06-05 15:32:25 -06:00
Stephen Akridge
ac7860c35d indexing blobs then coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
b0eab8729f Add erasure ci script 2018-06-05 15:32:25 -06:00
Stephen Akridge
cb81f80b31 Enable logging for client demo 2018-06-05 15:32:25 -06:00
Stephen Akridge
ea97529185 Fix erasure compilation 2018-06-05 15:32:25 -06:00
Greg Fitzgerald
f1075191fe Clean up comments: Event -> Transaction 2018-06-04 21:43:46 -06:00
Greg Fitzgerald
74c479fbc9 Delete bitrotted docs 2018-06-04 21:43:46 -06:00
Greg Fitzgerald
7e788d3a17 No longer need explicit refs in rustc 1.26 2018-06-04 21:43:46 -06:00
anatoly yakovenko
69b3c75f0d Power of two chance (#314)
* fix validator script
* 1/2^30 that we fail due to random returning the same value
2018-06-04 13:32:34 -07:00
Anatoly Yakovenko
b2c2fa40a2 comments 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
50458d9524 more tests 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
9679e3e356 more tests 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
6db9f92b8a crdt gossip tests 2018-06-03 22:08:25 -06:00
Stephen Akridge
4a44498d45 Fix args in validator script, readme version, client-demo perf print 2018-06-02 21:55:27 -06:00
anatoly yakovenko
216510c573 repair socket and receiver thread (#303)
repair socket and receiver thread
2018-06-02 08:32:51 -07:00
Stephen Akridge
fd338c3097 Run release binary for leader node 2018-06-01 17:10:48 -06:00
Greg Fitzgerald
b66ebf5dec Version bump 2018-06-01 17:10:37 -06:00
Greg Fitzgerald
5da99de579 Review feedback 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
3aa2907bd6 Restore shellcheck 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
05d1618659 Add more detail to testnet setup 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
86113811f2 Readme/demo cleanup 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
53ecaa03f1 Need another beta 2018-05-31 19:08:09 -06:00
Greg Fitzgerald
205c1aa505 Version bump 2018-05-31 18:49:41 -06:00
Greg Fitzgerald
9b54c1542b Move defaults from bash to Rust 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
93d5d1b2ad Default to 1 node 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
4c0f3ed6f3 Attempt to revive the singlenode demo 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
2580155bf2 Enable last of the ignored tests 2018-05-31 16:45:21 -06:00
Greg Fitzgerald
6ab0dd4df9 Remove config options from fullnode 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
4b8c36b6b9 Add solana-fullnode-config 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
359a8397c0 Make bootstrapping functions accessible to other binaries 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
c9fd5d74b5 Boot futures 0.1
We added them thinking it'd be a good stepping stone towards an
asynchronous thin client, but it's used inconsistently and where
it used, the function is still synchronous, which is just confusing.
2018-05-31 14:13:09 -06:00
Greg Fitzgerald
391744af97 Speed up the creation of the million accounts
All threads were locked on the same set of signatures.
2018-05-31 12:13:18 -06:00
Greg Fitzgerald
587ab29e09 Don't register entry ID until after processing its transactions 2018-05-31 12:13:18 -06:00
Greg Fitzgerald
80f07dadc5 Generalize process_entries()
And use it in fullnode
2018-05-31 12:13:18 -06:00
Greg Fitzgerald
60609a44ba Initialize recorder from bank's last_id 2018-05-31 12:13:18 -06:00
Greg Fitzgerald
30c8fa46b4 rustc version bump 2018-05-30 20:49:55 -06:00
Greg Fitzgerald
7aab7d2f82 Sleep between events if PoH is disabled 2018-05-30 15:55:10 -06:00
Anatoly Yakovenko
a8e1c44663 names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
a2b92c35e1 thread names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
9f2086c772 names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
3eb005d492 names for threads 2018-05-30 14:50:53 -06:00
Stephen Akridge
68955bfcf4 Change multinode script argument to leader path
Some may have cloned their code in different place
2018-05-30 14:49:42 -06:00
Anatoly Yakovenko
9ac7070e08 fix ci 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
e44e81bd17 fmt 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
f5eedd2d19 fmt 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
46059a37eb skip shell check 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
adc655a3a2 scripts 2018-05-30 14:04:48 -06:00
Ubuntu
3058f80489 log 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
df98cae4b6 cleanup 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
d327e0aabd warn on tx verify sig 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
17d3a6763c update 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
02c5b0343b fixed cloned 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
2888e45fea comments 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
f1311075d9 integration tests 2018-05-30 14:04:48 -06:00
Ubuntu
6c380e04a3 fix 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
cef1c208a5 Crdt pipeline, coalesce window repair requests in the listener by examining all of them at once, and unblock those threads from doing io. 2018-05-30 14:04:48 -06:00
Greg Fitzgerald
ef8eac92e3 Version bump 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
9c9c63572b cargo fmt
rustfmt was updated with 1.26.1
2018-05-29 20:33:45 -07:00
Greg Fitzgerald
6c0c6de1d0 Better error names 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
b57aecc24c Better error if Bank doesn't recognize tx last_id 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
290dde60a0 Test invalid tokens and fees 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
38623785f9 Add fee to Transaction
Fixes #161
2018-05-29 20:33:45 -07:00
Grimes
256ecc7208 Build status badge now excludes pull requests 2018-05-29 20:33:34 -07:00
Greg Fitzgerald
76b06b47ba Delete dead code 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
cf15cf587f spending plan -> budget
Review feedback from @sakridge
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
134c7add57 Fix bench build 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
ac0791826a plan.rs -> payment_plan.rs 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
d2622b7798 Allow for additional smart contract languages
Fixes #159
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
f82cbf3a27 Move Budget EDSL into its own module 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
aa7e3df8d6 Plan -> Budget
Budget is now an EDSL. PaymentPlan is the interface to it.
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
ad00d7bd9c Move plan methods to a trait 2018-05-29 18:09:03 -06:00
Anatoly Yakovenko
8d1f82c34d breaks 2018-05-29 16:53:26 -07:00
Anatoly Yakovenko
0cb2036e3a comment on bad blob usage 2018-05-29 16:53:26 -07:00
Greg Fitzgerald
2b1e90b0a5 More idiomatic Rust 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
f2ccc133a2 Finally made fetch happen 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
5e824b39dd Move multinode communication outside TPU 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
41efcae64b Remove dead code
History: we thought SigVerifyStage would use these, but it does
signature verification before deserializing transactions.
2018-05-29 10:38:58 -06:00
Greg Fitzgerald
cf5671d058 tr -> tx
Missed a few.
2018-05-29 10:38:58 -06:00
Greg Fitzgerald
2570bba6b1 Make apply_payment a method
History: the function was pulled out of Bank when each field wasn't
wrapped in a RwLock, and that locking 'balances' meant to lock
everything in the bank. Now that the RwLocks are here to stay,
we can make it a method again.
2018-05-29 10:38:58 -06:00
Greg Fitzgerald
71cb7d5c97 Better names 2018-05-29 10:38:58 -06:00
Greg Fitzgerald
0df6541d5e Fewer public functions 2018-05-29 10:38:58 -06:00
Greg Fitzgerald
52145caf7e Cleanup: make 'verified' qualifier implicit
History: Qualifying the method names with 'verified' was done to
distinguish them from methods that first did signature verification.
After we moved all signature verification to SigVerifyStage, we removed
those methods from Bank, leaving only the 'verified' ones.

This patch removes the word 'verified' from all method names, since
it is now implied by any code running after SigVerifyStage.
2018-05-29 10:38:58 -06:00
Grimes
86a50ae9e1 Add RUST_BACKTRACE 2018-05-28 22:23:25 -07:00
Grimes
c64cfb74f3 Update code coverage command 2018-05-28 22:23:25 -07:00
Grimes
26153d9919 Avoid docker buildkite plugin, which is not supported by bkrun 2018-05-28 22:23:25 -07:00
Grimes
5af922722f Add local buildkite CI runner 2018-05-28 22:23:25 -07:00
Grimes
b70d730b32 Support local .a, skip if unable to find .a 2018-05-28 22:23:25 -07:00
Grimes
bf4b856e0c Don't fail if CODECOV_TOKEN is undefined 2018-05-28 22:23:25 -07:00
Grimes
0cf0ae6755 s/label:/name:/g 2018-05-28 22:23:25 -07:00
Grimes
29061cff39 Delint existing shell scripts 2018-05-28 05:18:46 -06:00
Grimes
b7eec4c89f Lint shell scripts in CI 2018-05-28 05:18:46 -06:00
Greg Fitzgerald
a3854c229e More rebase typos 2018-05-26 20:48:42 -06:00
Greg Fitzgerald
dcde256433 Fix rebase typo 2018-05-26 20:28:22 -06:00
Greg Fitzgerald
931bdbd5cd Fix typo 2018-05-26 20:25:44 -06:00
Greg Fitzgerald
b7bd59c344 Cleanup whitespace
And delete rebasing artifact
2018-05-26 20:23:18 -06:00
Anatoly Yakovenko
2dbf9a6017 rename 2018-05-26 20:13:42 -06:00
Anatoly Yakovenko
fe93bba457 logs
poll both endpoints in client

logs

logs

logs

names

verify plan not sig

log

set udp buffer to max

drop output

more verbose about window requests

log the leader

load leader identity

readme for single node demo

update

asserts

update

replay all

rsync

dynamic file read in testnode

fix

cleanup

readme

sum

fix scripts

cleanup

cleanup

readme
2018-05-26 20:13:42 -06:00
Grimes
6e35f54738 Simplify environment blocks 2018-05-26 14:38:26 -07:00
Grimes
089294a85e 'ignored' step failures are no longer ignored 2018-05-26 11:00:20 -07:00
Grimes
25c0b44641 Run ignored build step in docker 2018-05-26 11:00:20 -07:00
Greg Fitzgerald
58c1589688 More typos 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
bb53f69016 Fix typos 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
75659ca042 Light up coverage build 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
fc00594ea4 Move multinode test to integration tests 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
8d26be8b89 Run benchmarks in nightly
And name functions the same way as test functions
2018-05-26 00:36:50 -06:00
Greg Fitzgerald
af4e95ae0f Only check formatting in stable build 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
ffb4a7aa78 Boot TravisCI configuration 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
dcaeacc507 request_stage::serialize_packets -> packet::to_blobs
Good stuff - no need to hide them.
2018-05-25 17:31:07 -06:00
Greg Fitzgerald
4f377e6710 Generalize serialize_responses 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
122db85727 Move channel-oriented code into request_stage 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
a598e4aa74 Fix comments 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
733b31ebbd testnode -> fullnode
It's the real deal.
2018-05-25 17:31:07 -06:00
Greg Fitzgerald
dac9775de0 Replace client-demo with multinode-demo 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
46c19a5783 Rename sigverify modules 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
aaeb5ba52f tr -> tx 2018-05-25 16:47:21 -06:00
Greg Fitzgerald
9f5a3d6064 events -> transactions 2018-05-25 16:47:21 -06:00
Greg Fitzgerald
4cdf873f98 Delete event.rs 2018-05-25 16:47:21 -06:00
87 changed files with 6077 additions and 3193 deletions

View File

@@ -1,2 +1,5 @@
ignore: ignore:
- "src/bin" - "src/bin"
coverage:
status:
patch: off

View File

@@ -1,36 +0,0 @@
language: rust
required: sudo
services:
- docker
matrix:
allow_failures:
- rust: nightly
include:
- rust: stable
- rust: nightly
env:
- FEATURES='unstable'
before_script: |
export PATH="$PATH:$HOME/.cargo/bin"
rustup component add rustfmt-preview
script:
- cargo fmt -- --write-mode=diff
- cargo build --verbose --features "$FEATURES"
- cargo test --verbose --features "$FEATURES"
after_success: |
docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
bash <(curl -s https://codecov.io/bash) -s target/cov
before_deploy:
- cargo package
deploy:
provider: releases
api-key:
secure: j3cPAbOuGjXuSl+j+JL/4GWxD6dA0/f5NQ0Od4LBVewPmnKiqimGOJ1xj3eKth+ZzwuCpcHwBIIR54NEDSJgHaYDXiukc05qCeToIPqOc0wGJ+GcUrWAy8M7Wo981I/0SVYDAnLv4+ivvJxYE7b2Jr3pHsQAzH7ClY8g2xu9HlNkScEsc4cizA9Sf3zIqtIoi480vxtQ5ghGOUCkwZuG3+Dg+IGnnjvE4qQOYey1del+KIDkmbHjry7iFWPF6fWK2187JNt6XiO2/2tZt6BkMEmdRnkw1r/wL9tj0AbqLgyBjzlI4QQfkBwsuX3ZFeNGArn71s7WmAUGyVOl0DJXfwN/BEUxMTd+lkMjuMNUxaU/hxVZ7zAWH55KJK+qf6B95DLVWr7ypjfJLLBcds+JfkBNoReWLM1XoDUKAU+wBf1b+PKiywNfNascjZTcz6QGe94sa7l/T4PxtHDSREmflFgu1Hysg61WuODDwTTHGrsg9ZuvlINnqQhXsJo9r9+TMIGwwWHcvLQDNz2TPALCfcLtd+RsevdOeXItYa0KD3D4gKGv36bwAVDpCIoZnSeiaT/PUyjilFtJjBpKz9BbOKgOtQhHGrHucn0WOF+bu/t3SFaJKQf/W+hLwO3NV8yiL5LQyHVm/TPY62nBfne2KEqi/LOFxgKG35aACouP0ig=
file: target/package/solana-$TRAVIS_TAG.crate
skip_cleanup: true
on:
tags: true
condition: "$TRAVIS_RUST_VERSION = stable"
after_deploy:
- cargo publish --token "$CRATES_IO_TOKEN"

View File

@@ -1,9 +1,10 @@
[package] [package]
name = "solana" name = "solana"
description = "The World's Fastest Blockchain" description = "Blockchain, Rebuilt for Scale"
version = "0.6.0-alpha" version = "0.7.0-alpha"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "http://solana.com/" homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
authors = [ authors = [
"Anatoly Yakovenko <anatoly@solana.com>", "Anatoly Yakovenko <anatoly@solana.com>",
@@ -17,12 +18,12 @@ name = "solana-client-demo"
path = "src/bin/client-demo.rs" path = "src/bin/client-demo.rs"
[[bin]] [[bin]]
name = "solana-multinode-demo" name = "solana-fullnode"
path = "src/bin/multinode-demo.rs" path = "src/bin/fullnode.rs"
[[bin]] [[bin]]
name = "solana-testnode" name = "solana-fullnode-config"
path = "src/bin/testnode.rs" path = "src/bin/fullnode-config.rs"
[[bin]] [[bin]]
name = "solana-genesis" name = "solana-genesis"
@@ -40,6 +41,10 @@ path = "src/bin/mint.rs"
name = "solana-mint-demo" name = "solana-mint-demo"
path = "src/bin/mint-demo.rs" path = "src/bin/mint-demo.rs"
[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
[badges] [badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" } codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
@@ -52,7 +57,7 @@ erasure = []
[dependencies] [dependencies]
rayon = "1.0.0" rayon = "1.0.0"
sha2 = "0.7.0" sha2 = "0.7.0"
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] } generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
serde = "1.0.27" serde = "1.0.27"
serde_derive = "1.0.27" serde_derive = "1.0.27"
serde_json = "1.0.10" serde_json = "1.0.10"
@@ -60,13 +65,15 @@ ring = "0.12.1"
untrusted = "0.5.1" untrusted = "0.5.1"
bincode = "1.0.0" bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] } chrono = { version = "0.4.0", features = ["serde"] }
log = "^0.4.1" log = "0.4.2"
env_logger = "^0.4.1" env_logger = "0.5.10"
matches = "^0.1.6" matches = "0.1.6"
byteorder = "^1.2.1" byteorder = "1.2.1"
libc = "^0.2.1" libc = "0.2.1"
getopts = "^0.2" getopts = "0.2"
isatty = "0.1" atty = "0.2"
futures = "0.1" rand = "0.5.1"
rand = "0.4.2" pnet_datalink = "0.21.0"
pnet = "^0.21.0" tokio = "0.1"
tokio-codec = "0.1"
tokio-io = "0.1"

View File

@@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge Copyright 2018 Solana Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

190
README.md
View File

@@ -1,27 +1,42 @@
[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana) [![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana) [![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
[![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg)](https://buildkite.com/solana-labs/solana) [![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg?branch=master)](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana) [![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
Blockchain, Rebuilt for Scale
===
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.
Disclaimer Disclaimer
=== ===
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment. All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
Solana: High Performance Blockchain
===
Solana&trade; is a new architecture for a high performance blockchain. It aims to support
over 700 thousand transactions per second on a gigabit network.
Introduction Introduction
=== ===
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second. It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. 
At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
Running the demo
Testnet Demos
=== ===
The Solana repo contains all the scripts you might need to spin up your own
local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnode. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:
First, install Rust's package manager Cargo. First, install Rust's package manager Cargo.
```bash ```bash
@@ -36,58 +51,107 @@ $ git clone https://github.com/solana-labs/solana.git
$ cd solana $ cd solana
``` ```
The testnode server is initialized with a ledger from stdin and The demo code is sometimes broken between releases as we add new low-level
generates new ledger entries on stdout. To create the input ledger, we'll need features, so if this is your first time running the demo, you'll improve
to create *the mint* and use it to generate a *genesis ledger*. It's done in your odds of success if you check out the
two steps because the mint-demo.json file contains private keys that will be [latest release](https://github.com/solana-labs/solana/releases)
used later in this demo. before proceeding:
```bash ```bash
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json $ git checkout v0.6.1
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
``` ```
Now you can start the server: Configuration Setup
---
The network is initialized with a genesis ledger and leader/validator configuration files.
These files can be generated by running the following script.
```bash ```bash
$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log $ ./multinode-demo/setup.sh
``` ```
Wait a few seconds for the server to initialize. It will print "Ready." when it's safe Singlenode Testnet
to start sending it transactions. ---
Then, in a separate shell, let's execute some transactions. Note we pass in Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.
Now start the server:
```bash
$ ./multinode-demo/leader.sh
```
To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
it by adding `--features=cuda` to the line that runs `solana-fullnode` in
`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.
```bash
$ ./fetch-perf-libs.sh
$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
Multinode Testnet
---
To run a multinode testnet, after starting a leader node, spin up some validator nodes:
```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
```
As with the leader node, you can run a performance-enhanced validator fullnode by adding
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
```bash
$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
```
Testnet Client Demo
---
Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
the JSON configuration file here, not the genesis ledger. the JSON configuration file here, not the genesis ledger.
```bash ```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo $ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
``` ```
Now kill the server with Ctrl-C, and take a look at the ledger. You should What just happened? The client demo spins up several threads to send 500,000 transactions
see something similar to: to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
```json floods the network with UDP packets, such that the network will almost certainly drop a
{"num_hashes":27,"id":[0, "..."],"event":"Tick"} bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}} demo completes after it has convinced itself the testnet won't process any additional
{"num_hashes":27,"id":[0, "..."],"event":"Tick"} transactions. You should see several TPS measurements printed to the screen. In the
``` multinode variation, you'll see TPS measurements for each validator node as well.
Now restart the server from where we left off. Pass it both the genesis ledger, and
the transaction ledger.
Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash ```bash
$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log $ sudo snap install solana --edge --devmode
``` ```
(`--devmode` flag is required only for `solana.fullnode-cuda`)
Lastly, run the client demo again, and verify that all funds were spent in the Once installed the usual Solana programs will be available as `solana.*` instead
previous round, and so no additional transactions are added. of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
Update to the latest version at any time with
```bash ```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo $ snap info solana
$ sudo snap refresh solana --devmode
``` ```
Stop the server again, and verify there are only Tick entries, and no Transaction entries.
Developing Developing
=== ===
@@ -102,7 +166,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview $ rustup component add rustfmt-preview
``` ```
If your rustc version is lower than 1.25.0, please update it: If your rustc version is lower than 1.26.1, please update it:
```bash ```bash
$ rustup update $ rustup update
@@ -121,21 +185,37 @@ Testing
Run the test suite: Run the test suite:
```bash ```bash
cargo test $ cargo test
```
To emulate all the tests that will run on a Pull Request, run:
```bash
$ ./ci/run-local.sh
``` ```
Debugging Debugging
--- ---
There are some useful debug messages in the code, you can enable them on a per-module and per-level There are some useful debug messages in the code, you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax: basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
```bash ```bash
$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log $ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
``` ```
to see the debug and info sections for streamer and accountant\_skel respectively. Generally to see the debug and info sections for streamer and server respectively. Generally
we are using debug for infrequent debug messages, trace for potentially frequent messages and we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging. info for performance-related logging.
Attaching to a running process with gdb
```
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```
This will dump all the threads' stack traces into gdb.txt.
Benchmarking Benchmarking
--- ---
@@ -151,22 +231,26 @@ Run the benchmarks:
$ cargo +nightly bench --features="unstable" $ cargo +nightly bench --features="unstable"
``` ```
To run the benchmarks on Linux with GPU optimizations enabled:
```bash
$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
$ cargo +nightly bench --features="unstable,cuda"
```
Code coverage Code coverage
--- ---
To generate code coverage statistics, run kcov via Docker: To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
in Rust nightly.
```bash ```bash
$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov $ cargo +nightly install cargo-cov
``` ```
Run cargo-cov and generate a report:
```bash
$ cargo +nightly cov test
$ cargo +nightly cov report --open
```
The coverage report will be written to `./target/cov/report/index.html`
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running

View File

@@ -11,5 +11,6 @@ fn main() {
} }
if !env::var("CARGO_FEATURE_ERASURE").is_err() { if !env::var("CARGO_FEATURE_ERASURE").is_err() {
println!("cargo:rustc-link-lib=dylib=Jerasure"); println!("cargo:rustc-link-lib=dylib=Jerasure");
println!("cargo:rustc-link-lib=dylib=gf_complete");
} }
} }

3
ci/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
/node_modules/
/package-lock.json
/snapcraft.credentials

88
ci/README.md Normal file
View File

@@ -0,0 +1,88 @@
Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
additional GitHub integration provided by https://github.com/mvines/ci-gate
## Agent Queues
We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
`queue=default` and `queue=cuda`. The `default` queue should be favored and
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
be run on the `default` queue, and the [buildkite artifact
system](https://buildkite.com/docs/builds/artifacts) used to transfer build
products over to a GPU instance for testing.
## Buildkite Agent Management
### Buildkite GCP Setup
CI runs on Google Cloud Platform via two Compute Engine Instance groups:
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
VM Instances in each group is manually adjusted.
#### Updating a CI Disk Image
Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
The process to update a disk image is as follows (TODO: make this less manual):
1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk.
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
```
or
```
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
`ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
newly created Disk image.
7. Go to the Instance Groups tab and find the applicable group, `ci-default` or
`ci-cuda`. Edit the Instance Group in two steps: (a) Set the number of
instances to 0 and wait for them all to terminate, (b) Update the Instance
template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
Images.
## Reference
### Buildkite AWS CloudFormation Setup
**AWS CloudFormation is currently inactive, although it may be restored in the
future**
AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, please remain calm during this time.
#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```
Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.

View File

@@ -1,37 +1,31 @@
steps: steps:
- command: "ci/coverage.sh || true" - command: "ci/docker-run.sh rust ci/test-stable.sh"
label: "coverage" name: "stable [public]"
# TODO: Run coverage in a docker image rather than assuming kcov/cargo-kcov timeout_in_minutes: 20
# is installed on the build agent... - command: "ci/shellcheck.sh"
#plugins: name: "shellcheck [public]"
# docker#v1.1.1: timeout_in_minutes: 20
# image: "rust"
# user: "998:997" # buildkite-agent:buildkite-agent
# environment:
# - CODECOV_TOKEN=$CODECOV_TOKEN
- command: "ci/test-stable.sh"
label: "stable [public]"
plugins:
docker#v1.1.1:
image: "rust"
user: "998:997" # buildkite-agent:buildkite-agent
- command: "ci/test-nightly.sh || true"
label: "nightly - FAILURES IGNORED [public]"
plugins:
docker#v1.1.1:
image: "rustlang/rust:nightly"
user: "998:997" # buildkite-agent:buildkite-agent
- command: "ci/test-ignored.sh || true"
label: "ignored - FAILURES IGNORED [public]"
- command: "ci/test-cuda.sh"
label: "cuda"
- wait - wait
- command: "ci/publish.sh" - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
label: "publish release artifacts" name: "nightly [public]"
plugins: timeout_in_minutes: 20
docker#v1.1.1: - command: "ci/test-stable-perf.sh"
image: "rust" name: "stable-perf [public]"
user: "998:997" # buildkite-agent:buildkite-agent timeout_in_minutes: 20
environment: retry:
- BUILDKITE_TAG=$BUILDKITE_TAG automatic:
- CRATES_IO_TOKEN=$CRATES_IO_TOKEN - exit_status: "*"
limit: 2
agents:
- "queue=cuda"
- command: "ci/snap.sh [public]"
timeout_in_minutes: 20
name: "snap [public]"
- wait
- command: "ci/publish-crate.sh [public]"
timeout_in_minutes: 20
name: "publish crate"
- command: "ci/hoover.sh [public]"
timeout_in_minutes: 20
name: "clean agent"

View File

@@ -1,25 +0,0 @@
#!/bin/bash -e
cd $(dirname $0)/..
if [[ -r ~/.cargo/env ]]; then
# Pick up local install of kcov/cargo-kcov
source ~/.cargo/env
fi
rustc --version
cargo --version
kcov --version
cargo-kcov --version
export RUST_BACKTRACE=1
cargo build
cargo kcov
if [[ -z "$CODECOV_TOKEN" ]]; then
echo CODECOV_TOKEN undefined
exit 1
fi
bash <(curl -s https://codecov.io/bash)
exit 0

50
ci/docker-run.sh Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/bash -e
usage() {
echo "Usage: $0 [docker image name] [command]"
echo
echo Runs command in the specified docker image with
echo a CI-appropriate environment
echo
}
cd "$(dirname "$0")/.."
IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
echo Error: image not defined
exit 1
fi
docker pull "$IMAGE"
shift
ARGS=(
--workdir /solana
--volume "$PWD:/solana"
--env "HOME=/solana"
--rm
)
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")
# Ensure files are created with the current host uid/gid
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
ARGS+=(--user "$(id -u):$(id -g)")
fi
# Environment variables to propagate into the container
ARGS+=(
--env BUILDKITE_BRANCH
--env BUILDKITE_TAG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
--env SNAPCRAFT_CREDENTIALS_KEY
)
set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"

View File

@@ -0,0 +1,7 @@
FROM snapcraft/xenial-amd64
# Update snapcraft to latest version
RUN apt-get update -qq \
&& apt-get install -y snapcraft \
&& rm -rf /var/lib/apt/lists/* \
&& snapcraft --version

6
ci/docker-snapcraft/build.sh Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash -ex
cd "$(dirname "$0")"
docker build -t solanalabs/snapcraft .
docker push solanalabs/snapcraft

57
ci/hoover.sh Executable file
View File

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Regular maintenance performed on a buildkite agent to control disk usage
#
echo --- Delete all exited containers first
(
set -x
exited=$(docker ps -aq --no-trunc --filter "status=exited")
if [[ -n "$exited" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$exited"
docker rm $exited
fi
)
echo --- Delete untagged images
(
set -x
untagged=$(docker images | grep '<none>'| awk '{ print $3 }')
if [[ -n "$untagged" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$untagged"
docker rmi $untagged
fi
)
echo --- Delete all dangling images
(
set -x
dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
if [[ -n "$dangling" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$dangling"
docker rmi $dangling
fi
)
echo --- Remove unused docker networks
(
set -x
docker network prune -f
)
echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
(
set -x
find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)
echo --- System Status
(
set -x
docker images
docker ps
docker network ls
df -h
)
exit 0

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e #!/bin/bash -e
cd $(dirname $0)/.. cd "$(dirname "$0")/.."
if [[ -z "$BUILDKITE_TAG" ]]; then if [[ -z "$BUILDKITE_TAG" ]]; then
# Skip publish if this is not a tagged release # Skip publish if this is not a tagged release
@@ -12,8 +12,8 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
exit 1 exit 1
fi fi
cargo package
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
cargo publish --token "$CRATES_IO_TOKEN" ci/docker-run.sh rust \
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
exit 0 exit 0

19
ci/run-local.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#
cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun
if [[ ! -x $BKRUN ]]; then
(
set -x
cd ci/
npm install bkrun
)
fi
set -x
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml

11
ci/shellcheck.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive
cd "$(dirname "$0")/.."
set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
| xargs -0 \
ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
exit 0

40
ci/snap.sh Executable file
View File

@@ -0,0 +1,40 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
DRYRUN="echo"
fi
if [[ -z "$BUILDKITE_TAG" ]]; then
SNAP_CHANNEL=edge
else
SNAP_CHANNEL=beta
fi
if [[ -z $DRYRUN ]]; then
[[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
echo SNAPCRAFT_CREDENTIALS_KEY not defined
exit 1;
}
(
openssl aes-256-cbc -d \
-in ci/snapcraft.credentials.enc \
-out ci/snapcraft.credentials \
-k "$SNAPCRAFT_CREDENTIALS_KEY"
snapcraft login --with ci/snapcraft.credentials
) || {
rm -f ci/snapcraft.credentials;
exit 1
}
fi
set -x
echo --- build
snapcraft
echo --- publish
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL

Binary file not shown.

View File

@@ -1,17 +0,0 @@
#!/bin/bash -e
cd $(dirname $0)/..
if [[ -z "$libcuda_verify_ed25519_URL" ]]; then
echo libcuda_verify_ed25519_URL undefined
exit 1
fi
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
curl -X GET -o libcuda_verify_ed25519.a "$libcuda_verify_ed25519_URL"
source $HOME/.cargo/env
cargo test --features=cuda
exit 0

View File

@@ -1,9 +0,0 @@
#!/bin/bash -e
cd $(dirname $0)/..
rustc --version
cargo --version
export RUST_BACKTRACE=1
cargo test -- --ignored

View File

@@ -1,13 +1,32 @@
#!/bin/bash -e #!/bin/bash -e
cd $(dirname $0)/.. cd "$(dirname "$0")/.."
export RUST_BACKTRACE=1
rustc --version rustc --version
cargo --version cargo --version
rustup component add rustfmt-preview _() {
cargo fmt -- --write-mode=diff echo "--- $*"
cargo build --verbose --features unstable "$@"
cargo test --verbose --features unstable }
_ cargo build --verbose --features unstable
_ cargo test --verbose --features unstable
_ cargo bench --verbose --features unstable
# Coverage ...
_ cargo install --force cargo-cov
_ cargo cov test
_ cargo cov report
echo --- Coverage report:
ls -l target/cov/report/index.html
if [[ -z "$CODECOV_TOKEN" ]]; then
echo CODECOV_TOKEN undefined
else
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
fi
exit 0

12
ci/test-stable-perf.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
export RUST_BACKTRACE=1
set -x
exec cargo test --features=cuda,erasure

View File

@@ -1,13 +1,18 @@
#!/bin/bash -e #!/bin/bash -e
cd $(dirname $0)/.. cd "$(dirname "$0")/.."
export RUST_BACKTRACE=1
rustc --version rustc --version
cargo --version cargo --version
rustup component add rustfmt-preview _() {
cargo fmt -- --write-mode=diff echo "--- $*"
cargo build --verbose "$@"
cargo test --verbose }
exit 0 _ rustup component add rustfmt-preview
_ cargo fmt -- --write-mode=diff
_ cargo build --verbose
_ cargo test --verbose
_ cargo test -- --ignored

View File

@@ -1,65 +0,0 @@
The Historian
===
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
![historian](https://user-images.githubusercontent.com/55449/36950845-459bdb58-1fb9-11e8-850e-894586f3729b.png)
```rust
extern crate solana;
use solana::historian::Historian;
use solana::ledger::{Block, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
sleep(Duration::from_millis(15));
let tokens = 42;
let keypair = generate_keypair();
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
hist.sender.send(event0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}
```
Running the program should produce a ledger similar to:
```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```
Proof-of-History
---
Take note of the last line:
```rust
assert!(entries[..].verify(&seed));
```
[It's a proof!](https://en.wikipedia.org/wiki/CurryHoward_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing then event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.

View File

@@ -1,18 +0,0 @@
msc {
client,historian,recorder;
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
client=>historian [ label = "Transaction(d0)" ] ;
historian=>recorder [ label = "Transaction(d0)" ] ;
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "entries.verify(h0)" ] ;
}

37
fetch-perf-libs.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/bin/bash -e
if [[ $(uname) != Linux ]]; then
echo Performance libraries are only available for Linux
exit 1
fi
if [[ $(uname -m) != x86_64 ]]; then
echo Performance libraries are only available for x86_64 architecture
exit 1
fi
(
set -x
curl -o solana-perf.tgz \
https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
tar zxvf solana-perf.tgz
)
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
echo ==============================================
echo Warning: possible CUDA version mismatch
echo
echo "Expected version: $(cat cuda-version.txt)"
echo "Detected version: $(cat /usr/local/cuda/version.txt)"
echo ==============================================
fi
else
echo ==============================================
echo Warning: unable to validate CUDA version
echo ==============================================
fi
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
exit 0

View File

@@ -1,7 +1,17 @@
#!/bin/bash #!/bin/bash
cd /home/ubuntu/solana
#git pull if [[ -z $1 ]]; then
export RUST_LOG=solana::crdt=trace echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
# scp ubuntu@18.206.1.146:~/solana/leader.json . exit 1
# scp ubuntu@18.206.1.146:~/solana/mint-demo.json . fi
cat mint-demo.json | cargo run --release --bin solana-multinode-demo -- -l leader.json -c 10.0.5.179:8100 -n 3
LEADER=$1
COUNT=${2:-1}
rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
cargo run --release --bin solana-client-demo -- \
-n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log

View File

@@ -1,6 +1,28 @@
#!/bin/bash #!/bin/bash
cd /home/ubuntu/solana here=$(dirname "$0")
git pull
export RUST_LOG=solana=info # shellcheck source=/dev/null
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO . "${here}"/myip.sh
#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d
myip=$(myip) || exit $?
[[ -f leader-"${myip}".json ]] || {
echo "I can't find a matching leader config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
exit 1
}
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
# this makes a leader.json file available alongside genesis, etc. for
# validators and clients
cp leader-"${myip}".json leader.json
cargo run --release --bin solana-fullnode -- \
-l leader-"${myip}".json \
< genesis.log tx-*.log \
> tx-"$(date -u +%Y%m%d%H%M%S%N)".log

58
multinode-demo/myip.sh Executable file
View File

@@ -0,0 +1,58 @@
#!/bin/bash
function myip()
{
declare ipaddrs=( )
# query interwebs
mapfile -t ipaddrs < <(curl -s ifconfig.co)
# machine's interfaces
mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
<(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
if (( ! ${#ipaddrs[*]} ))
then
echo "
myip: error: I'm having trouble determining what our IP address is...
Are we connected to a network?
"
return 1
fi
declare prompt="
Please choose the IP address you want to advertise to the network:
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
"
for ((i=1; i < ${#ipaddrs[*]}; i++))
do
prompt+=" $i) ${ipaddrs[i]}
"
done
while read -r -p "${prompt}
please enter a number [0 for default]: " which
do
[[ -z ${which} ]] && break;
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
echo "Ug. invalid entry \"${which}\"...
"
sleep 1
done
which=${which:-0}
echo "${ipaddrs[which]}"
}
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
then
myip "$@"
fi

15
multinode-demo/setup.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=/dev/null
. "${here}"/myip.sh
myip=$(myip) || exit $?
num_tokens=${1:-1000000000}
cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json

View File

@@ -1,10 +1,32 @@
#!/bin/bash #!/bin/bash
cd /home/ubuntu/solana here=$(dirname "$0")
git pull
scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
scp ubuntu@18.206.1.146:~/solana/leader.json .
scp ubuntu@18.206.1.146:~/solana/genesis.log .
scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d 2>&1 | tee validator.log
# shellcheck source=/dev/null
. "${here}"/myip.sh
leader=$1
[[ -z ${leader} ]] && {
echo "usage: $0 [network path to solana repo on leader machine]"
exit 1
}
myip=$(myip) || exit $?
[[ -f validator-"$myip".json ]] || {
echo "I can't find a matching validator config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
exit 1
}
rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
cargo run --release --bin solana-fullnode -- \
-l validator-"${myip}".json -v leader.json \
< genesis.log tx-*.log

View File

@@ -0,0 +1,182 @@
# Smart Contracts Engine
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
## Toolchain Stack
+---------------------+ +---------------------+
| | | |
| +------------+ | | +------------+ |
| | | | | | | |
| | frontend | | | | verifier | |
| | | | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | |
| | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | | | |
| | llvm | | | | loader | |
| | | +------>+ | | |
| +-----+------+ | | +-----+------+ |
| | | | | |
| | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | | | |
| | ELF | | | | runtime | |
| | | | | | | |
| +------------+ | | +------------+ |
| | | |
| client | | solana |
+---------------------+ +---------------------+
[Figure 1. Smart Contracts Stack]
In Figure 1 an untrusted client, creates a program in the front-end language of her choice, (like C/C++/Rust/Lua), and compiles it with LLVM to a position independent shared object ELF, targeting BPF bytecode. Solana will safely load and execute the ELF.
## Bytecode
Our bytecode is based on Berkley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:
1. Deterministic amount of time to execute the code
2. Bytecode that is portable between machine instruction sets
3. Verified memory accesses
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set
For 1, that means that loops are unrolled, and for any jumps back we can guard them with a check against the number of instruction that have been executed at this point. If the limit is reached, the program yields its execution. This involves saving the stack and current instruction index.
For 2, the BPF bytecode already easily maps to x8664, arm64 and other instruction sets. 
For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic load and stores can do a runtime check against available memory, these will be slow and should be avoided.
For 4, Fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
## Loader
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.
Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.
A client will create a transaction to create a new loader instance:
`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`
A client will then do a bunch of transactions to load its elf into the loader instance they created:
`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`
At this point the client can create a new instance of the module with its own instance address:
`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`
Once the instance has been created, the client may need to upload more user data to solana to configure this instance:
`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`
Now clients can `start` the instance:
`Instance_Start(Instance PubKey, proof of key ownership)`
## Runtime
Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.
### State and Entry Point
State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output of keys is a subset of the input. Three basic kinds of state exist:
* Instance State
* Participant State
* Caller State
There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transactions caller has allocated.
### Call
```
void call(
const struct instance_data *data,
const uint8_t kind[], //instance|participant|caller|read|write
const uint8_t *keys[],
uint8_t *data[],
int num,
uint8_t dirty[], //dirty memory bits
uint8_t *userdata, //current transaction data
);
```
To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designed address that will own the state.
At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.
* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`
Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:
* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`
Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.
#### Caller State
Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.
#### Instance State
Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well.
#### Participant State
Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.
### Reduce
Some operations on the contract will require iteration over all the keys. To make this parallelizable the iteration is broken up into reduce calls which are combined.
```
void reduce_m(
const struct instance_data *data,
const uint8_t *keys[],
const uint8_t *data[],
int num,
uint8_t *reduce_data,
);
void reduce_r(
const struct instance_data *data,
const uint8_t *reduce_data[],
int num,
uint8_t *reduce_data,
);
```
### Execution
Transactions are batched and processed in parallel at each stage.
```
+-----------+ +--------------+ +-----------+ +---------------+
| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
+-----------+ | +--------------+ | +-----------+ | +---------------+
| | |
| +---------------+ | | +--------------+
|->| memory verify |->+ +->| debit undo |
+---------------+ | +--------------+
|
| +---------------+
+->| credit commit |
+---------------+
```
The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the callers key has some state allocated with the contract, then the memory is loaded and executed. After execution stage, the dirty pages are written back by the contract. Because know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution, only transfers may be reversed, fees are commited to solana.
### GPU execution
A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.
## Notes
1. There is no dynamic memory allocation.
2. Persistant Memory is allocated to a Key with ownership
3. Contracts can `call` to update key owned state
4. Contracts can `reduce` over the memory to aggregate state
5. `call` is just a *syscall* that does a cryptographic check of memory owndershp

43
scripts/perf-plot.py Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import sys
stages_to_counters = {}
stages_to_time = {}
if len(sys.argv) != 2:
print("USAGE: {} <input file>".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as fh:
for line in fh.readlines():
if "COUNTER" in line:
json_part = line[line.find("{"):]
x = json.loads(json_part)
counter = x['name']
if not (counter in stages_to_counters):
stages_to_counters[counter] = []
stages_to_time[counter] = []
stages_to_counters[counter].append(x['counts'])
stages_to_time[counter].append(x['now'])
fig, ax = plt.subplots()
for stage in stages_to_counters.keys():
plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)
plt.xlabel('ms')
plt.ylabel('count')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.locator_params(axis='x', nbins=10)
plt.grid(True)
plt.savefig("perf.pdf")

69
snap/snapcraft.yaml Normal file
View File

@@ -0,0 +1,69 @@
name: solana
version: git
summary: Blockchain, Rebuilt for Scale
description: |
710,000 tx/s with off-the-shelf hardware and no sharding.
Scales with Moore's Law.
grade: devel
# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
# CUDA dependency, so use 'devmode' confinement for now
confinement: devmode
apps:
drone:
command: solana-drone
plugs:
- network
- network-bind
fullnode:
command: solana-fullnode
plugs:
- network
- network-bind
- home
fullnode-cuda:
command: solana-fullnode-cuda
plugs:
- network
- network-bind
- home
fullnode-config:
command: solana-fullnode-config
plugs:
- network
- network-bind
genesis:
command: solana-genesis
genesis-demo:
command: solana-genesis-demo
mint:
command: solana-mint
mint-demo:
command: solana-mint-demo
client-demo:
command: solana-client-demo
parts:
solana-cuda:
plugin: rust
rust-channel: stable
rust-features:
- erasure
- cuda
prime:
- bin/solana-fullnode-cuda
- usr/lib/libgf_complete.so.1
- usr/lib/libJerasure.so.2
override-build: |
./fetch-perf-libs.sh
snapcraftctl build
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
solana:
plugin: rust
rust-channel: stable

View File

@@ -1,80 +1,108 @@
//! The `bank` module tracks client balances, and the progress of pending //! The `bank` module tracks client balances and the progress of smart
//! transactions. It offers a high-level public API that signs transactions //! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have //! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified. //! already been signed and verified.
extern crate libc; extern crate libc;
use chrono::prelude::*; use chrono::prelude::*;
use entry::Entry; use entry::Entry;
use event::Event;
use hash::Hash; use hash::Hash;
use mint::Mint; use mint::Mint;
use plan::{Payment, Plan, Witness}; use payment_plan::{Payment, PaymentPlan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature}; use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied; use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque}; use std::collections::{HashMap, HashSet, VecDeque};
use std::result; use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering}; use std::time::Instant;
use transaction::{Instruction, Transaction}; use timing::duration_as_us;
use transaction::{Instruction, Plan, Transaction};
pub const MAX_ENTRY_IDS: usize = 1024 * 4; /// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
/// but requires clients to update its `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub enum BankError { pub enum BankError {
/// Attempt to debit from `PublicKey`, but no found no record of a prior credit.
AccountNotFound(PublicKey), AccountNotFound(PublicKey),
/// The requested debit from `PublicKey` has the potential to draw the balance
/// below zero. This can occur when a debit and credit are processed in parallel.
/// The bank may reject the debit or push it to a future entry.
InsufficientFunds(PublicKey), InsufficientFunds(PublicKey),
InvalidTransferSignature(Signature),
/// The bank has seen `Signature` before. This can occur under normal operation
/// when a UDP packet is duplicated, as a user error from a client not updating
/// its `last_id`, or as a double-spend attack.
DuplicateSiganture(Signature),
/// The bank has not seen the given `last_id` or the transaction is too old and
/// the `last_id` has been discarded.
LastIdNotFound(Hash),
/// The transaction is invalid and has requested a debit or credit of negative
/// tokens.
NegativeTokens,
} }
pub type Result<T> = result::Result<T, BankError>; pub type Result<T> = result::Result<T, BankError>;
/// Commit funds to the 'to' party. /// The state of all accounts and contracts after processing its entries.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
// First we check balances with a read lock to maximize potential parallelization.
if balances
.read()
.expect("'balances' read lock in apply_payment")
.contains_key(&payment.to)
{
let bals = balances.read().expect("'balances' read lock");
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
// Now we know the key wasn't present a nanosecond ago, but it might be there
// by the time we aquire a write lock, so we'll have to check again.
let mut bals = balances.write().expect("'balances' write lock");
if bals.contains_key(&payment.to) {
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
}
}
}
pub struct Bank { pub struct Bank {
balances: RwLock<HashMap<PublicKey, AtomicIsize>>, /// A map of account public keys to the balance in that account.
balances: RwLock<HashMap<PublicKey, i64>>,
/// A map of smart contract transaction signatures to what remains of its payment
/// plan. Each transaction that targets the plan should cause it to be reduced.
/// Once it cannot be reduced, final payments are made and it is discarded.
pending: RwLock<HashMap<Signature, Plan>>, pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
/// A FIFO queue of `last_id` items, where each item is a set of signatures
/// that have been processed using that `last_id`. Rejected `last_id`
/// values are so old that the `last_id` has been pulled out of the queue.
last_ids: RwLock<VecDeque<Hash>>,
// Mapping of hashes to signature sets. The bank uses this data to
/// reject transactions with signatures its seen before
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
/// outside this set will be discarded. Note that if validators do not have the
/// same set as leaders, they may interpret the ledger differently.
time_sources: RwLock<HashSet<PublicKey>>, time_sources: RwLock<HashSet<PublicKey>>,
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
/// to every smart contract when it enters the system. If it is waiting on a
/// timestamp witness before that timestamp, the bank will execute it immediately.
last_time: RwLock<DateTime<Utc>>, last_time: RwLock<DateTime<Utc>>,
/// The number of transactions the bank has processed without error since the
/// start of the ledger.
transaction_count: AtomicUsize, transaction_count: AtomicUsize,
} }
impl Bank { impl Bank {
/// Create an Bank using a deposit. /// Create an Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self { pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new()); let bank = Bank {
apply_payment(&balances, deposit); balances: RwLock::new(HashMap::new()),
Bank {
balances,
pending: RwLock::new(HashMap::new()), pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()), last_ids: RwLock::new(VecDeque::new()),
last_ids_sigs: RwLock::new(HashMap::new()),
time_sources: RwLock::new(HashSet::new()), time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)), last_time: RwLock::new(Utc.timestamp(0, 0)),
transaction_count: AtomicUsize::new(0), transaction_count: AtomicUsize::new(0),
} };
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
bank
} }
/// Create an Bank with only a Mint. Typically used by unit tests. /// Create an Bank with only a Mint. Typically used by unit tests.
@@ -88,59 +116,56 @@ impl Bank {
bank bank
} }
/// Return the last entry ID registered /// Commit funds to the `payment.to` party.
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
if balances.contains_key(&payment.to) {
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
} else {
balances.insert(payment.to, payment.tokens);
}
}
/// Return the last entry ID registered.
pub fn last_id(&self) -> Hash { pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock"); let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids.iter().last().expect("empty 'last_ids' list"); let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
last_item.0 *last_item
} }
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool { /// Store the given signature. The bank will reject any transaction with the same signature.
if signatures fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
.read() if let Some(sig) = signatures.get(sig) {
.expect("'signatures' read lock") return Err(BankError::DuplicateSiganture(*sig));
.contains(sig)
{
return false;
} }
signatures signatures.insert(*sig);
.write() Ok(())
.expect("'signatures' write lock")
.insert(*sig);
true
} }
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool { /// Forget the given `signature` because its transaction was rejected.
signatures fn forget_signature(signatures: &mut HashSet<Signature>, signature: &Signature) {
.write() signatures.remove(signature);
.expect("'signatures' write lock in forget_signature")
.remove(sig)
} }
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool { /// Forget the given `signature` with `last_id` because the transaction was rejected.
if let Some(entry) = self.last_ids fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
.read() if let Some(entry) = self.last_ids_sigs
.write()
.expect("'last_ids' read lock in forget_signature_with_last_id") .expect("'last_ids' read lock in forget_signature_with_last_id")
.iter() .get_mut(last_id)
.rev()
.find(|x| x.0 == *last_id)
{ {
return Self::forget_signature(&entry.1, sig); Self::forget_signature(entry, signature);
} }
return false;
} }
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool { fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
if let Some(entry) = self.last_ids if let Some(entry) = self.last_ids_sigs
.read() .write()
.expect("'last_ids' read lock in reserve_signature_with_last_id") .expect("'last_ids' read lock in reserve_signature_with_last_id")
.iter() .get_mut(last_id)
.rev()
.find(|x| x.0 == *last_id)
{ {
return Self::reserve_signature(&entry.1, sig); return Self::reserve_signature(entry, signature);
} }
false Err(BankError::LastIdNotFound(*last_id))
} }
/// Tell the bank which Entry IDs exist on the ledger. This function /// Tell the bank which Entry IDs exist on the ledger. This function
@@ -151,158 +176,152 @@ impl Bank {
let mut last_ids = self.last_ids let mut last_ids = self.last_ids
.write() .write()
.expect("'last_ids' write lock in register_entry_id"); .expect("'last_ids' write lock in register_entry_id");
let mut last_ids_sigs = self.last_ids_sigs
.write()
.expect("last_ids_sigs write lock");
if last_ids.len() >= MAX_ENTRY_IDS { if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front(); let id = last_ids.pop_front().unwrap();
last_ids_sigs.remove(&id);
} }
last_ids.push_back((*last_id, RwLock::new(HashSet::new()))); last_ids_sigs.insert(*last_id, HashSet::new());
last_ids.push_back(*last_id);
} }
/// Deduct tokens from the 'from' address the account has sufficient /// Deduct tokens from the 'from' address the account has sufficient
/// funds and isn't a duplicate. /// funds and isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> { fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
if let Instruction::NewContract(contract) = &tr.instruction { let option = bals.get_mut(&tx.from);
trace!("Transaction {}", contract.tokens);
}
let bals = self.balances
.read()
.expect("'balances' read lock in process_verified_transaction_debits");
let option = bals.get(&tr.from);
if option.is_none() { if option.is_none() {
return Err(BankError::AccountNotFound(tr.from)); return Err(BankError::AccountNotFound(tx.from));
} }
let bal = option.unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.last_id) { self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
return Err(BankError::InvalidTransferSignature(tr.sig));
}
loop { if let Instruction::NewContract(contract) = &tx.instruction {
let result = if let Instruction::NewContract(contract) = &tr.instruction { if contract.tokens < 0 {
let bal = option.expect("assignment of option to bal"); return Err(BankError::NegativeTokens);
let current = bal.load(Ordering::Relaxed) as i64; }
if current < contract.tokens { if *bal < contract.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.last_id); self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
return Err(BankError::InsufficientFunds(tr.from)); return Err(BankError::InsufficientFunds(tx.from));
} }
bal.compare_exchange( *bal -= contract.tokens;
current as isize, };
(current - contract.tokens) as isize, Ok(())
Ordering::Relaxed,
Ordering::Relaxed,
)
} else {
Ok(0)
};
match result {
Ok(_) => {
self.transaction_count.fetch_add(1, Ordering::Relaxed);
return Ok(());
}
Err(_) => continue,
};
}
} }
pub fn process_verified_transaction_credits(&self, tr: &Transaction) { /// Apply only a transaction's credits. Credits from multiple transactions
match &tr.instruction { /// may safely be applied in parallel.
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
match &tx.instruction {
Instruction::NewContract(contract) => { Instruction::NewContract(contract) => {
let mut plan = contract.plan.clone(); let mut plan = contract.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time plan.apply_witness(&Witness::Timestamp(*self.last_time
.read() .read()
.expect("timestamp creation in process_verified_transaction_credits"))); .expect("timestamp creation in apply_credits")));
if let Some(ref payment) = plan.final_payment() { if let Some(payment) = plan.final_payment() {
apply_payment(&self.balances, payment); self.apply_payment(&payment, balances);
} else { } else {
let mut pending = self.pending let mut pending = self.pending
.write() .write()
.expect("'pending' write lock in process_verified_transaction_credits"); .expect("'pending' write lock in apply_credits");
pending.insert(tr.sig, plan); pending.insert(tx.sig, plan);
} }
} }
Instruction::ApplyTimestamp(dt) => { Instruction::ApplyTimestamp(dt) => {
let _ = self.process_verified_timestamp(tr.from, *dt); let _ = self.apply_timestamp(tx.from, *dt);
} }
Instruction::ApplySignature(tx_sig) => { Instruction::ApplySignature(tx_sig) => {
let _ = self.process_verified_sig(tr.from, *tx_sig); let _ = self.apply_signature(tx.from, *tx_sig);
} }
} }
} }
/// Process a Transaction that has already been verified. /// Process a Transaction. If it contains a payment plan that requires a witness
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> { /// to progress, the payment plan will be stored in the bank.
self.process_verified_transaction_debits(tr)?; fn process_transaction(&self, tx: &Transaction) -> Result<()> {
self.process_verified_transaction_credits(tr); let bals = &mut self.balances.write().unwrap();
self.apply_debits(tx, bals)?;
self.apply_credits(tx, bals);
self.transaction_count.fetch_add(1, Ordering::Relaxed);
Ok(()) Ok(())
} }
/// Process a batch of verified transactions. /// Process a batch of transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> { #[must_use]
// Run all debits first to filter out any transactions that can't be processed pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// in parallel deterministically. let bals = &mut self.balances.write().unwrap();
info!("processing Transactions {}", trs.len()); debug!("processing Transactions {}", txs.len());
let results: Vec<_> = trs.into_par_iter() let txs_len = txs.len();
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr)) let now = Instant::now();
let results: Vec<_> = txs.into_iter()
.map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
.collect(); // Calling collect() here forces all debits to complete before moving on. .collect(); // Calling collect() here forces all debits to complete before moving on.
results let debits = now.elapsed();
.into_par_iter() let now = Instant::now();
let res: Vec<_> = results
.into_iter()
.map(|result| { .map(|result| {
result.map(|tr| { result.map(|tx| {
self.process_verified_transaction_credits(&tr); self.apply_credits(&tx, bals);
tr tx
}) })
}) })
.collect()
}
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
(
events
.into_iter()
.map(|Event::Transaction(tr)| tr)
.collect(),
vec![],
)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
let (trs, rest) = Self::partition_events(events);
let mut results: Vec<_> = self.process_verified_transactions(trs)
.into_iter()
.map(|x| x.map(Event::Transaction))
.collect(); .collect();
for event in rest { debug!(
results.push(self.process_verified_event(event)); "debits: {} us credits: {:?} us tx: {}",
} duration_as_us(&debits),
duration_as_us(&now.elapsed()),
txs_len
);
results let mut tx_count = 0;
for r in &res {
if r.is_ok() {
tx_count += 1;
} else {
info!("tx error: {:?}", r);
}
}
self.transaction_count
.fetch_add(tx_count, Ordering::Relaxed);
res
} }
pub fn process_verified_entries(&self, entries: Vec<Entry>) -> Result<()> { /// Process an ordered list of entries.
pub fn process_entries<I>(&self, entries: I) -> Result<()>
where
I: IntoIterator<Item = Entry>,
{
for entry in entries { for entry in entries {
self.register_entry_id(&entry.id); if !entry.transactions.is_empty() {
for result in self.process_verified_events(entry.events) { for result in self.process_transactions(entry.transactions) {
result?; result?;
}
} }
self.register_entry_id(&entry.id);
} }
Ok(()) Ok(())
} }
/// Process a Witness Signature that has already been verified. /// Process a Witness Signature. Any payment plans waiting on this signature
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> { /// will progress one step.
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending if let Occupied(mut e) = self.pending
.write() .write()
.expect("write() in process_verified_sig") .expect("write() in apply_signature")
.entry(tx_sig) .entry(tx_sig)
{ {
e.get_mut().apply_witness(&Witness::Signature(from)); e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(payment) = e.get().final_payment() { if let Some(payment) = e.get().final_payment() {
apply_payment(&self.balances, &payment); self.apply_payment(&payment, &mut self.balances.write().unwrap());
e.remove_entry(); e.remove_entry();
} }
}; };
@@ -310,8 +329,9 @@ impl Bank {
Ok(()) Ok(())
} }
/// Process a Witness Timestamp that has already been verified. /// Process a Witness Timestamp. Any payment plans waiting on this timestamp
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> { /// will progress one step.
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block, // If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it. // so we'll trust it.
if *self.last_time if *self.last_time
@@ -344,13 +364,13 @@ impl Bank {
// double-spend if it enters before the modified plan is removed from 'pending'. // double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending let mut pending = self.pending
.write() .write()
.expect("'pending' write lock in process_verified_timestamp"); .expect("'pending' write lock in apply_timestamp");
for (key, plan) in pending.iter_mut() { for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time plan.apply_witness(&Witness::Timestamp(*self.last_time
.read() .read()
.expect("'last_time' read lock when creating timestamp"))); .expect("'last_time' read lock when creating timestamp")));
if let Some(ref payment) = plan.final_payment() { if let Some(payment) = plan.final_payment() {
apply_payment(&self.balances, payment); self.apply_payment(&payment, &mut self.balances.write().unwrap());
completed.push(key.clone()); completed.push(key.clone());
} }
} }
@@ -362,14 +382,6 @@ impl Bank {
Ok(()) Ok(())
} }
/// Process an Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: Event) -> Result<Event> {
match event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
}?;
Ok(event)
}
/// Create, sign, and process a Transaction from `keypair` to `to` of /// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client. /// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer( pub fn transfer(
@@ -379,9 +391,9 @@ impl Bank {
to: PublicKey, to: PublicKey,
last_id: Hash, last_id: Hash,
) -> Result<Signature> { ) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id); let tx = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig; let sig = tx.sig;
self.process_verified_transaction(&tr).map(|_| sig) self.process_transaction(&tx).map(|_| sig)
} }
/// Create, sign, and process a postdated Transaction from `keypair` /// Create, sign, and process a postdated Transaction from `keypair`
@@ -395,16 +407,16 @@ impl Bank {
dt: DateTime<Utc>, dt: DateTime<Utc>,
last_id: Hash, last_id: Hash,
) -> Result<Signature> { ) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id); let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig; let sig = tx.sig;
self.process_verified_transaction(&tr).map(|_| sig) self.process_transaction(&tx).map(|_| sig)
} }
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> { pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances let bals = self.balances
.read() .read()
.expect("'balances' read lock in get_balance"); .expect("'balances' read lock in get_balance");
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64) bals.get(pubkey).map(|x| *x)
} }
pub fn transaction_count(&self) -> usize { pub fn transaction_count(&self) -> usize {
@@ -416,11 +428,12 @@ impl Bank {
mod tests { mod tests {
use super::*; use super::*;
use bincode::serialize; use bincode::serialize;
use entry::next_entry;
use hash::hash; use hash::hash;
use signature::KeyPairUtil; use signature::KeyPairUtil;
#[test] #[test]
fn test_bank() { fn test_two_payments_to_one_party() {
let mint = Mint::new(10_000); let mint = Mint::new(10_000);
let pubkey = KeyPair::new().pubkey(); let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
@@ -436,6 +449,18 @@ mod tests {
assert_eq!(bank.transaction_count(), 2); assert_eq!(bank.transaction_count(), 2);
} }
#[test]
fn test_negative_tokens() {
let mint = Mint::new(1);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(
bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::NegativeTokens)
);
assert_eq!(bank.transaction_count(), 0);
}
#[test] #[test]
fn test_account_not_found() { fn test_account_not_found() {
let mint = Mint::new(1); let mint = Mint::new(1);
@@ -449,7 +474,7 @@ mod tests {
} }
#[test] #[test]
fn test_invalid_transfer() { fn test_insufficient_funds() {
let mint = Mint::new(11_000); let mint = Mint::new(11_000);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey(); let pubkey = KeyPair::new().pubkey();
@@ -498,14 +523,14 @@ mod tests {
// Now, acknowledge the time in the condition occurred and // Now, acknowledge the time in the condition occurred and
// that pubkey's funds are now available. // that pubkey's funds are now available.
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); bank.apply_timestamp(mint.pubkey(), dt).unwrap();
assert_eq!(bank.get_balance(&pubkey), Some(1)); assert_eq!(bank.get_balance(&pubkey), Some(1));
// tx count is still 1, because we chose not to count timestamp events // tx count is still 1, because we chose not to count timestamp transactions
// tx count. // tx count.
assert_eq!(bank.transaction_count(), 1); assert_eq!(bank.transaction_count(), 1);
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction. bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(bank.get_balance(&pubkey), Some(2)); assert_ne!(bank.get_balance(&pubkey), Some(2));
} }
@@ -515,7 +540,7 @@ mod tests {
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey(); let pubkey = KeyPair::new().pubkey();
let dt = Utc::now(); let dt = Utc::now();
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); bank.apply_timestamp(mint.pubkey(), dt).unwrap();
// It's now past now, so this transfer should be processed immediately. // It's now past now, so this transfer should be processed immediately.
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id()) bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
@@ -545,24 +570,30 @@ mod tests {
assert_eq!(bank.get_balance(&pubkey), None); assert_eq!(bank.get_balance(&pubkey), None);
// Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them. // Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them.
bank.process_verified_sig(mint.pubkey(), sig).unwrap(); bank.apply_signature(mint.pubkey(), sig).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1)); assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
assert_eq!(bank.get_balance(&pubkey), None); assert_eq!(bank.get_balance(&pubkey), None);
// Assert cancel doesn't cause count to go backward. // Assert cancel doesn't cause count to go backward.
assert_eq!(bank.transaction_count(), 1); assert_eq!(bank.transaction_count(), 1);
bank.process_verified_sig(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction. bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2)); assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
} }
#[test] #[test]
fn test_duplicate_event_signature() { fn test_duplicate_transaction_signature() {
let mint = Mint::new(1); let mint = Mint::new(1);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let sig = Signature::default(); let sig = Signature::default();
assert!(bank.reserve_signature_with_last_id(&sig, &mint.last_id())); assert!(
assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id())); bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::DuplicateSiganture(sig))
);
} }
#[test] #[test]
@@ -570,13 +601,17 @@ mod tests {
let mint = Mint::new(1); let mint = Mint::new(1);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let sig = Signature::default(); let sig = Signature::default();
bank.reserve_signature_with_last_id(&sig, &mint.last_id()); bank.reserve_signature_with_last_id(&sig, &mint.last_id())
assert!(bank.forget_signature_with_last_id(&sig, &mint.last_id())); .unwrap();
assert!(!bank.forget_signature_with_last_id(&sig, &mint.last_id())); bank.forget_signature_with_last_id(&sig, &mint.last_id());
assert!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
} }
#[test] #[test]
fn test_max_entry_ids() { fn test_reject_old_last_id() {
let mint = Mint::new(1); let mint = Mint::new(1);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let sig = Signature::default(); let sig = Signature::default();
@@ -585,7 +620,10 @@ mod tests {
bank.register_entry_id(&last_id); bank.register_entry_id(&last_id);
} }
// Assert we're no longer able to use the oldest entry ID. // Assert we're no longer able to use the oldest entry ID.
assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id())); assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::LastIdNotFound(mint.last_id()))
);
} }
#[test] #[test]
@@ -593,15 +631,34 @@ mod tests {
let mint = Mint::new(2); let mint = Mint::new(2);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id()); let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id()); let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1]; let txs = vec![tx0, tx1];
let results = bank.process_verified_transactions(trs); let results = bank.process_transactions(txs);
assert!(results[1].is_err()); assert!(results[1].is_err());
// Assert bad transactions aren't counted. // Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1); assert_eq!(bank.transaction_count(), 1);
} }
#[test]
fn test_process_empty_entry_is_registered() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
let entry = next_entry(&mint.last_id(), 1, vec![]);
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
// First, ensure the TX is rejected because of the unregistered last ID
assert_eq!(
bank.process_transaction(&tx),
Err(BankError::LastIdNotFound(entry.id))
);
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
bank.process_entries(vec![entry]).unwrap();
assert!(bank.process_transaction(&tx).is_ok());
}
} }
#[cfg(all(feature = "unstable", test))] #[cfg(all(feature = "unstable", test))]
@@ -611,10 +668,11 @@ mod bench {
use bank::*; use bank::*;
use bincode::serialize; use bincode::serialize;
use hash::hash; use hash::hash;
use rayon::prelude::*;
use signature::KeyPairUtil; use signature::KeyPairUtil;
#[bench] #[bench]
fn process_verified_event_bench(bencher: &mut Bencher) { fn bench_process_transaction(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000); let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint); let bank = Bank::new(&mint);
// Create transactions between unrelated parties. // Create transactions between unrelated parties.
@@ -623,16 +681,16 @@ mod bench {
.map(|i| { .map(|i| {
// Seed the 'from' account. // Seed the 'from' account.
let rando0 = KeyPair::new(); let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id()); let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
bank.process_verified_transaction(&tr).unwrap(); bank.process_transaction(&tx).unwrap();
// Seed the 'to' account and a cell for its signature. // Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id); bank.register_entry_id(&last_id);
let rando1 = KeyPair::new(); let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id); let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
bank.process_verified_transaction(&tr).unwrap(); bank.process_transaction(&tx).unwrap();
// Finally, return a transaction that's unique // Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id) Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
@@ -640,12 +698,12 @@ mod bench {
.collect(); .collect();
bencher.iter(|| { bencher.iter(|| {
// Since benchmarker runs this multiple times, we need to clear the signatures. // Since benchmarker runs this multiple times, we need to clear the signatures.
for sigs in bank.last_ids.read().unwrap().iter() { for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
sigs.1.write().unwrap().clear(); sigs.clear();
} }
assert!( assert!(
bank.process_verified_transactions(transactions.clone()) bank.process_transactions(transactions.clone())
.iter() .iter()
.all(|x| x.is_ok()) .all(|x| x.is_ok())
); );

View File

@@ -1,28 +1,39 @@
//! The `banking_stage` processes Event messages. //! The `banking_stage` processes Transaction messages. It is intended to be used
//! to contruct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use bank::Bank; use bank::Bank;
use bincode::deserialize; use bincode::deserialize;
use event::Event; use counter::Counter;
use packet; use packet;
use packet::SharedPackets; use packet::SharedPackets;
use rayon::prelude::*; use rayon::prelude::*;
use record_stage::Signal; use record_stage::Signal;
use result::Result; use result::Result;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle}; use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration; use std::time::Duration;
use std::time::Instant; use std::time::Instant;
use timing; use timing;
use transaction::Transaction;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage { pub struct BankingStage {
/// Handle to the stage's thread.
pub thread_hdl: JoinHandle<()>, pub thread_hdl: JoinHandle<()>,
/// Output receiver for the following stage.
pub signal_receiver: Receiver<Signal>, pub signal_receiver: Receiver<Signal>,
} }
impl BankingStage { impl BankingStage {
/// Create the stage using `bank`. Exit when either `exit` is set or
/// when `verified_receiver` or the stage's output receiver is dropped.
/// Discard input packets using `packet_recycler` to minimize memory
/// allocations in a previous stage such as the `fetch_stage`.
pub fn new( pub fn new(
bank: Arc<Bank>, bank: Arc<Bank>,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
@@ -30,26 +41,31 @@ impl BankingStage {
packet_recycler: packet::PacketRecycler, packet_recycler: packet::PacketRecycler,
) -> Self { ) -> Self {
let (signal_sender, signal_receiver) = channel(); let (signal_sender, signal_receiver) = channel();
let thread_hdl = spawn(move || loop { let thread_hdl = Builder::new()
let e = Self::process_packets( .name("solana-banking-stage".to_string())
bank.clone(), .spawn(move || loop {
&verified_receiver, let e = Self::process_packets(
&signal_sender, bank.clone(),
&packet_recycler, &verified_receiver,
); &signal_sender,
if e.is_err() { &packet_recycler,
if exit.load(Ordering::Relaxed) { );
break; if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
} }
} })
}); .unwrap();
BankingStage { BankingStage {
thread_hdl, thread_hdl,
signal_receiver, signal_receiver,
} }
} }
fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> { /// Convert the transactions from a blob of binary data to a vector of transactions and
/// an unused `SocketAddr` that could be used to send a response.
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
p.packets p.packets
.par_iter() .par_iter()
.map(|x| { .map(|x| {
@@ -60,6 +76,8 @@ impl BankingStage {
.collect() .collect()
} }
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
/// Discard packets via `packet_recycler`.
fn process_packets( fn process_packets(
bank: Arc<Bank>, bank: Arc<Bank>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>, verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
@@ -77,41 +95,44 @@ impl BankingStage {
timing::duration_as_ms(&recv_start.elapsed()), timing::duration_as_ms(&recv_start.elapsed()),
mms.len(), mms.len(),
); );
let count = mms.iter().map(|x| x.1.len()).sum();
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
let proc_start = Instant::now(); let proc_start = Instant::now();
for (msgs, vers) in mms { for (msgs, vers) in mms {
let events = Self::deserialize_events(&msgs.read().unwrap()); let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
reqs_len += events.len(); reqs_len += transactions.len();
let events = events let transactions = transactions
.into_iter() .into_iter()
.zip(vers) .zip(vers)
.filter_map(|(event, ver)| match event { .filter_map(|(tx, ver)| match tx {
None => None, None => None,
Some((event, _addr)) => if event.verify() && ver != 0 { Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
Some(event) Some(tx)
} else { } else {
None None
}, },
}) })
.collect(); .collect();
debug!("process_events"); debug!("process_transactions");
let results = bank.process_verified_events(events); let results = bank.process_transactions(transactions);
let events = results.into_iter().filter_map(|x| x.ok()).collect(); let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
signal_sender.send(Signal::Events(events))?; signal_sender.send(Signal::Transactions(transactions))?;
debug!("done process_events"); debug!("done process_transactions");
packet_recycler.recycle(msgs); packet_recycler.recycle(msgs);
} }
let total_time_s = timing::duration_as_s(&proc_start.elapsed()); let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed()); let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!( info!(
"@{:?} done processing event batches: {} time: {:?}ms reqs: {} reqs/s: {}", "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(), timing::timestamp(),
mms_len, mms_len,
total_time_ms, total_time_ms,
reqs_len, reqs_len,
(reqs_len as f32) / (total_time_s) (reqs_len as f32) / (total_time_s)
); );
inc_counter!(COUNTER, count, proc_start);
Ok(()) Ok(())
} }
} }
@@ -120,7 +141,6 @@ impl BankingStage {
//use bank::Bank; //use bank::Bank;
//use entry::Entry; //use entry::Entry;
//use event::Event;
//use hash::Hash; //use hash::Hash;
//use record_stage::RecordStage; //use record_stage::RecordStage;
//use record_stage::Signal; //use record_stage::Signal;
@@ -128,18 +148,17 @@ impl BankingStage {
//use std::sync::mpsc::{channel, Sender}; //use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex}; //use std::sync::{Arc, Mutex};
//use std::time::Duration; //use std::time::Duration;
//use transaction::Transaction;
// //
//#[cfg(test)] //#[cfg(test)]
//mod tests { //mod tests {
// use bank::Bank; // use bank::Bank;
// use event::Event;
// use event_processor::EventProcessor;
// use mint::Mint; // use mint::Mint;
// use signature::{KeyPair, KeyPairUtil}; // use signature::{KeyPair, KeyPairUtil};
// use transaction::Transaction; // use transaction::Transaction;
// //
// #[test] // #[test]
// // TODO: Move this test banking_stage. Calling process_events() directly // // TODO: Move this test banking_stage. Calling process_transactions() directly
// // defeats the purpose of this test. // // defeats the purpose of this test.
// fn test_banking_sequential_consistency() { // fn test_banking_sequential_consistency() {
// // In this attack we'll demonstrate that a verifier can interpret the ledger // // In this attack we'll demonstrate that a verifier can interpret the ledger
@@ -147,18 +166,18 @@ impl BankingStage {
// // Entry OR if the verifier tries to parallelize across multiple Entries. // // Entry OR if the verifier tries to parallelize across multiple Entries.
// let mint = Mint::new(2); // let mint = Mint::new(2);
// let bank = Bank::new(&mint); // let bank = Bank::new(&mint);
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None); // let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
// //
// // Process a batch that includes a transaction that receives two tokens. // // Process a batch that includes a transaction that receives two tokens.
// let alice = KeyPair::new(); // let alice = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id()); // let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
// let events = vec![Event::Transaction(tr)]; // let transactions = vec![tx];
// let entry0 = event_processor.process_events(events).unwrap(); // let entry0 = banking_stage.process_transactions(transactions).unwrap();
// //
// // Process a second batch that spends one of those tokens. // // Process a second batch that spends one of those tokens.
// let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id()); // let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
// let events = vec![Event::Transaction(tr)]; // let transactions = vec![tx];
// let entry1 = event_processor.process_events(events).unwrap(); // let entry1 = banking_stage.process_transactions(transactions).unwrap();
// //
// // Collect the ledger and feed it to a new bank. // // Collect the ledger and feed it to a new bank.
// let entries = vec![entry0, entry1]; // let entries = vec![entry0, entry1];
@@ -170,7 +189,7 @@ impl BankingStage {
// for entry in entries { // for entry in entries {
// assert!( // assert!(
// bank // bank
// .process_verified_events(entry.events) // .process_transactions(entry.transactions)
// .into_iter() // .into_iter()
// .all(|x| x.is_ok()) // .all(|x| x.is_ok())
// ); // );
@@ -185,7 +204,6 @@ impl BankingStage {
// use self::test::Bencher; // use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS}; // use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize; // use bincode::serialize;
// use event_processor::*;
// use hash::hash; // use hash::hash;
// use mint::Mint; // use mint::Mint;
// use rayon::prelude::*; // use rayon::prelude::*;
@@ -195,7 +213,7 @@ impl BankingStage {
// use transaction::Transaction; // use transaction::Transaction;
// //
// #[bench] // #[bench]
// fn process_events_bench(_bencher: &mut Bencher) { // fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000); // let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint); // let bank = Bank::new(&mint);
// // Create transactions between unrelated parties. // // Create transactions between unrelated parties.
@@ -217,36 +235,31 @@ impl BankingStage {
// //
// // Seed the 'from' account. // // Seed the 'from' account.
// let rando0 = KeyPair::new(); // let rando0 = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id); // let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_verified_transaction(&tr).unwrap(); // bank.process_transaction(&tx).unwrap();
// //
// let rando1 = KeyPair::new(); // let rando1 = KeyPair::new();
// let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id); // let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_verified_transaction(&tr).unwrap(); // bank.process_transaction(&tx).unwrap();
// //
// // Finally, return a transaction that's unique // // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id) // Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// }) // })
// .collect(); // .collect();
// //
// let events: Vec<_> = transactions // let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
// .into_iter()
// .map(|tr| Event::Transaction(tr))
// .collect();
//
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
// //
// let now = Instant::now(); // let now = Instant::now();
// assert!(event_processor.process_events(events).is_ok()); // assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed(); // let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0; // let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec; // let tps = txs as f64 / sec;
// //
// // Ensure that all transactions were successfully logged. // // Ensure that all transactions were successfully logged.
// drop(event_processor.historian_input); // drop(banking_stage.historian_input);
// let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect(); // let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1); // assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].events.len(), txs as usize); // assert_eq!(entries[0].transactions.len(), txs as usize);
// //
// println!("{} tps", tps); // println!("{} tps", tps);
// } // }
@@ -258,29 +271,141 @@ mod bench {
use self::test::Bencher; use self::test::Bencher;
use bank::*; use bank::*;
use banking_stage::BankingStage; use banking_stage::BankingStage;
use event::Event; use logger;
use mint::Mint; use mint::Mint;
use packet::{to_packets, PacketRecycler}; use packet::{to_packets_chunked, PacketRecycler};
use rayon::prelude::*;
use record_stage::Signal; use record_stage::Signal;
use signature::{KeyPair, KeyPairUtil}; use signature::{KeyPair, KeyPairUtil};
use std::iter; use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc; use std::sync::Arc;
use std::sync::mpsc::channel; use transaction::Transaction;
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
let mut total = 0;
for _ in 0..batches {
let signal = receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
total += transactions.len();
} else {
assert!(false);
}
}
assert_eq!(total, ref_tx_count);
}
#[bench] #[bench]
fn stage_bench(bencher: &mut Bencher) { fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let tx = 100_usize; logger::setup();
let mint = Mint::new(1_000_000_000); let tx = 30_000_usize;
let pubkey = KeyPair::new().pubkey(); let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let num_dst_accounts = 8 * 1024;
let num_src_accounts = 8 * 1024;
let events: Vec<_> = (0..tx) let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
.map(|i| Event::new_transaction(&mint.keypair(), pubkey, i as i64, mint.last_id())) let dstkeys: Vec<_> = (0..num_dst_accounts)
.map(|_| KeyPair::new().pubkey())
.collect();
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
let transactions: Vec<_> = (0..tx)
.map(|i| {
Transaction::new(
&srckeys[i % num_src_accounts],
dstkeys[i % num_dst_accounts],
i as i64,
mint.last_id(),
)
})
.collect();
info!("created transactions");
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let setup_transactions: Vec<_> = (0..num_src_accounts)
.map(|i| {
Transaction::new(
&mint.keypair(),
srckeys[i].pubkey(),
mint_total / num_src_accounts as i64,
mint.last_id(),
)
})
.collect();
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified_setup.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified.len(), &signal_receiver, tx);
});
}
#[bench]
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
logger::setup();
let tx = 20_000_usize;
let mint = Mint::new(1_000_000_000_000);
let mut pubkeys = Vec::new();
let num_keys = 8;
for _ in 0..num_keys {
pubkeys.push(KeyPair::new().pubkey());
}
let transactions: Vec<_> = (0..tx)
.into_par_iter()
.map(|i| {
Transaction::new(
&mint.keypair(),
pubkeys[i % num_keys],
i as i64,
mint.last_id(),
)
})
.collect(); .collect();
let (verified_sender, verified_receiver) = channel(); let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel(); let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default(); let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets(&packet_recycler, events) let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
.into_iter() .into_iter()
.map(|x| { .map(|x| {
let len = (*x).read().unwrap().packets.len(); let len = (*x).read().unwrap().packets.len();
@@ -297,12 +422,9 @@ mod bench {
&signal_sender, &signal_sender,
&packet_recycler, &packet_recycler,
).unwrap(); ).unwrap();
let signal = signal_receiver.recv().unwrap();
if let Signal::Events(ref events) = signal { check_txs(verified.len(), &signal_receiver, tx);
assert_eq!(events.len(), tx);
} else {
assert!(false);
}
}); });
} }
} }

View File

@@ -1,23 +1,32 @@
extern crate futures; extern crate atty;
extern crate env_logger;
extern crate getopts; extern crate getopts;
extern crate isatty;
extern crate rayon; extern crate rayon;
extern crate serde_json; extern crate serde_json;
extern crate solana; extern crate solana;
use futures::Future; use atty::{is, Stream};
use getopts::Options; use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*; use rayon::prelude::*;
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
use solana::hash::Hash;
use solana::mint::MintDemo; use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil}; use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient; use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction; use solana::transaction::Transaction;
use std::env; use std::env;
use std::fs::File;
use std::io::{stdin, Read}; use std::io::{stdin, Read};
use std::net::{SocketAddr, UdpSocket}; use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit; use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep; use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration; use std::time::Duration;
use std::time::Instant; use std::time::Instant;
@@ -30,15 +39,125 @@ fn print_usage(program: &str, opts: Options) {
print!("{}", opts.usage(&brief)); print!("{}", opts.usage(&brief));
} }
fn sample_tx_count(
thread_addr: Arc<RwLock<SocketAddr>>,
exit: Arc<AtomicBool>,
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
first_count: u64,
v: ReplicatedData,
sample_period: u64,
) {
let mut client = mk_client(&thread_addr, &v);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0;
let mut total;
loop {
let tx_count = client.transaction_count();
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("{}: Transactions processed {}", v.transactions_addr, sample);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
if tps > max_tps {
max_tps = tps;
}
println!("{}: {:.2} tps", v.transactions_addr, tps);
total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
v.transactions_addr, total
);
sleep(Duration::new(sample_period, 0));
if exit.load(Ordering::Relaxed) {
println!("exiting validator thread");
maxes.write().unwrap().push((max_tps, total));
break;
}
}
}
fn generate_and_send_txs(
client: &mut ThinClient,
keypair_pairs: &Vec<&[KeyPair]>,
leader: &ReplicatedData,
txs: i64,
last_id: &mut Hash,
threads: usize,
client_addr: Arc<RwLock<SocketAddr>>,
) {
println!(
"Signing transactions... {} {}",
keypair_pairs.len(),
keypair_pairs[0].len()
);
let signing_start = Instant::now();
let transactions: Vec<_> = keypair_pairs
.par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
.collect();
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
bsps * 1_000_000_f64,
nsps / 1_000_f64,
duration_as_ms(&duration),
);
println!("Transfering {} transactions in {} batches", txs, threads);
let transfer_start = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|txs| {
println!(
"Transferring 1 unit {} times... to {:?}",
txs.len(),
leader.transactions_addr
);
let client = mk_client(&client_addr, &leader);
for tx in txs {
client.transfer_signed(tx.clone()).unwrap();
}
});
println!(
"Transfer done. {:?} ms {} tps",
duration_as_ms(&transfer_start.elapsed()),
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
);
*last_id = client.get_last_id();
}
fn main() { fn main() {
env_logger::init();
let mut threads = 4usize; let mut threads = 4usize;
let mut server_addr: String = "127.0.0.1:8000".to_string(); let mut num_nodes = 1usize;
let mut requests_addr: String = "127.0.0.1:8010".to_string(); let mut time_sec = 60;
let mut opts = Options::new(); let mut opts = Options::new();
opts.optopt("s", "", "server address", "host:port"); opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client address", "host:port"); opts.optopt("c", "", "client port", "port");
opts.optopt("t", "", "number of threads", &format!("{}", threads)); opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optopt(
"s",
"",
"send transactions for this many seconds",
&format!("{}", time_sec),
);
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help"); opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect(); let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) { let matches = match opts.parse(&args[1..]) {
@@ -54,21 +173,44 @@ fn main() {
print_usage(&program, opts); print_usage(&program, opts);
return; return;
} }
if matches.opt_present("s") { let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
server_addr = matches.opt_str("s").unwrap();
}
if matches.opt_present("c") { if matches.opt_present("c") {
requests_addr = matches.opt_str("c").unwrap(); let port = matches.opt_str("c").unwrap().parse().unwrap();
addr.set_port(port);
} }
if matches.opt_present("d") {
addr.set_ip(get_ip_addr().unwrap());
}
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
if matches.opt_present("t") { if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer"); threads = matches.opt_str("t").unwrap().parse().expect("integer");
} }
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}
if matches.opt_present("s") {
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
}
let mut events_addr: SocketAddr = requests_addr.parse().unwrap(); let leader = if matches.opt_present("l") {
let requests_port = events_addr.port(); read_leader(matches.opt_str("l").unwrap())
events_addr.set_port(requests_port + 1); } else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
if stdin_isatty() { let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes,
&mut c_threads,
);
assert_eq!(validators.len(), num_nodes);
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file"); eprintln!("nothing found on stdin, expected a json file");
exit(1); exit(1);
} }
@@ -85,91 +227,173 @@ fn main() {
eprintln!("failed to parse json: {}", e); eprintln!("failed to parse json: {}", e);
exit(1); exit(1);
}); });
let mut client = mk_client(&client_addr, &leader);
println!("Binding to {}", requests_addr);
let requests_socket = UdpSocket::bind(&requests_addr).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let events_socket = UdpSocket::bind(&events_addr).unwrap();
let requests_addr: SocketAddr = server_addr.parse().unwrap();
let requests_port = requests_addr.port();
let mut events_server_addr = requests_addr.clone();
events_server_addr.set_port(requests_port + 3);
let mut client = ThinClient::new(
requests_addr,
requests_socket,
events_server_addr,
events_socket,
);
println!("Get last ID..."); println!("Get last ID...");
let last_id = client.get_last_id().wait().unwrap(); let mut last_id = client.get_last_id();
println!("Got last ID {:?}", last_id); println!("Got last ID {:?}", last_id);
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes()); let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
println!("Creating keypairs..."); println!("Creating keypairs...");
let txs = demo.num_accounts / 2; let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts); let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect(); let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions..."); let first_count = client.transaction_count();
let now = Instant::now(); println!("initial count {}", first_count);
let transactions: Vec<_> = keypair_pairs
.into_par_iter() println!("Sampling tps every second...",);
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
// Setup a thread per validator to sample every period
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
let v_threads: Vec<_> = validators
.into_iter()
.map(|v| {
let exit = signal.clone();
let thread_addr = client_addr.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
})
.unwrap()
})
.collect(); .collect();
let mut duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); // generate and send transactions for the specified duration
let bsps = txs as f64 / ns as f64; let time = Duration::new(time_sec, 0);
let nsps = ns as f64 / txs as f64; let now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&keypair_pairs,
&leader,
txs,
&mut last_id,
threads,
client_addr.clone(),
);
}
// Stop the sampling threads so it will collect the stats
signal.store(true, Ordering::Relaxed);
for t in v_threads {
t.join().unwrap();
}
// Compute/report stats
let mut max_of_maxes = 0.0;
let mut total_txs = 0;
for (max, txs) in maxes.read().unwrap().iter() {
if *max > max_of_maxes {
max_of_maxes = *max;
}
total_txs += *txs;
}
println!( println!(
"Done. {} thousand signatures per second, {}us per signature", "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
bsps * 1_000_000_f64, max_of_maxes,
nsps / 1_000_f64 sample_period,
total_txs,
maxes.read().unwrap().len()
); );
let initial_tx_count = client.transaction_count(); // join the crdt client threads
println!("initial count {}", initial_tx_count); for t in c_threads {
t.join().unwrap();
println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times... to", trs.len());
let requests_addr: SocketAddr = server_addr.parse().unwrap();
let mut requests_cb_addr = requests_addr.clone();
requests_cb_addr.set_port(0);
let requests_socket = UdpSocket::bind(requests_cb_addr).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let mut events_addr: SocketAddr = requests_addr.clone();
events_addr.set_port(0);
let events_socket = UdpSocket::bind(&events_addr).unwrap();
let client = ThinClient::new(
requests_addr,
requests_socket,
events_server_addr,
events_socket,
);
for tr in trs {
client.transfer_signed(tr.clone()).unwrap();
}
});
println!("Waiting for transactions to complete...",);
let mut tx_count;
for _ in 0..10 {
tx_count = client.transaction_count();
duration = now.elapsed();
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("{} tps", tps);
sleep(Duration::new(1, 0));
} }
} }
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
requests_socket,
r.transactions_addr,
transactions_socket,
)
}
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
daddr,
daddr,
daddr,
daddr,
);
(node, gossip)
}
fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
spy_ref.clone(),
window.clone(),
spy_gossip,
gossip_send_socket,
exit.clone(),
).expect("DataReplicator::new");
let mut rv = vec![];
//wait for the network to converge, 30 seconds should be plenty
for _ in 0..30 {
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.cloned()
.collect();
if v.len() >= num_nodes {
println!("CONVERGED!");
rv.extend(v.into_iter());
break;
}
sleep(Duration::new(1, 0));
}
threads.extend(ncp.thread_hdls.into_iter());
rv
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
}

168
src/bin/drone.rs Normal file
View File

@@ -0,0 +1,168 @@
extern crate atty;
extern crate bincode;
extern crate env_logger;
extern crate getopts;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;
use atty::{is, Stream as atty_stream};
use bincode::deserialize;
use getopts::Options;
use solana::crdt::{get_ip_addr, ReplicatedData};
use solana::drone::{Drone, DroneRequest};
use solana::mint::MintDemo;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optopt(
"t",
"",
"time",
"time slice over which to limit token requests to drone",
);
opts.optopt("c", "", "cap", "request limit for time slice");
opts.optopt("l", "", "leader", "leader.json");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let time_slice: Option<u64>;
if matches.opt_present("t") {
time_slice = matches
.opt_str("t")
.expect("unexpected string from input")
.parse()
.ok();
} else {
time_slice = None;
}
let request_cap: Option<u64>;
if matches.opt_present("c") {
request_cap = matches
.opt_str("c")
.expect("unexpected string from input")
.parse()
.ok();
} else {
request_cap = None;
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
if is(atty_stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mint_keypair = demo.mint.keypair();
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
drone_addr.set_ip(get_ip_addr().unwrap());
let drone = Arc::new(Mutex::new(Drone::new(
mint_keypair,
drone_addr,
leader.transactions_addr,
leader.requests_addr,
time_slice,
request_cap,
)));
let drone1 = drone.clone();
thread::spawn(move || loop {
let time = drone1.lock().unwrap().time_slice;
thread::sleep(time);
drone1.lock().unwrap().clear_request_count();
});
let socket = TcpListener::bind(&drone_addr).unwrap();
println!("Drone started. Listening on: {}", drone_addr);
let done = socket
.incoming()
.map_err(|e| println!("failed to accept socket; error = {:?}", e))
.for_each(move |socket| {
let drone2 = drone.clone();
// let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
let framed = BytesCodec::new().framed(socket);
let (_writer, reader) = framed.split();
let processor = reader
.for_each(move |bytes| {
let req: DroneRequest =
deserialize(&bytes).expect("deserialize packet in drone");
println!("Airdrop requested...");
// let res = drone2.lock().unwrap().check_rate_limit(client_ip);
let res1 = drone2.lock().unwrap().send_airdrop(req);
match res1 {
Ok(_) => println!("Airdrop sent!"),
Err(_) => println!("Request limit reached for this time slice"),
}
Ok(())
})
.and_then(|()| {
println!("Socket received FIN packet and closed connection");
Ok(())
})
.or_else(|err| {
println!("Socket closed with error: {:?}", err);
Err(err)
})
.then(|result| {
println!("Socket closed with result: {:?}", result);
Ok(())
});
tokio::spawn(processor)
});
tokio::run(done);
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
}

View File

@@ -0,0 +1,52 @@
extern crate getopts;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use std::env;
use std::io;
use std::net::SocketAddr;
use std::process::exit;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: {} [options]\n\n", program);
brief += " Create a solana fullnode config file\n";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut opts = Options::new();
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("d") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
// we need all the receiving sockets to be bound within the expected
// port range that we open on aws
let repl_data = ReplicatedData::new_leader(&bind_addr);
let stdout = io::stdout();
serde_json::to_writer(stdout, &repl_data).expect("serialize");
}

180
src/bin/fullnode.rs Normal file
View File

@@ -0,0 +1,180 @@
extern crate atty;
extern crate env_logger;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use getopts::Options;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, Write};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optflag("h", "help", "print help");
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
opts.optopt(
"t",
"",
"testnet; connect to the network at this gossip entry point",
"HOST:PORT",
);
opts.optopt(
"o",
"",
"output log to FILE, defaults to stdout (ignored by validators)",
"FILE",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let stdin = stdin();
let mut entries = stdin.lock().lines().map(|line| {
let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
entry
});
eprintln!("done parsing...");
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().expect("invalid ledger: empty");
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries
.next()
.expect("invalid ledger: need at least 2 entries");
let tx = &entry1.transactions[0];
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
contract.plan.final_payment()
} else {
None
}.expect("invalid ledger, needs to start with a contract");
eprintln!("creating bank...");
let bank = Bank::new_from_deposit(&deposit);
bank.register_entry_id(&entry0.id);
bank.register_entry_id(&entry1.id);
eprintln!("processing entries...");
bank.process_entries(entries).expect("process_entries");
eprintln!("creating networking stack...");
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
if matches.opt_present("l") {
let path = matches.opt_str("l").unwrap();
if let Ok(file) = File::open(path.clone()) {
if let Ok(data) = serde_json::from_reader(file) {
repl_data = data;
} else {
eprintln!("failed to parse {}", path);
exit(1);
}
} else {
eprintln!("failed to read {}", path);
exit(1);
}
}
let exit = Arc::new(AtomicBool::new(false));
let threads = if matches.opt_present("t") {
let testnet_address_string = matches.opt_str("t").unwrap();
eprintln!(
"starting validator... {} connecting to {}",
repl_data.requests_addr, testnet_address_string
);
let testnet_addr = testnet_address_string.parse().unwrap();
let newtwork_entry_point = ReplicatedData::new_entry_point(testnet_addr);
let s = Server::new_validator(
bank,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
UdpSocket::bind(repl_data.repair_addr).unwrap(),
newtwork_entry_point,
exit.clone(),
);
s.thread_hdls
} else {
eprintln!("starting leader... {}", repl_data.requests_addr);
repl_data.current_leader_id = repl_data.id.clone();
let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
let path = matches.opt_str("o").unwrap();
Box::new(
File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
)
} else {
Box::new(stdout())
};
let server = Server::new_leader(
bank,
//Some(Duration::from_millis(1000)),
None,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind(repl_data.transactions_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
exit.clone(),
outfile,
);
server.thread_hdls
};
eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
for t in threads {
t.join().expect("join");
}
}

View File

@@ -1,21 +1,22 @@
extern crate isatty; extern crate atty;
extern crate rayon; extern crate rayon;
extern crate serde_json; extern crate serde_json;
extern crate solana; extern crate solana;
use isatty::stdin_isatty; use atty::{is, Stream};
use rayon::prelude::*; use rayon::prelude::*;
use solana::bank::MAX_ENTRY_IDS; use solana::bank::MAX_ENTRY_IDS;
use solana::entry::{next_entry, Entry}; use solana::entry::next_entry;
use solana::event::Event; use solana::ledger::next_entries;
use solana::mint::MintDemo; use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil}; use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read}; use std::io::{stdin, Read};
use std::process::exit; use std::process::exit;
// Generate a ledger with lots and lots of accounts. // Generate a ledger with lots and lots of accounts.
fn main() { fn main() {
if stdin_isatty() { if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file"); eprintln!("nothing found on stdin, expected a json file");
exit(1); exit(1);
} }
@@ -32,42 +33,50 @@ fn main() {
exit(1); exit(1);
}); });
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes()); let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
let num_accounts = demo.num_accounts; let num_accounts = demo.num_accounts;
let tokens_per_user = 1_000; let tokens_per_user = 500;
let keypairs = rnd.gen_n_keypairs(num_accounts); let keypairs = rnd.gen_n_keypairs(num_accounts);
let mint_keypair = demo.mint.keypair(); let mint_keypair = demo.mint.keypair();
let last_id = demo.mint.last_id(); let last_id = demo.mint.last_id();
eprintln!("Signing {} transactions...", num_accounts);
let events: Vec<_> = keypairs
.into_par_iter()
.map(|rando| {
let last_id = demo.mint.last_id();
Event::new_transaction(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();
for entry in demo.mint.create_entries() { for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap()); println!("{}", serde_json::to_string(&entry).unwrap());
} }
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = Entry::new(&last_id, 0, events);
println!("{}", serde_json::to_string(&entry).unwrap());
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS); eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id. // Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id; let mut last_id = last_id;
let mut last_ids = vec![];
for _ in 0..MAX_ENTRY_IDS { for _ in 0..MAX_ENTRY_IDS {
let entry = next_entry(&last_id, 1, vec![]); let entry = next_entry(&last_id, 1, vec![]);
last_id = entry.id; last_id = entry.id;
last_ids.push(last_id);
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| { let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e); eprintln!("failed to serialize: {}", e);
exit(1); exit(1);
}); });
println!("{}", serialized); println!("{}", serialized);
} }
eprintln!("Creating {} transactions...", num_accounts);
let transactions: Vec<_> = keypairs
.into_par_iter()
.enumerate()
.map(|(i, rando)| {
let last_id = last_ids[i % MAX_ENTRY_IDS];
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entries = next_entries(&last_id, 0, transactions);
for entry in entries {
println!("{}", serde_json::to_string(&entry).unwrap());
}
} }

View File

@@ -1,16 +1,16 @@
//! A command-line executable for generating the chain's genesis block. //! A command-line executable for generating the chain's genesis block.
extern crate isatty; extern crate atty;
extern crate serde_json; extern crate serde_json;
extern crate solana; extern crate solana;
use isatty::stdin_isatty; use atty::{is, Stream};
use solana::mint::Mint; use solana::mint::Mint;
use std::io::{stdin, Read}; use std::io::{stdin, Read};
use std::process::exit; use std::process::exit;
fn main() { fn main() {
if stdin_isatty() { if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file"); eprintln!("nothing found on stdin, expected a json file");
exit(1); exit(1);
} }

View File

@@ -1,13 +1,21 @@
extern crate atty;
extern crate rayon; extern crate rayon;
extern crate ring; extern crate ring;
extern crate serde_json; extern crate serde_json;
extern crate solana; extern crate solana;
use atty::{is, Stream};
use solana::mint::{Mint, MintDemo}; use solana::mint::{Mint, MintDemo};
use std::io; use std::io;
use std::process::exit;
fn main() { fn main() {
let mut input_text = String::new(); let mut input_text = String::new();
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap(); io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim(); let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap(); let tokens = trimmed.parse::<i64>().unwrap();

View File

@@ -1,15 +1,15 @@
extern crate isatty; extern crate atty;
extern crate serde_json; extern crate serde_json;
extern crate solana; extern crate solana;
use isatty::stdin_isatty; use atty::{is, Stream};
use solana::mint::Mint; use solana::mint::Mint;
use std::io; use std::io;
use std::process::exit; use std::process::exit;
fn main() { fn main() {
let mut input_text = String::new(); let mut input_text = String::new();
if stdin_isatty() { if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number"); eprintln!("nothing found on stdin, expected a token number");
exit(1); exit(1);
} }

View File

@@ -1,261 +0,0 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
/// Print usage/help text for the client demo to stdout.
fn print_usage(program: &str, opts: Options) {
    let brief = [
        format!("Usage: cat <mint.json> | {} [options]\n\n", program),
        " Solana client demo creates a number of transactions and\n".to_string(),
        " sends them to a target node.".to_string(),
        " Takes json formatted mint file to stdin.".to_string(),
    ].concat();
    print!("{}", opts.usage(&brief));
}
/// Client demo entry point: converge on the gossip network, read a mint
/// from stdin, sign a batch of 1-token transfers, blast them at the leader
/// over several threads, then sample validators' transaction counts to
/// report TPS.
fn main() {
    let mut threads = 4usize;
    let mut num_nodes = 10usize;
    let mut leader = "leader.json".to_string();
    let mut opts = Options::new();
    opts.optopt("l", "", "leader", "leader.json");
    opts.optopt("c", "", "client address", "host:port");
    opts.optopt("t", "", "number of threads", &format!("{}", threads));
    opts.optopt(
        "n",
        "",
        "number of nodes to converge to",
        &format!("{}", num_nodes),
    );
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    if matches.opt_present("l") {
        leader = matches.opt_str("l").unwrap();
    }
    // Base local address; mk_client/spy_node advance its port as they bind.
    let client_addr: Arc<RwLock<SocketAddr>> = if matches.opt_present("c") {
        let addr = matches.opt_str("c").unwrap().parse().unwrap();
        Arc::new(RwLock::new(addr))
    } else {
        Arc::new(RwLock::new("127.0.0.1:8010".parse().unwrap()))
    };
    if matches.opt_present("t") {
        threads = matches.opt_str("t").unwrap().parse().expect("integer");
    }
    if matches.opt_present("n") {
        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
    }
    let leader: ReplicatedData = read_leader(leader);
    let signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    // Wait for num_nodes + 2 nodes — presumably leader and spy included in
    // the count; TODO confirm against Crdt::convergence.
    let validators = converge(
        &client_addr,
        &leader,
        signal.clone(),
        num_nodes + 2,
        &mut c_threads,
    );
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }
    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }
    println!("Parsing stdin...");
    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });
    let mut client = mk_client(&client_addr, &leader);
    println!("Get last ID...");
    let last_id = client.get_last_id().wait().unwrap();
    println!("Got last ID {:?}", last_id);
    // Deterministic keypairs seeded from the mint's public key.
    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
    println!("Creating keypairs...");
    // Keypairs are consumed two at a time; each pair yields one transfer.
    let txs = demo.num_accounts / 2;
    let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
    println!("Signing transactions...");
    let now = Instant::now();
    let transactions: Vec<_> = keypair_pairs
        .into_par_iter()
        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
        .collect();
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signatures per second, {}us per signature",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64
    );
    let first_count = client.transaction_count();
    println!("initial count {}", first_count);
    println!("Transfering {} transactions in {} batches", txs, threads);
    // Split the signed transactions across `threads` parallel senders.
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks.into_par_iter().for_each(|trs| {
        println!("Transferring 1 unit {} times... to", trs.len());
        let client = mk_client(&client_addr, &leader);
        for tr in trs {
            client.transfer_signed(tr.clone()).unwrap();
        }
    });
    println!("Sampling tps every second...",);
    // Poll each validator's transaction count once a second; stop when all
    // transactions landed or progress stalls (20+ samples with no change).
    validators.into_par_iter().for_each(|val| {
        let mut client = mk_client(&client_addr, &val);
        let mut now = Instant::now();
        let mut initial_tx_count = client.transaction_count();
        for i in 0..100 {
            let tx_count = client.transaction_count();
            let duration = now.elapsed();
            now = Instant::now();
            let sample = tx_count - initial_tx_count;
            initial_tx_count = tx_count;
            println!("{}: Transactions processed {}", val.events_addr, sample);
            let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
            let tps = (sample * 1_000_000_000) as f64 / ns as f64;
            println!("{}: {} tps", val.events_addr, tps);
            let total = tx_count - first_count;
            println!(
                "{}: Total Transactions processed {}",
                val.events_addr, total
            );
            if total == transactions.len() as u64 {
                break;
            }
            if i > 20 && sample == 0 {
                break;
            }
            sleep(Duration::new(1, 0));
        }
    });
    // Tell the spy threads started by converge() to exit, then join them.
    signal.store(true, Ordering::Relaxed);
    for t in c_threads {
        t.join().unwrap();
    }
}
/// Build a `ThinClient` talking to node `r`, binding two local UDP sockets
/// at the shared `locked_addr`. The stored port is advanced by 2 so the
/// next caller binds fresh ports. Note the write lock is held for the
/// whole call, serializing concurrent client creation.
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
    let mut addr = locked_addr.write().unwrap();
    let port = addr.port();
    // Events socket at the current port, requests socket at port + 1.
    let events_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 2);
    ThinClient::new(
        r.requests_addr,
        requests_socket,
        r.events_addr,
        events_socket,
    )
}
/// Create a gossip-only "spy" node identity bound at the shared client
/// address, advancing the stored port by 1 for the next binder. Returns
/// the node's metadata and its bound gossip socket.
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
    let mut addr = client_addr.write().unwrap();
    let port = addr.port();
    let gossip = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    // 0.0.0.0:0 appears to be the placeholder for services this spy does
    // not offer (converge() filters nodes by requests_addr != daddr).
    let daddr = "0.0.0.0:0".parse().unwrap();
    let pubkey = KeyPair::new().pubkey();
    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
    (node, gossip)
}
/// Spin up a spy node, join the gossip network via `leader`, and wait up to
/// 30 seconds for at least `num_nodes` nodes to converge.
///
/// Returns the discovered nodes that expose a requests address. The spy's
/// listen/gossip threads are pushed onto `threads`; set `exit` and join
/// them to shut down.
fn converge(
    client_addr: &Arc<RwLock<SocketAddr>>,
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    //lets spy on the network
    let daddr = "0.0.0.0:0".parse().unwrap();
    let (spy, spy_gossip) = spy_node(client_addr);
    let mut spy_crdt = Crdt::new(spy);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    //wait for the network to converge
    for _ in 0..30 {
        let min = spy_ref.read().unwrap().convergence();
        if num_nodes as u64 == min {
            println!("converged!");
            break;
        }
        sleep(Duration::new(1, 0));
    }
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    // Snapshot the nodes with a routable requests address. The collected Vec
    // is returned directly — the original built `v` and returned `v.clone()`,
    // an unnecessary full copy; `.values().into_iter()` and
    // `.map(|x| x.clone())` were likewise redundant (`values()` already
    // iterates, `.cloned()` is the idiomatic form).
    spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .filter(|x| x.requests_addr != daddr)
        .cloned()
        .collect()
}
/// Load the leader's `ReplicatedData` from the JSON file at `path`.
/// Panics if the file cannot be opened or parsed.
fn read_leader(path: String) -> ReplicatedData {
    let leader_file = File::open(path).expect("file");
    let leader: ReplicatedData = serde_json::from_reader(leader_file).expect("parse");
    leader
}

View File

@@ -1,237 +0,0 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate pnet;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use pnet::datalink;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Duration;
/// Print usage/help text for the fullnode binary to stdout.
fn print_usage(program: &str, opts: Options) {
    let brief = [
        format!("Usage: cat <transaction.log> | {} [options]\n\n", program),
        " Run a Solana node to handle transactions and\n".to_string(),
        " write a new transaction log to stdout.\n".to_string(),
        " Takes existing transaction log from stdin.".to_string(),
    ].concat();
    print!("{}", opts.usage(&brief));
}
/// Fullnode entry point: parse CLI flags, replay the transaction log read
/// from stdin to rebuild the bank, then start either a leader or a
/// validator server and block until its threads exit.
fn main() {
    env_logger::init().unwrap();
    let mut opts = Options::new();
    opts.optopt("b", "", "bind", "bind to port or address");
    opts.optflag("d", "dyn", "detect network address dynamically");
    opts.optopt("s", "", "save", "save my identity to path.json");
    opts.optflag("h", "help", "print help");
    opts.optopt(
        "v",
        "",
        "validator",
        "run as replicate with path to leader.json",
    );
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    // Resolve the bind address; -d replaces the IP with a detected one.
    let bind_addr: SocketAddr = {
        let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
        if matches.opt_present("d") {
            let ip = get_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        bind_addr
    };
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a log file");
        exit(1);
    }
    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a log file");
        exit(1);
    }
    eprintln!("Initializing...");
    // Lazily deserialize one ledger entry per input line.
    let mut entries = buffer.lines().map(|line| {
        serde_json::from_str(&line).unwrap_or_else(|e| {
            eprintln!("failed to parse json: {}", e);
            exit(1);
        })
    });
    eprintln!("done parsing...");
    // The first item in the ledger is required to be an entry with zero num_hashes,
    // which implies its id can be used as the ledger's seed.
    let entry0 = entries.next().unwrap();
    // The second item in the ledger is a special transaction where the to and from
    // fields are the same. That entry should be treated as a deposit, not a
    // transfer to oneself.
    let entry1: Entry = entries.next().unwrap();
    // Irrefutable let: Event::Transaction appears to be the only variant here.
    let Event::Transaction(ref tr) = entry1.events[0];
    let deposit = if let Instruction::NewContract(contract) = &tr.instruction {
        contract.plan.final_payment()
    } else {
        None
    };
    eprintln!("creating bank...");
    let bank = Bank::new_from_deposit(&deposit.unwrap());
    bank.register_entry_id(&entry0.id);
    bank.register_entry_id(&entry1.id);
    eprintln!("processing entries...");
    // Replay the remaining ledger entries into the bank; abort on the first
    // event that fails to process.
    let mut last_id = entry1.id;
    for entry in entries {
        last_id = entry.id;
        let results = bank.process_verified_events(entry.events);
        for result in results {
            if let Err(e) = result {
                eprintln!("failed to process event {:?}", e);
                exit(1);
            }
        }
        bank.register_entry_id(&last_id);
    }
    eprintln!("creating networking stack...");
    let exit = Arc::new(AtomicBool::new(false));
    // we need all the receiving sockets to be bound within the expected
    // port range that we open on aws
    let mut repl_data = make_repl_data(&bind_addr);
    let threads = if matches.opt_present("v") {
        eprintln!("starting validator... {}", repl_data.requests_addr);
        let path = matches.opt_str("v").unwrap();
        let file = File::open(path).expect("file");
        let leader = serde_json::from_reader(file).expect("parse");
        let s = Server::new_validator(
            bank,
            repl_data.clone(),
            UdpSocket::bind(repl_data.requests_addr).unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind(repl_data.replicate_addr).unwrap(),
            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
            leader,
            exit.clone(),
        );
        s.thread_hdls
    } else {
        eprintln!("starting leader... {}", repl_data.requests_addr);
        repl_data.current_leader_id = repl_data.id.clone();
        let server = Server::new_leader(
            bank,
            last_id,
            Some(Duration::from_millis(1000)),
            repl_data.clone(),
            UdpSocket::bind(repl_data.requests_addr).unwrap(),
            UdpSocket::bind(repl_data.events_addr).unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
            exit.clone(),
            stdout(),
        );
        server.thread_hdls
    };
    // Optionally persist this node's identity for other nodes to load.
    if matches.opt_present("s") {
        let path = matches.opt_str("s").unwrap();
        let file = File::create(path).expect("file");
        serde_json::to_writer(file, &repl_data).expect("serialize");
    }
    eprintln!("Ready. Listening on {}", bind_addr);
    for t in threads {
        t.join().expect("join");
    }
}
/// Return a copy of `server_addr` with the port shifted up by `nxt`.
/// Used to assign consecutive service ports from one base address.
fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
    // SocketAddr is Copy; the original called `.clone()` needlessly.
    let mut addr = *server_addr;
    addr.set_port(server_addr.port() + nxt);
    addr
}
/// Derive a full node identity from a base bind address: the events service
/// uses the base address itself; gossip, replicate, and requests take the
/// next three consecutive ports above it.
fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
    let pubkey = KeyPair::new().pubkey();
    let gossip_addr = next_port(bind_addr, 1);
    let replicate_addr = next_port(bind_addr, 2);
    let requests_addr = next_port(bind_addr, 3);
    ReplicatedData::new(
        pubkey,
        gossip_addr,
        replicate_addr,
        requests_addr,
        *bind_addr,
    )
}
/// Interpret an optional CLI argument as either a bare port number or a
/// full socket address.
///
/// - `Some("9000")` -> `0.0.0.0:9000` (default IP, given port)
/// - `Some("1.2.3.4:7000")` -> that address verbatim
/// - `None` or unparsable input -> the default `0.0.0.0:8000`
fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
    // Fallback used whenever the argument is absent or invalid.
    let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
    if let Some(addrstr) = optstr {
        if let Ok(port) = addrstr.parse() {
            // Bare port: keep the default IP, override the port.
            // SocketAddr is Copy; the original called `.clone()` needlessly.
            let mut addr = daddr;
            addr.set_port(port);
            addr
        } else if let Ok(addr) = addrstr.parse() {
            addr
        } else {
            daddr
        }
    } else {
        daddr
    }
}
/// Return the first non-loopback, non-multicast IP address found on any
/// local network interface, or `None` if there is no such address.
fn get_ip_addr() -> Option<IpAddr> {
    datalink::interfaces()
        .into_iter()
        .flat_map(|iface| iface.ips)
        .map(|p| p.ip())
        .find(|ip| !ip.is_loopback() && !ip.is_multicast())
}
#[test]
fn test_parse_port_or_addr() {
    // A bare port keeps the default IP but overrides the port.
    let p1 = parse_port_or_addr(Some("9000".to_string()));
    assert_eq!(p1.port(), 9000);
    // A full socket address is used verbatim.
    let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
    assert_eq!(p2.port(), 7000);
    // No argument falls back to the 0.0.0.0:8000 default.
    let p3 = parse_port_or_addr(None);
    assert_eq!(p3.port(), 8000);
}

47
src/blob_fetch_stage.rs Normal file
View File

@@ -0,0 +1,47 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
/// Handles produced by the blob-fetch stage: the channel end that delivers
/// received blobs, plus the join handles of the receiver threads (one per
/// bound socket).
pub struct BlobFetchStage {
    pub blob_receiver: streamer::BlobReceiver,
    pub thread_hdls: Vec<JoinHandle<()>>,
}
impl BlobFetchStage {
    /// Build a stage that pulls blobs from a single UDP socket.
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: packet::BlobRecycler,
    ) -> Self {
        Self::new_multi_socket(vec![socket], exit, blob_recycler)
    }
    /// Build a stage that pulls blobs from several UDP sockets, all feeding
    /// the same output channel. One receiver thread is spawned per socket.
    pub fn new_multi_socket(
        sockets: Vec<UdpSocket>,
        exit: Arc<AtomicBool>,
        blob_recycler: packet::BlobRecycler,
    ) -> Self {
        let (blob_sender, blob_receiver) = channel();
        let mut thread_hdls = Vec::with_capacity(sockets.len());
        for socket in sockets {
            let handle = streamer::blob_receiver(
                exit.clone(),
                blob_recycler.clone(),
                socket,
                blob_sender.clone(),
            ).expect("blob receiver init");
            thread_hdls.push(handle);
        }
        BlobFetchStage {
            blob_receiver,
            thread_hdls,
        }
    }
}

175
src/budget.rs Normal file
View File

@@ -0,0 +1,175 @@
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the budget is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use std::mem;
/// A data type representing a `Witness` that the payment plan is waiting on.
/// A condition is checked against incoming witnesses via `is_satisfied`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
    /// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
    Timestamp(DateTime<Utc>),
    /// Wait for a `Signature` `Witness` from `PublicKey`.
    Signature(PublicKey),
}
impl Condition {
    /// Check whether `witness` satisfies this condition: a signature witness
    /// must come from the expected key; a timestamp witness must be at or
    /// after the expected time. Mismatched kinds never satisfy.
    pub fn is_satisfied(&self, witness: &Witness) -> bool {
        if let (Condition::Signature(pubkey), Witness::Signature(from)) = (self, witness) {
            return pubkey == from;
        }
        if let (Condition::Timestamp(dt), Witness::Timestamp(last_time)) = (self, witness) {
            return dt <= last_time;
        }
        false
    }
}
/// A data type representing a payment plan.
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Budget {
    /// Make a payment.
    Pay(Payment),
    /// Make a payment after some condition.
    After(Condition, Payment),
    /// Either make a payment after one condition or a different payment after another
    /// condition, whichever condition is satisfied first.
    Or((Condition, Payment), (Condition, Payment)),
}
impl Budget {
    /// The simplest budget: pay `tokens` to `to` unconditionally.
    pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
        let payment = Payment { tokens, to };
        Budget::Pay(payment)
    }
    /// Pay `tokens` to `to` once a signature from `from` is witnessed.
    pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
        let condition = Condition::Signature(from);
        Budget::After(condition, Payment { tokens, to })
    }
    /// Pay `tokens` to `to` once the given `DateTime` has been witnessed.
    pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
        let condition = Condition::Timestamp(dt);
        Budget::After(condition, Payment { tokens, to })
    }
    /// Pay `tokens` to `to` after `dt` — unless `from` signs first, in which
    /// case the tokens go back to `from`.
    pub fn new_cancelable_future_payment(
        dt: DateTime<Utc>,
        from: PublicKey,
        tokens: i64,
        to: PublicKey,
    ) -> Self {
        let on_time = (Condition::Timestamp(dt), Payment { tokens, to });
        let on_cancel = (Condition::Signature(from), Payment { tokens, to: from });
        Budget::Or(on_time, on_cancel)
    }
}
impl PaymentPlan for Budget {
    /// Return Payment if the budget requires no additional Witnesses.
    fn final_payment(&self) -> Option<Payment> {
        match self {
            Budget::Pay(payment) => Some(payment.clone()),
            _ => None,
        }
    }
    /// Return true if the budget spends exactly `spendable_tokens`,
    /// regardless of which branch ends up being taken.
    fn verify(&self, spendable_tokens: i64) -> bool {
        match self {
            Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
            Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
        }
    }
    /// Apply a witness to the budget to see if the budget can be reduced.
    /// If so, modify the budget in-place.
    fn apply_witness(&mut self, witness: &Witness) {
        // Pick out the payment unlocked by this witness, if any. `.cloned()`
        // ends the borrow of `self`, so we may overwrite it below.
        let new_payment = match self {
            Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
            Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
            Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
            _ => None,
        }.cloned();
        if let Some(payment) = new_payment {
            // Direct assignment replaces the old budget. The original called
            // `mem::replace` and discarded its return value, which obscured
            // that this is just an overwrite.
            *self = Budget::Pay(payment);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_signature_satisfied() {
        let sig = PublicKey::default();
        assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
    }
    #[test]
    fn test_timestamp_satisfied() {
        // A timestamp condition is met by a witness at or after its time,
        // and not by an earlier one.
        let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
        assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
    }
    #[test]
    fn test_verify() {
        // Every constructor should produce a budget spending exactly 42.
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        assert!(Budget::new_payment(42, to).verify(42));
        assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
        assert!(Budget::new_future_payment(dt, 42, to).verify(42));
        assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
    }
    #[test]
    fn test_authorized_payment() {
        // A signature witness reduces After(Signature, ..) to a plain payment.
        let from = PublicKey::default();
        let to = PublicKey::default();
        let mut budget = Budget::new_authorized_payment(from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, to));
    }
    #[test]
    fn test_future_payment() {
        // A timestamp witness reduces After(Timestamp, ..) to a plain payment.
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let to = PublicKey::default();
        let mut budget = Budget::new_future_payment(dt, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));
    }
    #[test]
    fn test_cancelable_future_payment() {
        // Timestamp pays `to`; a cancel signature pays back to `from`.
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));
        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, from));
    }
}

69
src/counter.rs Normal file
View File

@@ -0,0 +1,69 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use timing;
/// A cumulative event counter with atomic fields, designed to be declared
/// as a `static` (via `create_counter!`) and bumped with `inc_counter!`.
pub struct Counter {
    pub name: &'static str,
    /// Total events recorded across all `inc` calls.
    pub counts: AtomicUsize,
    /// Total elapsed nanoseconds charged across all `inc` calls.
    pub nanos: AtomicUsize,
    /// Number of times `inc` has been called.
    pub times: AtomicUsize,
    /// A log line is emitted every `lograte` calls to `inc`.
    pub lograte: usize,
}
/// Construct a `Counter` with zeroed totals in a const-compatible way,
/// so it can initialize a `static` counter.
macro_rules! create_counter {
    ($name:expr, $lograte:expr) => {
        Counter {
            name: $name,
            counts: AtomicUsize::new(0),
            nanos: AtomicUsize::new(0),
            times: AtomicUsize::new(0),
            lograte: $lograte,
        }
    };
}
/// Record `$count` events on counter `$name`, charging the time elapsed
/// since `$start`. The call is wrapped in `unsafe` because the counter is
/// expected to be a mutable static (see the test below).
macro_rules! inc_counter {
    ($name:expr, $count:expr, $start:expr) => {
        unsafe { $name.inc($count, $start.elapsed()) };
    };
}
impl Counter {
    /// Record `events` operations that took `dur`, and every `lograte`-th
    /// call emit a JSON-formatted "COUNTER" log line with cumulative totals
    /// and the derived events-per-second rate.
    pub fn inc(&mut self, events: usize, dur: Duration) {
        let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
        // NOTE: fetch_add returns the value *before* the addition, so the
        // numbers logged below exclude this call's own contribution.
        let counts = self.counts.fetch_add(events, Ordering::Relaxed);
        let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
        let times = self.times.fetch_add(1, Ordering::Relaxed);
        // `times > 0` skips the very first call (prior value 0).
        if times % self.lograte == 0 && times > 0 {
            info!(
                "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
                self.name,
                counts,
                nanos,
                times,
                counts as f64 * 1e9 / nanos as f64,
                timing::timestamp(),
            );
        }
    }
}
#[cfg(test)]
mod tests {
    use counter::Counter;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::Instant;
    #[test]
    fn test_counter() {
        // `static mut` because inc() takes &mut self; the inc_counter!
        // macro wraps the access in `unsafe` for the same reason.
        static mut COUNTER: Counter = create_counter!("test", 100);
        let start = Instant::now();
        let count = 1;
        inc_counter!(COUNTER, count, start);
        unsafe {
            assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
            assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
            assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
            assert_eq!(COUNTER.lograte, 100);
            assert_eq!(COUNTER.name, "test");
        }
    }
}

File diff suppressed because it is too large Load Diff

312
src/drone.rs Normal file
View File

@@ -0,0 +1,312 @@
//! The `drone` module provides an object for launching a Solana Drone,
//! which is the custodian of any remaining tokens in a mint.
//! The Solana Drone builds and send airdrop transactions,
//! checking requests against a request cap for a given time time_slice
//! and (to come) an IP rate limit.
use signature::{KeyPair, PublicKey};
use std::io;
use std::io::{Error, ErrorKind};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::Duration;
use thin_client::ThinClient;
use transaction::Transaction;
/// Default rate-limit window in seconds (see `Drone::time_slice`).
pub const TIME_SLICE: u64 = 60;
/// Default maximum tokens airdropped per time slice (see `Drone::request_cap`).
pub const REQUEST_CAP: u64 = 150_000;
/// Request format accepted by the drone.
#[derive(Serialize, Deserialize, Debug)]
pub enum DroneRequest {
    /// Ask for `airdrop_request_amount` tokens to be transferred from the
    /// mint to `client_public_key`.
    GetAirdrop {
        airdrop_request_amount: u64,
        client_public_key: PublicKey,
    },
}
/// Custodian of the mint's remaining tokens: builds and sends airdrop
/// transactions, subject to a per-time-slice token cap and a (WIP) per-IP
/// rate limit.
pub struct Drone {
    mint_keypair: KeyPair,
    // IPs already served; used by the placeholder rate limiter.
    ip_cache: Vec<IpAddr>,
    // Stored but not read anywhere in this module (hence the underscore).
    _airdrop_addr: SocketAddr,
    // Target node's transaction- and request-handling addresses.
    transactions_addr: SocketAddr,
    requests_addr: SocketAddr,
    pub time_slice: Duration,
    // Max tokens that may be airdropped per time slice.
    request_cap: u64,
    // Tokens airdropped so far in the current time slice.
    pub request_current: u64,
}
impl Drone {
    /// Create a new Drone that airdrops tokens from `mint_keypair`.
    ///
    /// `time_input` is the rate-limit window in seconds (defaults to
    /// `TIME_SLICE`); `request_cap_input` is the maximum number of tokens
    /// per window (defaults to `REQUEST_CAP`).
    pub fn new(
        mint_keypair: KeyPair,
        _airdrop_addr: SocketAddr,
        transactions_addr: SocketAddr,
        requests_addr: SocketAddr,
        time_input: Option<u64>,
        request_cap_input: Option<u64>,
    ) -> Drone {
        let time_slice = match time_input {
            Some(time) => Duration::new(time, 0),
            None => Duration::new(TIME_SLICE, 0),
        };
        let request_cap = match request_cap_input {
            Some(cap) => cap,
            None => REQUEST_CAP,
        };
        Drone {
            mint_keypair,
            ip_cache: Vec::new(),
            _airdrop_addr,
            transactions_addr,
            requests_addr,
            time_slice,
            request_cap,
            request_current: 0,
        }
    }
    /// True if granting `request_amount` more tokens stays within the cap.
    pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
        (self.request_current + request_amount) <= self.request_cap
    }
    /// Reset the per-time-slice token counter.
    pub fn clear_request_count(&mut self) {
        self.request_current = 0;
    }
    /// Remember that `ip` has been served (see `check_rate_limit`).
    pub fn add_ip_to_cache(&mut self, ip: IpAddr) {
        self.ip_cache.push(ip);
    }
    /// Forget all previously served IPs.
    pub fn clear_ip_cache(&mut self) {
        self.ip_cache.clear();
    }
    /// [WIP] Placeholder rate limiter: permits exactly one drone request
    /// per IP. Returns `Err(ip)` if the IP has been seen before.
    pub fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
        if self.ip_cache.contains(&ip) {
            // Add proper error handling here
            Err(ip)
        } else {
            self.add_ip_to_cache(ip);
            Ok(ip)
        }
    }
    /// Build and send an airdrop transaction for `req`, debiting the mint.
    ///
    /// Returns the result of `transfer_signed`, or an `Other` I/O error if
    /// the request would exceed the per-time-slice cap.
    pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut client = ThinClient::new(
            self.requests_addr,
            requests_socket,
            self.transactions_addr,
            transactions_socket,
        );
        let last_id = client.get_last_id();
        // Destructure the request into the transaction and amount directly.
        // The original declared `tx`/`request_amount` uninitialized and
        // assigned them inside the match, and called `.clone()` on the
        // amount even though `u64` is `Copy`.
        let (tx, request_amount) = match req {
            DroneRequest::GetAirdrop {
                airdrop_request_amount,
                client_public_key,
            } => (
                Transaction::new(
                    &self.mint_keypair,
                    client_public_key,
                    airdrop_request_amount as i64,
                    last_id,
                ),
                airdrop_request_amount,
            ),
        };
        if self.check_request_limit(request_amount) {
            self.request_current += request_amount;
            client.transfer_signed(tx)
        } else {
            Err(Error::new(ErrorKind::Other, "token limit reached"))
        }
    }
}
#[cfg(test)]
mod tests {
    use bank::Bank;
    use crdt::{get_ip_addr, TestNode};
    use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
    use logger;
    use mint::Mint;
    use server::Server;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::thread::sleep;
    use std::time::Duration;
    use thin_client::ThinClient;
    #[test]
    fn test_check_request_limit() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        // Cap of 3 tokens for this drone.
        let mut drone = Drone::new(
            keypair,
            addr,
            transactions_addr,
            requests_addr,
            None,
            Some(3),
        );
        assert!(drone.check_request_limit(1));
        // Once the counter reaches the cap, further requests are refused.
        drone.request_current = 3;
        assert!(!drone.check_request_limit(1));
    }
    #[test]
    fn test_clear_request_count() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        drone.request_current = drone.request_current + 256;
        assert_eq!(drone.request_current, 256);
        drone.clear_request_count();
        assert_eq!(drone.request_current, 0);
    }
    #[test]
    fn test_add_ip_to_cache() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        let ip = "127.0.0.1".parse().expect("create IpAddr from string");
        assert_eq!(drone.ip_cache.len(), 0);
        drone.add_ip_to_cache(ip);
        assert_eq!(drone.ip_cache.len(), 1);
        assert!(drone.ip_cache.contains(&ip));
    }
    #[test]
    fn test_clear_ip_cache() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        let ip = "127.0.0.1".parse().expect("create IpAddr from string");
        assert_eq!(drone.ip_cache.len(), 0);
        drone.add_ip_to_cache(ip);
        assert_eq!(drone.ip_cache.len(), 1);
        drone.clear_ip_cache();
        assert_eq!(drone.ip_cache.len(), 0);
        assert!(drone.ip_cache.is_empty());
    }
    #[test]
    fn test_drone_default_init() {
        // With None for both options, the drone falls back to TIME_SLICE
        // and REQUEST_CAP.
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let time_slice: Option<u64> = None;
        let request_cap: Option<u64> = None;
        let drone = Drone::new(
            keypair,
            addr,
            transactions_addr,
            requests_addr,
            time_slice,
            request_cap,
        );
        assert_eq!(drone.time_slice, Duration::new(TIME_SLICE, 0));
        assert_eq!(drone.request_cap, REQUEST_CAP);
    }
    #[test]
    fn test_send_airdrop() {
        // End-to-end: spin up a leader, airdrop to two keys (a small batch
        // and a large one), and verify the balances via a thin client.
        const SMALL_BATCH: i64 = 50;
        const TPS_BATCH: i64 = 5_000_000;
        logger::setup();
        let leader = TestNode::new();
        let alice = Mint::new(10_000_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let carlos_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let server = Server::new_leader(
            bank,
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        // Give the server time to come up before sending requests.
        sleep(Duration::from_millis(900));
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
        addr.set_ip(get_ip_addr().expect("drone get_ip_addr"));
        // Cap sized to exactly cover both airdrops below.
        let mut drone = Drone::new(
            alice.keypair(),
            addr,
            leader.data.transactions_addr,
            leader.data.requests_addr,
            None,
            Some(5_000_050),
        );
        let bob_req = DroneRequest::GetAirdrop {
            airdrop_request_amount: 50,
            client_public_key: bob_pubkey,
        };
        let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
        assert!(bob_result > 0);
        let carlos_req = DroneRequest::GetAirdrop {
            airdrop_request_amount: 5_000_000,
            client_public_key: carlos_pubkey,
        };
        let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
        assert!(carlos_result > 0);
        let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
        let transactions_socket =
            UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.transactions_addr,
            transactions_socket,
        );
        let bob_balance = client.poll_get_balance(&bob_pubkey);
        info!("Small request balance: {:?}", bob_balance);
        assert_eq!(bob_balance.unwrap(), SMALL_BATCH);
        let carlos_balance = client.poll_get_balance(&carlos_pubkey);
        info!("TPS request balance: {:?}", carlos_balance);
        assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }
}

View File

@@ -2,88 +2,105 @@
//! unique ID that is the hash of the Entry before it, plus the hash of the //! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes` //! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created. //! represents an approximate amount of time since the last Entry was created.
use event::Event; use bincode::serialized_size;
use hash::{extend_and_hash, hash, Hash}; use hash::{extend_and_hash, hash, Hash};
use packet::BLOB_DATA_SIZE;
use rayon::prelude::*; use rayon::prelude::*;
use transaction::Transaction;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number /// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result /// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `events` /// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Events that took place shortly after `id` was generated. /// field points to Transactions that took place shortly before `id` was generated.
/// ///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you /// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases /// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally. /// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the /// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// fastest processor. Duration should therefore be estimated by assuming that the hash /// world's fastest processor at the time the entry was recorded. Or said another way, it
/// was generated by the fastest processor at the time the entry was recorded. /// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof or
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry { pub struct Entry {
/// The number of hashes since the previous Entry ID.
pub num_hashes: u64, pub num_hashes: u64,
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
pub id: Hash, pub id: Hash,
pub events: Vec<Event>,
/// An unordered list of transactions that were observed before the Entry ID was
/// generated. The may have been observed before a previous Entry ID but were
/// pushed back into this list to ensure deterministic interpretation of the ledger.
pub transactions: Vec<Transaction>,
} }
impl Entry { impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`. /// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Self { pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 }; let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events); let id = next_hash(start_hash, 0, &transactions);
Entry { let entry = Entry {
num_hashes, num_hashes,
id, id,
events, transactions,
} };
} assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Self {
let entry = Self::new(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry entry
} }
/// Creates a Entry from the number of hashes `num_hashes` since the previous event /// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Self {
let entry = Self::new(start_hash, *cur_hashes, transactions);
*start_hash = entry.id;
*cur_hashes = 0;
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
entry
}
/// Creates a Entry from the number of hashes `num_hashes` since the previous transaction
/// and that resulting `id`. /// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self { pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
Entry { Entry {
num_hashes, num_hashes,
id: *id, id: *id,
events: vec![], transactions: vec![],
} }
} }
/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times. /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
/// If the event is not a Tick, then hash that as well. /// If the transaction is not a Tick, then hash that as well.
pub fn verify(&self, start_hash: &Hash) -> bool { pub fn verify(&self, start_hash: &Hash) -> bool {
self.events.par_iter().all(|event| event.verify()) self.transactions.par_iter().all(|tx| tx.verify_plan())
&& self.id == next_hash(start_hash, self.num_hashes, &self.events) && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
} }
} }
fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) { fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
match *event { hash_data.push(0u8);
Event::Transaction(ref tr) => { hash_data.extend_from_slice(&tx.sig);
hash_data.push(0u8);
hash_data.extend_from_slice(&tr.sig);
}
}
} }
/// Creates the hash `num_hashes` after `start_hash`. If the event contains /// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and /// a signature, the final hash will be a hash of both the previous ID and
/// the signature. /// the signature. If num_hashes is zero and there's no transaction data,
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash { /// start_hash is returned.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
let mut id = *start_hash; let mut id = *start_hash;
for _ in 1..num_hashes { for _ in 1..num_hashes {
id = hash(&id); id = hash(&id);
} }
// Hash all the event data // Hash all the transaction data
let mut hash_data = vec![]; let mut hash_data = vec![];
for event in events { for tx in transactions {
add_event_data(&mut hash_data, event); add_transaction_data(&mut hash_data, tx);
} }
if !hash_data.is_empty() { if !hash_data.is_empty() {
@@ -95,12 +112,13 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
} }
} }
/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`. /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, events: Vec<Event>) -> Entry { pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
assert!(num_hashes > 0 || transactions.len() == 0);
Entry { Entry {
num_hashes, num_hashes,
id: next_hash(start_hash, num_hashes, &events), id: next_hash(start_hash, num_hashes, &transactions),
events: events, transactions,
} }
} }
@@ -109,7 +127,6 @@ mod tests {
use super::*; use super::*;
use chrono::prelude::*; use chrono::prelude::*;
use entry::Entry; use entry::Entry;
use event::Event;
use hash::hash; use hash::hash;
use signature::{KeyPair, KeyPairUtil}; use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction; use transaction::Transaction;
@@ -125,19 +142,19 @@ mod tests {
} }
#[test] #[test]
fn test_event_reorder_attack() { fn test_transaction_reorder_attack() {
let zero = Hash::default(); let zero = Hash::default();
// First, verify entries // First, verify entries
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let tr0 = Event::new_transaction(&keypair, keypair.pubkey(), 0, zero); let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tr1 = Event::new_transaction(&keypair, keypair.pubkey(), 1, zero); let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]); let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero)); assert!(e0.verify(&zero));
// Next, swap two events and ensure verification fails. // Next, swap two transactions and ensure verification fails.
e0.events[0] = tr1; // <-- attack e0.transactions[0] = tx1; // <-- attack
e0.events[1] = tr0; e0.transactions[1] = tx0;
assert!(!e0.verify(&zero)); assert!(!e0.verify(&zero));
} }
@@ -147,18 +164,14 @@ mod tests {
// First, verify entries // First, verify entries
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new_timestamp(&keypair, Utc::now(), zero)); let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let tr1 = Event::Transaction(Transaction::new_signature( let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
&keypair, let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
Default::default(),
zero,
));
let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero)); assert!(e0.verify(&zero));
// Next, swap two witness events and ensure verification fails. // Next, swap two witness transactions and ensure verification fails.
e0.events[0] = tr1; // <-- attack e0.transactions[0] = tx1; // <-- attack
e0.events[1] = tr0; e0.transactions[1] = tx0;
assert!(!e0.verify(&zero)); assert!(!e0.verify(&zero));
} }
@@ -168,5 +181,24 @@ mod tests {
let tick = next_entry(&zero, 1, vec![]); let tick = next_entry(&zero, 1, vec![]);
assert_eq!(tick.num_hashes, 1); assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero); assert_ne!(tick.id, zero);
let tick = next_entry(&zero, 0, vec![]);
assert_eq!(tick.num_hashes, 0);
assert_eq!(tick.id, zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
assert_eq!(entry0.num_hashes, 1);
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
}
#[test]
#[should_panic]
fn test_next_entry_panic() {
let zero = Hash::default();
let keypair = KeyPair::new();
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
next_entry(&zero, 0, vec![tx]);
} }
} }

View File

@@ -1,14 +1,16 @@
//! The `entry_writer` module helps implement the TPU's write stage. //! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank; use bank::Bank;
use entry::Entry; use entry::Entry;
use ledger; use ledger::Block;
use packet; use packet;
use result::Result; use result::Result;
use serde_json; use serde_json;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::io::Write;
use std::io::sink; use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver; use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Duration; use std::time::Duration;
@@ -63,7 +65,7 @@ impl<'a> EntryWriter<'a> {
let mut q = VecDeque::new(); let mut q = VecDeque::new();
let list = self.write_entries(writer, entry_receiver)?; let list = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", list.len()); trace!("New blobs? {}", list.len());
ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q); list.to_blobs(blob_recycler, &mut q);
if !q.is_empty() { if !q.is_empty() {
trace!("broadcasting {}", q.len()); trace!("broadcasting {}", q.len());
broadcast.send(q)?; broadcast.send(q)?;

View File

@@ -1,17 +1,18 @@
// Support erasure coding // Support erasure coding
use packet::{BlobRecycler, SharedBlob}; use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use std::result; use std::result;
//TODO(sakridge) pick these values //TODO(sakridge) pick these values
const NUM_CODED: usize = 10; pub const NUM_CODED: usize = 20;
const MAX_MISSING: usize = 2; pub const MAX_MISSING: usize = 4;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING; const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub enum ErasureError { pub enum ErasureError {
NotEnoughBlocksToDecode, NotEnoughBlocksToDecode,
DecodeError, DecodeError,
EncodeError,
InvalidBlockSize, InvalidBlockSize,
} }
@@ -73,12 +74,22 @@ pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Resul
let mut data_arg = Vec::new(); let mut data_arg = Vec::new();
for block in data { for block in data {
if block_len != block.len() { if block_len != block.len() {
trace!(
"data block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize); return Err(ErasureError::InvalidBlockSize);
} }
data_arg.push(block.as_ptr()); data_arg.push(block.as_ptr());
} }
for mut block in coding { for mut block in coding {
if block_len != block.len() { if block_len != block.len() {
trace!(
"coding block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize); return Err(ErasureError::InvalidBlockSize);
} }
coding_arg.push(block.as_mut_ptr()); coding_arg.push(block.as_mut_ptr());
@@ -150,59 +161,128 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
Ok(()) Ok(())
} }
// Generate coding blocks in window from consumed to consumed+NUM_DATA // Allocate some coding blobs and insert into the blobs array
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
let mut added = 0;
let blobs_len = blobs.len() as u64;
for i in consumed..consumed + blobs_len {
let is = i as usize;
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
for _ in 0..MAX_MISSING {
trace!("putting coding at {}", (i - consumed));
let new_blob = recycler.allocate();
let new_blob_clone = new_blob.clone();
let mut new_blob_l = new_blob_clone.write().unwrap();
new_blob_l.set_size(0);
new_blob_l.set_coding().unwrap();
drop(new_blob_l);
blobs.insert((i - consumed) as usize, new_blob);
added += 1;
}
}
}
info!(
"add_coding consumed: {} blobs.len(): {} added: {}",
consumed,
blobs.len(),
added
);
}
// Generate coding blocks in window starting from consumed
pub fn generate_coding( pub fn generate_coding(
re: &BlobRecycler, window: &mut Vec<Option<SharedBlob>>,
window: &mut Vec<SharedBlob>,
consumed: usize, consumed: usize,
num_blobs: usize,
) -> Result<()> { ) -> Result<()> {
let mut data_blobs = Vec::new(); let mut block_start = consumed - (consumed % NUM_CODED);
let mut coding_blobs = Vec::new();
let mut data_locks = Vec::new();
let mut data_ptrs: Vec<&[u8]> = Vec::new();
let mut coding_locks = Vec::new();
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
for i in consumed..consumed + NUM_DATA {
let n = i % window.len();
data_blobs.push(
window[n]
.clone()
.expect("'data_blobs' arr in pub fn generate_coding"),
);
}
for b in &data_blobs {
data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
}
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data);
}
// generate coding ptr array for i in consumed..consumed + num_blobs {
let coding_start = consumed + NUM_DATA; if (i % NUM_CODED) == (NUM_CODED - 1) {
let coding_end = consumed + NUM_CODED; let mut data_blobs = Vec::new();
for i in coding_start..coding_end { let mut coding_blobs = Vec::new();
let n = i % window.len(); let mut data_locks = Vec::new();
window[n] = re.allocate(); let mut data_ptrs: Vec<&[u8]> = Vec::new();
coding_blobs.push( let mut coding_locks = Vec::new();
window[n] let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
.clone()
.expect("'coding_blobs' arr in pub fn generate_coding"),
);
}
for b in &coding_blobs {
coding_locks.push(
b.write()
.expect("'coding_locks' arr in pub fn generate_coding"),
);
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
coding_ptrs.push(&mut l.data);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?; info!(
trace!("consumed: {}", consumed); "generate_coding start: {} end: {} consumed: {} num_blobs: {}",
block_start,
block_start + NUM_DATA,
consumed,
num_blobs
);
for i in block_start..block_start + NUM_DATA {
let n = i % window.len();
trace!("window[{}] = {:?}", n, window[n]);
if window[n].is_none() {
trace!("data block is null @ {}", n);
return Ok(());
}
data_blobs.push(
window[n]
.clone()
.expect("'data_blobs' arr in pub fn generate_coding"),
);
}
let mut max_data_size = 0;
for b in &data_blobs {
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
if lck.meta.size > max_data_size {
max_data_size = lck.meta.size;
}
data_locks.push(lck);
}
trace!("max_data_size: {}", max_data_size);
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data[..max_data_size]);
}
// generate coding ptr array
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
if window[n].is_none() {
trace!("coding block is null @ {}", n);
return Ok(());
}
let w_l = window[n].clone().unwrap();
w_l.write().unwrap().set_size(max_data_size);
if w_l.write().unwrap().set_coding().is_err() {
return Err(ErasureError::EncodeError);
}
coding_blobs.push(
window[n]
.clone()
.expect("'coding_blobs' arr in pub fn generate_coding"),
);
}
for b in &coding_blobs {
coding_locks.push(
b.write()
.expect("'coding_locks' arr in pub fn generate_coding"),
);
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
debug!(
"consumed: {} data: {}:{} coding: {}:{}",
consumed,
block_start,
block_start + NUM_DATA,
coding_start,
coding_end
);
block_start += NUM_CODED;
}
}
Ok(()) Ok(())
} }
@@ -214,75 +294,142 @@ pub fn recover(
re: &BlobRecycler, re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>, window: &mut Vec<Option<SharedBlob>>,
consumed: usize, consumed: usize,
received: usize,
) -> Result<()> { ) -> Result<()> {
//recover with erasure coding //recover with erasure coding
let mut data_missing = 0; if received <= consumed {
let mut coded_missing = 0; return Ok(());
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in consumed..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
coded_missing += 1;
} else {
data_missing += 1;
}
}
} }
trace!("missing: data: {} coding: {}", data_missing, coded_missing); let num_blocks = (received - consumed) / NUM_CODED;
if data_missing > 0 { let mut block_start = consumed - (consumed % NUM_CODED);
if (data_missing + coded_missing) <= MAX_MISSING {
let mut blobs: Vec<SharedBlob> = Vec::new(); if num_blocks > 0 {
let mut locks = Vec::new(); debug!(
let mut data_ptrs: Vec<&mut [u8]> = Vec::new(); "num_blocks: {} received: {} consumed: {}",
let mut coding_ptrs: Vec<&[u8]> = Vec::new(); num_blocks, received, consumed
let mut erasures: Vec<i32> = Vec::new(); );
for i in consumed..coding_end { }
let j = i % window.len();
let mut b = &mut window[j]; for i in 0..num_blocks {
if b.is_some() { if i > 100 {
blobs.push(b.clone().expect("'blobs' arr in pb fn recover")); break;
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - consumed) as i32);
}
erasures.push(-1);
trace!("erasures: {:?}", erasures);
//lock everything
for b in &blobs {
locks.push(b.write().expect("'locks' arr in pb fn recover"));
}
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data);
}
}
trace!(
"coding_ptrs.len: {} data_ptrs.len {}",
coding_ptrs.len(),
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
} else {
return Err(ErasureError::NotEnoughBlocksToDecode);
} }
let mut data_missing = 0;
let mut coded_missing = 0;
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
trace!(
"recover: block_start: {} coding_start: {} coding_end: {}",
block_start,
coding_start,
coding_end
);
for i in block_start..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
coded_missing += 1;
} else {
data_missing += 1;
}
}
}
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
debug!(
"1: start: {} recovering: data: {} coding: {}",
block_start, data_missing, coded_missing
);
}
if data_missing > 0 {
if (data_missing + coded_missing) <= MAX_MISSING {
debug!(
"2: recovering: data: {} coding: {}",
data_missing, coded_missing
);
let mut blobs: Vec<SharedBlob> = Vec::new();
let mut locks = Vec::new();
let mut erasures: Vec<i32> = Vec::new();
let mut meta = None;
let mut size = None;
for i in block_start..coding_end {
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
if i >= NUM_DATA && size.is_none() {
let bl = b.clone().unwrap();
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
}
if meta.is_none() {
let bl = b.clone().unwrap();
meta = Some(bl.read().unwrap().meta.clone());
}
blobs.push(b.clone().expect("'blobs' arr in pb fn recover"));
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - block_start) as i32);
}
erasures.push(-1);
trace!(
"erasures: {:?} data_size: {} header_size: {}",
erasures,
size.unwrap(),
BLOB_HEADER_SIZE
);
//lock everything
for b in &blobs {
locks.push(b.write().expect("'locks' arr in pb fn recover"));
}
{
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data()[..size.unwrap()]);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data[..size.unwrap()]);
}
}
trace!(
"coding_ptrs.len: {} data_ptrs.len {}",
coding_ptrs.len(),
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
}
for i in &erasures[..erasures.len() - 1] {
let idx = *i as usize;
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
locks[idx].meta = meta.clone().unwrap();
locks[idx].set_size(data_size as usize);
trace!(
"erasures[{}] size: {} data[0]: {}",
*i,
data_size,
locks[idx].data()[0]
);
}
}
}
block_start += NUM_CODED;
} }
Ok(()) Ok(())
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crdt;
use erasure; use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE}; use logger;
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::sync::{Arc, RwLock};
#[test] #[test]
pub fn test_coding() { pub fn test_coding() {
@@ -338,10 +485,15 @@ mod test {
for (i, w) in window.iter().enumerate() { for (i, w) in window.iter().enumerate() {
print!("window({}): ", i); print!("window({}): ", i);
if w.is_some() { if w.is_some() {
let window_lock = w.clone().unwrap(); let window_l1 = w.clone().unwrap();
let window_data = window_lock.read().unwrap().data; let window_l2 = window_l1.read().unwrap();
print!(
"index: {:?} meta.size: {} data: ",
window_l2.get_index(),
window_l2.meta.size
);
for i in 0..8 { for i in 0..8 {
print!("{} ", window_data[i]); print!("{} ", window_l2.data()[i]);
} }
} else { } else {
print!("null"); print!("null");
@@ -350,45 +502,102 @@ mod test {
} }
} }
#[test] fn generate_window(
pub fn test_window_recover() { data_len: usize,
let mut window = Vec::new(); blob_recycler: &BlobRecycler,
let blob_recycler = BlobRecycler::default(); offset: usize,
let offset = 4; num_blobs: usize,
for i in 0..(4 * erasure::NUM_CODED + 1) { ) -> (Vec<Option<SharedBlob>>, usize) {
let mut window = vec![None; 32];
let mut blobs = Vec::new();
for i in 0..num_blobs {
let b = blob_recycler.allocate(); let b = blob_recycler.allocate();
let b_ = b.clone(); let b_ = b.clone();
let data_len = b.read().unwrap().data.len();
let mut w = b.write().unwrap(); let mut w = b.write().unwrap();
w.set_index(i as u64).unwrap(); w.set_size(data_len);
assert_eq!(i as u64, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
for k in 0..data_len { for k in 0..data_len {
w.data[k] = (k + i) as u8; w.data_mut()[k] = (k + i) as u8;
} }
window.push(Some(b_)); blobs.push(b_);
} }
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
let blobs_len = blobs.len();
let d = crdt::ReplicatedData::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
"127.0.0.1:1237".parse().unwrap(),
"127.0.0.1:1238".parse().unwrap(),
);
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize;
window[idx] = Some(b);
}
(window, blobs_len)
}
#[test]
pub fn test_window_recover_basic() {
logger::setup();
let data_len = 16;
let blob_recycler = BlobRecycler::default();
// Generate a window
let offset = 1;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
println!("** after-gen-window:");
print_window(&window);
// Generate the coding blocks
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
println!("** after-gen-coding:");
print_window(&window);
let erase_offset = offset;
// Create a hole in the window
let refwindow = window[erase_offset].clone();
window[erase_offset] = None;
// Recover it from coding
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
println!("** after-recover:");
print_window(&window);
// Check the result
let window_l = window[erase_offset].clone().unwrap();
let window_l2 = window_l.read().unwrap();
let ref_l = refwindow.clone().unwrap();
let ref_l2 = ref_l.read().unwrap();
assert_eq!(
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
);
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
}
//TODO This needs to be reworked
#[test]
#[ignore]
pub fn test_window_recover() {
logger::setup();
let blob_recycler = BlobRecycler::default();
let offset = 4;
let data_len = 16;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
println!("** after-gen:"); println!("** after-gen:");
print_window(&window); print_window(&window);
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok()); assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
assert!(
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
.is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-coding:"); println!("** after-coding:");
print_window(&window); print_window(&window);
let refwindow = window[offset + 1].clone(); let refwindow = window[offset + 1].clone();
@@ -402,29 +611,14 @@ mod test {
window_l0.write().unwrap().data[0] = 55; window_l0.write().unwrap().data[0] = 55;
println!("** after-nulling:"); println!("** after-nulling:");
print_window(&window); print_window(&window);
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok()); assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_err()
);
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-restore:"); println!("** after-restore:");
print_window(&window); print_window(&window);
let window_l = window[offset + 1].clone().unwrap(); let window_l = window[offset + 1].clone().unwrap();
let ref_l = refwindow.clone().unwrap(); let ref_l = refwindow.clone().unwrap();
assert_eq!( assert_eq!(
window_l.read().unwrap().data.to_vec(), window_l.read().unwrap().data()[..data_len],
ref_l.read().unwrap().data.to_vec() ref_l.read().unwrap().data()[..data_len]
); );
} }
} }

View File

@@ -1,31 +0,0 @@
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
//! Transaction.
use hash::Hash;
use signature::{KeyPair, PublicKey};
use transaction::Transaction;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
Transaction(Transaction),
}
impl Event {
pub fn new_transaction(
from_keypair: &KeyPair,
to: PublicKey,
tokens: i64,
last_id: Hash,
) -> Self {
let tr = Transaction::new(from_keypair, to, tokens, last_id);
Event::Transaction(tr)
}
/// Verify the Event's signature's are valid and if a transaction, that its
/// spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify_plan(),
}
}
}

47
src/fetch_stage.rs Normal file
View File

@@ -0,0 +1,47 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
/// Handles produced by the fetch stage: the channel end on which received
/// packet batches arrive, plus the receiver threads that feed it.
pub struct FetchStage {
    /// Output side of the channel the receiver threads send packets into.
    pub packet_receiver: streamer::PacketReceiver,
    /// One handle per spawned receiver thread; join these on shutdown.
    pub thread_hdls: Vec<JoinHandle<()>>,
}
impl FetchStage {
    /// Start a fetch stage reading from a single UDP socket.
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        Self::new_multi_socket(vec![socket], exit, packet_recycler)
    }

    /// Start one receiver thread per socket; all of them feed the single
    /// `packet_receiver` channel. Threads observe `exit` to know when to stop.
    pub fn new_multi_socket(
        sockets: Vec<UdpSocket>,
        exit: Arc<AtomicBool>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        let (packet_sender, packet_receiver) = channel();
        let mut thread_hdls = Vec::with_capacity(sockets.len());
        for socket in sockets {
            let handle = streamer::receiver(
                socket,
                exit.clone(),
                packet_recycler.clone(),
                packet_sender.clone(),
            );
            thread_hdls.push(handle);
        }
        FetchStage {
            packet_receiver,
            thread_hdls,
        }
    }
}

View File

@@ -1,7 +1,7 @@
//! The `hash` module provides functions for creating SHA-256 hashes. //! The `hash` module provides functions for creating SHA-256 hashes.
use generic_array::GenericArray;
use generic_array::typenum::U32; use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
pub type Hash = GenericArray<u8, U32>; pub type Hash = GenericArray<u8, U32>;
@@ -10,7 +10,10 @@ pub type Hash = GenericArray<u8, U32>;
pub fn hash(val: &[u8]) -> Hash { pub fn hash(val: &[u8]) -> Hash {
let mut hasher = Sha256::default(); let mut hasher = Sha256::default();
hasher.input(val); hasher.input(val);
hasher.result()
// At the time of this writing, the sha2 library is stuck on an old version
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
GenericArray::clone_from_slice(hasher.result().as_slice())
} }
/// Return the hash of the given hash extended with the given value. /// Return the hash of the given hash extended with the given value.

View File

@@ -1,21 +1,21 @@
//! The `ledger` module provides functions for parallel verification of the //! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger. //! Proof of History ledger.
use bincode::{deserialize, serialize_into}; use bincode::{self, deserialize, serialize_into, serialized_size};
use entry::{next_entry, Entry}; use entry::Entry;
use event::Event;
use hash::Hash; use hash::Hash;
use packet; use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*; use rayon::prelude::*;
use std::cmp::min;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::io::Cursor; use std::io::Cursor;
use std::mem::size_of; use transaction::Transaction;
// a Block is a slice of Entries
pub trait Block { pub trait Block {
/// Verifies the hashes and counts of a slice of events are all consistent. /// Verifies the hashes and counts of a slice of transactions are all consistent.
fn verify(&self, start_hash: &Hash) -> bool; fn verify(&self, start_hash: &Hash) -> bool;
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
} }
impl Block for [Entry] { impl Block for [Entry] {
@@ -24,110 +24,117 @@ impl Block for [Entry] {
let entry_pairs = genesis.par_iter().chain(self).zip(self); let entry_pairs = genesis.par_iter().chain(self).zip(self);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id)) entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
} }
}
/// Create a vector of Entries of length `event_set.len()` from `start_hash` hash, `num_hashes`, and `event_set`. fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
pub fn next_entries(start_hash: &Hash, num_hashes: u64, event_set: Vec<Vec<Event>>) -> Vec<Entry> { for entry in self {
let mut id = *start_hash; let blob = blob_recycler.allocate();
let mut entries = vec![];
for event_list in &event_set {
let events = event_list.clone();
let entry = next_entry(&id, num_hashes, events);
id = entry.id;
entries.push(entry);
}
entries
}
pub fn process_entry_list_into_blobs(
list: &Vec<Entry>,
blob_recycler: &packet::BlobRecycler,
q: &mut VecDeque<SharedBlob>,
) {
let mut start = 0;
let mut end = 0;
while start < list.len() {
let mut entries: Vec<Vec<Entry>> = Vec::new();
let mut total = 0;
for i in &list[start..] {
total += size_of::<Event>() * i.events.len();
total += size_of::<Entry>();
if total >= BLOB_DATA_SIZE {
break;
}
end += 1;
}
// See if we need to split the events
if end <= start {
let mut event_start = 0;
let num_events_per_blob = BLOB_DATA_SIZE / size_of::<Event>();
let total_entry_chunks =
(list[end].events.len() + num_events_per_blob - 1) / num_events_per_blob;
trace!(
"splitting events end: {} total_chunks: {}",
end,
total_entry_chunks
);
for _ in 0..total_entry_chunks {
let event_end = min(event_start + num_events_per_blob, list[end].events.len());
let mut entry = Entry {
num_hashes: list[end].num_hashes,
id: list[end].id,
events: list[end].events[event_start..event_end].to_vec(),
};
entries.push(vec![entry]);
event_start = event_end;
}
end += 1;
} else {
entries.push(list[start..end].to_vec());
}
for entry in entries {
let b = blob_recycler.allocate();
let pos = { let pos = {
let mut bd = b.write().unwrap(); let mut bd = blob.write().unwrap();
let mut out = Cursor::new(bd.data_mut()); let mut out = Cursor::new(bd.data_mut());
serialize_into(&mut out, &entry).expect("failed to serialize output"); serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize out.position() as usize
}; };
assert!(pos < BLOB_SIZE); assert!(pos < BLOB_SIZE);
b.write().unwrap().set_size(pos); blob.write().unwrap().set_size(pos);
q.push_back(b); q.push_back(blob);
} }
start = end;
} }
} }
pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> { pub fn reconstruct_entries_from_blobs(
let mut entries_to_apply: Vec<Entry> = Vec::new(); blobs: VecDeque<SharedBlob>,
let mut last_id = Hash::default(); blob_recycler: &packet::BlobRecycler,
for msgs in blobs { ) -> bincode::Result<Vec<Entry>> {
let blob = msgs.read().unwrap(); let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries { for blob in blobs {
if entry.id == last_id { let entry = {
if let Some(last_entry) = entries_to_apply.last_mut() { let msg = blob.read().unwrap();
last_entry.events.extend(entry.events); deserialize(&msg.data()[..msg.meta.size])
} };
} else { blob_recycler.recycle(blob);
last_id = entry.id;
entries_to_apply.push(entry); match entry {
Ok(entry) => entries.push(entry),
Err(err) => {
trace!("reconstruct_entry_from_blobs: {}", err);
return Err(err);
} }
} }
//TODO respond back to leader with hash of the state
} }
entries_to_apply Ok(entries)
}
/// Creates the next entries for given transactions, outputs
/// updates start_hash to id of last Entry, sets cur_hashes to 0
pub fn next_entries_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
if transactions.is_empty() {
vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
} else {
let mut chunk_len = transactions.len();
// check for fit, make sure they can be serialized
while serialized_size(&Entry {
num_hashes: 0,
id: Hash::default(),
transactions: transactions[0..chunk_len].to_vec(),
}).unwrap() > BLOB_DATA_SIZE as u64
{
chunk_len /= 2;
}
let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
for chunk in transactions.chunks(chunk_len) {
entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
}
entries
}
}
/// Creates the next Entries for given transactions
pub fn next_entries(
start_hash: &Hash,
cur_hashes: u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut num_hashes = cur_hashes;
next_entries_mut(&mut id, &mut num_hashes, transactions)
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use entry::{next_entry, Entry};
use hash::hash; use hash::hash;
use packet::BlobRecycler; use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil}; use signature::{KeyPair, KeyPairUtil};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use transaction::Transaction; use transaction::Transaction;
/// Create a vector of Entries of length `transaction_batches.len()`
/// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
fn next_entries_batched(
start_hash: &Hash,
cur_hashes: u64,
transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut entries = vec![];
let mut num_hashes = cur_hashes;
for transactions in transaction_batches {
let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
entries.append(&mut entry_batch);
}
entries
}
#[test] #[test]
fn test_verify_slice() { fn test_verify_slice() {
let zero = Hash::default(); let zero = Hash::default();
@@ -135,46 +142,57 @@ mod tests {
assert!(vec![][..].verify(&zero)); // base case assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]); let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
bad_ticks[1].id = one; bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad assert!(!bad_ticks.verify(&zero)); // inductive step, bad
} }
#[test] #[test]
fn test_entry_to_blobs() { fn test_entries_to_blobs() {
let zero = Hash::default(); let zero = Hash::default();
let one = hash(&zero); let one = hash(&zero);
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, one)); let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let events = vec![tr0.clone(); 10000]; let transactions = vec![tx0; 10_000];
let e0 = Entry::new(&zero, 0, events); let entries = next_entries(&zero, 0, transactions);
let entry_list = vec![e0.clone(); 1];
let blob_recycler = BlobRecycler::default(); let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new(); let mut blob_q = VecDeque::new();
process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q); entries.to_blobs(&blob_recycler, &mut blob_q);
let entries = reconstruct_entries_from_blobs(&blob_q);
assert_eq!(entry_list, entries); assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
} }
#[test] #[test]
fn test_next_entries() { fn test_bad_blobs_attack() {
let blob_recycler = BlobRecycler::default();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
}
#[test]
fn test_next_entries_batched() {
// this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
let mut id = Hash::default(); let mut id = Hash::default();
let next_id = hash(&id); let next_id = hash(&id);
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, next_id)); let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
let events = vec![tr0.clone(); 5];
let event_set = vec![events.clone(); 5]; let transactions = vec![tx0; 5];
let entries0 = next_entries(&id, 0, event_set); let transaction_batches = vec![transactions.clone(); 5];
let entries0 = next_entries_batched(&id, 0, transaction_batches);
assert_eq!(entries0.len(), 5); assert_eq!(entries0.len(), 5);
let mut entries1 = vec![]; let mut entries1 = vec![];
for _ in 0..5 { for _ in 0..5 {
let entry = next_entry(&id, 0, events.clone()); let entry = next_entry(&id, 1, transactions.clone());
id = entry.id; id = entry.id;
entries1.push(entry); entries1.push(entry);
} }
@@ -186,14 +204,30 @@ mod tests {
mod bench { mod bench {
extern crate test; extern crate test;
use self::test::Bencher; use self::test::Bencher;
use hash::hash;
use ledger::*; use ledger::*;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[bench] #[bench]
fn event_bench(bencher: &mut Bencher) { fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let start_hash = Hash::default(); let zero = Hash::default();
let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]); let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
let blob_recycler = BlobRecycler::default();
bencher.iter(|| { bencher.iter(|| {
assert!(entries.verify(&start_hash)); let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
}); });
} }
} }

View File

@@ -1,19 +1,32 @@
//! The `solana` library implements the Solana high-performance blockchain architecture.
//! It includes a full Rust implementation of the architecture (see
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
//! command-line tools to spin up fullnodes and a Rust library
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
//!
#![cfg_attr(feature = "unstable", feature(test))] #![cfg_attr(feature = "unstable", feature(test))]
#[macro_use]
pub mod counter;
pub mod bank; pub mod bank;
pub mod banking_stage; pub mod banking_stage;
pub mod blob_fetch_stage;
pub mod budget;
pub mod crdt; pub mod crdt;
pub mod ecdsa; pub mod drone;
pub mod entry; pub mod entry;
pub mod entry_writer; pub mod entry_writer;
#[cfg(feature = "erasure")] #[cfg(feature = "erasure")]
pub mod erasure; pub mod erasure;
pub mod event; pub mod fetch_stage;
pub mod hash; pub mod hash;
pub mod ledger; pub mod ledger;
pub mod logger; pub mod logger;
pub mod mint; pub mod mint;
pub mod ncp;
pub mod packet; pub mod packet;
pub mod plan; pub mod payment_plan;
pub mod record_stage; pub mod record_stage;
pub mod recorder; pub mod recorder;
pub mod replicate_stage; pub mod replicate_stage;
@@ -23,14 +36,16 @@ pub mod request_stage;
pub mod result; pub mod result;
pub mod rpu; pub mod rpu;
pub mod server; pub mod server;
pub mod sig_verify_stage;
pub mod signature; pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
pub mod streamer; pub mod streamer;
pub mod thin_client; pub mod thin_client;
pub mod timing; pub mod timing;
pub mod tpu; pub mod tpu;
pub mod transaction; pub mod transaction;
pub mod tvu; pub mod tvu;
pub mod window_stage;
pub mod write_stage; pub mod write_stage;
extern crate bincode; extern crate bincode;
extern crate byteorder; extern crate byteorder;
@@ -44,12 +59,11 @@ extern crate ring;
extern crate serde; extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
extern crate pnet_datalink;
extern crate serde_json; extern crate serde_json;
extern crate sha2; extern crate sha2;
extern crate untrusted; extern crate untrusted;
extern crate futures;
#[cfg(test)] #[cfg(test)]
#[macro_use] #[macro_use]
extern crate matches; extern crate matches;

View File

@@ -1,3 +1,6 @@
//! The `logger` module provides a setup function for `env_logger`. Its only function,
//! `setup()` may be called multiple times.
use std::sync::{Once, ONCE_INIT}; use std::sync::{Once, ONCE_INIT};
extern crate env_logger; extern crate env_logger;

View File

@@ -1,7 +1,6 @@
//! The `mint` module is a library for generating the chain's genesis block. //! The `mint` module is a library for generating the chain's genesis block.
use entry::Entry; use entry::Entry;
use event::Event;
use hash::{hash, Hash}; use hash::{hash, Hash};
use ring::rand::SystemRandom; use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey}; use signature::{KeyPair, KeyPairUtil, PublicKey};
@@ -47,15 +46,15 @@ impl Mint {
self.pubkey self.pubkey
} }
pub fn create_events(&self) -> Vec<Event> { pub fn create_transactions(&self) -> Vec<Transaction> {
let keypair = self.keypair(); let keypair = self.keypair();
let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed()); let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![Event::Transaction(tr)] vec![tx]
} }
pub fn create_entries(&self) -> Vec<Entry> { pub fn create_entries(&self) -> Vec<Entry> {
let e0 = Entry::new(&self.seed(), 0, vec![]); let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_events()); let e1 = Entry::new(&e0.id, 0, self.create_transactions());
vec![e0, e1] vec![e0, e1]
} }
} }
@@ -69,20 +68,20 @@ pub struct MintDemo {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use budget::Budget;
use ledger::Block; use ledger::Block;
use plan::Plan; use transaction::{Instruction, Plan};
use transaction::Instruction;
#[test] #[test]
fn test_create_events() { fn test_create_transactions() {
let mut events = Mint::new(100).create_events().into_iter(); let mut transactions = Mint::new(100).create_transactions().into_iter();
let Event::Transaction(tr) = events.next().unwrap(); let tx = transactions.next().unwrap();
if let Instruction::NewContract(contract) = tr.instruction { if let Instruction::NewContract(contract) = tx.instruction {
if let Plan::Pay(payment) = contract.plan { if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
assert_eq!(tr.from, payment.to); assert_eq!(tx.from, payment.to);
} }
} }
assert_eq!(events.next(), None); assert_eq!(transactions.next(), None);
} }
#[test] #[test]

89
src/ncp.rs Normal file
View File

@@ -0,0 +1,89 @@
//! The `ncp` module implements the network control plane.
use crdt;
use packet;
use result::Result;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
/// Handles to the threads that make up the network control plane.
pub struct Ncp {
    /// Receiver, responder, listen, and gossip thread handles; join on shutdown.
    pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Ncp {
    /// Wire up the network control plane for one node.
    ///
    /// Spawns four threads: a blob receiver on `gossip_listen_socket` feeding a
    /// request channel, a responder draining a response channel out through
    /// `gossip_send_socket`, and the crdt `listen` and `gossip` loops in
    /// between. All threads observe `exit` to know when to stop.
    ///
    /// # Errors
    /// Returns an error if the blob receiver thread cannot be created.
    pub fn new(
        crdt: Arc<RwLock<crdt::Crdt>>,
        window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
        gossip_listen_socket: UdpSocket,
        gossip_send_socket: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> Result<Ncp> {
        let blob_recycler = packet::BlobRecycler::default();
        // Inbound gossip blobs flow: socket -> t_receiver -> request channel.
        let (request_sender, request_receiver) = channel();
        trace!(
            "Ncp: id: {:?}, listening on: {:?}",
            &crdt.read().unwrap().me[..4],
            gossip_listen_socket.local_addr().unwrap()
        );
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            gossip_listen_socket,
            request_sender,
        )?;
        // Outbound responses flow: response channel -> t_responder -> socket.
        let (response_sender, response_receiver) = channel();
        let t_responder = streamer::responder(
            gossip_send_socket,
            exit.clone(),
            blob_recycler.clone(),
            response_receiver,
        );
        // `listen` consumes requests and emits responses; `gossip` emits
        // responses on its own schedule through the same sender.
        let t_listen = crdt::Crdt::listen(
            crdt.clone(),
            window,
            blob_recycler.clone(),
            request_receiver,
            response_sender.clone(),
            exit.clone(),
        );
        let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
        let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
        Ok(Ncp { thread_hdls })
    }
}
#[cfg(test)]
mod tests {
    use crdt::{Crdt, TestNode};
    use ncp::Ncp;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, RwLock};

    #[test]
    #[ignore]
    // test that stage will exit when flag is set
    // TODO: Troubleshoot Docker-based coverage build and re-enabled
    // this test. It is probably failing due to too many threads.
    fn test_exit() {
        let exit = Arc::new(AtomicBool::new(false));
        let tn = TestNode::new();
        let crdt = Crdt::new(tn.data.clone());
        let c = Arc::new(RwLock::new(crdt));
        // Empty window is sufficient; this test only exercises shutdown.
        let w = Arc::new(RwLock::new(vec![]));
        let d = Ncp::new(
            c.clone(),
            w,
            tn.sockets.gossip,
            tn.sockets.gossip_send,
            exit.clone(),
        ).unwrap();
        // Signal shutdown, then verify every spawned thread terminates.
        exit.store(true, Ordering::Relaxed);
        for t in d.thread_hdls {
            t.join().expect("thread join");
        }
    }
}

View File

@@ -1,6 +1,7 @@
//! The `packet` module defines data structures and methods to pull data from the network. //! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize}; use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use counter::Counter;
use result::{Error, Result}; use result::{Error, Result};
use serde::Serialize; use serde::Serialize;
use signature::PublicKey; use signature::PublicKey;
@@ -9,7 +10,9 @@ use std::fmt;
use std::io; use std::io;
use std::mem::size_of; use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex, RwLock}; use std::sync::{Arc, Mutex, RwLock};
use std::time::Instant;
pub type SharedPackets = Arc<RwLock<Packets>>; pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>; pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -18,7 +21,7 @@ pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8; pub const NUM_PACKETS: usize = 1024 * 8;
pub const BLOB_SIZE: usize = 64 * 1024; pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END; pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
pub const PACKET_DATA_SIZE: usize = 256; pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE; pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
@@ -26,6 +29,7 @@ pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[repr(C)] #[repr(C)]
pub struct Meta { pub struct Meta {
pub size: usize, pub size: usize,
pub num_retransmits: u64,
pub addr: [u16; 8], pub addr: [u16; 8],
pub port: u16, pub port: u16,
pub v6: bool, pub v6: bool,
@@ -169,21 +173,24 @@ impl<T: Default> Recycler<T> {
impl Packets { impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> { fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
static mut COUNTER: Counter = create_counter!("packets", 10);
self.packets.resize(NUM_PACKETS, Packet::default()); self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0; let mut i = 0;
//DOCUMENTED SIDE-EFFECT //DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll //Performance out of the IO without poll
// * block on the socket until its readable // * block on the socket until it's readable
// * set the socket to non blocking // * set the socket to non blocking
// * read until it fails // * read until it fails
// * set it back to blocking before returning // * set it back to blocking before returning
socket.set_nonblocking(false)?; socket.set_nonblocking(false)?;
let mut start = Instant::now();
for p in &mut self.packets { for p in &mut self.packets {
p.meta.size = 0; p.meta.size = 0;
trace!("receiving"); trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) { match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => { Err(_) if i > 0 => {
debug!("got {:?} messages", i); inc_counter!(COUNTER, i, start);
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break; break;
} }
Err(e) => { Err(e) => {
@@ -194,6 +201,7 @@ impl Packets {
p.meta.size = nrecv; p.meta.size = nrecv;
p.meta.set_addr(&from); p.meta.set_addr(&from);
if i == 0 { if i == 0 {
start = Instant::now();
socket.set_nonblocking(true)?; socket.set_nonblocking(true)?;
} }
} }
@@ -217,9 +225,13 @@ impl Packets {
} }
} }
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> { pub fn to_packets_chunked<T: Serialize>(
r: &PacketRecycler,
xs: Vec<T>,
chunks: usize,
) -> Vec<SharedPackets> {
let mut out = vec![]; let mut out = vec![];
for x in xs.chunks(NUM_PACKETS) { for x in xs.chunks(chunks) {
let p = r.allocate(); let p = r.allocate();
p.write() p.write()
.unwrap() .unwrap()
@@ -236,8 +248,52 @@ pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPac
return out; return out;
} }
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
to_packets_chunked(r, xs, NUM_PACKETS)
}
pub fn to_blob<T: Serialize>(
resp: T,
rsp_addr: SocketAddr,
blob_recycler: &BlobRecycler,
) -> Result<SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
assert!(len < BLOB_SIZE);
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
pub fn to_blobs<T: Serialize>(
rsps: Vec<(T, SocketAddr)>,
blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
const BLOB_INDEX_END: usize = size_of::<u64>(); const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>(); const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
macro_rules! align {
($x:expr, $align:expr) => {
$x + ($align - 1) & !($align - 1)
};
}
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
impl Blob { impl Blob {
pub fn get_index(&self) -> Result<u64> { pub fn get_index(&self) -> Result<u64> {
@@ -251,7 +307,8 @@ impl Blob {
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr); self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
Ok(()) Ok(())
} }
/// Sender id; we use this for identifying whether it's a blob from the leader that we should
/// retransmit. Eventually blobs should have a signature that we can use for spam filtering.
pub fn get_id(&self) -> Result<PublicKey> { pub fn get_id(&self) -> Result<PublicKey> {
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?; let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
Ok(e) Ok(e)
@@ -263,20 +320,57 @@ impl Blob {
Ok(()) Ok(())
} }
pub fn get_flags(&self) -> Result<u32> {
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
let r = rdr.read_u32::<LittleEndian>()?;
Ok(r)
}
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
let mut wtr = vec![];
wtr.write_u32::<LittleEndian>(ix)?;
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
Ok(())
}
pub fn is_coding(&self) -> bool {
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
}
pub fn set_coding(&mut self) -> Result<()> {
let flags = self.get_flags().unwrap();
self.set_flags(flags | BLOB_FLAG_IS_CODING)
}
pub fn get_data_size(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] { pub fn data(&self) -> &[u8] {
&self.data[BLOB_ID_END..] &self.data[BLOB_HEADER_SIZE..]
} }
pub fn data_mut(&mut self) -> &mut [u8] { pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_ID_END..] &mut self.data[BLOB_HEADER_SIZE..]
} }
pub fn set_size(&mut self, size: usize) { pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_ID_END; let new_size = size + BLOB_HEADER_SIZE;
self.meta.size = new_size;
self.set_data_size(new_size as u64).unwrap();
} }
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> { pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new(); let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT //DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll //Performance out of the IO without poll
// * block on the socket until its readable // * block on the socket until it's readable
// * set the socket to non blocking // * set the socket to non blocking
// * read until it fails // * read until it fails
// * set it back to blocking before returning // * set it back to blocking before returning
@@ -285,9 +379,10 @@ impl Blob {
let r = re.allocate(); let r = re.allocate();
{ {
let mut p = r.write().expect("'r' write lock in pub fn recv_from"); let mut p = r.write().expect("'r' write lock in pub fn recv_from");
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) { match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => { Err(_) if i > 0 => {
trace!("got {:?} messages", i); trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break; break;
} }
Err(e) => { Err(e) => {
@@ -378,17 +473,17 @@ mod test {
#[test] #[test]
fn test_to_packets() { fn test_to_packets() {
let tr = Request::GetTransactionCount; let tx = Request::GetTransactionCount;
let re = PacketRecycler::default(); let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]); let rv = to_packets(&re, vec![tx.clone(); 1]);
assert_eq!(rv.len(), 1); assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1); assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]); let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1); assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS); assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]); let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2); assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS); assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1); assert_eq!(rv[1].read().unwrap().packets.len(), 1);

40
src/payment_plan.rs Normal file
View File

@@ -0,0 +1,40 @@
//! The `payment_plan` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
/// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
    /// The current time.
    Timestamp(DateTime<Utc>),
    /// A signature from PublicKey.
    Signature(PublicKey),
}
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
/// A `Payment` is what a fully reduced payment plan yields (see
/// `PaymentPlan::final_payment`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
    /// Amount to be paid.
    pub tokens: i64,
    /// The `PublicKey` that `tokens` should be paid to.
    pub to: PublicKey,
}
/// Interface to smart contracts: a payment plan is reduced by incoming
/// `Witness`es until it yields a `Payment`.
pub trait PaymentPlan {
    /// Return Payment if the payment plan requires no additional Witnesses.
    fn final_payment(&self) -> Option<Payment>;

    /// Return true if the plan spends exactly `spendable_tokens`.
    fn verify(&self, spendable_tokens: i64) -> bool;

    /// Apply a witness to the payment plan to see if the plan can be reduced.
    /// If so, modify the plan in-place.
    fn apply_witness(&mut self, witness: &Witness);
}

View File

@@ -1,177 +0,0 @@
//! The `plan` module provides a domain-specific language for payment plans. Users create Plan objects that
//! are given to an interpreter. The interpreter listens for `Witness` events,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
use std::mem;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
(&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
_ => false,
}
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
pub tokens: i64,
pub to: PublicKey,
}
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
Pay(Payment),
After(Condition, Payment),
Race((Condition, Payment), (Condition, Payment)),
}
impl Plan {
/// Create the simplest spending plan - one that pays `tokens` to PublicKey.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Plan::Pay(Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Plan::Race(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
/// Return Payment if the spending plan requires no additional Witnesses.
pub fn final_payment(&self) -> Option<Payment> {
match *self {
Plan::Pay(ref payment) => Some(payment.clone()),
_ => None,
}
}
/// Return true if the plan spends exactly `spendable_tokens`.
pub fn verify(&self, spendable_tokens: i64) -> bool {
match *self {
Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
payment.tokens == spendable_tokens
}
Plan::Race(ref a, ref b) => {
a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
}
}
}
/// Apply a witness to the spending plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
pub fn apply_witness(&mut self, witness: &Witness) {
let new_payment = match *self {
Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
_ => None,
}.cloned();
if let Some(payment) = new_payment {
mem::replace(self, Plan::Pay(payment));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
}
#[test]
fn test_verify_plan() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
assert!(Plan::new_payment(42, to).verify(42));
assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
assert!(Plan::new_future_payment(dt, 42, to).verify(42));
assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
#[test]
fn test_authorized_payment() {
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_authorized_payment(from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let mut plan = Plan::new_future_payment(dt, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, from));
}
}

View File

@@ -1,22 +1,22 @@
//! The `record_stage` module provides an object for generating a Proof of History. //! The `record_stage` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates //! It records Transaction items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It //! new hashes, only stopping to check if it has been sent an Transaction item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the //! tags each Transaction with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event. //! Transaction, the latest hash, and the number of hashes since the last transaction.
//! The resulting stream of entries represents ordered events in time. //! The resulting stream of entries represents ordered transactions in time.
use entry::Entry; use entry::Entry;
use event::Event;
use hash::Hash; use hash::Hash;
use recorder::Recorder; use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
use std::thread::{spawn, JoinHandle}; use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use transaction::Transaction;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))] #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal { pub enum Signal {
Tick, Tick,
Events(Vec<Event>), Transactions(Vec<Transaction>),
} }
pub struct RecordStage { pub struct RecordStage {
@@ -25,33 +25,19 @@ pub struct RecordStage {
} }
impl RecordStage { impl RecordStage {
/// A background thread that will continue tagging received Event messages and /// A background thread that will continue tagging received Transaction messages and
/// sending back Entry messages until either the receiver or sender channel is closed. /// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new( pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
event_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Option<Duration>,
) -> Self {
let (entry_sender, entry_receiver) = channel(); let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone(); let start_hash = start_hash.clone();
let thread_hdl = spawn(move || { let thread_hdl = Builder::new()
let mut recorder = Recorder::new(start_hash); .name("solana-record-stage".to_string())
let duration_data = tick_duration.map(|dur| (Instant::now(), dur)); .spawn(move || {
loop { let mut recorder = Recorder::new(start_hash);
if let Err(_) = Self::process_events( let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
&mut recorder, })
duration_data, .unwrap();
&event_receiver,
&entry_sender,
) {
return;
}
if duration_data.is_some() {
recorder.hash();
}
}
});
RecordStage { RecordStage {
entry_receiver, entry_receiver,
@@ -59,29 +45,88 @@ impl RecordStage {
} }
} }
pub fn process_events( /// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
pub fn new_with_clock(
signal_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Duration,
) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
.spawn(move || {
let mut recorder = Recorder::new(start_hash);
let start_time = Instant::now();
loop {
if let Err(_) = Self::try_process_signals(
&mut recorder,
start_time,
tick_duration,
&signal_receiver,
&entry_sender,
) {
return;
}
recorder.hash();
}
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
}
fn process_signal(
signal: Signal,
recorder: &mut Recorder,
sender: &Sender<Entry>,
) -> Result<(), ()> {
let txs = if let Signal::Transactions(txs) = signal {
txs
} else {
vec![]
};
let entries = recorder.record(txs);
let mut result = Ok(());
for entry in entries {
result = sender.send(entry).map_err(|_| ());
if result.is_err() {
break;
}
}
result
}
fn process_signals(
recorder: &mut Recorder, recorder: &mut Recorder,
duration_data: Option<(Instant, Duration)>,
receiver: &Receiver<Signal>, receiver: &Receiver<Signal>,
sender: &Sender<Entry>, sender: &Sender<Entry>,
) -> Result<(), ()> { ) -> Result<(), ()> {
loop { loop {
if let Some((start_time, tick_duration)) = duration_data { match receiver.recv() {
if let Some(entry) = recorder.tick(start_time, tick_duration) { Ok(signal) => Self::process_signal(signal, recorder, sender)?,
sender.send(entry).or(Err(()))?; Err(RecvError) => return Err(()),
} }
}
}
fn try_process_signals(
recorder: &mut Recorder,
start_time: Instant,
tick_duration: Duration,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
} }
match receiver.try_recv() { match receiver.try_recv() {
Ok(signal) => match signal { Ok(signal) => Self::process_signal(signal, recorder, sender)?,
Signal::Tick => {
let entry = recorder.record(vec![]);
sender.send(entry).or(Err(()))?;
}
Signal::Events(events) => {
let entry = recorder.record(events);
sender.send(entry).or(Err(()))?;
}
},
Err(TryRecvError::Empty) => return Ok(()), Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(()), Err(TryRecvError::Disconnected) => return Err(()),
}; };
@@ -99,15 +144,15 @@ mod tests {
#[test] #[test]
fn test_historian() { fn test_historian() {
let (input, event_receiver) = channel(); let (tx_sender, tx_receiver) = channel();
let zero = Hash::default(); let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None); let record_stage = RecordStage::new(tx_receiver, &zero);
input.send(Signal::Tick).unwrap(); tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000)); sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap(); tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000)); sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap(); tx_sender.send(Signal::Tick).unwrap();
let entry0 = record_stage.entry_receiver.recv().unwrap(); let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap(); let entry1 = record_stage.entry_receiver.recv().unwrap();
@@ -117,7 +162,7 @@ mod tests {
assert_eq!(entry1.num_hashes, 0); assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0); assert_eq!(entry2.num_hashes, 0);
drop(input); drop(tx_sender);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ()); assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
assert!([entry0, entry1, entry2].verify(&zero)); assert!([entry0, entry1, entry2].verify(&zero));
@@ -125,38 +170,40 @@ mod tests {
#[test] #[test]
fn test_historian_closed_sender() { fn test_historian_closed_sender() {
let (input, event_receiver) = channel(); let (tx_sender, tx_receiver) = channel();
let zero = Hash::default(); let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None); let record_stage = RecordStage::new(tx_receiver, &zero);
drop(record_stage.entry_receiver); drop(record_stage.entry_receiver);
input.send(Signal::Tick).unwrap(); tx_sender.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ()); assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
} }
#[test] #[test]
fn test_events() { fn test_transactions() {
let (input, signal_receiver) = channel(); let (tx_sender, signal_receiver) = channel();
let zero = Hash::default(); let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero, None); let record_stage = RecordStage::new(signal_receiver, &zero);
let alice_keypair = KeyPair::new(); let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey(); let bob_pubkey = KeyPair::new().pubkey();
let event0 = Event::new_transaction(&alice_keypair, bob_pubkey, 1, zero); let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
let event1 = Event::new_transaction(&alice_keypair, bob_pubkey, 2, zero); let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
input.send(Signal::Events(vec![event0, event1])).unwrap(); tx_sender
drop(input); .send(Signal::Transactions(vec![tx0, tx1]))
.unwrap();
drop(tx_sender);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect(); let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1); assert_eq!(entries.len(), 1);
} }
#[test] #[test]
#[ignore] fn test_clock() {
fn test_ticking_historian() { let (tx_sender, tx_receiver) = channel();
let (input, event_receiver) = channel();
let zero = Hash::default(); let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, Some(Duration::from_millis(20))); let record_stage =
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
sleep(Duration::from_millis(900)); sleep(Duration::from_millis(900));
input.send(Signal::Tick).unwrap(); tx_sender.send(Signal::Tick).unwrap();
drop(input); drop(tx_sender);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect(); let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1); assert!(entries.len() > 1);

View File

@@ -1,10 +1,11 @@
//! The `recorder` module provides an object for generating a Proof of History. //! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. //! It records Transaction items on behalf of its users.
use entry::Entry; use entry::Entry;
use event::Event;
use hash::{hash, Hash}; use hash::{hash, Hash};
use ledger::next_entries_mut;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use transaction::Transaction;
pub struct Recorder { pub struct Recorder {
last_hash: Hash, last_hash: Hash,
@@ -26,15 +27,19 @@ impl Recorder {
self.num_hashes += 1; self.num_hashes += 1;
} }
pub fn record(&mut self, events: Vec<Event>) -> Entry { pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, events) next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
} }
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> { pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) { if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
// TODO: don't let this overflow u32 // TODO: don't let this overflow u32
self.num_ticks += 1; self.num_ticks += 1;
Some(self.record(vec![])) Some(Entry::new_mut(
&mut self.last_hash,
&mut self.num_hashes,
vec![],
))
} else { } else {
None None
} }

View File

@@ -4,9 +4,9 @@ use bank::Bank;
use ledger; use ledger;
use packet; use packet;
use result::Result; use result::Result;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{spawn, JoinHandle}; use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration; use std::time::Duration;
use streamer; use streamer;
@@ -15,23 +15,21 @@ pub struct ReplicateStage {
} }
impl ReplicateStage { impl ReplicateStage {
/// Process verified blobs, already in order /// Process entry blobs, already in order
fn replicate_requests( fn replicate_requests(
bank: &Arc<Bank>, bank: &Arc<Bank>,
verified_receiver: &streamer::BlobReceiver, blob_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler, blob_recycler: &packet::BlobRecycler,
) -> Result<()> { ) -> Result<()> {
let timer = Duration::new(1, 0); let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?; let blobs = blob_receiver.recv_timeout(timer)?;
let entries = ledger::reconstruct_entries_from_blobs(&blobs); let blobs_len = blobs.len();
let res = bank.process_verified_entries(entries); let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
let res = bank.process_entries(entries);
if res.is_err() { if res.is_err() {
error!("process_verified_entries {} {:?}", blobs.len(), res); error!("process_entries {} {:?}", blobs_len, res);
} }
res?; res?;
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(()) Ok(())
} }
@@ -41,12 +39,15 @@ impl ReplicateStage {
window_receiver: streamer::BlobReceiver, window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler, blob_recycler: packet::BlobRecycler,
) -> Self { ) -> Self {
let thread_hdl = spawn(move || loop { let thread_hdl = Builder::new()
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler); .name("solana-replicate-stage".to_string())
if e.is_err() && exit.load(Ordering::Relaxed) { .spawn(move || loop {
break; let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
} if e.is_err() && exit.load(Ordering::Relaxed) {
}); break;
}
})
.unwrap();
ReplicateStage { thread_hdl } ReplicateStage { thread_hdl }
} }
} }

View File

@@ -1,20 +1,9 @@
//! The `request_stage` processes thin client Request messages. //! The `request_processor` processes thin client Request messages.
use bank::Bank; use bank::Bank;
use bincode::{deserialize, serialize};
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::{Request, Response}; use request::{Request, Response};
use result::Result;
use std::collections::VecDeque;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use std::sync::mpsc::Receiver;
use std::time::Instant;
use streamer;
use timing;
pub struct RequestProcessor { pub struct RequestProcessor {
bank: Arc<Bank>, bank: Arc<Bank>,
@@ -62,104 +51,4 @@ impl RequestProcessor {
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr)) .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect() .collect()
} }
fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
// Copy-paste of deserialize_requests() because I can't figure out how to
// route the lifetimes in a generic version.
pub fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
pub fn process_request_packets(
&self,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
info!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);
let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();
let rsps = self.process_requests(reqs);
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
} }

View File

@@ -1,13 +1,20 @@
//! The `request_stage` processes thin client Request messages. //! The `request_stage` processes thin client Request messages.
use bincode::deserialize;
use packet; use packet;
use packet::SharedPackets; use packet::SharedPackets;
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor; use request_processor::RequestProcessor;
use std::sync::Arc; use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver}; use std::sync::mpsc::{channel, Receiver};
use std::thread::{spawn, JoinHandle}; use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Instant;
use streamer; use streamer;
use timing;
pub struct RequestStage { pub struct RequestStage {
pub thread_hdl: JoinHandle<()>, pub thread_hdl: JoinHandle<()>,
@@ -16,6 +23,63 @@ pub struct RequestStage {
} }
impl RequestStage { impl RequestStage {
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
pub fn process_request_packets(
request_processor: &RequestProcessor,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
debug!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);
let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();
let rsps = request_processor.process_requests(reqs);
let blobs = packet::to_blobs(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
debug!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
pub fn new( pub fn new(
request_processor: RequestProcessor, request_processor: RequestProcessor,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
@@ -26,19 +90,23 @@ impl RequestStage {
let request_processor = Arc::new(request_processor); let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone(); let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel(); let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop { let thread_hdl = Builder::new()
let e = request_processor_.process_request_packets( .name("solana-request-stage".to_string())
&packet_receiver, .spawn(move || loop {
&blob_sender, let e = Self::process_request_packets(
&packet_recycler, &request_processor_,
&blob_recycler, &packet_receiver,
); &blob_sender,
if e.is_err() { &packet_recycler,
if exit.load(Ordering::Relaxed) { &blob_recycler,
break; );
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
} }
} })
}); .unwrap();
RequestStage { RequestStage {
thread_hdl, thread_hdl,
blob_receiver, blob_receiver,

View File

@@ -80,9 +80,9 @@ mod tests {
use std::io::Write; use std::io::Write;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::panic; use std::panic;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError; use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError; use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::channel;
use std::thread; use std::thread;
fn addr_parse_error() -> Result<SocketAddr> { fn addr_parse_error() -> Result<SocketAddr> {

View File

@@ -1,14 +1,36 @@
//! The `rpu` module implements the Request Processing Unit, a //! The `rpu` module implements the Request Processing Unit, a
//! 5-stage transaction processing pipeline in software. //! 3-stage transaction processing pipeline in software. It listens
//! for `Request` messages from clients and replies with `Response`
//! messages.
//!
//! ```text
//! .------.
//! | Bank |
//! `---+--`
//! |
//! .------------------|-------------------.
//! | RPU | |
//! | v |
//! .---------. | .-------. .---------. .---------. | .---------.
//! | Alice |--->| | | | | +---->| Alice |
//! `---------` | | Fetch | | Request | | Respond | | `---------`
//! | | Stage |->| Stage |->| Stage | |
//! .---------. | | | | | | | | .---------.
//! | Bob |--->| | | | | +---->| Bob |
//! `---------` | `-------` `---------` `---------` | `---------`
//! | |
//! | |
//! `--------------------------------------`
//! ```
use bank::Bank; use bank::Bank;
use packet; use packet;
use request_processor::RequestProcessor; use request_processor::RequestProcessor;
use request_stage::RequestStage; use request_stage::RequestStage;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle; use std::thread::JoinHandle;
use streamer; use streamer;

View File

@@ -1,15 +1,17 @@
//! The `server` module hosts all the server microservices. //! The `server` module hosts all the server microservices.
use bank::Bank; use bank::Bank;
use crdt::ReplicatedData; use crdt::{Crdt, ReplicatedData};
use hash::Hash; use ncp::Ncp;
use packet;
use rpu::Rpu; use rpu::Rpu;
use std::io::Write; use std::io::Write;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle; use std::thread::JoinHandle;
use std::time::Duration; use std::time::Duration;
use streamer;
use tpu::Tpu; use tpu::Tpu;
use tvu::Tvu; use tvu::Tvu;
@@ -18,13 +20,36 @@ pub struct Server {
} }
impl Server { impl Server {
/// Create a server instance acting as a leader.
///
/// ```text
/// .---------------------.
/// | Leader |
/// | |
/// .--------. | .-----. |
/// | |---->| | |
/// | Client | | | RPU | |
/// | |<----| | |
/// `----+---` | `-----` |
/// | | ^ |
/// | | | |
/// | | .--+---. |
/// | | | Bank | |
/// | | `------` |
/// | | ^ |
/// | | | | .------------.
/// | | .--+--. .-----. | | |
/// `-------->| TPU +-->| NCP +------>| Validators |
/// | `-----` `-----` | | |
/// | | `------------`
/// `---------------------`
/// ```
pub fn new_leader<W: Write + Send + 'static>( pub fn new_leader<W: Write + Send + 'static>(
bank: Bank, bank: Bank,
start_hash: Hash,
tick_duration: Option<Duration>, tick_duration: Option<Duration>,
me: ReplicatedData, me: ReplicatedData,
requests_socket: UdpSocket, requests_socket: UdpSocket,
events_socket: UdpSocket, transactions_socket: UdpSocket,
broadcast_socket: UdpSocket, broadcast_socket: UdpSocket,
respond_socket: UdpSocket, respond_socket: UdpSocket,
gossip_socket: UdpSocket, gossip_socket: UdpSocket,
@@ -35,43 +60,145 @@ impl Server {
let mut thread_hdls = vec![]; let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone()); let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls); thread_hdls.extend(rpu.thread_hdls);
let blob_recycler = packet::BlobRecycler::default();
let tpu = Tpu::new( let tpu = Tpu::new(
bank.clone(), bank.clone(),
start_hash,
tick_duration, tick_duration,
me, transactions_socket,
events_socket, blob_recycler.clone(),
broadcast_socket,
gossip_socket,
exit.clone(), exit.clone(),
writer, writer,
); );
thread_hdls.extend(tpu.thread_hdls); thread_hdls.extend(tpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
thread_hdls.extend(ncp.thread_hdls);
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt,
window,
blob_recycler.clone(),
tpu.blob_receiver,
);
thread_hdls.extend(vec![t_broadcast]);
Server { thread_hdls } Server { thread_hdls }
} }
/// Create a server instance acting as a validator.
///
/// ```text
/// .-------------------------------.
/// | Validator |
/// | |
/// .--------. | .-----. |
/// | |-------------->| | |
/// | Client | | | RPU | |
/// | |<--------------| | |
/// `--------` | `-----` |
/// | ^ |
/// | | |
/// | .--+---. |
/// | | Bank | |
/// | `------` |
/// | ^ |
/// .--------. | | | .------------.
/// | | | .--+--. | | |
/// | Leader |<------------->| TVU +<--------------->| |
/// | | | `-----` | | Validators |
/// | | | ^ | | |
/// | | | | | | |
/// | | | .--+--. | | |
/// | |<------------->| NCP +<--------------->| |
/// | | | `-----` | | |
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
pub fn new_validator( pub fn new_validator(
bank: Bank, bank: Bank,
me: ReplicatedData, me: ReplicatedData,
requests_socket: UdpSocket, requests_socket: UdpSocket,
respond_socket: UdpSocket, respond_socket: UdpSocket,
replicate_socket: UdpSocket, replicate_socket: UdpSocket,
gossip_socket: UdpSocket, gossip_listen_socket: UdpSocket,
leader_repl_data: ReplicatedData, repair_socket: UdpSocket,
entry_point: ReplicatedData,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
) -> Self { ) -> Self {
let bank = Arc::new(bank); let bank = Arc::new(bank);
let mut thread_hdls = vec![]; let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone()); let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls); thread_hdls.extend(rpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&entry_point);
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_listen_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
let tvu = Tvu::new( let tvu = Tvu::new(
bank.clone(), bank.clone(),
me, crdt.clone(),
gossip_socket, window.clone(),
replicate_socket, replicate_socket,
leader_repl_data, repair_socket,
retransmit_socket,
exit.clone(), exit.clone(),
); );
thread_hdls.extend(tvu.thread_hdls); thread_hdls.extend(tvu.thread_hdls);
thread_hdls.extend(ncp.thread_hdls);
Server { thread_hdls } Server { thread_hdls }
} }
} }
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::TestNode;
use mint::Mint;
use server::Server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[test]
fn validator_exit() {
let tn = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let v = Server::new_validator(
bank,
tn.data.clone(),
tn.sockets.requests,
tn.sockets.respond,
tn.sockets.replicate,
tn.sockets.gossip,
tn.sockets.repair,
tn.data,
exit.clone(),
);
exit.store(true, Ordering::Relaxed);
for t in v.thread_hdls {
t.join().unwrap();
}
}
}

View File

@@ -1,7 +1,7 @@
//! The `signature` module provides functionality for public, and private keys. //! The `signature` module provides functionality for public, and private keys.
use generic_array::GenericArray;
use generic_array::typenum::{U32, U64}; use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng}; use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*; use rayon::prelude::*;
use ring::error::Unspecified; use ring::error::Unspecified;
@@ -56,9 +56,8 @@ pub struct GenKeys {
} }
impl GenKeys { impl GenKeys {
pub fn new(seed: &[u8]) -> GenKeys { pub fn new(seed: [u8; 32]) -> GenKeys {
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect(); let rng = ChaChaRng::from_seed(seed);
let rng = ChaChaRng::from_seed(&seed32);
GenKeys { GenKeys {
generator: RefCell::new(rng), generator: RefCell::new(rng),
} }
@@ -68,7 +67,7 @@ impl GenKeys {
KeyPair::generate_pkcs8(self).unwrap().to_vec() KeyPair::generate_pkcs8(self).unwrap().to_vec()
} }
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> { pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
let mut rng = self.generator.borrow_mut(); let mut rng = self.generator.borrow_mut();
(0..n).map(|_| rng.gen()).collect() (0..n).map(|_| rng.gen()).collect()
} }
@@ -77,7 +76,7 @@ impl GenKeys {
self.gen_n_seeds(n) self.gen_n_seeds(n)
.into_par_iter() .into_par_iter()
.map(|seed| { .map(|seed| {
let pkcs8 = GenKeys::new(&seed).new_key(); let pkcs8 = GenKeys::new(seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap() KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
}) })
.collect() .collect()
@@ -87,7 +86,7 @@ impl GenKeys {
impl SecureRandom for GenKeys { impl SecureRandom for GenKeys {
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> { fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
let mut rng = self.generator.borrow_mut(); let mut rng = self.generator.borrow_mut();
rng.fill_bytes(dest); rng.fill(dest);
Ok(()) Ok(())
} }
} }
@@ -99,17 +98,17 @@ mod tests {
#[test] #[test]
fn test_new_key_is_deterministic() { fn test_new_key_is_deterministic() {
let seed = [1, 2, 3, 4]; let seed = [0u8; 32];
let rng0 = GenKeys::new(&seed); let rng0 = GenKeys::new(seed);
let rng1 = GenKeys::new(&seed); let rng1 = GenKeys::new(seed);
for _ in 0..100 { for _ in 0..100 {
assert_eq!(rng0.new_key(), rng1.new_key()); assert_eq!(rng0.new_key(), rng1.new_key());
} }
} }
fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> { fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
GenKeys::new(&seed) GenKeys::new(seed)
.gen_n_keypairs(n) .gen_n_keypairs(n)
.into_iter() .into_iter()
.map(|x| x.pubkey()) .map(|x| x.pubkey())
@@ -118,8 +117,8 @@ mod tests {
#[test] #[test]
fn test_gen_n_pubkeys_deterministic() { fn test_gen_n_pubkeys_deterministic() {
let seed = [1, 2, 3, 4]; let seed = [0u8; 32];
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50)); assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
} }
} }
@@ -132,8 +131,7 @@ mod bench {
#[bench] #[bench]
fn bench_gen_keys(b: &mut Bencher) { fn bench_gen_keys(b: &mut Bencher) {
let seed: &[_] = &[1, 2, 3, 4]; let rnd = GenKeys::new([0u8; 32]);
let rnd = GenKeys::new(seed);
b.iter(|| rnd.gen_n_keypairs(1000)); b.iter(|| rnd.gen_n_keypairs(1000));
} }
} }

View File

@@ -1,8 +1,17 @@
//! The `sigverify` module provides digital signature verification functions.
//! By default, signatures are verified in parallel using all available CPU
//! cores. When `--features=cuda` is enabled, signature verification is
//! offloaded to the GPU.
//!
use counter::Counter;
use packet::{Packet, SharedPackets}; use packet::{Packet, SharedPackets};
use std::mem::size_of; use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET}; use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 4; pub const TX_OFFSET: usize = 0;
#[cfg(feature = "cuda")] #[cfg(feature = "cuda")]
#[repr(C)] #[repr(C)]
@@ -55,14 +64,17 @@ fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches batches
.iter() .iter()
.map(|p| p.read().unwrap().packets.len()) .map(|p| p.read().unwrap().packets.len())
.fold(0, |x, y| x + y) .sum()
} }
#[cfg(not(feature = "cuda"))] #[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> { pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*; use rayon::prelude::*;
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CPU ECDSA for {}", batch_size(batches)); info!("CPU ECDSA for {}", batch_size(batches));
batches let rv = batches
.into_par_iter() .into_par_iter()
.map(|p| { .map(|p| {
p.read() p.read()
@@ -72,13 +84,17 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
.map(verify_packet) .map(verify_packet)
.collect() .collect()
}) })
.collect() .collect();
inc_counter!(COUNTER, count, start);
rv
} }
#[cfg(feature = "cuda")] #[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> { pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE; use packet::PACKET_DATA_SIZE;
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CUDA ECDSA for {}", batch_size(batches)); info!("CUDA ECDSA for {}", batch_size(batches));
let mut out = Vec::new(); let mut out = Vec::new();
let mut elems = Vec::new(); let mut elems = Vec::new();
@@ -137,39 +153,39 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
num += 1; num += 1;
} }
} }
inc_counter!(COUNTER, count, start);
rvs rvs
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use bincode::serialize; use bincode::serialize;
use ecdsa;
use event::Event;
use packet::{Packet, Packets, SharedPackets}; use packet::{Packet, Packets, SharedPackets};
use sigverify;
use std::sync::RwLock; use std::sync::RwLock;
use transaction::Transaction; use transaction::Transaction;
use transaction::{memfind, test_tx}; use transaction::{memfind, test_tx};
#[test] #[test]
fn test_layout() { fn test_layout() {
let tr = test_tx(); let tx = test_tx();
let tx = serialize(&tr).unwrap(); let tx_bytes = serialize(&tx).unwrap();
let packet = serialize(&Event::Transaction(tr)).unwrap(); let packet = serialize(&tx).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET)); assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None); assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
} }
fn make_packet_from_transaction(tr: Transaction) -> Packet { fn make_packet_from_transaction(tx: Transaction) -> Packet {
let tx = serialize(&Event::Transaction(tr)).unwrap(); let tx_bytes = serialize(&tx).unwrap();
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.meta.size = tx.len(); packet.meta.size = tx_bytes.len();
packet.data[..packet.meta.size].copy_from_slice(&tx); packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
return packet; return packet;
} }
fn test_verify_n(n: usize, modify_data: bool) { fn test_verify_n(n: usize, modify_data: bool) {
let tr = test_tx(); let tx = test_tx();
let mut packet = make_packet_from_transaction(tr); let mut packet = make_packet_from_transaction(tx);
// jumble some data to test failure // jumble some data to test failure
if modify_data { if modify_data {
@@ -186,7 +202,7 @@ mod tests {
let batches = vec![shared_packets.clone(), shared_packets.clone()]; let batches = vec![shared_packets.clone(), shared_packets.clone()];
// verify packets // verify packets
let ans = ecdsa::ed25519_verify(&batches); let ans = sigverify::ed25519_verify(&batches);
// check result // check result
let ref_ans = if modify_data { 0u8 } else { 1u8 }; let ref_ans = if modify_data { 0u8 } else { 1u8 };

View File

@@ -1,9 +1,14 @@
//! The `sig_verify_stage` implements the signature verification stage of the TPU. //! The `sigverify_stage` implements the signature verification stage of the TPU. It
//! receives a list of lists of packets and outputs the same list, but tags each
//! top-level list with a list of booleans, telling the next stage whether the
//! signature in that packet is valid. It assumes each packet contains one
//! transaction. All processing is done on the CPU by default and on a GPU
//! if the `cuda` feature is enabled with `--features=cuda`.
use ecdsa;
use packet::SharedPackets; use packet::SharedPackets;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use result::Result; use result::Result;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
@@ -28,7 +33,7 @@ impl SigVerifyStage {
} }
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> { fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = ecdsa::ed25519_verify(&batch); let r = sigverify::ed25519_verify(&batch);
batch.into_iter().zip(r).collect() batch.into_iter().zip(r).collect()
} }

View File

@@ -1,15 +1,16 @@
//! The `streamer` module defines a set of services for effecently pulling data from udp sockets. //! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crdt::Crdt; use crdt::Crdt;
#[cfg(feature = "erasure")] #[cfg(feature = "erasure")]
use erasure; use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets}; use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
use result::{Error, Result}; use result::{Error, Result};
use std::collections::VecDeque; use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket}; use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc; use std::sync::mpsc;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle}; use std::thread::{Builder, JoinHandle};
use std::time::Duration; use std::time::Duration;
pub const WINDOW_SIZE: usize = 2 * 1024; pub const WINDOW_SIZE: usize = 2 * 1024;
@@ -17,6 +18,7 @@ pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>; pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>; pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>; pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
fn recv_loop( fn recv_loop(
sock: &UdpSocket, sock: &UdpSocket,
@@ -57,10 +59,13 @@ pub fn receiver(
if res.is_err() { if res.is_err() {
panic!("streamer::receiver set_read_timeout error"); panic!("streamer::receiver set_read_timeout error");
} }
spawn(move || { Builder::new()
let _ = recv_loop(&sock, &exit, &recycler, &packet_sender); .name("solana-receiver".to_string())
() .spawn(move || {
}) let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
()
})
.unwrap()
} }
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> { fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
@@ -95,16 +100,20 @@ pub fn responder(
recycler: BlobRecycler, recycler: BlobRecycler,
r: BlobReceiver, r: BlobReceiver,
) -> JoinHandle<()> { ) -> JoinHandle<()> {
spawn(move || loop { Builder::new()
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) { .name("solana-responder".to_string())
break; .spawn(move || loop {
} if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
}) break;
}
})
.unwrap()
} }
//TODO, we would need to stick block authentication before we create the //TODO, we would need to stick block authentication before we create the
//window. //window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> { fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
trace!("receiving on {}", sock.local_addr().unwrap());
let dq = Blob::recv_from(recycler, sock)?; let dq = Blob::recv_from(recycler, sock)?;
if !dq.is_empty() { if !dq.is_empty() {
s.send(dq)?; s.send(dq)?;
@@ -122,17 +131,20 @@ pub fn blob_receiver(
//1 second timeout on socket read //1 second timeout on socket read
let timer = Duration::new(1, 0); let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?; sock.set_read_timeout(Some(timer))?;
let t = spawn(move || loop { let t = Builder::new()
if exit.load(Ordering::Relaxed) { .name("solana-blob_receiver".to_string())
break; .spawn(move || loop {
} if exit.load(Ordering::Relaxed) {
let _ = recv_blobs(&recycler, &sock, &s); break;
}); }
let _ = recv_blobs(&recycler, &sock, &s);
})
.unwrap();
Ok(t) Ok(t)
} }
fn find_next_missing( fn find_next_missing(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>, locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>, crdt: &Arc<RwLock<Crdt>>,
consumed: &mut usize, consumed: &mut usize,
received: &mut usize, received: &mut usize,
@@ -157,14 +169,26 @@ fn find_next_missing(
} }
fn repair_window( fn repair_window(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>, locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>, crdt: &Arc<RwLock<Crdt>>,
_recycler: &BlobRecycler,
last: &mut usize, last: &mut usize,
times: &mut usize, times: &mut usize,
consumed: &mut usize, consumed: &mut usize,
received: &mut usize, received: &mut usize,
) -> Result<()> { ) -> Result<()> {
let reqs = find_next_missing(locked_window, crdt, consumed, received)?; #[cfg(feature = "erasure")]
{
if erasure::recover(
_recycler,
&mut locked_window.write().unwrap(),
*consumed,
*received,
).is_err()
{
trace!("erasure::recover failed");
}
}
//exponential backoff //exponential backoff
if *last != *consumed { if *last != *consumed {
*times = 0; *times = 0;
@@ -176,17 +200,19 @@ fn repair_window(
trace!("repair_window counter {} {}", *times, *consumed); trace!("repair_window counter {} {}", *times, *consumed);
return Ok(()); return Ok(());
} }
info!("repair_window request {} {}", *consumed, *received); let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
let sock = UdpSocket::bind("0.0.0.0:0")?; let sock = UdpSocket::bind("0.0.0.0:0")?;
for (to, req) in reqs { for (to, req) in reqs {
//todo cache socket //todo cache socket
info!("repair_window request {} {} {}", *consumed, *received, to);
assert!(req.len() < BLOB_SIZE);
sock.send_to(&req, to)?; sock.send_to(&req, to)?;
} }
Ok(()) Ok(())
} }
fn recv_window( fn recv_window(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>, locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>, crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler, recycler: &BlobRecycler,
consumed: &mut usize, consumed: &mut usize,
@@ -220,11 +246,11 @@ fn recv_window(
); );
if p.get_id().expect("get_id in fn recv_window") == leader_id { if p.get_id().expect("get_id in fn recv_window") == leader_id {
//TODO //TODO
//need to copy the retransmited blob //need to copy the retransmitted blob
//otherwise we get into races with which thread //otherwise we get into races with which thread
//should do the recycling //should do the recycling
// //
//a better absraction would be to recycle when the blob //a better abstraction would be to recycle when the blob
//is dropped via a weakref to the recycler //is dropped via a weakref to the recycler
let nv = recycler.allocate(); let nv = recycler.allocate();
{ {
@@ -249,16 +275,26 @@ fn recv_window(
if pix > *received { if pix > *received {
*received = pix; *received = pix;
} }
// Got a blob which has already been consumed, skip it
// probably from a repair window request
if pix < *consumed {
debug!(
"received: {} but older than consumed: {} skipping..",
pix, *consumed
);
continue;
}
let w = pix % WINDOW_SIZE; let w = pix % WINDOW_SIZE;
//TODO, after the block are authenticated //TODO, after the block are authenticated
//if we get different blocks at the same index //if we get different blocks at the same index
//that is a network failure/attack //that is a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size); trace!("window w: {} size: {}", w, p.meta.size);
drop(p);
{ {
let mut window = locked_window.write().unwrap(); let mut window = locked_window.write().unwrap();
if window[w].is_none() { if window[w].is_none() {
window[w] = Some(b_); window[w] = Some(b_);
} else if let &Some(ref cblob) = &window[w] { } else if let Some(cblob) = &window[w] {
if cblob.read().unwrap().get_index().unwrap() != pix as u64 { if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
warn!("overrun blob at index {:}", w); warn!("overrun blob at index {:}", w);
} else { } else {
@@ -271,31 +307,45 @@ fn recv_window(
if window[k].is_none() { if window[k].is_none() {
break; break;
} }
contq.push_back(window[k].clone().expect("clone in fn recv_window")); let mut is_coding = false;
window[k] = None; if let &Some(ref cblob) = &window[k] {
*consumed += 1; if cblob
.read()
.expect("blob read lock for flags streamer::window")
.is_coding()
{
is_coding = true;
}
}
if !is_coding {
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
*consumed += 1;
#[cfg(not(feature = "erasure"))]
{
window[k] = None;
}
} else {
#[cfg(feature = "erasure")]
{
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
let coding_end = block_start + erasure::NUM_CODED;
// We've received all this block's data blobs, go and null out the window now
for j in block_start..coding_end {
window[j % WINDOW_SIZE] = None;
}
*consumed += erasure::MAX_MISSING;
debug!(
"skipping processing coding blob k: {} consumed: {}",
k, *consumed
);
}
}
} }
} }
} }
{ print_window(locked_window, *consumed);
let buf: Vec<_> = locked_window
.read()
.unwrap()
.iter()
.enumerate()
.map(|(i, v)| {
if i == (*consumed % WINDOW_SIZE) {
assert!(v.is_none());
"_"
} else if v.is_none() {
"0"
} else {
"1"
}
})
.collect();
trace!("WINDOW: {}", buf.join(""));
}
trace!("sending contq.len: {}", contq.len()); trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() { if !contq.is_empty() {
trace!("sending contq.len: {}", contq.len()); trace!("sending contq.len: {}", contq.len());
@@ -304,68 +354,110 @@ fn recv_window(
Ok(()) Ok(())
} }
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> { fn print_window(locked_window: &Window, consumed: usize) {
{
let buf: Vec<_> = locked_window
.read()
.unwrap()
.iter()
.enumerate()
.map(|(i, v)| {
if i == (consumed % WINDOW_SIZE) {
"_"
} else if v.is_none() {
"0"
} else {
if let &Some(ref cblob) = &v {
if cblob.read().unwrap().is_coding() {
"C"
} else {
"1"
}
} else {
"0"
}
}
})
.collect();
debug!("WINDOW ({}): {}", consumed, buf.join(""));
}
}
pub fn default_window() -> Window {
Arc::new(RwLock::new(vec![None; WINDOW_SIZE])) Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
} }
pub fn window( pub fn window(
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>, crdt: Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>, window: Window,
recycler: BlobRecycler, recycler: BlobRecycler,
r: BlobReceiver, r: BlobReceiver,
s: BlobSender, s: BlobSender,
retransmit: BlobSender, retransmit: BlobSender,
) -> JoinHandle<()> { ) -> JoinHandle<()> {
spawn(move || { Builder::new()
let mut consumed = 0; .name("solana-window".to_string())
let mut received = 0; .spawn(move || {
let mut last = 0; let mut consumed = 0;
let mut times = 0; let mut received = 0;
loop { let mut last = 0;
if exit.load(Ordering::Relaxed) { let mut times = 0;
break; loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
&window,
&crdt,
&recycler,
&mut consumed,
&mut received,
&r,
&s,
&retransmit,
);
let _ = repair_window(
&window,
&crdt,
&recycler,
&mut last,
&mut times,
&mut consumed,
&mut received,
);
} }
let _ = recv_window( })
&window, .unwrap()
&crdt,
&recycler,
&mut consumed,
&mut received,
&r,
&s,
&retransmit,
);
let _ = repair_window(
&window,
&crdt,
&mut last,
&mut times,
&mut consumed,
&mut received,
);
}
})
} }
fn broadcast( fn broadcast(
crdt: &Arc<RwLock<Crdt>>, crdt: &Arc<RwLock<Crdt>>,
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>, window: &Window,
recycler: &BlobRecycler, recycler: &BlobRecycler,
r: &BlobReceiver, r: &BlobReceiver,
sock: &UdpSocket, sock: &UdpSocket,
transmit_index: &mut u64, transmit_index: &mut u64,
receive_index: &mut u64,
) -> Result<()> { ) -> Result<()> {
let timer = Duration::new(1, 0); let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?; let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() { while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq); dq.append(&mut nq);
} }
let mut blobs = dq.into_iter().collect(); let mut blobs: Vec<_> = dq.into_iter().collect();
/// appends codes to the list of blobs allowing us to reconstruct the stream
print_window(window, *receive_index as usize);
// Insert the coding blobs into the blob stream
#[cfg(feature = "erasure")] #[cfg(feature = "erasure")]
erasure::generate_coding(re, blobs, consumed); erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
let blobs_len = blobs.len();
info!("broadcast blobs.len: {}", blobs_len);
// Index the blobs
Crdt::index_blobs(crdt, &blobs, receive_index)?;
// keep the cache of blobs that are broadcast // keep the cache of blobs that are broadcast
{ {
let mut win = window.write().unwrap(); let mut win = window.write().unwrap();
@@ -392,6 +484,24 @@ fn broadcast(
win[pos] = Some(b); win[pos] = Some(b);
} }
} }
// Fill in the coding blob data from the window data blobs
#[cfg(feature = "erasure")]
{
if erasure::generate_coding(
&mut window.write().unwrap(),
*receive_index as usize,
blobs_len,
).is_err()
{
return Err(Error::GenericError);
}
}
*receive_index += blobs_len as u64;
// Send blobs out from the window
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
Ok(()) Ok(())
} }
@@ -408,19 +518,31 @@ pub fn broadcaster(
sock: UdpSocket, sock: UdpSocket,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>, crdt: Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>, window: Window,
recycler: BlobRecycler, recycler: BlobRecycler,
r: BlobReceiver, r: BlobReceiver,
) -> JoinHandle<()> { ) -> JoinHandle<()> {
spawn(move || { Builder::new()
let mut transmit_index = 0; .name("solana-broadcaster".to_string())
loop { .spawn(move || {
if exit.load(Ordering::Relaxed) { let mut transmit_index = 0;
break; let mut receive_index = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = broadcast(
&crdt,
&window,
&recycler,
&r,
&sock,
&mut transmit_index,
&mut receive_index,
);
} }
let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index); })
} .unwrap()
})
} }
fn retransmit( fn retransmit(
@@ -460,24 +582,27 @@ pub fn retransmitter(
recycler: BlobRecycler, recycler: BlobRecycler,
r: BlobReceiver, r: BlobReceiver,
) -> JoinHandle<()> { ) -> JoinHandle<()> {
spawn(move || { Builder::new()
trace!("retransmitter started"); .name("solana-retransmitter".to_string())
loop { .spawn(move || {
if exit.load(Ordering::Relaxed) { trace!("retransmitter started");
break; loop {
if exit.load(Ordering::Relaxed) {
break;
}
// TODO: handle this error
let _ = retransmit(&crdt, &recycler, &r, &sock);
} }
// TODO: handle this error trace!("exiting retransmitter");
let _ = retransmit(&crdt, &recycler, &r, &sock); })
} .unwrap()
trace!("exiting retransmitter");
})
} }
#[cfg(all(feature = "unstable", test))] #[cfg(all(feature = "unstable", test))]
mod bench { mod bench {
extern crate test; extern crate test;
use self::test::Bencher; use self::test::Bencher;
use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE}; use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use result::Result; use result::Result;
use std::net::{SocketAddr, UdpSocket}; use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
@@ -509,6 +634,7 @@ mod bench {
let mut num = 0; let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() { for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr(); let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap(); send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1; num += 1;
} }
@@ -537,7 +663,8 @@ mod bench {
} }
}) })
} }
fn run_streamer_bench() -> Result<()> {
fn bench_streamer_with_result() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?; let read = UdpSocket::bind("127.0.0.1:0")?;
read.set_read_timeout(Some(Duration::new(1, 0)))?; read.set_read_timeout(Some(Duration::new(1, 0)))?;
@@ -572,18 +699,15 @@ mod bench {
Ok(()) Ok(())
} }
#[bench] #[bench]
pub fn streamer_bench(_bench: &mut Bencher) { pub fn bench_streamer(_bench: &mut Bencher) {
run_streamer_bench().unwrap(); bench_streamer_with_result().unwrap();
} }
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crdt::{Crdt, ReplicatedData}; use crdt::{Crdt, TestNode};
use logger;
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE}; use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -591,10 +715,9 @@ mod test {
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
use streamer::{blob_receiver, receiver, responder, window};
use streamer::{default_window, BlobReceiver, PacketReceiver}; use streamer::{default_window, BlobReceiver, PacketReceiver};
use streamer::{blob_receiver, receiver, responder, retransmitter, window};
fn get_msgs(r: PacketReceiver, num: &mut usize) { fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 { for _t in 0..5 {
@@ -667,29 +790,21 @@ mod test {
#[test] #[test]
pub fn window_send_test() { pub fn window_send_test() {
let pubkey_me = KeyPair::new().pubkey(); let tn = TestNode::new();
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
let event = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let rep_data = ReplicatedData::new( let mut crdt_me = Crdt::new(tn.data.clone());
pubkey_me,
read.local_addr().unwrap(),
send.local_addr().unwrap(),
serve.local_addr().unwrap(),
event.local_addr().unwrap(),
);
let mut crdt_me = Crdt::new(rep_data);
let me_id = crdt_me.my_data().id; let me_id = crdt_me.my_data().id;
crdt_me.set_leader(me_id); crdt_me.set_leader(me_id);
let subs = Arc::new(RwLock::new(crdt_me)); let subs = Arc::new(RwLock::new(crdt_me));
let resp_recycler = BlobRecycler::default(); let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel(); let (s_reader, r_reader) = channel();
let t_receiver = let t_receiver = blob_receiver(
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap(); exit.clone(),
resp_recycler.clone(),
tn.sockets.gossip,
s_reader,
).unwrap();
let (s_window, r_window) = channel(); let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel(); let (s_retransmit, r_retransmit) = channel();
let win = default_window(); let win = default_window();
@@ -703,7 +818,12 @@ mod test {
s_retransmit, s_retransmit,
); );
let (s_responder, r_responder) = channel(); let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder); let t_responder = responder(
tn.sockets.replicate,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let mut msgs = VecDeque::new(); let mut msgs = VecDeque::new();
for v in 0..10 { for v in 0..10 {
let i = 9 - v; let i = 9 - v;
@@ -714,7 +834,7 @@ mod test {
w.set_id(me_id).unwrap(); w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap()); assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE; w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr); w.meta.set_addr(&tn.data.gossip_addr);
msgs.push_back(b_); msgs.push_back(b_);
} }
s_responder.send(msgs).expect("send"); s_responder.send(msgs).expect("send");
@@ -731,111 +851,4 @@ mod test {
t_responder.join().expect("join"); t_responder.join().expect("join");
t_window.join().expect("join"); t_window.join().expect("join");
} }
fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
let event = UdpSocket::bind("127.0.0.1:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
event.local_addr().unwrap(),
);
trace!("data: {:?}", d);
let crdt = Crdt::new(d);
(Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
}
#[test]
#[ignore]
//retransmit from leader to replicate target
pub fn retransmit() {
logger::setup();
trace!("retransmit test start");
let exit = Arc::new(AtomicBool::new(false));
let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
let leader_data = crdt_leader.read().unwrap().my_data().clone();
crdt_leader.write().unwrap().insert(&leader_data);
crdt_leader.write().unwrap().set_leader(leader_data.id);
let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
let window_leader = Arc::new(RwLock::new(vec![]));
let t_crdt_leader_l = Crdt::listen(
crdt_leader.clone(),
window_leader,
sock_gossip_leader,
exit.clone(),
);
crdt_target.write().unwrap().insert(&leader_data);
crdt_target.write().unwrap().set_leader(leader_data.id);
let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
let window_target = Arc::new(RwLock::new(vec![]));
let t_crdt_target_l = Crdt::listen(
crdt_target.clone(),
window_target,
sock_gossip_target,
exit.clone(),
);
//leader retransmitter
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
let saddr = sock_leader.local_addr().unwrap();
let t_retransmit = retransmitter(
sock_leader,
exit.clone(),
crdt_leader.clone(),
blob_recycler.clone(),
r_retransmit,
);
//target receiver
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver = blob_receiver(
exit.clone(),
blob_recycler.clone(),
sock_replicate_target,
s_blob_receiver,
).unwrap();
for _ in 0..10 {
let done = crdt_target.read().unwrap().update_index == 2
&& crdt_leader.read().unwrap().update_index == 2;
if done {
break;
}
let timer = Duration::new(1, 0);
sleep(timer);
}
//send the data through
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
let timer = Duration::new(5, 0);
trace!("Waiting for timeout");
let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
let threads = vec![
t_receiver,
t_retransmit,
t_crdt_target_g,
t_crdt_target_l,
t_crdt_leader_g,
t_crdt_leader_l,
];
for t in threads {
t.join().unwrap();
}
}
} }

View File

@@ -4,8 +4,6 @@
//! unstable and may change in future releases. //! unstable and may change in future releases.
use bincode::{deserialize, serialize}; use bincode::{deserialize, serialize};
use event::Event;
use futures::future::{ok, FutureResult};
use hash::Hash; use hash::Hash;
use request::{Request, Response}; use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature}; use signature::{KeyPair, PublicKey, Signature};
@@ -14,11 +12,12 @@ use std::io;
use std::net::{SocketAddr, UdpSocket}; use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction; use transaction::Transaction;
/// An object for querying and sending transactions to the network.
pub struct ThinClient { pub struct ThinClient {
requests_addr: SocketAddr, requests_addr: SocketAddr,
requests_socket: UdpSocket, requests_socket: UdpSocket,
events_addr: SocketAddr, transactions_addr: SocketAddr,
events_socket: UdpSocket, transactions_socket: UdpSocket,
last_id: Option<Hash>, last_id: Option<Hash>,
transaction_count: u64, transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>, balances: HashMap<PublicKey, Option<i64>>,
@@ -26,19 +25,19 @@ pub struct ThinClient {
impl ThinClient { impl ThinClient {
/// Create a new ThinClient that will interface with Rpu /// Create a new ThinClient that will interface with Rpu
/// over `requests_socket` and `events_socket`. To receive responses, the caller must bind `socket` /// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking ThinClient methods. /// to a public address before invoking ThinClient methods.
pub fn new( pub fn new(
requests_addr: SocketAddr, requests_addr: SocketAddr,
requests_socket: UdpSocket, requests_socket: UdpSocket,
events_addr: SocketAddr, transactions_addr: SocketAddr,
events_socket: UdpSocket, transactions_socket: UdpSocket,
) -> Self { ) -> Self {
let client = ThinClient { let client = ThinClient {
requests_addr, requests_addr,
requests_socket, requests_socket,
events_addr, transactions_addr,
events_socket, transactions_socket,
last_id: None, last_id: None,
transaction_count: 0, transaction_count: 0,
balances: HashMap::new(), balances: HashMap::new(),
@@ -74,10 +73,10 @@ impl ThinClient {
/// Send a signed Transaction to the server for processing. This method /// Send a signed Transaction to the server for processing. This method
/// does not wait for a response. /// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> { pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
let event = Event::Transaction(tr); let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
let data = serialize(&event).expect("serialize Transaction in pub fn transfer_signed"); self.transactions_socket
self.events_socket.send_to(&data, &self.events_addr) .send_to(&data, &self.transactions_addr)
} }
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests. /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
@@ -88,9 +87,9 @@ impl ThinClient {
to: PublicKey, to: PublicKey,
last_id: &Hash, last_id: &Hash,
) -> io::Result<Signature> { ) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id); let tx = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig; let sig = tx.sig;
self.transfer_signed(tr).map(|_| sig) self.transfer_signed(tx).map(|_| sig)
} }
/// Request the balance of the user holding `pubkey`. This method blocks /// Request the balance of the user holding `pubkey`. This method blocks
@@ -107,7 +106,7 @@ impl ThinClient {
while !done { while !done {
let resp = self.recv_response()?; let resp = self.recv_response()?;
trace!("recv_response {:?}", resp); trace!("recv_response {:?}", resp);
if let &Response::Balance { ref key, .. } = &resp { if let Response::Balance { key, .. } = &resp {
done = key == pubkey; done = key == pubkey;
} }
self.process_response(resp); self.process_response(resp);
@@ -122,78 +121,77 @@ impl ThinClient {
let req = Request::GetTransactionCount; let req = Request::GetTransactionCount;
let data = let data =
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count"); serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn transaction_count");
let mut done = false; let mut done = false;
while !done { while !done {
let resp = self.recv_response().expect("transaction count dropped"); self.requests_socket
info!("recv_response {:?}", resp); .send_to(&data, &self.requests_addr)
if let &Response::TransactionCount { .. } = &resp { .expect("buffer error in pub fn transaction_count");
done = true;
if let Ok(resp) = self.recv_response() {
info!("recv_response {:?}", resp);
if let &Response::TransactionCount { .. } = &resp {
done = true;
}
self.process_response(resp);
} }
self.process_response(resp);
} }
self.transaction_count self.transaction_count
} }
/// Request the last Entry ID from the server. This method blocks /// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. /// until the server sends a response.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> { pub fn get_last_id(&mut self) -> Hash {
info!("get_last_id"); info!("get_last_id");
let req = Request::GetLastId; let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id"); let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
let mut done = false; let mut done = false;
while !done { while !done {
let resp = self.recv_response().expect("get_last_id response"); self.requests_socket
if let &Response::LastId { .. } = &resp { .send_to(&data, &self.requests_addr)
done = true; .expect("buffer error in pub fn get_last_id");
if let Ok(resp) = self.recv_response() {
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
} }
self.process_response(resp);
}
ok(self.last_id.expect("some last_id"))
}
}
#[cfg(test)]
pub fn poll_get_balance(client: &mut ThinClient, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = client.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
} }
self.last_id.expect("some last_id")
} }
balance pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
balance
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use bank::Bank; use bank::Bank;
use crdt::{Crdt, ReplicatedData}; use budget::Budget;
use futures::Future; use crdt::TestNode;
use logger; use logger;
use mint::Mint; use mint::Mint;
use plan::Plan;
use server::Server; use server::Server;
use signature::{KeyPair, KeyPairUtil}; use signature::{KeyPair, KeyPairUtil};
use std::io::sink; use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock}; use std::sync::Arc;
use std::thread::JoinHandle;
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
use streamer::default_window; use transaction::{Instruction, Plan};
use transaction::Instruction;
use tvu::tests::TestNode;
#[test] #[test]
fn test_thin_client() { fn test_thin_client() {
@@ -207,11 +205,10 @@ mod tests {
let server = Server::new_leader( let server = Server::new_leader(
bank, bank,
alice.last_id(),
Some(Duration::from_millis(30)), Some(Duration::from_millis(30)),
leader.data.clone(), leader.data.clone(),
leader.sockets.requests, leader.sockets.requests,
leader.sockets.event, leader.sockets.transaction,
leader.sockets.broadcast, leader.sockets.broadcast,
leader.sockets.respond, leader.sockets.respond,
leader.sockets.gossip, leader.sockets.gossip,
@@ -221,19 +218,19 @@ mod tests {
sleep(Duration::from_millis(900)); sleep(Duration::from_millis(900));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new( let mut client = ThinClient::new(
leader.data.requests_addr, leader.data.requests_addr,
requests_socket, requests_socket,
leader.data.events_addr, leader.data.transactions_addr,
events_socket, transactions_socket,
); );
let last_id = client.get_last_id().wait().unwrap(); let last_id = client.get_last_id();
let _sig = client let _sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id) .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap(); .unwrap();
let balance = poll_get_balance(&mut client, &bob_pubkey); let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500); assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed); exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls { for t in server.thread_hdls {
@@ -252,11 +249,10 @@ mod tests {
let server = Server::new_leader( let server = Server::new_leader(
bank, bank,
alice.last_id(),
Some(Duration::from_millis(30)), Some(Duration::from_millis(30)),
leader.data.clone(), leader.data.clone(),
leader.sockets.requests, leader.sockets.requests,
leader.sockets.event, leader.sockets.transaction,
leader.sockets.broadcast, leader.sockets.broadcast,
leader.sockets.respond, leader.sockets.respond,
leader.sockets.gossip, leader.sockets.gossip,
@@ -269,186 +265,33 @@ mod tests {
requests_socket requests_socket
.set_read_timeout(Some(Duration::new(5, 0))) .set_read_timeout(Some(Duration::new(5, 0)))
.unwrap(); .unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new( let mut client = ThinClient::new(
leader.data.requests_addr, leader.data.requests_addr,
requests_socket, requests_socket,
leader.data.events_addr, leader.data.transactions_addr,
events_socket, transactions_socket,
); );
let last_id = client.get_last_id().wait().unwrap(); let last_id = client.get_last_id();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id); let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = client.transfer_signed(tr).unwrap(); let _sig = client.transfer_signed(tx).unwrap();
let last_id = client.get_last_id().wait().unwrap(); let last_id = client.get_last_id();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id); let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction { if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502; contract.tokens = 502;
contract.plan = Plan::new_payment(502, bob_pubkey); contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
} }
let _sig = client.transfer_signed(tr2).unwrap(); let _sig = client.transfer_signed(tr2).unwrap();
let balance = poll_get_balance(&mut client, &bob_pubkey); let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500); assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed); exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls { for t in server.thread_hdls {
t.join().unwrap(); t.join().unwrap();
} }
} }
fn validator(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
alice: &Mint,
threads: &mut Vec<JoinHandle<()>>,
) {
let validator = TestNode::new();
let replicant_bank = Bank::new(&alice);
let mut ts = Server::new_validator(
replicant_bank,
validator.data.clone(),
validator.sockets.requests,
validator.sockets.respond,
validator.sockets.replicate,
validator.sockets.gossip,
leader.clone(),
exit.clone(),
);
threads.append(&mut ts.thread_hdls);
}
fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let mut spy = TestNode::new();
let daddr = "0.0.0.0:0".parse().unwrap();
let me = spy.data.id.clone();
spy.data.replicate_addr = daddr;
spy.data.requests_addr = daddr;
let mut spy_crdt = Crdt::new(spy.data);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let t_spy_listen = Crdt::listen(
spy_ref.clone(),
spy_window,
spy.sockets.gossip,
exit.clone(),
);
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
let mut converged = false;
for _ in 0..30 {
let num = spy_ref.read().unwrap().convergence();
if num == num_nodes as u64 {
converged = true;
break;
}
sleep(Duration::new(1, 0));
}
assert!(converged);
threads.push(t_spy_listen);
threads.push(t_spy_gossip);
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.id != me)
.map(|x| x.clone())
.collect();
v.clone()
}
#[test]
fn test_multi_node() {
logger::setup();
const N: usize = 5;
trace!("test_multi_accountant_stub");
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_bank = Bank::new(&alice);
let server = Server::new_leader(
leader_bank,
alice.last_id(),
None,
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
let mut threads = server.thread_hdls;
for _ in 0..N {
validator(&leader.data, exit.clone(), &alice, &mut threads);
}
let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
//contains the leader addr as well
assert_eq!(servers.len(), N + 1);
//verify leader can do transfer
let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
assert_eq!(leader_balance, 500);
//verify validator has the same balance
let mut success = 0usize;
for server in servers.iter() {
let mut client = mk_client(server);
if let Ok(bal) = poll_get_balance(&mut client, &bob_pubkey) {
trace!("validator balance {}", bal);
if bal == leader_balance {
success += 1;
}
}
}
assert_eq!(success, servers.len());
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}
fn mk_client(leader: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
ThinClient::new(
leader.requests_addr,
requests_socket,
leader.events_addr,
events_socket,
)
}
fn tx_and_retry_get_balance(
leader: &ReplicatedData,
alice: &Mint,
bob_pubkey: &PublicKey,
) -> io::Result<i64> {
let mut client = mk_client(leader);
trace!("getting leader last_id");
let last_id = client.get_last_id().wait().unwrap();
info!("executing leader transer");
let _sig = client
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
.unwrap();
poll_get_balance(&mut client, bob_pubkey)
}
} }

View File

@@ -1,6 +1,11 @@
//! The `timing` module provides std::time utility functions.
use std::time::Duration; use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_us(d: &Duration) -> u64 {
return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
}
pub fn duration_as_ms(d: &Duration) -> u64 { pub fn duration_as_ms(d: &Duration) -> u64 {
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000); return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
} }

View File

@@ -1,60 +1,81 @@
//! The `tpu` module implements the Transaction Processing Unit, a //! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software. //! 5-stage transaction processing pipeline in software.
//!
//! ```text
//! .---------------------------------------------------------------.
//! | TPU .-----. |
//! | | PoH | |
//! | `--+--` |
//! | | |
//! | v |
//! | .-------. .-----------. .---------. .--------. .-------. |
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
//! `---------` | | | | | | | | | | | | `------------`
//! | `-------` `-----------` `----+----` `--------` `---+---` |
//! | | | |
//! | | | |
//! | | | |
//! | | | |
//! `---------------------------------|-----------------------|-----`
//! | |
//! v v
//! .------. .--------.
//! | Bank | | Ledger |
//! `------` `--------`
//! ```
use bank::Bank; use bank::Bank;
use banking_stage::BankingStage; use banking_stage::BankingStage;
use crdt::{Crdt, ReplicatedData}; use fetch_stage::FetchStage;
use hash::Hash; use packet::{BlobRecycler, PacketRecycler};
use packet;
use record_stage::RecordStage; use record_stage::RecordStage;
use sig_verify_stage::SigVerifyStage; use sigverify_stage::SigVerifyStage;
use std::io::Write; use std::io::Write;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel; use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::JoinHandle; use std::thread::JoinHandle;
use std::time::Duration; use std::time::Duration;
use streamer; use streamer::BlobReceiver;
use write_stage::WriteStage; use write_stage::WriteStage;
pub struct Tpu { pub struct Tpu {
pub blob_receiver: BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>, pub thread_hdls: Vec<JoinHandle<()>>,
} }
impl Tpu { impl Tpu {
pub fn new<W: Write + Send + 'static>( pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>, bank: Arc<Bank>,
start_hash: Hash,
tick_duration: Option<Duration>, tick_duration: Option<Duration>,
me: ReplicatedData, transactions_socket: UdpSocket,
events_socket: UdpSocket, blob_recycler: BlobRecycler,
broadcast_socket: UdpSocket,
gossip: UdpSocket,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
writer: W, writer: W,
) -> Self { ) -> Self {
let packet_recycler = packet::PacketRecycler::default(); let packet_recycler = PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
events_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let sig_verify_stage = SigVerifyStage::new(exit.clone(), packet_receiver); let fetch_stage =
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);
let blob_recycler = packet::BlobRecycler::default();
let banking_stage = BankingStage::new( let banking_stage = BankingStage::new(
bank.clone(), bank.clone(),
exit.clone(), exit.clone(),
sig_verify_stage.verified_receiver, sigverify_stage.verified_receiver,
packet_recycler.clone(), packet_recycler.clone(),
); );
let record_stage = let record_stage = match tick_duration {
RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration); Some(tick_duration) => RecordStage::new_with_clock(
banking_stage.signal_receiver,
&bank.last_id(),
tick_duration,
),
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
};
let write_stage = WriteStage::new( let write_stage = WriteStage::new(
bank.clone(), bank.clone(),
@@ -63,31 +84,16 @@ impl Tpu {
Mutex::new(writer), Mutex::new(writer),
record_stage.entry_receiver, record_stage.entry_receiver,
); );
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
write_stage.blob_receiver,
);
let mut thread_hdls = vec![ let mut thread_hdls = vec![
t_receiver,
banking_stage.thread_hdl, banking_stage.thread_hdl,
record_stage.thread_hdl, record_stage.thread_hdl,
write_stage.thread_hdl, write_stage.thread_hdl,
t_gossip,
t_listen,
t_broadcast,
]; ];
thread_hdls.extend(sig_verify_stage.thread_hdls.into_iter()); thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
Tpu { thread_hdls } thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
Tpu {
blob_receiver: write_stage.blob_receiver,
thread_hdls,
}
} }
} }

View File

@@ -1,71 +1,138 @@
//! The `transaction` module provides functionality for creating log transactions. //! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize; use bincode::serialize;
use budget::{Budget, Condition};
use chrono::prelude::*; use chrono::prelude::*;
use hash::Hash; use hash::Hash;
use plan::{Condition, Payment, Plan}; use payment_plan::{Payment, PaymentPlan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil}; use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
pub const SIGNED_DATA_OFFSET: usize = 112; pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8; pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80; pub const PUB_KEY_OFFSET: usize = 80;
/// The type of payment plan. Each item must implement the PaymentPlan trait.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
/// The builtin contract language Budget.
Budget(Budget),
}
// A proxy for the underlying DSL.
impl PaymentPlan for Plan {
fn final_payment(&self) -> Option<Payment> {
match self {
Plan::Budget(budget) => budget.final_payment(),
}
}
fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Plan::Budget(budget) => budget.verify(spendable_tokens),
}
}
fn apply_witness(&mut self, witness: &Witness) {
match self {
Plan::Budget(budget) => budget.apply_witness(witness),
}
}
}
/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract { pub struct Contract {
/// The number of tokens allocated to the `Plan` and any transaction fees.
pub tokens: i64, pub tokens: i64,
pub plan: Plan, pub plan: Plan,
} }
/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction { pub enum Instruction {
/// Declare and instanstansiate `Contract`.
NewContract(Contract), NewContract(Contract),
/// Tell a payment plan acknowledge the given `DateTime` has past.
ApplyTimestamp(DateTime<Utc>), ApplyTimestamp(DateTime<Utc>),
/// Tell the payment plan that the `NewContract` with `Signature` has been
/// signed by the containing transaction's `PublicKey`.
ApplySignature(Signature), ApplySignature(Signature),
} }
/// An instruction signed by a client with `PublicKey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction { pub struct Transaction {
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
pub sig: Signature, pub sig: Signature,
/// The `PublicKey` of the entity that signed the transaction data.
pub from: PublicKey, pub from: PublicKey,
/// The action the server should take.
pub instruction: Instruction, pub instruction: Instruction,
/// The ID of a recent ledger entry.
pub last_id: Hash, pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: i64,
} }
impl Transaction { impl Transaction {
/// Create a signed transaction from the given `Instruction`.
fn new_from_instruction( fn new_from_instruction(
from_keypair: &KeyPair, from_keypair: &KeyPair,
instruction: Instruction, instruction: Instruction,
last_id: Hash, last_id: Hash,
fee: i64,
) -> Self { ) -> Self {
let from = from_keypair.pubkey(); let from = from_keypair.pubkey();
let mut tr = Transaction { let mut tx = Transaction {
sig: Signature::default(), sig: Signature::default(),
instruction, instruction,
last_id, last_id,
from, from,
fee,
}; };
tr.sign(from_keypair); tx.sign(from_keypair);
tr tx
}
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new_taxed(
from_keypair: &KeyPair,
to: PublicKey,
tokens: i64,
fee: i64,
last_id: Hash,
) -> Self {
let payment = Payment {
tokens: tokens - fee,
to,
};
let budget = Budget::Pay(payment);
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id, fee)
} }
/// Create and sign a new Transaction. Used for unit-testing. /// Create and sign a new Transaction. Used for unit-testing.
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self { pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
let plan = Plan::Pay(Payment { tokens, to }); Self::new_taxed(from_keypair, to, tokens, 0, last_id)
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id)
} }
/// Create and sign a new Witness Timestamp. Used for unit-testing. /// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self { pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
let instruction = Instruction::ApplyTimestamp(dt); let instruction = Instruction::ApplyTimestamp(dt);
Self::new_from_instruction(from_keypair, instruction, last_id) Self::new_from_instruction(from_keypair, instruction, last_id, 0)
} }
/// Create and sign a new Witness Signature. Used for unit-testing. /// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self { pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
let instruction = Instruction::ApplySignature(tx_sig); let instruction = Instruction::ApplySignature(tx_sig);
Self::new_from_instruction(from_keypair, instruction, last_id) Self::new_from_instruction(from_keypair, instruction, last_id, 0)
} }
/// Create and sign a postdated Transaction. Used for unit-testing. /// Create and sign a postdated Transaction. Used for unit-testing.
@@ -77,25 +144,24 @@ impl Transaction {
last_id: Hash, last_id: Hash,
) -> Self { ) -> Self {
let from = from_keypair.pubkey(); let from = from_keypair.pubkey();
let plan = Plan::Race( let budget = Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }), (Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }), (Condition::Signature(from), Payment { tokens, to: from }),
); );
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens }); let instruction = Instruction::NewContract(Contract { plan, tokens });
let mut tr = Transaction { Self::new_from_instruction(from_keypair, instruction, last_id, 0)
instruction,
from,
last_id,
sig: Signature::default(),
};
tr.sign(from_keypair);
tr
} }
/// Get the transaction data to sign.
fn get_sign_data(&self) -> Vec<u8> { fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&(&self.instruction)).expect("serialize Contract"); let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id"); let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
data.extend_from_slice(&last_id_data); data.extend_from_slice(&last_id_data);
let fee_data = serialize(&(&self.fee)).expect("serialize last_id");
data.extend_from_slice(&fee_data);
data data
} }
@@ -105,13 +171,18 @@ impl Transaction {
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref()); self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
} }
/// Verify only the transaction signature.
pub fn verify_sig(&self) -> bool { pub fn verify_sig(&self) -> bool {
warn!("transaction signature verification called");
self.sig.verify(&self.from, &self.get_sign_data()) self.sig.verify(&self.from, &self.get_sign_data())
} }
/// Verify only the payment plan.
pub fn verify_plan(&self) -> bool { pub fn verify_plan(&self) -> bool {
if let Instruction::NewContract(contract) = &self.instruction { if let Instruction::NewContract(contract) = &self.instruction {
contract.plan.verify(contract.tokens) self.fee >= 0
&& self.fee <= contract.tokens
&& contract.plan.verify(contract.tokens - self.fee)
} else { } else {
true true
} }
@@ -138,21 +209,6 @@ pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
None None
} }
/// Verify a batch of signatures.
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_sig())
}
/// Verify a batch of spending plans.
pub fn verify_plans(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_plan())
}
/// Verify a batch of transactions.
pub fn verify_transactions(transactions: &[Transaction]) -> bool {
verify_signatures(transactions) && verify_plans(transactions)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -162,8 +218,8 @@ mod tests {
fn test_claim() { fn test_claim() {
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let zero = Hash::default(); let zero = Hash::default();
let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero); let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
assert!(tr0.verify_plan()); assert!(tx0.verify_plan());
} }
#[test] #[test]
@@ -172,22 +228,34 @@ mod tests {
let keypair0 = KeyPair::new(); let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new(); let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey(); let pubkey1 = keypair1.pubkey();
let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero); let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
assert!(tr0.verify_plan()); assert!(tx0.verify_plan());
}
#[test]
fn test_transfer_with_fee() {
let zero = Hash::default();
let keypair0 = KeyPair::new();
let pubkey1 = KeyPair::new().pubkey();
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
} }
#[test] #[test]
fn test_serialize_claim() { fn test_serialize_claim() {
let plan = Plan::Pay(Payment { let budget = Budget::Pay(Payment {
tokens: 0, tokens: 0,
to: Default::default(), to: Default::default(),
}); });
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 }); let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
let claim0 = Transaction { let claim0 = Transaction {
instruction, instruction,
from: Default::default(), from: Default::default(),
last_id: Default::default(), last_id: Default::default(),
sig: Default::default(), sig: Default::default(),
fee: 0,
}; };
let buf = serialize(&claim0).unwrap(); let buf = serialize(&claim0).unwrap();
let claim1: Transaction = deserialize(&buf).unwrap(); let claim1: Transaction = deserialize(&buf).unwrap();
@@ -199,15 +267,15 @@ mod tests {
let zero = Hash::default(); let zero = Hash::default();
let keypair = KeyPair::new(); let keypair = KeyPair::new();
let pubkey = keypair.pubkey(); let pubkey = keypair.pubkey();
let mut tr = Transaction::new(&keypair, pubkey, 42, zero); let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
if let Instruction::NewContract(contract) = &mut tr.instruction { if let Instruction::NewContract(contract) = &mut tx.instruction {
contract.tokens = 1_000_000; // <-- attack, part 1! contract.tokens = 1_000_000; // <-- attack, part 1!
if let Plan::Pay(ref mut payment) = contract.plan { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = contract.tokens; // <-- attack, part 2! payment.tokens = contract.tokens; // <-- attack, part 2!
} }
} }
assert!(tr.verify_plan()); assert!(tx.verify_plan());
assert!(!tr.verify_sig()); assert!(!tx.verify_sig());
} }
#[test] #[test]
@@ -217,23 +285,23 @@ mod tests {
let thief_keypair = KeyPair::new(); let thief_keypair = KeyPair::new();
let pubkey1 = keypair1.pubkey(); let pubkey1 = keypair1.pubkey();
let zero = Hash::default(); let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero); let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
if let Instruction::NewContract(contract) = &mut tr.instruction { if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Pay(ref mut payment) = contract.plan { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.to = thief_keypair.pubkey(); // <-- attack! payment.to = thief_keypair.pubkey(); // <-- attack!
} }
} }
assert!(tr.verify_plan()); assert!(tx.verify_plan());
assert!(!tr.verify_sig()); assert!(!tx.verify_sig());
} }
#[test] #[test]
fn test_layout() { fn test_layout() {
let tr = test_tx(); let tx = test_tx();
let sign_data = tr.get_sign_data(); let sign_data = tx.get_sign_data();
let tx = serialize(&tr).unwrap(); let tx_bytes = serialize(&tx).unwrap();
assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET)); assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET)); assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET)); assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
} }
#[test] #[test]
@@ -241,55 +309,20 @@ mod tests {
let keypair0 = KeyPair::new(); let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new(); let keypair1 = KeyPair::new();
let zero = Hash::default(); let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero); let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
if let Instruction::NewContract(contract) = &mut tr.instruction { if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Pay(ref mut payment) = contract.plan { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 2; // <-- attack! payment.tokens = 2; // <-- attack!
} }
} }
assert!(!tr.verify_plan()); assert!(!tx.verify_plan());
// Also, ensure all branchs of the plan spend all tokens // Also, ensure all branchs of the plan spend all tokens
if let Instruction::NewContract(contract) = &mut tr.instruction { if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Pay(ref mut payment) = contract.plan { if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 0; // <-- whoops! payment.tokens = 0; // <-- whoops!
} }
} }
assert!(!tr.verify_plan()); assert!(!tx.verify_plan());
}
#[test]
fn test_verify_transactions() {
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let carol_pubkey = KeyPair::new().pubkey();
let last_id = Hash::default();
let tr0 = Transaction::new(&alice_keypair, bob_pubkey, 1, last_id);
let tr1 = Transaction::new(&alice_keypair, carol_pubkey, 1, last_id);
let transactions = vec![tr0, tr1];
assert!(verify_transactions(&transactions));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use transaction::*;
#[bench]
fn verify_signatures_bench(bencher: &mut Bencher) {
let alice_keypair = KeyPair::new();
let last_id = Hash::default();
let transactions: Vec<_> = (0..64)
.into_par_iter()
.map(|_| {
let rando_pubkey = KeyPair::new().pubkey();
Transaction::new(&alice_keypair, rando_pubkey, 1, last_id)
})
.collect();
bencher.iter(|| {
assert!(verify_signatures(&transactions));
});
} }
} }

View File

@@ -1,35 +1,50 @@
//! The `tvu` module implements the Transaction Validation Unit, a //! The `tvu` module implements the Transaction Validation Unit, a
//! 5-stage transaction validation pipeline in software. //! 3-stage transaction validation pipeline in software.
//! 1. streamer //!
//! - Incoming blobs are picked up from the replicate socket. //! ```text
//! 2. verifier //! .------------------------------------------.
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified //! | TVU |
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs //! | |
//! with errors are dropped, or marked for slashing. //! | | .------------.
//! 3.a retransmit //! | .------------------------>| Validators |
//! - Blobs originating from the parent (leader atm is the only parent), are retransmit to all the //! | .-------. | | `------------`
//! peers in the crdt. Peers is everyone who is not me or the leader that has a known replicate //! .--------. | | | .----+---. .-----------. |
//! address. //! | Leader |--------->| Blob | | Window | | Replicate | |
//! 3.b window //! `--------` | | Fetch |-->| Stage |-->| Stage | |
//! - Verified blobs are placed into a window, indexed by the counter set by the leader.sockets. This could //! .------------. | | Stage | | | | | |
//! be the PoH counter if its monitonically increasing in each blob. Easure coding is used to //! | Validators |----->| | `--------` `----+------` |
//! recover any missing packets, and requests are made at random to peers and parents to retransmit //! `------------` | `-------` | |
//! a missing packet. //! | | |
//! 4. accountant //! | | |
//! - Contigous blobs are sent to the accountant for processing transactions //! | | |
//! 5. validator //! `--------------------------------|---------`
//! - TODO Validation messages are sent back to the leader //! |
//! v
//! .------.
//! | Bank |
//! `------`
//! ```
//!
//! 1. Fetch Stage
//! - Incoming blobs are picked up from the replicate socket and repair socket.
//! 2. Window Stage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. Replicate Stage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
use bank::Bank; use bank::Bank;
use crdt::{Crdt, ReplicatedData}; use blob_fetch_stage::BlobFetchStage;
use crdt::Crdt;
use packet; use packet;
use replicate_stage::ReplicateStage; use replicate_stage::ReplicateStage;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread::JoinHandle; use std::thread::JoinHandle;
use streamer; use streamer;
use window_stage::WindowStage;
pub struct Tvu { pub struct Tvu {
pub thread_hdls: Vec<JoinHandle<()>>, pub thread_hdls: Vec<JoinHandle<()>>,
@@ -40,128 +55,63 @@ impl Tvu {
/// on the bank state. /// on the bank state.
/// # Arguments /// # Arguments
/// * `bank` - The bank state. /// * `bank` - The bank state.
/// * `me` - my configuration /// * `crdt` - The crdt state.
/// * `gossip` - my gosisp socket /// * `window` - The window state.
/// * `replicte` - my replicte socket /// * `replicate_socket` - my replicate socket
/// * `leader` - leader configuration /// * `repair_socket` - my repair socket
/// * `retransmit_socket` - my retransmit socket
/// * `exit` - The exit signal. /// * `exit` - The exit signal.
pub fn new( pub fn new(
bank: Arc<Bank>, bank: Arc<Bank>,
me: ReplicatedData, crdt: Arc<RwLock<Crdt>>,
gossip: UdpSocket, window: streamer::Window,
replicate: UdpSocket, replicate_socket: UdpSocket,
leader: ReplicatedData, repair_socket: UdpSocket,
retransmit_socket: UdpSocket,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
) -> Self { ) -> Self {
//replicate pipeline
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock in pub fn replicate")
.set_leader(leader.id);
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&leader);
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
// TODO pull this socket out through the public interface
// make sure we are on the same interface
let mut local = replicate.local_addr().expect("tvu: get local address");
local.set_port(0);
let write = UdpSocket::bind(local).expect("tvu: bind to local socket");
let blob_recycler = packet::BlobRecycler::default(); let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel(); let fetch_stage = BlobFetchStage::new_multi_socket(
let t_blob_receiver = streamer::blob_receiver( vec![replicate_socket, repair_socket],
exit.clone(), exit.clone(),
blob_recycler.clone(), blob_recycler.clone(),
replicate,
blob_sender.clone(),
).expect("tvu: blob receiver creation");
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
retransmit_receiver,
); );
//TODO //TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified //the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction //then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window( let window_stage = WindowStage::new(
exit.clone(), crdt,
crdt.clone(),
window, window,
blob_recycler.clone(), retransmit_socket,
blob_receiver,
window_sender,
retransmit_sender,
);
let replicate_stage = ReplicateStage::new(
bank.clone(),
exit.clone(), exit.clone(),
window_receiver,
blob_recycler.clone(), blob_recycler.clone(),
fetch_stage.blob_receiver,
); );
let threads = vec![ let replicate_stage =
//replicate threads ReplicateStage::new(bank, exit, window_stage.blob_receiver, blob_recycler);
t_blob_receiver,
t_retransmit, let mut threads = vec![replicate_stage.thread_hdl];
t_window, threads.extend(fetch_stage.thread_hdls.into_iter());
replicate_stage.thread_hdl, threads.extend(window_stage.thread_hdls.into_iter());
t_gossip,
t_listen,
];
Tvu { Tvu {
thread_hdls: threads, thread_hdls: threads,
} }
} }
} }
#[cfg(test)]
use std::time::Duration;
#[cfg(test)]
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
use signature::{KeyPair, KeyPairUtil};
let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
requests_socket.local_addr().unwrap(),
events_socket.local_addr().unwrap(),
);
(d, gossip, replicate, requests_socket, events_socket)
}
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use bank::Bank; use bank::Bank;
use bincode::serialize; use bincode::serialize;
use crdt::Crdt; use crdt::{Crdt, TestNode};
use crdt::ReplicatedData;
use entry::Entry; use entry::Entry;
use event::Event;
use hash::{hash, Hash}; use hash::{hash, Hash};
use logger; use logger;
use mint::Mint; use mint::Mint;
use ncp::Ncp;
use packet::BlobRecycler; use packet::BlobRecycler;
use result::Result;
use signature::{KeyPair, KeyPairUtil}; use signature::{KeyPair, KeyPairUtil};
use std::collections::VecDeque; use std::collections::VecDeque;
use std::net::UdpSocket; use std::net::UdpSocket;
@@ -170,9 +120,20 @@ pub mod tests {
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::time::Duration; use std::time::Duration;
use streamer; use streamer;
use transaction::Transaction;
use tvu::Tvu; use tvu::Tvu;
/// Test that mesasge sent from leader to target1 and repliated to target2 fn new_ncp(
crdt: Arc<RwLock<Crdt>>,
listen: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<(Ncp, streamer::Window)> {
let window = streamer::default_window();
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
Ok((ncp, window))
}
/// Test that message sent from leader to target1 and replicated to target2
#[test] #[test]
fn test_replicate() { fn test_replicate() {
logger::setup(); logger::setup();
@@ -186,9 +147,7 @@ pub mod tests {
crdt_l.set_leader(leader.data.id); crdt_l.set_leader(leader.data.id);
let cref_l = Arc::new(RwLock::new(crdt_l)); let cref_l = Arc::new(RwLock::new(crdt_l));
let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone()); let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
let window1 = streamer::default_window();
let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());
//start crdt2 //start crdt2
let mut crdt2 = Crdt::new(target2.data.clone()); let mut crdt2 = Crdt::new(target2.data.clone());
@@ -196,9 +155,7 @@ pub mod tests {
crdt2.set_leader(leader.data.id); crdt2.set_leader(leader.data.id);
let leader_id = leader.data.id; let leader_id = leader.data.id;
let cref2 = Arc::new(RwLock::new(crdt2)); let cref2 = Arc::new(RwLock::new(crdt2));
let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone()); let dr_2 = new_ncp(cref2, target2.sockets.gossip, exit.clone()).unwrap();
let window2 = streamer::default_window();
let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());
// setup some blob services to send blobs into the socket // setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to // to simulate the source peer and get blobs out of the socket to
@@ -226,33 +183,37 @@ pub mod tests {
let mint = Mint::new(starting_balance); let mint = Mint::new(starting_balance);
let replicate_addr = target1.data.replicate_addr; let replicate_addr = target1.data.replicate_addr;
let bank = Arc::new(Bank::new(&mint)); let bank = Arc::new(Bank::new(&mint));
//start crdt1
let mut crdt1 = Crdt::new(target1.data.clone());
crdt1.insert(&leader.data);
crdt1.set_leader(leader.data.id);
let cref1 = Arc::new(RwLock::new(crdt1));
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();
let tvu = Tvu::new( let tvu = Tvu::new(
bank.clone(), bank.clone(),
target1.data, cref1,
target1.sockets.gossip, dr_1.1,
target1.sockets.replicate, target1.sockets.replicate,
leader.data, target1.sockets.repair,
target1.sockets.retransmit,
exit.clone(), exit.clone(),
); );
let mut alice_ref_balance = starting_balance; let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new(); let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default(); let mut cur_hash = Hash::default();
let num_blobs = 10; let mut blob_id = 0;
let num_transfers = 10;
let transfer_amount = 501; let transfer_amount = 501;
let bob_keypair = KeyPair::new(); let bob_keypair = KeyPair::new();
for i in 0..num_blobs { for i in 0..num_transfers {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(leader_id).unwrap();
let entry0 = Entry::new(&cur_hash, i, vec![]); let entry0 = Entry::new(&cur_hash, i, vec![]);
bank.register_entry_id(&cur_hash); bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash); cur_hash = hash(&cur_hash);
let tr1 = Event::new_transaction( let tx0 = Transaction::new(
&mint.keypair(), &mint.keypair(),
bob_keypair.pubkey(), bob_keypair.pubkey(),
transfer_amount, transfer_amount,
@@ -260,19 +221,28 @@ pub mod tests {
); );
bank.register_entry_id(&cur_hash); bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash); cur_hash = hash(&cur_hash);
let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tr1]); let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
bank.register_entry_id(&cur_hash); bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash); cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount; alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap(); for entry in vec![entry0, entry1] {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(blob_id).unwrap();
blob_id += 1;
w.set_id(leader_id).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry); let serialized_entry = serialize(&entry).unwrap();
w.set_size(serialized_entry.len());
w.meta.set_addr(&replicate_addr); w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
drop(w); w.set_size(serialized_entry.len());
msgs.push_back(b_); w.meta.set_addr(&replicate_addr);
drop(w);
msgs.push_back(b_);
}
} }
// send the blobs into the socket // send the blobs into the socket
@@ -280,10 +250,8 @@ pub mod tests {
// receive retransmitted messages // receive retransmitted messages
let timer = Duration::new(1, 0); let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) { while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg); trace!("msg: {:?}", msg);
msgs.push(msg);
} }
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap(); let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
@@ -296,52 +264,16 @@ pub mod tests {
for t in tvu.thread_hdls { for t in tvu.thread_hdls {
t.join().expect("join"); t.join().expect("join");
} }
t2_gossip.join().expect("join"); for t in dr_l.0.thread_hdls {
t2_listen.join().expect("join"); t.join().expect("join");
}
for t in dr_2.0.thread_hdls {
t.join().expect("join");
}
for t in dr_1.0.thread_hdls {
t.join().expect("join");
}
t_receiver.join().expect("join"); t_receiver.join().expect("join");
t_responder.join().expect("join"); t_responder.join().expect("join");
t_l_gossip.join().expect("join");
t_l_listen.join().expect("join");
}
pub struct Sockets {
pub gossip: UdpSocket,
pub requests: UdpSocket,
pub replicate: UdpSocket,
pub event: UdpSocket,
pub respond: UdpSocket,
pub broadcast: UdpSocket,
}
pub struct TestNode {
pub data: ReplicatedData,
pub sockets: Sockets,
}
impl TestNode {
pub fn new() -> TestNode {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
let event = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let data = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
requests.local_addr().unwrap(),
event.local_addr().unwrap(),
);
TestNode {
data: data,
sockets: Sockets {
gossip,
requests,
replicate,
event,
respond,
broadcast,
},
}
}
} }
} }

52
src/window_stage.rs Normal file
View File

@@ -0,0 +1,52 @@
//! The `window_stage` maintains the blob window
use crdt::Crdt;
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
/// Owns the blob-window pipeline: a window thread that orders incoming
/// blobs and a retransmit thread that forwards blobs to peers in the crdt.
pub struct WindowStage {
    pub blob_receiver: streamer::BlobReceiver, // ordered blobs flowing out of the window thread
    pub thread_hdls: Vec<JoinHandle<()>>,      // handles for the retransmit and window threads
}
impl WindowStage {
    /// Wire up the window pipeline.
    ///
    /// Spawns two threads: a window thread that orders blobs arriving on
    /// `fetch_stage_receiver` (forwarding ordered blobs to `blob_receiver`
    /// and candidates for retransmission to the retransmitter), and a
    /// retransmit thread that sends those blobs to peers in `crdt` over
    /// `retransmit_socket`. Both threads stop when `exit` is set.
    pub fn new(
        crdt: Arc<RwLock<Crdt>>,
        window: streamer::Window,
        retransmit_socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: packet::BlobRecycler,
        fetch_stage_receiver: streamer::BlobReceiver,
    ) -> Self {
        // One channel feeds the retransmitter; the other carries ordered
        // blobs out of this stage.
        let (retransmit_tx, retransmit_rx) = channel();
        let (ordered_tx, ordered_rx) = channel();

        let retransmit_hdl = streamer::retransmitter(
            retransmit_socket,
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_rx,
        );
        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified
        //then sent to the window, which does the erasure coding reconstruction
        let window_hdl = streamer::window(
            exit.clone(),
            crdt.clone(),
            window,
            blob_recycler.clone(),
            fetch_stage_receiver,
            ordered_tx,
            retransmit_tx,
        );

        WindowStage {
            blob_receiver: ordered_rx,
            thread_hdls: vec![retransmit_hdl, window_hdl],
        }
    }
}

View File

@@ -1,4 +1,6 @@
//! The `write_stage` module implements write stage of the RPU. //! The `write_stage` module implements the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank; use bank::Bank;
use entry::Entry; use entry::Entry;
@@ -8,7 +10,7 @@ use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver}; use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle}; use std::thread::{Builder, JoinHandle};
use streamer; use streamer;
pub struct WriteStage { pub struct WriteStage {
@@ -26,19 +28,22 @@ impl WriteStage {
entry_receiver: Receiver<Entry>, entry_receiver: Receiver<Entry>,
) -> Self { ) -> Self {
let (blob_sender, blob_receiver) = channel(); let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop { let thread_hdl = Builder::new()
let entry_writer = EntryWriter::new(&bank); .name("solana-writer".to_string())
let _ = entry_writer.write_and_send_entries( .spawn(move || loop {
&blob_sender, let entry_writer = EntryWriter::new(&bank);
&blob_recycler, let _ = entry_writer.write_and_send_entries(
&writer, &blob_sender,
&entry_receiver, &blob_recycler,
); &writer,
if exit.load(Ordering::Relaxed) { &entry_receiver,
info!("broadcat_service exiting"); );
break; if exit.load(Ordering::Relaxed) {
} info!("broadcat_service exiting");
}); break;
}
})
.unwrap();
WriteStage { WriteStage {
thread_hdl, thread_hdl,
@@ -52,16 +57,19 @@ impl WriteStage {
entry_receiver: Receiver<Entry>, entry_receiver: Receiver<Entry>,
) -> Self { ) -> Self {
let (_blob_sender, blob_receiver) = channel(); let (_blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || { let thread_hdl = Builder::new()
let entry_writer = EntryWriter::new(&bank); .name("solana-drain".to_string())
loop { .spawn(move || {
let _ = entry_writer.drain_entries(&entry_receiver); let entry_writer = EntryWriter::new(&bank);
if exit.load(Ordering::Relaxed) { loop {
info!("drain_service exiting"); let _ = entry_writer.drain_entries(&entry_receiver);
break; if exit.load(Ordering::Relaxed) {
info!("drain_service exiting");
break;
}
} }
} })
}); .unwrap();
WriteStage { WriteStage {
thread_hdl, thread_hdl,

185
tests/data_replicator.rs Normal file
View File

@@ -0,0 +1,185 @@
#[macro_use]
extern crate log;
extern crate rayon;
extern crate solana;
use rayon::iter::*;
use solana::crdt::{Crdt, TestNode};
use solana::logger;
use solana::ncp::Ncp;
use solana::packet::Blob;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
/// Spin up a gossip node for tests.
///
/// Returns the node's shared crdt, the running Ncp service, and the
/// node's replicate socket so callers can send/receive blobs directly.
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
    let node = TestNode::new();
    let crdt = Arc::new(RwLock::new(Crdt::new(node.data.clone())));
    let window = Arc::new(RwLock::new(vec![]));
    let ncp = Ncp::new(
        crdt.clone(),
        window,
        node.sockets.gossip,
        node.sockets.gossip_send,
        exit,
    ).unwrap();
    (crdt, ncp, node.sockets.replicate)
}
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(topo: F)
where
    F: Fn(&Vec<(Arc<RwLock<Crdt>>, Ncp, UdpSocket)>) -> (),
{
    let num: usize = 5;
    let exit = Arc::new(AtomicBool::new(false));
    let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
    // Let the caller connect the nodes into the desired topology.
    topo(&listen);
    // Poll until at least one node has seen the whole network, or give up
    // after num * 32 rounds.
    let mut done = false;
    for i in 0..(num * 32) {
        trace!("round {}", i);
        for (c, _, _) in &listen {
            if num == c.read().unwrap().convergence() as usize {
                done = true;
                break;
            }
        }
        //at least 1 node converged
        if done {
            break;
        }
        sleep(Duration::new(1, 0));
    }
    exit.store(true, Ordering::Relaxed);
    for (c, dr, _) in listen.into_iter() {
        for j in dr.thread_hdls.into_iter() {
            j.join().unwrap();
        }
        // make it clear what failed
        // protocol is too chatty, updates should stop after everyone receives `num`
        assert!(c.read().unwrap().update_index <= num as u64);
        // protocol is not chatty enough, everyone should get `num` entries
        assert_eq!(c.read().unwrap().table.len(), num);
    }
    assert!(done);
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
    logger::setup();
    run_gossip_topo(|nodes| {
        // Each node is told about its predecessor in the ring, so
        // information must travel the whole circle to converge.
        let count = nodes.len();
        for i in 0..count {
            let src = i % count;
            let dst = (i + 1) % count;
            let mut dst_crdt = nodes[dst].0.write().unwrap();
            let src_crdt = nodes[src].0.read().unwrap();
            let mut entry = src_crdt.table[&src_crdt.me].clone();
            entry.version = 0;
            dst_crdt.insert(&entry);
        }
    });
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
    logger::setup();
    run_gossip_topo(|nodes| {
        // The hub (node 0) learns about every spoke; spokes know nothing
        // initially, so gossip must fan back out from the hub.
        let count = nodes.len();
        for i in 0..(count - 1) {
            let spoke = (i + 1) % count;
            let mut hub_crdt = nodes[0].0.write().unwrap();
            let spoke_crdt = nodes[spoke].0.read().unwrap();
            let mut entry = spoke_crdt.table[&spoke_crdt.me].clone();
            entry.version = 0;
            hub_crdt.insert(&entry);
            trace!("star leader {:?}", &hub_crdt.me[..4]);
        }
    });
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
    logger::setup();
    run_gossip_topo(|nodes| {
        let count = nodes.len();
        // Snapshot the hub's own entry ...
        let hub_entry = {
            let hub_crdt = nodes[0].0.read().unwrap();
            hub_crdt.table[&hub_crdt.me].clone()
        };
        trace!("rstar leader {:?}", &hub_entry.id[..4]);
        // ... and hand it to every spoke, so each spoke only knows the hub.
        for i in 0..(count - 1) {
            let spoke = (i + 1) % count;
            let mut spoke_crdt = nodes[spoke].0.write().unwrap();
            spoke_crdt.insert(&hub_entry);
            trace!("rstar insert {:?} into {:?}", &hub_entry.id[..4], &spoke_crdt.me[..4]);
        }
    });
}
/// Three-node retransmit test: node 1 is made leader, nodes 2 and 3 learn
/// about it, and after gossip converges a blob retransmitted from node 1
/// must arrive at nodes 2 and 3 but not echo back to node 1 itself.
#[test]
pub fn crdt_retransmit() {
    logger::setup();
    let exit = Arc::new(AtomicBool::new(false));
    trace!("c1:");
    let (c1, dr1, tn1) = test_node(exit.clone());
    trace!("c2:");
    let (c2, dr2, tn2) = test_node(exit.clone());
    trace!("c3:");
    let (c3, dr3, tn3) = test_node(exit.clone());
    // Make c1 the leader and tell c2/c3 about it.
    let c1_data = c1.read().unwrap().my_data().clone();
    c1.write().unwrap().set_leader(c1_data.id);
    c2.write().unwrap().insert(&c1_data);
    c3.write().unwrap().insert(&c1_data);
    c2.write().unwrap().set_leader(c1_data.id);
    c3.write().unwrap().set_leader(c1_data.id);
    //wait to converge
    trace!("waiting to converge:");
    // Poll (up to 30s) until all three tables contain all three nodes.
    let mut done = false;
    for _ in 0..30 {
        done = c1.read().unwrap().table.len() == 3
            && c2.read().unwrap().table.len() == 3
            && c3.read().unwrap().table.len() == 3;
        if done {
            break;
        }
        sleep(Duration::new(1, 0));
    }
    assert!(done);
    // Retransmit a dummy 10-byte blob from c1 over tn1.
    let mut b = Blob::default();
    b.meta.size = 10;
    Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1).unwrap();
    // Each socket reports whether its 1-second receive timed out.
    let res: Vec<_> = [tn1, tn2, tn3]
        .into_par_iter()
        .map(|s| {
            let mut b = Blob::default();
            s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
            let res = s.recv_from(&mut b.data);
            res.is_err() //true if failed to receive the retransmit packet
        })
        .collect();
    //true if failed receive the retransmit packet, r2, and r3 should succeed
    //r1 was the sender, so it should fail to receive the packet
    assert_eq!(res, [true, false, false]);
    // Shut everything down and join all gossip threads.
    exit.store(true, Ordering::Relaxed);
    let mut threads = vec![];
    threads.extend(dr1.thread_hdls.into_iter());
    threads.extend(dr2.thread_hdls.into_iter());
    threads.extend(dr3.thread_hdls.into_iter());
    for t in threads.into_iter() {
        t.join().unwrap();
    }
}

175
tests/multinode.rs Normal file
View File

@@ -0,0 +1,175 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate solana;
use solana::bank::Bank;
use solana::crdt::TestNode;
use solana::crdt::{Crdt, ReplicatedData};
use solana::logger;
use solana::mint::Mint;
use solana::ncp::Ncp;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use std::io;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;
/// Start a validator node pointed at `leader`, with a fresh bank seeded
/// from `alice`. Its server threads are appended to `threads` for the
/// caller to join on shutdown.
fn validator(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    alice: &Mint,
    threads: &mut Vec<JoinHandle<()>>,
) {
    let node = TestNode::new();
    let bank = Bank::new(alice);
    let mut server = Server::new_validator(
        bank,
        node.data.clone(),
        node.sockets.requests,
        node.sockets.respond,
        node.sockets.replicate,
        node.sockets.gossip,
        node.sockets.repair,
        leader.clone(),
        exit.clone(),
    );
    threads.append(&mut server.thread_hdls);
}
/// Spin up a spy node that gossips with the network and wait (up to 30s)
/// until it has seen `num_nodes` nodes. Panics if convergence is not
/// reached. The spy's gossip threads are appended to `threads`; returns
/// the ReplicatedData of every discovered node except the spy itself.
fn converge(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    //lets spy on the network
    let mut spy = TestNode::new();
    let daddr = "0.0.0.0:0".parse().unwrap();
    let me = spy.data.id.clone();
    // Blank out service addresses so the spy is gossip-only.
    spy.data.replicate_addr = daddr;
    spy.data.requests_addr = daddr;
    let mut spy_crdt = Crdt::new(spy.data);
    spy_crdt.insert(leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let dr = Ncp::new(
        spy_ref.clone(),
        spy_window,
        spy.sockets.gossip,
        spy.sockets.gossip_send,
        exit,
    ).unwrap();
    //wait for the network to converge
    let mut converged = false;
    for _ in 0..30 {
        let num = spy_ref.read().unwrap().convergence();
        if num == num_nodes as u64 {
            converged = true;
            break;
        }
        sleep(Duration::new(1, 0));
    }
    assert!(converged);
    threads.extend(dr.thread_hdls.into_iter());
    // Snapshot every peer the spy discovered, excluding the spy itself.
    spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .filter(|x| x.id != me)
        .cloned()
        .collect()
}
/// End-to-end multinode test: one leader plus N validators. A transfer is
/// sent via the leader and every validator must converge to the same
/// balance for bob.
#[test]
fn test_multi_node() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_accountant_stub");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));
    let leader_bank = Bank::new(&alice);
    let server = Server::new_leader(
        leader_bank,
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );
    let mut threads = server.thread_hdls;
    // Launch N validators pointed at the leader.
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
    }
    // N validators + leader + spy = N + 2 nodes expected to converge.
    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    assert_eq!(leader_balance, 500);
    //verify validator has the same balance
    let mut success = 0usize;
    for server in servers.iter() {
        let mut client = mk_client(server);
        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
            trace!("validator balance {}", bal);
            if bal == leader_balance {
                success += 1;
            }
        }
    }
    // Every node (including the leader) must report the same balance.
    assert_eq!(success, servers.len());
    // Shut down and join all server, validator, and gossip threads.
    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    }
}
/// Build a ThinClient aimed at `leader`'s request and transaction ports.
fn mk_client(leader: &ReplicatedData) -> ThinClient {
    // Bind both client sockets to ephemeral local ports.
    let req_sock = UdpSocket::bind("0.0.0.0:0").unwrap();
    let tx_sock = UdpSocket::bind("0.0.0.0:0").unwrap();
    // Don't block forever waiting for a response.
    req_sock
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    ThinClient::new(
        leader.requests_addr,
        req_sock,
        leader.transactions_addr,
        tx_sock,
    )
}
/// Transfer 500 tokens from `alice` to `bob_pubkey` via `leader`, then
/// poll the leader until bob's balance is available and return it.
fn tx_and_retry_get_balance(
    leader: &ReplicatedData,
    alice: &Mint,
    bob_pubkey: &PublicKey,
) -> io::Result<i64> {
    let mut client = mk_client(leader);
    trace!("getting leader last_id");
    let last_id = client.get_last_id();
    // Fixed typo in log message ("transer" -> "transfer").
    info!("executing leader transfer");
    let _sig = client
        .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
        .unwrap();
    client.poll_get_balance(bob_pubkey)
}