Compare commits

...

660 Commits

Author SHA1 Message Date
Michael Vines
be5f2ef9b9 Consolidate CI jobs 2018-06-24 22:28:24 -07:00
Greg Fitzgerald
adfcb79387 Force install cargo-cov 2018-06-24 15:34:30 -06:00
Greg Fitzgerald
73c4c0ac5f Revert "cargo-cov installed by default in nightly?"
This reverts commit 6fc601f696.
2018-06-24 15:34:30 -06:00
Greg Fitzgerald
6fc601f696 cargo-cov installed by default in nightly? 2018-06-24 12:17:42 -06:00
Greg Fitzgerald
07111fb7bb Use llvm-cov instead of gcov
@marco-c called this a hack, but since grcov isn't working
out-of-the-box (panics on call to gcov), we'll take a stab at using
llvm-cov.
2018-06-24 12:17:42 -06:00
Greg Fitzgerald
a06d2170b0 No need for rustfmt on nightly 2018-06-24 12:17:42 -06:00
Greg Fitzgerald
7f53ea3bf3 Generate coverage with Rust nightly
Fixes #177

Thanks @marco-c!
2018-06-24 12:17:42 -06:00
Michael Vines
b2accd1c2a Run snap build sooner to better mask the delay 2018-06-24 10:24:32 -07:00
Anatoly Yakovenko
8ef8a8dea7 borrow checker 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
e929404676 comments 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
c2258bedae fixed! 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
215fdbb7ed nits 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
ee998f6882 fix docs 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
826e95afca fix logs 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
47583d48e7 get rid of dummy test 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
e759cdf061 tests 2018-06-24 11:17:55 -06:00
Anatoly Yakovenko
88503c2a09 generic array fail case 2018-06-24 11:17:55 -06:00
Tyera Eulberg
d5be23dffe fmt 2018-06-24 10:44:17 -06:00
Tyera Eulberg
80c01dc085 Use leader.json or ReplicatedData to get ports for drone 2018-06-24 10:44:17 -06:00
Tyera Eulberg
45b2549fa9 Reset bad TestNode edit 2018-06-24 10:44:17 -06:00
Greg Fitzgerald
c7ce454188 Use pnet_datalink instead of all of pnet
pnet_transport takes a long time to build. It's been especially
painful from within a docker container for reasons I don't care
to understand. pnet_datalink is the only part of pnet we're using
so booting the rest.
2018-06-24 10:39:59 -06:00
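A minimal sketch of the dependency in question: pnet_datalink's interface enumeration is the only pnet functionality in use, so the rest of pnet can go. The crate and function are real; the usage below is illustrative, not the repo's actual code.

```
// Illustrative only: enumerate local interfaces via pnet_datalink,
// the single piece of pnet this codebase relies on.
use pnet_datalink::interfaces;

fn main() {
    for iface in interfaces() {
        println!("{}: {:?}", iface.name, iface.ips);
    }
}
```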
Anatoly Yakovenko
7059ea42d6 comments 2018-06-24 09:19:05 -06:00
Anatoly Yakovenko
8ea1c29c9b more notes 2018-06-24 09:19:05 -06:00
Michael Vines
33bbfdbc9b Retry flaky coverage/cuda builds on initial failure 2018-06-23 16:17:25 -07:00
Michael Vines
5de54f8853 Make cuda/erasure build logs public 2018-06-23 16:17:25 -07:00
Michael Vines
a1ac41218a Document CUDA version 2018-06-23 16:17:25 -07:00
Rob Walker
55fc647568 fix more shellcheck 2018-06-23 16:00:17 -07:00
Rob Walker
e83e898eed fix shellcheck's concerns 2018-06-23 16:00:17 -07:00
Rob Walker
eb07e4588b remove IPADDR, which was making Rob feel ill
IPADDR is simple, but not exactly what we need for testnet, where NAT'd
  folks need to join in and advertise themselves as on the interweb.

  myip() helps, but there are some TODOs: fullnode-config probably needs to
  be told where it lives in the real world (machine interfaces tell us nothing
  useful), or incorporate something like the "ifconfig.co" code in myip.sh
2018-06-23 16:00:17 -07:00
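For reference, a hedged sketch of the "ifconfig.co" approach the message alludes to, assuming the reqwest crate with its blocking feature enabled; the endpoint path and error handling are illustrative.

```
// Illustrative sketch: ask an external service which address this node
// is visible as, in the spirit of myip.sh.
// Assumes reqwest with features = ["blocking"] as a dependency.
fn public_ip() -> Result<String, reqwest::Error> {
    reqwest::blocking::get("https://ifconfig.co/ip")?.text()
}
```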
Michael Vines
563f834c96 Document how to update the snap 2018-06-23 15:29:22 -07:00
Michael Vines
183178681d Simplify fetching perf libs 2018-06-23 12:54:38 -07:00
anatoly yakovenko
8dba53e494 debit undo (#423) 2018-06-23 06:14:52 -07:00
Michael Vines
e4782b19a3 Document GCP setup 2018-06-23 02:12:20 -07:00
Michael Vines
ec86b1dffa Adapt to GCP-based CI 2018-06-23 02:12:20 -07:00
anatoly yakovenko
6cb8266c7b cleanup (#419) 2018-06-22 23:26:42 -07:00
Greg Fitzgerald
9c50302a39 Update rfc-001-smart-contracts-engine.md 2018-06-22 22:45:22 -07:00
Anatoly Yakovenko
3313c69898 remove ccal 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
530c6ca7ec a bunch of updates 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
07ed2fb523 cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
d9ec380a15 cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
b60eb3a899 edits 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
b4df69791b cleanup 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
c21b8a22b9 update 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
475a76e656 wip 2018-06-22 22:43:54 -07:00
Anatoly Yakovenko
7ba5d5ef86 first! 2018-06-22 22:43:54 -07:00
Greg Fitzgerald
737dc1ddde Per rustc 1.27.0, we can ensure nested results are used 2018-06-22 22:42:47 -07:00
Greg Fitzgerald
164bf19b36 Update LICENSE 2018-06-22 22:41:04 -07:00
Greg Fitzgerald
25976771d9 Version bump 2018-06-22 22:38:18 -07:00
Greg Fitzgerald
f2198c2e9a cargo fmt
rustc 1.27.0

```
$ cargo fmt --version
rustfmt 0.6.1-stable (49279d71 2018-05-08)
```
2018-06-22 22:23:55 -07:00
Rob Walker
eec19c6d2c move genesis to new Entry generation 2018-06-22 17:46:45 -07:00
Michael Vines
30e03feb5f Add initial CI subsystem documentation 2018-06-22 15:30:29 -07:00
Michael Vines
58cd3bde9f Add drone to snap package 2018-06-22 15:27:25 -07:00
Tyera Eulberg
662bfb7b88 fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
5f3e3a17d3 Fix test_send_airdrop cap; add helpful panic msgs 2018-06-22 14:52:36 -07:00
Tyera Eulberg
feba2d9975 Set request cap to a reasonable number, based on 30min reset noted in issue #341 2018-06-22 14:52:36 -07:00
Tyera Eulberg
e3e3a1c457 Better drone request cap handling 2018-06-22 14:52:36 -07:00
Tyera Eulberg
90628f3c8d Edit TestNode port logic to be consistent with new_leader (fixes hanging test_send_airdrop) 2018-06-22 14:52:36 -07:00
Tyera Eulberg
f6bcadb79d Make airdrop amount variable 2018-06-22 14:52:36 -07:00
Tyera Eulberg
d4ac16773c fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
96f044d2bf Clean up; add new_from_server_addr routine 2018-06-22 14:52:36 -07:00
Tyera Eulberg
f31868b913 Rename drone bin; fix usage statement 2018-06-22 14:52:36 -07:00
Tyera Eulberg
73b0ff5b55 Add request-count check and tests; fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
64cf69045a Add request-count check; Clean up solana-drone and fmt 2018-06-22 14:52:36 -07:00
Tyera Eulberg
e57dae0f31 Update config and dependencies for solana-drone 2018-06-22 14:52:36 -07:00
Tyera Eulberg
6386e7d5cf Leave some tokens in the mint for solana-drone 2018-06-22 14:52:36 -07:00
Tyera Eulberg
4bad103da9 Add solana-drone CLI 2018-06-22 14:52:36 -07:00
Tyera Eulberg
30a26adb7c Add solana-drone module to library 2018-06-22 14:52:36 -07:00
Stephen Akridge
8be4adfc0a Rename tr => tx and add back comments 2018-06-22 14:34:46 -07:00
Stephen Akridge
fed4cc3965 Remove commented code/imports 2018-06-22 14:34:46 -07:00
Stephen Akridge
7d1e074683 bump last_ids 2018-06-22 14:34:46 -07:00
Stephen Akridge
00516e50a1 last_ids opt 2018-06-22 14:34:46 -07:00
Stephen Akridge
e83d76fbd9 Remove mutexes 2018-06-22 14:34:46 -07:00
Stephen Akridge
304f152315 rwlock balances table 2018-06-22 14:34:46 -07:00
Stephen Akridge
3a82ebf7fd Add multiple source accounts for benchmark 2018-06-22 14:34:46 -07:00
Pankaj Garg
0253d34467 Address review comments 2018-06-22 14:18:45 -07:00
Pankaj Garg
9209f9acde Run multiple instances from same workspace
* Support running leader and validators from multiple machines
  using the same NFS mounted workspace.
* Changes to setup, leader and validator scripts
2018-06-22 14:18:45 -07:00
Rob Walker
3dbbb398df use next_entries() in recorder, recycle blobs in reconstruct_from_blobs 2018-06-22 14:17:36 -07:00
Michael Vines
17e8ad110f Temporarily disable failing CI to get back to green 2018-06-22 11:29:31 -07:00
Rob Walker
5e91d31ed3 issue 309 part 1
* limit the number of Entries per Blob to at most one
* limit the number of Transactions per Entry such that an Entry will
    always fit in a Blob

With a one-to-one map of Entries to Blobs, recovery of a validator
  is a simple fast-forward from the end of the initial genesis.log
  and tx-*.logs Entries.

TODO: initialize validators' blob index with initial # of Entries.
2018-06-22 09:58:51 -07:00
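A back-of-the-envelope sketch of the constraint this commit describes; every constant below is an assumption for illustration, not the repo's real sizes.

```
// Illustrative arithmetic only: with at most one Entry per Blob, an
// Entry must always fit in a Blob, which caps transactions per Entry.
const BLOB_DATA_SIZE: usize = 64 * 1024; // assumed blob payload size
const TX_SIZE: usize = 256;              // assumed serialized tx size
const ENTRY_OVERHEAD: usize = 48;        // assumed entry header size

fn max_transactions_per_entry() -> usize {
    (BLOB_DATA_SIZE - ENTRY_OVERHEAD) / TX_SIZE
}

fn main() {
    println!("max txs per entry: {}", max_transactions_per_entry());
}
```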
Greg Fitzgerald
fad9d20820 Add assertion for how next_entry must be called 2018-06-21 21:24:32 -07:00
Greg Fitzgerald
fe9a1c8580 Fix comment 2018-06-21 21:24:32 -07:00
Greg Fitzgerald
cd6d7d5198 Remove redundant clones 2018-06-21 21:24:32 -07:00
Michael Vines
771478bc68 Add simple CUDA version check, warn on mismatch 2018-06-21 13:42:06 -07:00
Michael Vines
c4a59896f8 Run test-erasure in a container 2018-06-21 13:00:40 -07:00
Michael Vines
3eb1608403 Skip --user if SOLANA_DOCKER_RUN_NOSETUID is set 2018-06-21 12:24:52 -07:00
Michael Vines
8fde70d4dc Erasure tests do not require a CUDA agent 2018-06-21 11:42:37 -07:00
Michael Vines
5a047833ed Run snap publishing directly on CUDA agent
This is necessary until we build a docker image that also contains a CUDA
installation
2018-06-21 11:42:37 -07:00
Michael Vines
f6c28e6be1 Update snapcraft docker image to contain snapcraft 2.42.1 2018-06-21 11:42:37 -07:00
Michael Vines
0ebf10d19d Snap cuda fullnode 2018-06-21 11:42:37 -07:00
Pankaj Garg
d3005d3ef3 Updated setup and leader scripts
* Setup will use -b to set validator ports
* Leader script fixed to append .log to the log file
* Updated readme file
2018-06-20 19:05:38 -07:00
Anatoly Yakovenko
effcef2184 fixed sleep bug 2018-06-20 16:58:10 -07:00
Michael Vines
89fc0ad7a9 Add convenience script to download performance libraries 2018-06-20 16:48:32 -07:00
Greg Fitzgerald
410272ee1d Update generic_array
Warning: this may have performance implications.
2018-06-20 11:41:54 -07:00
Greg Fitzgerald
1c97bf50b6 Fix nightly
No longer ignore failures in the nightly build.
2018-06-19 17:38:04 -07:00
Rob Walker
4ecd2c9d0b update demo scripts
* add setup to combine init steps, configurable initial mint
  * bash -e -> bash and be explicit about errors with || exit $?
  * feed transaction logs to validator, too
2018-06-19 17:04:44 -07:00
Michael Vines
e592243a09 De-double quote 2018-06-19 13:20:47 -07:00
Greg Fitzgerald
2f4a92e352 Cleanup test 2018-06-19 12:36:02 -07:00
OEM Configuration (temporary user)
ceafc29040 fix linting errors, add retransmission fix to repair requests 2018-06-19 12:36:02 -07:00
OEM Configuration (temporary user)
b20efabfd2 added retransmission of repair messages 2018-06-19 12:36:02 -07:00
Michael Vines
85b6e7293c Add cleanup script to manage build agent disk space 2018-06-19 12:22:45 -07:00
Rob Walker
6aced927ad improve ledger initialization for fullnode
* use a line iterator on stdin instead of a line iterator on a buffer
 * move some unwrap() to expect(), documenting failures
 * bind entry type earlier (for kicks)
2018-06-19 09:28:35 -07:00
Michael Vines
75997e6c08 Allow BUILDKITE_BRANCH in containers 2018-06-18 22:51:30 -07:00
Michael Vines
9040d00110 Package solana as a snap 2018-06-18 17:36:03 -07:00
Michael Vines
8ebc5c6b07 Suggest different validator port by default to coexist with leader port on the same machine 2018-06-18 17:36:03 -07:00
Michael Vines
d4807790ff Add snapcraft login credentials
This file was created as follows:
```
$ snapcraft export-login --snaps solana --channels beta,edge snapcraft.credentials
$ openssl aes-256-cbc -e -in snapcraft.credentials -out snapcraft.credentials.enc
```
2018-06-18 17:36:03 -07:00
Rob Walker
0de5e7a285 attempt to understand entry 2018-06-18 16:48:59 -07:00
Greg Fitzgerald
c40000aeda Fix compiler warning 2018-06-18 15:49:41 -07:00
Stephen Akridge
31198bc105 Fix cargo bench nightly 2018-06-18 13:20:39 -07:00
Michael Vines
92599acfca Abort when -l is not present or unreadable 2018-06-16 09:55:03 -07:00
Greg Fitzgerald
f6e70779fe Don't panic if sent a bad packet 2018-06-16 09:51:45 -06:00
Greg Fitzgerald
3017bde686 Update README.md 2018-06-16 09:43:23 -06:00
Greg Fitzgerald
9d84ec4bb3 Delete TODO
That comment predates the separation of RPU and TPU.
2018-06-16 08:59:30 -06:00
Anatoly Yakovenko
586141adb2 Cleanup TVU docs 2018-06-15 22:45:35 -06:00
Michael Vines
3f763f99e2 Fail fast in CI when |cargo fmt| says no 2018-06-15 17:10:00 -07:00
Michael Vines
15c7f36ea3 Improve error reporting 2018-06-15 17:10:00 -07:00
Michael Vines
04d1a083fa Skip |sudo sysctl ...| on macOS 2018-06-15 17:10:00 -07:00
Greg Fitzgerald
327ee1dae8 Apply feedback from @aeyakovenko 2018-06-15 17:01:38 -06:00
Greg Fitzgerald
22885c3e64 Add TVU ASCII art 2018-06-15 17:01:38 -06:00
Stephen Akridge
94ededb54c Add comments and limit digits for tps prints 2018-06-15 11:54:01 -06:00
Stephen Akridge
af6a07697a Change client-demo to run continuously for some amount of time
Also retry for get_last_id/transaction_count if dropped.
2018-06-15 11:54:01 -06:00
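A minimal sketch of the run-for-a-duration-with-retry shape this commit describes; the names are illustrative, not client-demo's actual API.

```
use std::thread::sleep;
use std::time::{Duration, Instant};

// Illustrative: keep attempting work until the time budget is spent,
// pausing briefly after a failed attempt (e.g. a dropped request).
fn run_for(budget: Duration, mut attempt: impl FnMut() -> bool) {
    let start = Instant::now();
    while start.elapsed() < budget {
        if !attempt() {
            sleep(Duration::from_millis(100));
        }
    }
}

fn main() {
    let mut attempts = 0u32;
    run_for(Duration::from_millis(50), || {
        attempts += 1;
        true // pretend the request succeeded
    });
    println!("attempts: {}", attempts);
}
```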
Stephen Akridge
5f1d8c95eb Fix blob data size 2018-06-15 11:54:01 -06:00
Anatoly Yakovenko
7d9e032407 make sure we test large tables 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
bc918a5ad5 purger 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
ee54ce4727 min table size before purge 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
e85bf2f2d5 tests pass 2018-06-15 06:56:35 -06:00
Anatoly Yakovenko
a7460ffbd1 purge validators we haven't seen for a long time 2018-06-15 06:56:35 -06:00
Rob Walker
7fe1fd2f95 clean up fullnode cmdline
* fix documentation, other opt parameters
 * add support for a named output file, remove hardcoded "leader.log"
 * resurrect stdout as the default output
2018-06-15 00:41:07 -07:00
Rob Walker
d30670e92e clean up demo bash scripts
* allow other level of RUST logging
 * avoid "echo" in favor of printf (builtin)
 * single quotes for literals, double quotes for variables
2018-06-14 23:12:11 -06:00
Greg Fitzgerald
9b202c6e1e No longer flood log with empty entries 2018-06-14 18:04:36 -06:00
Stephen Akridge
87946eafd5 Lower processing transaction message to debug by default 2018-06-14 17:08:11 -06:00
Greg Fitzgerald
7575d3c726 Add timestamp to log messages
Upgraded env_logger and now we have timestamps and colorful messages.

Fixes #318
2018-06-14 17:07:58 -06:00
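The upgrade amounts to initializing env_logger as usual; a minimal sketch, assuming the log and env_logger crates as dependencies.

```
use log::info;

fn main() {
    // env_logger's default format prefixes each line with a timestamp.
    env_logger::init();
    info!("started");
}
```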
Stephen Akridge
8b9713a934 Skip link_local v4 addresses and v6 address when v6 is not enabled 2018-06-14 16:10:31 -06:00
Stephen Akridge
ec713c18c4 Revert client.sh script to use cargo 2018-06-14 11:56:36 -06:00
anatoly yakovenko
c24b0a1a3f TVU rework (#352)
Refactored TVU, into stages
* blob fetch stage for blobs
* window stage for maintaining the blob window
* pulled out NCP out of the TVU so they can be separate units
TVU is now just the fetch -> window -> request and bank processing
2018-06-13 21:52:23 -07:00
Robert Kelly
34e0cb0092 cargo fmt 2018-06-13 19:17:21 -07:00
Robert Kelly
7b7c7cba21 changed atty library 2018-06-13 19:17:21 -07:00
Anatoly Yakovenko
c45343dd30 comments 2018-06-13 16:11:44 -06:00
Anatoly Yakovenko
b7f6603c1f fix coverage build 2018-06-13 16:11:44 -06:00
anatoly yakovenko
2d3b052dea allow for insertion of dummy entry points into the local table (#346)
* Needed for #341. Create a dummy entry with public key 0..., but with a valid gossip address that we can ask for updates. This will allow validators to discover the full network by just knowing a single node's gossip address without knowing anything else about their identity.
* once we start removing dead validators this entry should get purged since we will never see a message from public key 0, #344
2018-06-13 11:42:30 -07:00
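A toy sketch of the bootstrap idea, with stand-in types; the real table stores full ReplicatedData entries, not bare addresses.

```
use std::collections::HashMap;
use std::net::SocketAddr;

// Illustrative: seed the local table with a dummy entry keyed by the
// all-zero public key but carrying a real gossip address to query.
fn insert_bootstrap(table: &mut HashMap<[u8; 32], SocketAddr>, gossip: SocketAddr) {
    table.insert([0u8; 32], gossip);
}

fn main() {
    let mut table = HashMap::new();
    insert_bootstrap(&mut table, "203.0.113.1:8000".parse().unwrap());
    assert!(table.contains_key(&[0u8; 32]));
}
```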
Tyera Eulberg
dcb6234771 Fix relative link to client demo 2018-06-12 23:29:04 -06:00
Greg Fitzgerald
e44d423e83 Make version syntax consistent
Using no symbol implies it's a semver caret requirement.

https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
2018-06-11 15:51:25 -06:00
Greg Fitzgerald
5435bb734c Upgrade rand 2018-06-11 15:51:25 -06:00
Michael Vines
13f59adf61 Update build status badge link to publicly available build log 2018-06-10 22:12:09 -07:00
Stephen Akridge
0fce3368d3 Fix json perf counter print and add script to generate a chart 2018-06-09 10:55:22 -07:00
Stephen Akridge
1ee5c81267 Fix benchmarking banking_stage 2018-06-08 15:50:36 -07:00
Stephen Akridge
3bb9d5eb50 Use timing::timestamp for counters 2018-06-08 15:50:36 -07:00
Grimes
efb23f7cf9 Ensure stuck builds eventually timeout 2018-06-07 19:08:03 -07:00
Grimes
013f4674de Target cuda agents 2018-06-07 19:08:03 -07:00
Greg Fitzgerald
6966b25d9c Don't mark a build as failed if line coverage drops
It's not always a problem if line coverage drops. For example,
coverage will drop if you make well-tested code more succinct.
It just means the uncovered code is a larger percentage of
the codebase.
2018-06-07 19:09:25 -06:00
Greg Fitzgerald
d513f56c8c Readme version bump 2018-06-07 17:32:07 -06:00
Greg Fitzgerald
7aa05618a3 data_replicator -> ncp
Fixes #327
2018-06-07 17:11:17 -06:00
Greg Fitzgerald
cdfbbe5e60 Fix diagram typos 2018-06-07 17:11:17 -06:00
Greg Fitzgerald
fe7d1cb81c Race -> Or
Thanks for the suggestion @FishmanL!
2018-06-07 17:11:03 -06:00
Anatoly Yakovenko
c2a9395a4b perf counters 2018-06-07 14:59:21 -07:00
Greg Fitzgerald
586279bcfc Add server diagrams 2018-06-07 15:24:44 -06:00
Greg Fitzgerald
8bd10e7c4c Cleanup top-level lib doc 2018-06-07 15:24:44 -06:00
Greg Fitzgerald
928e6165bc Add TPU & RPU diagrams 2018-06-07 15:24:44 -06:00
anatoly yakovenko
77c9e801aa fixed client demo (#325)
* fixed client demo
2018-06-07 13:51:15 -07:00
Anatoly Yakovenko
c78132417f fix deadlock 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
849928887e undo 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
ba1163d49f fix logs 2018-06-07 13:52:33 -06:00
Anatoly Yakovenko
6f9c89af39 fix deadlock 2018-06-07 13:52:33 -06:00
Greg Fitzgerald
246b8b1242 No longer cat scripts
Because we keep changing those scripts and not updating the readme.

Also, this removes the "-b 9000" used when starting validators. Is that right?
Or should we be passing that to the validator config?
2018-06-07 12:17:43 -06:00
Stephen Akridge
f0db68cb75 Add note about validator.json and -d flag to config generating scripts 2018-06-07 11:15:41 -06:00
Greg Fitzgerald
f0d1fdfb46 Add missing module descriptions 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
3b8b2e030a Better docs for transaction 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
b4fee677a5 Better docs for payment_plan 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
fe706583f9 Better docs for sigverify_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
d0e0c17ece Better docs for rpu 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
5aaa38bcaf Better docs for write_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
6ff9b27f8e Better docs for entry 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
3f4e035506 Better docs for budget 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
57d9fbb927 Better docs for banking_stage 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
ee44e51b30 Better docs for the bank 2018-06-07 09:25:36 -06:00
Greg Fitzgerald
5011f24123 Move more interesting content into first header
The first header and its content are the only text displayed on
GitHub's mobile page. Reorder so that the disclaimer isn't the only
information people see.

Disclaimer: IANAL and assume reordering these doesn't matter. :)
2018-06-07 09:25:36 -06:00
Anatoly Yakovenko
d1eda334f3 gdb 2018-06-07 09:25:08 -06:00
Hleb Albau
2ae5ce9f2c Do not use cuda for multinode-demo validator component 2018-06-07 07:04:33 -06:00
Greg Fitzgerald
4f5ac78b7e Add readme to crates.io 2018-06-06 15:00:25 -06:00
Stephen Akridge
074c9af020 Shellcheck again 2018-06-05 15:32:25 -06:00
Stephen Akridge
2da2d4e365 More shellcheck 2018-06-05 15:32:25 -06:00
Stephen Akridge
8eb76ab2a5 Fix shellcheck 2018-06-05 15:32:25 -06:00
Stephen Akridge
a710d95243 Fix non-erasure blob nulling 2018-06-05 15:32:25 -06:00
Stephen Akridge
a06535d7ed cargo fmt 2018-06-05 15:32:25 -06:00
Stephen Akridge
f511ac9be7 Fixes for receiving old blobs and nulling the window with coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
e28ad2177e Receive fixes 2018-06-05 15:32:25 -06:00
Stephen Akridge
cb16fe84cd Rework to fix coding blob insertion 2018-06-05 15:32:25 -06:00
Stephen Akridge
ec3569aa39 Move receive_index to correct place 2018-06-05 15:32:25 -06:00
Stephen Akridge
246edecf53 Add receive_index for broadcast blobs and fix blobs_len position 2018-06-05 15:32:25 -06:00
Stephen Akridge
34834c5af9 Store another size in the data block so it is coded as well 2018-06-05 15:32:25 -06:00
Stephen Akridge
b845245614 Restore more of the blob window and add is_coding helper 2018-06-05 15:32:25 -06:00
Stephen Akridge
5711fb9969 Generate coding for the current blob set not just the first coding set 2018-06-05 15:32:25 -06:00
Stephen Akridge
d1eaecde9a Fix deadlock and only push to contq if it's not a coding blob 2018-06-05 15:32:25 -06:00
Stephen Akridge
00c8505d1e Handle set_flags error 2018-06-05 15:32:25 -06:00
Stephen Akridge
33f01efe69 Fixes for erasure coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
377d312c81 Revert log levels 2018-06-05 15:32:25 -06:00
Stephen Akridge
badf5d5412 Add window recovery 2018-06-05 15:32:25 -06:00
Stephen Akridge
0339f90b40 Fix gf-complete url and symlinks 2018-06-05 15:32:25 -06:00
Stephen Akridge
5455e8e6a9 Review comments 2018-06-05 15:32:25 -06:00
Stephen Akridge
6843b71a0d Debug erasure ci script 2018-06-05 15:32:25 -06:00
Stephen Akridge
634408b5e8 Add erasure build to ci 2018-06-05 15:32:25 -06:00
Stephen Akridge
d053f78b74 Erasure refinements, fix generating orders table 2018-06-05 15:32:25 -06:00
Stephen Akridge
93b6fceb2f generate coding after indexing 2018-06-05 15:32:25 -06:00
Stephen Akridge
ac7860c35d indexing blobs then coding 2018-06-05 15:32:25 -06:00
Stephen Akridge
b0eab8729f Add erasure ci script 2018-06-05 15:32:25 -06:00
Stephen Akridge
cb81f80b31 Enable logging for client demo 2018-06-05 15:32:25 -06:00
Stephen Akridge
ea97529185 Fix erasure compilation 2018-06-05 15:32:25 -06:00
Greg Fitzgerald
f1075191fe Clean up comments: Event -> Transaction 2018-06-04 21:43:46 -06:00
Greg Fitzgerald
74c479fbc9 Delete bitrotted docs 2018-06-04 21:43:46 -06:00
Greg Fitzgerald
7e788d3a17 No longer need explicit refs in rustc 1.26 2018-06-04 21:43:46 -06:00
anatoly yakovenko
69b3c75f0d Power of two chance (#314)
* fix validator script
* 1/2^30 chance that we fail due to random returning the same value
2018-06-04 13:32:34 -07:00
Anatoly Yakovenko
b2c2fa40a2 comments 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
50458d9524 more tests 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
9679e3e356 more tests 2018-06-03 22:08:25 -06:00
Anatoly Yakovenko
6db9f92b8a crdt gossip tests 2018-06-03 22:08:25 -06:00
Stephen Akridge
4a44498d45 Fix args in validator script, readme version, client-demo perf print 2018-06-02 21:55:27 -06:00
anatoly yakovenko
216510c573 repair socket and receiver thread (#303)
repair socket and receiver thread
2018-06-02 08:32:51 -07:00
Stephen Akridge
fd338c3097 Run release binary for leader node 2018-06-01 17:10:48 -06:00
Greg Fitzgerald
b66ebf5dec Version bump 2018-06-01 17:10:37 -06:00
Greg Fitzgerald
5da99de579 Review feedback 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
3aa2907bd6 Restore shellcheck 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
05d1618659 Add more detail to testnet setup 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
86113811f2 Readme/demo cleanup 2018-06-01 13:43:38 -06:00
Greg Fitzgerald
53ecaa03f1 Need another beta 2018-05-31 19:08:09 -06:00
Greg Fitzgerald
205c1aa505 Version bump 2018-05-31 18:49:41 -06:00
Greg Fitzgerald
9b54c1542b Move defaults from bash to Rust 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
93d5d1b2ad Default to 1 node 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
4c0f3ed6f3 Attempt to revive the singlenode demo 2018-05-31 17:18:11 -07:00
Greg Fitzgerald
2580155bf2 Enable last of the ignored tests 2018-05-31 16:45:21 -06:00
Greg Fitzgerald
6ab0dd4df9 Remove config options from fullnode 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
4b8c36b6b9 Add solana-fullnode-config 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
359a8397c0 Make bootstrapping functions accessible to other binaries 2018-05-31 16:15:02 -06:00
Greg Fitzgerald
c9fd5d74b5 Boot futures 0.1
We added them thinking it'd be a good stepping stone towards an
asynchronous thin client, but it's used inconsistently, and where it
is used, the function is still synchronous, which is just confusing.
2018-05-31 14:13:09 -06:00
Greg Fitzgerald
391744af97 Speed up the creation of the million accounts
All threads were locked on the same set of signatures.
2018-05-31 12:13:18 -06:00
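A hedged illustration of the contention being removed: shard one hot signature set across several locks so threads stop serializing on a single Mutex. Types here are placeholders, not the bank's real ones.

```
use std::collections::HashSet;
use std::sync::Mutex;

// Illustrative: N independent locks instead of one contended lock.
struct ShardedSignatures {
    shards: Vec<Mutex<HashSet<u64>>>,
}

impl ShardedSignatures {
    fn new(n: usize) -> Self {
        ShardedSignatures {
            shards: (0..n).map(|_| Mutex::new(HashSet::new())).collect(),
        }
    }

    // Only one shard is locked per insert, so unrelated signatures
    // no longer contend with each other.
    fn insert(&self, sig: u64) -> bool {
        let shard = (sig as usize) % self.shards.len();
        self.shards[shard].lock().unwrap().insert(sig)
    }
}

fn main() {
    let sigs = ShardedSignatures::new(16);
    assert!(sigs.insert(12345));
    assert!(!sigs.insert(12345)); // duplicate detected
}
```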
Greg Fitzgerald
587ab29e09 Don't register entry ID until after processing its transactions 2018-05-31 12:13:18 -06:00
Greg Fitzgerald
80f07dadc5 Generalize process_entries()
And use it in fullnode
2018-05-31 12:13:18 -06:00
Greg Fitzgerald
60609a44ba Initialize recorder from bank's last_id 2018-05-31 12:13:18 -06:00
Greg Fitzgerald
30c8fa46b4 rustc version bump 2018-05-30 20:49:55 -06:00
Greg Fitzgerald
7aab7d2f82 Sleep between events if PoH is disabled 2018-05-30 15:55:10 -06:00
Anatoly Yakovenko
a8e1c44663 names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
a2b92c35e1 thread names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
9f2086c772 names 2018-05-30 14:50:53 -06:00
Anatoly Yakovenko
3eb005d492 names for threads 2018-05-30 14:50:53 -06:00
Stephen Akridge
68955bfcf4 Change multinode script argument to leader path
Some may have cloned their code in a different place
2018-05-30 14:49:42 -06:00
Anatoly Yakovenko
9ac7070e08 fix ci 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
e44e81bd17 fmt 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
f5eedd2d19 fmt 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
46059a37eb skip shell check 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
adc655a3a2 scripts 2018-05-30 14:04:48 -06:00
Ubuntu
3058f80489 log 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
df98cae4b6 cleanup 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
d327e0aabd warn on tx verify sig 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
17d3a6763c update 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
02c5b0343b fixed cloned 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
2888e45fea comments 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
f1311075d9 integration tests 2018-05-30 14:04:48 -06:00
Ubuntu
6c380e04a3 fix 2018-05-30 14:04:48 -06:00
Anatoly Yakovenko
cef1c208a5 Crdt pipeline, coalesce window repair requests in the listener by examining all of them at once, and unblock those threads from doing IO. 2018-05-30 14:04:48 -06:00
Greg Fitzgerald
ef8eac92e3 Version bump 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
9c9c63572b cargo fmt
rustfmt was updated with 1.26.1
2018-05-29 20:33:45 -07:00
Greg Fitzgerald
6c0c6de1d0 Better error names 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
b57aecc24c Better error if Bank doesn't recognize tx last_id 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
290dde60a0 Test invalid tokens and fees 2018-05-29 20:33:45 -07:00
Greg Fitzgerald
38623785f9 Add fee to Transaction
Fixes #161
2018-05-29 20:33:45 -07:00
Grimes
256ecc7208 Build status badge now excludes pull requests 2018-05-29 20:33:34 -07:00
Greg Fitzgerald
76b06b47ba Delete dead code 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
cf15cf587f spending plan -> budget
Review feedback from @sakridge
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
134c7add57 Fix bench build 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
ac0791826a plan.rs -> payment_plan.rs 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
d2622b7798 Allow for additional smart contract languages
Fixes #159
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
f82cbf3a27 Move Budget EDSL into its own module 2018-05-29 18:09:03 -06:00
Greg Fitzgerald
aa7e3df8d6 Plan -> Budget
Budget is now an EDSL. PaymentPlan is the interface to it.
2018-05-29 18:09:03 -06:00
Greg Fitzgerald
ad00d7bd9c Move plan methods to a trait 2018-05-29 18:09:03 -06:00
Anatoly Yakovenko
8d1f82c34d breaks 2018-05-29 16:53:26 -07:00
Anatoly Yakovenko
0cb2036e3a comment on bad blob usage 2018-05-29 16:53:26 -07:00
Greg Fitzgerald
2b1e90b0a5 More idiomatic Rust 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
f2ccc133a2 Finally made fetch happen 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
5e824b39dd Move multinode communication outside TPU 2018-05-29 14:04:27 -06:00
Greg Fitzgerald
41efcae64b Remove dead code
History: we thought SigVerifyStage would use these, but it does
signature verification before deserializing transactions.
2018-05-29 10:38:58 -06:00
Greg Fitzgerald
cf5671d058 tr -> tx
Missed a few.
2018-05-29 10:38:58 -06:00
Greg Fitzgerald
2570bba6b1 Make apply_payment a method
History: the function was pulled out of Bank when each field wasn't
wrapped in a RwLock, and that locking 'balances' meant to lock
everything in the bank. Now that the RwLocks are here to stay,
we can make it a method again.
2018-05-29 10:38:58 -06:00
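A simplified sketch of why the method works now: with balances behind its own RwLock, apply_payment needs only &self. The field and types are placeholders, not Bank's actual layout.

```
use std::collections::HashMap;
use std::sync::RwLock;

struct Bank {
    balances: RwLock<HashMap<String, i64>>, // placeholder key/value types
}

impl Bank {
    // Locking balances no longer means locking the whole bank.
    fn apply_payment(&self, from: &str, to: &str, tokens: i64) {
        let mut balances = self.balances.write().unwrap();
        *balances.entry(from.to_string()).or_insert(0) -= tokens;
        *balances.entry(to.to_string()).or_insert(0) += tokens;
    }
}

fn main() {
    let bank = Bank { balances: RwLock::new(HashMap::new()) };
    bank.apply_payment("alice", "bob", 5);
    assert_eq!(bank.balances.read().unwrap()["bob"], 5);
}
```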
Greg Fitzgerald
71cb7d5c97 Better names 2018-05-29 10:38:58 -06:00
Greg Fitzgerald
0df6541d5e Fewer public functions 2018-05-29 10:38:58 -06:00
Greg Fitzgerald
52145caf7e Cleanup: make 'verified' qualifier implicit
History: Qualifying the method names with 'verified' was done to
distinguish them from methods that first did signature verification.
After we moved all signature verification to SigVerifyStage, we removed
those methods from Bank, leaving only the 'verified' ones.

This patch removes the word 'verified' from all method names, since
it is now implied by any code running after SigVerifyStage.
2018-05-29 10:38:58 -06:00
Grimes
86a50ae9e1 Add RUST_BACKTRACE 2018-05-28 22:23:25 -07:00
Grimes
c64cfb74f3 Update code coverage command 2018-05-28 22:23:25 -07:00
Grimes
26153d9919 Avoid docker buildkite plugin, which is not supported by bkrun 2018-05-28 22:23:25 -07:00
Grimes
5af922722f Add local buildkite CI runner 2018-05-28 22:23:25 -07:00
Grimes
b70d730b32 Support local .a, skip if unable to find .a 2018-05-28 22:23:25 -07:00
Grimes
bf4b856e0c Don't fail if CODECOV_TOKEN is undefined 2018-05-28 22:23:25 -07:00
Grimes
0cf0ae6755 s/label:/name:/g 2018-05-28 22:23:25 -07:00
Grimes
29061cff39 Delint existing shell scripts 2018-05-28 05:18:46 -06:00
Grimes
b7eec4c89f Lint shell scripts in CI 2018-05-28 05:18:46 -06:00
Greg Fitzgerald
a3854c229e More rebase typos 2018-05-26 20:48:42 -06:00
Greg Fitzgerald
dcde256433 Fix rebase typo 2018-05-26 20:28:22 -06:00
Greg Fitzgerald
931bdbd5cd Fix typo 2018-05-26 20:25:44 -06:00
Greg Fitzgerald
b7bd59c344 Cleanup whitespace
And delete rebasing artifact
2018-05-26 20:23:18 -06:00
Anatoly Yakovenko
2dbf9a6017 rename 2018-05-26 20:13:42 -06:00
Anatoly Yakovenko
fe93bba457 logs
poll both endpoints in client
logs
logs
logs
names
verify plan not sig
log
set udp buffer to max
drop output
more verbose about window requests
log the leader
load leader identity
readme for single node demo
update
asserts
update
replay all
rsync
dynamic file read in testnode
fix
cleanup
readme
sum
fix scripts
cleanup
cleanup
readme
2018-05-26 20:13:42 -06:00
Grimes
6e35f54738 Simplify environment blocks 2018-05-26 14:38:26 -07:00
Grimes
089294a85e 'ignored' step failures are no longer ignored 2018-05-26 11:00:20 -07:00
Grimes
25c0b44641 Run ignored build step in docker 2018-05-26 11:00:20 -07:00
Greg Fitzgerald
58c1589688 More typos 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
bb53f69016 Fix typos 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
75659ca042 Light up coverage build 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
fc00594ea4 Move multinode test to integration tests 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
8d26be8b89 Run benchmarks in nightly
And name functions the same way as test functions
2018-05-26 00:36:50 -06:00
Greg Fitzgerald
af4e95ae0f Only check formatting in stable build 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
ffb4a7aa78 Boot TravisCI configuration 2018-05-26 00:36:50 -06:00
Greg Fitzgerald
dcaeacc507 request_stage::serialize_packets -> packet::to_blobs
Good stuff - no need to hide them.
2018-05-25 17:31:07 -06:00
Greg Fitzgerald
4f377e6710 Generalize serialize_responses 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
122db85727 Move channel-oriented code into request_stage 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
a598e4aa74 Fix comments 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
733b31ebbd testnode -> fullnode
It's the real deal.
2018-05-25 17:31:07 -06:00
Greg Fitzgerald
dac9775de0 Replace client-demo with multinode-demo 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
46c19a5783 Rename sigverify modules 2018-05-25 17:31:07 -06:00
Greg Fitzgerald
aaeb5ba52f tr -> tx 2018-05-25 16:47:21 -06:00
Greg Fitzgerald
9f5a3d6064 events -> transactions 2018-05-25 16:47:21 -06:00
Greg Fitzgerald
4cdf873f98 Delete event.rs 2018-05-25 16:47:21 -06:00
Greg Fitzgerald
b43ae748c3 Update publish.sh 2018-05-25 16:08:14 -06:00
Greg Fitzgerald
02ddd89653 Version bump
And solana.io -> solana.com
2018-05-25 15:37:07 -06:00
Anatoly Yakovenko
bbe6eccefe log 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
6677a7b66a verify plan not sig 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
75c37fcc73 names 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
5be71a8a9d logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
b9ae7d1ebb logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
8b02e0f57c logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
342cc7350a poll both endpoints in client 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
2335a51ced logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
868df1824c fmt 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
83c11f0f9d logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
1022f1b0c6 logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
c2c80232e3 logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
115f4e54b8 update 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
669b1694b8 exponential backoff for retransmit 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
2128c58fbe logs and tps counting 2018-05-24 10:35:23 -06:00
Greg Fitzgerald
e12e154877 Boot Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
73d3c17507 Migrate from Event to Transaction Timestamp/Signature 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
7f647a93da Add last_id to Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
ecb3dbbb60 Add witness tx constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
cc907ba69d Add Instruction type 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
5a45eef1dc Exit cleanup (#252)
* Ignore record_stage exit reason. We only really care about panic exit versus graceful exit.
* Ignore coverage build in CI
2018-05-24 10:03:17 -06:00
Greg Fitzgerald
0d980e89bc cargo fmt
@aeyakovenko: https://github.com/rust-lang/rust.vim#formatting-with-rustfmt
2018-05-23 20:05:08 -06:00
Anatoly Yakovenko
ef87832bff fixed 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
94507d1aca cuda 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
89924a38ff cuda 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
7faa2b8698 fixed demo 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
65352ce8e7 fix 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
f1988ee1e3 help 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
82ac8eb731 use client ports 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
ae47e34fa5 fix 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
28e781efc3 break early 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
5c3ceb8355 aws demo2 2018-05-23 17:24:58 -06:00
Greg Fitzgerald
c9113b381d Pull channel functionality into record_stage
This makes record_stage consistent with the other stages. The stage
manages the channels. Anything else is in a standalone object. In
the case of the record_stage, that leaves almost nothing!
2018-05-23 17:15:28 -06:00
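The stage-owns-its-channels pattern, sketched with placeholder types; the real stage records entries, and this only shows the shape.

```
use std::sync::mpsc::{channel, Receiver};
use std::thread::{self, JoinHandle};

struct RecordStage {
    thread_hdl: JoinHandle<()>,
}

impl RecordStage {
    // The constructor owns channel setup; callers just get the output end.
    fn new(input: Receiver<String>) -> (Self, Receiver<String>) {
        let (sender, output) = channel();
        let thread_hdl = thread::spawn(move || {
            for event in input {
                if sender.send(event).is_err() {
                    break; // downstream hung up
                }
            }
        });
        (RecordStage { thread_hdl }, output)
    }
}

fn main() {
    let (tx, rx) = channel();
    let (stage, out) = RecordStage::new(rx);
    tx.send("event".to_string()).unwrap();
    println!("{}", out.recv().unwrap());
    drop(tx);
    stage.thread_hdl.join().unwrap();
}
```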
Stephen Akridge
75e69eecfa Fix nightly bench 2018-05-23 17:15:03 -06:00
Anatoly Yakovenko
f3c4acc723 cleanup multi node test 2018-05-23 16:59:17 -06:00
Stephen Akridge
2a0095e322 Remove unused variable in multinode-demo fix compiler warning 2018-05-23 16:55:45 -06:00
anatoly yakovenko
9ad5f3c65b fix option (#246) 2018-05-23 14:48:00 -07:00
Greg Fitzgerald
579de64d49 Delete binary again 2018-05-23 14:15:59 -06:00
Greg Fitzgerald
d4200a7b1e Fix build
GenKeys() fix and new multinode module crossed in flight.
2018-05-23 14:10:26 -06:00
Greg Fitzgerald
84477835dc Fix nondeterministic key generation (#243)
Our one and only unsafe operation was ...unsafe.
2018-05-23 14:04:07 -06:00
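The property being restored, sketched with today's rand crate; StdRng here is an assumption, and GenKeys' actual RNG may differ.

```
use rand::rngs::StdRng;
use rand::{RngCore, SeedableRng};

fn main() {
    // Same seed, same stream: the determinism key generation depends on.
    let seed = [42u8; 32];
    let mut a = StdRng::from_seed(seed);
    let mut b = StdRng::from_seed(seed);
    assert_eq!(a.next_u64(), b.next_u64());
}
```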
anatoly yakovenko
504b318ef1 Hooks for binaries to run as leader or replicator and attach to network (#221) 2018-05-23 14:03:19 -06:00
Greg Fitzgerald
f154c8c490 Add data to errors to ease debugging 2018-05-23 12:50:23 -06:00
Greg Fitzgerald
d4959bc157 Test cleanup
GenKey unit tests were in the benchmark suite.
2018-05-23 12:50:23 -06:00
Anatoly Yakovenko
87e025fe22 fmt 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
8049323ca8 @garious review 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
b38c7ea2ff fmt 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
239b925fb3 woop 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
60da7f7aaf wip 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
8646ff4927 refactor wip 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
59be94a81f cleanup 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
437c485e5c cleanup 2018-05-23 12:07:44 -06:00
Michael Vines
79a58da6a9 Merge pull request #240 from mvines/master
CI overhaul follow-ups
2018-05-22 23:27:19 -07:00
Michael Vines
ae29641a18 Run most CI steps in docker 2018-05-22 23:16:25 -07:00
Michael Vines
9c3f65bca9 Update build status badge 2018-05-22 22:59:19 -07:00
Greg Fitzgerald
086365b4c4 Merge pull request #237 from garious/hoist-lastid
Hoist last_id
2018-05-22 17:48:25 -06:00
Greg Fitzgerald
64044da49c Merge pull request #239 from sakridge/fix_bad_sig_mac
Fix test_bad_sig on mac
2018-05-22 17:48:01 -06:00
Stephen Akridge
7b5b7feb63 Fix test_bad_sig on mac 2018-05-22 16:40:01 -07:00
Greg Fitzgerald
2e059f8504 Rename TransactionData to Contract
No longer a single place to get all the data that was signed.
2018-05-22 17:00:40 -06:00
Greg Fitzgerald
207b6686d1 Hoist last_id
First step in unifying Witness processing and Transaction processing
2018-05-22 17:00:40 -06:00
Greg Fitzgerald
abfd7d6951 Merge pull request #234 from sakridge/fix_events_addr
Send events to the right address
2018-05-22 16:59:28 -06:00
anatoly yakovenko
7fc166b5ba Merge pull request #238 from aeyakovenko/tvu_cleanup
tvu cleanup
2018-05-22 15:41:33 -07:00
Anatoly Yakovenko
021953d59a cleanup 2018-05-22 15:30:46 -07:00
Anatoly Yakovenko
bbe89df2ff fmt 2018-05-22 15:18:07 -07:00
Anatoly Yakovenko
a638ec5911 builds 2018-05-22 15:17:59 -07:00
Anatoly Yakovenko
26272a3600 split out stages 2018-05-22 14:26:28 -07:00
Stephen Akridge
8454eb79d0 Send events to the right address and set recv socket timeout 2018-05-22 13:52:50 -07:00
Michael Vines
796f4b981b Merge pull request #233 from mvines/ci
Add in-tree buildkite pipeline
2018-05-22 13:06:24 -07:00
Michael Vines
34514d65bc Add in-tree buildkite pipeline 2018-05-21 23:43:27 -07:00
Greg Fitzgerald
2786357082 Merge pull request #230 from garious/generalize-topackets
Benchmark the banking stage
2018-05-18 19:47:26 -07:00
anatoly yakovenko
4badeacd1d Merge pull request #226 from aeyakovenko/converge_test
check convergence
2018-05-16 23:44:23 -07:00
Anatoly Yakovenko
63a0ba6ec8 fixed 2018-05-16 23:28:03 -07:00
Anatoly Yakovenko
9a4ce6d70e fmt 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
35ee2d0ce1 cleanup 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
b04716d40d fmt 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
051fa6f1f1 cleanup 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
8dc1b07e75 docs 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
bee1e7ebaf compute convergence maximum 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
f3f0b9f0c5 update 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
a5cf745e1c check convergence 2018-05-16 23:27:26 -07:00
Greg Fitzgerald
273b800047 Benchmark the banking stage 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
6c1f1c2a7a Promote create_entry() to Entry::new() 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
9c62f8d81f Add Event::Transaction constructor 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
82aef7ebe2 Merge pull request #225 from mvines/deploy
Auto deploy tagged versions of solana to crate.io
2018-05-16 23:36:15 -06:00
Michael Vines
57636d3d5f Auto deploy tagged versions of solana to crate.io 2018-05-16 21:38:14 -07:00
Greg Fitzgerald
dc87effc0a Merge pull request #229 from garious/fix-bench
Fix the benchmark build
2018-05-16 16:37:56 -06:00
Greg Fitzgerald
f0c9823e9f Merge pull request #228 from garious/generalize-topackets
request::to_request_packets -> packet::to_packets
2018-05-16 16:37:29 -06:00
Greg Fitzgerald
0b91dd6163 Fix the benchmark build 2018-05-16 16:35:50 -06:00
Greg Fitzgerald
4955c6f13a request::to_request_packets -> packet::to_packets 2018-05-16 16:11:53 -06:00
Greg Fitzgerald
2e7beca9ba Generalize to_request_packets 2018-05-16 16:01:19 -06:00
Greg Fitzgerald
59c1b9983d Merge pull request #220 from garious/add-tpu
Add tpu
2018-05-16 12:21:07 -06:00
Greg Fitzgerald
f7083e0923 Remove transaction processing from RPU and request processing from TVU 2018-05-15 12:15:29 -06:00
Greg Fitzgerald
6d4defdf96 Offload event processing to the TPU 2018-05-15 11:33:43 -06:00
Greg Fitzgerald
b826f837f8 First attempt to pull TPU into the server 2018-05-15 11:25:55 -06:00
Greg Fitzgerald
5855e18a4e Let server own the bank, not TPU/RPU 2018-05-15 11:21:48 -06:00
Greg Fitzgerald
3f38c0a245 Feed events socket into the server 2018-05-15 11:19:58 -06:00
Greg Fitzgerald
cfe8b3fc55 Wrap the RPU with new object Server 2018-05-15 11:00:01 -06:00
Greg Fitzgerald
e9ee020b5f Rename constructors 2018-05-15 10:45:36 -06:00
Greg Fitzgerald
1bcf3891b4 New TPU/RPU constructors 2018-05-15 10:44:47 -06:00
Greg Fitzgerald
5456de63e9 Less state 2018-05-15 10:38:17 -06:00
Greg Fitzgerald
9026c70952 Inline Rpu::new 2018-05-15 10:33:16 -06:00
Greg Fitzgerald
99dc4ea4a9 Spin up threads from Rpu/Tpu constructors 2018-05-15 10:30:52 -06:00
Greg Fitzgerald
0aaa500f7c Rpu/Tpu serve() functions now only spin up threads 2018-05-15 10:10:45 -06:00
Greg Fitzgerald
5f5be83a17 Hoist socket creation/configuration
TODO: Add a library for socket configuration.
2018-05-15 10:05:23 -06:00
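A minimal sketch of hoisted socket configuration using only std; the timeout value is illustrative.

```
use std::net::UdpSocket;
use std::time::Duration;

// One place to bind and configure sockets, instead of ad hoc setup
// scattered through the thread-spawning code.
fn configured_socket(addr: &str) -> std::io::Result<UdpSocket> {
    let socket = UdpSocket::bind(addr)?;
    socket.set_read_timeout(Some(Duration::from_millis(100)))?;
    Ok(socket)
}

fn main() -> std::io::Result<()> {
    let socket = configured_socket("127.0.0.1:0")?;
    println!("bound to {}", socket.local_addr()?);
    Ok(())
}
```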
Greg Fitzgerald
7e44005a0f Don't do error-prone things in functions that spawn threads 2018-05-15 09:53:51 -06:00
Greg Fitzgerald
ee3fb985ea Hoist set_timeout 2018-05-15 09:42:28 -06:00
Greg Fitzgerald
2a268aa528 Reorder to reflect dependencies 2018-05-15 09:17:48 -06:00
Greg Fitzgerald
cd262cf860 Merge pull request #223 from rlkelly/202__rust_refactor
202  rust refactor
2018-05-15 08:44:47 -06:00
Robert Kelly
a1889c32d4 fixed CrdtToSmall typo 2018-05-15 10:29:56 -04:00
Robert Kelly
d42d024d9c minor changes 2018-05-15 10:23:11 -04:00
anatoly yakovenko
7b88b8d159 Merge pull request #222 from aeyakovenko/fixed_ignore_tests
fix ignore tests
2018-05-14 22:18:38 -07:00
Anatoly Yakovenko
4131071b9a fix ignore tests 2018-05-14 22:06:42 -07:00
Greg Fitzgerald
ef6bd7e3b8 Add TPU 2018-05-14 17:36:19 -06:00
Greg Fitzgerald
374bff6550 Extract event processing from request_stage 2018-05-14 17:31:27 -06:00
Greg Fitzgerald
0a46bbe4f9 Merge pull request #219 from garious/add-write-stage
Move write_service and drain_service into new write_stage module
2018-05-14 17:18:04 -06:00
Greg Fitzgerald
f4971be236 Merge pull request #218 from aeyakovenko/multitest-rebase
multinode test
2018-05-14 17:17:34 -06:00
Anatoly Yakovenko
421273f862 disable tests that fail with kcov 2018-05-14 16:07:21 -07:00
Anatoly Yakovenko
2c7f229883 wait longer 2018-05-14 15:48:43 -07:00
Anatoly Yakovenko
904eabad2f wait longer 2018-05-14 15:48:24 -07:00
Anatoly Yakovenko
8b233f6be4 update 2018-05-14 15:43:26 -07:00
Anatoly Yakovenko
08fc821ca9 rebase 2018-05-14 15:35:54 -07:00
Greg Fitzgerald
81706f2d75 Move write_service and drain_service into new write_stage module 2018-05-14 16:31:31 -06:00
Anatoly Yakovenko
7b50c3910f fmt 2018-05-14 15:21:41 -07:00
Anatoly Yakovenko
2d635386af rebased 2018-05-14 15:20:41 -07:00
Greg Fitzgerald
a604dcb4c4 Merge pull request #217 from garious/add-historian-stage
Add record_stage to pipeline
2018-05-14 16:01:45 -06:00
Greg Fitzgerald
7736b9cac6 Boot Alice and Bob from the unit tests 2018-05-14 15:39:34 -06:00
Greg Fitzgerald
d2dd005a59 accountant -> bank 2018-05-14 15:33:11 -06:00
Greg Fitzgerald
6e8f99d9b2 Purge EventProcessor 2018-05-14 14:45:29 -06:00
Greg Fitzgerald
685de30047 Purge EventProcessor from RPU 2018-05-14 14:35:25 -06:00
Greg Fitzgerald
17cc9ab07f Rename Historian to RecordStage
Historian was a legacy name. The new name reflects the new pipelined
architecture.
2018-05-14 14:19:19 -06:00
Greg Fitzgerald
3f10bf44db Config recorder with any kind of Duration, not just milliseconds 2018-05-14 14:12:36 -06:00
Greg Fitzgerald
27984e469a Multiply duration, not milliseconds 2018-05-14 13:58:42 -06:00
Greg Fitzgerald
a2c05b112e Add historian to pipeline
No longer intercept entries to register_entry_id(). Instead,
register the ID in the Write stage.

EventProcessor is now just being used as a place to store data.

Fixes #216
2018-05-14 12:43:40 -06:00
Greg Fitzgerald
a578c1a5e3 Merge pull request #215 from garious/suppress_panic_message_in_tests
Don't output panic noise from panic test
2018-05-14 11:46:22 -06:00
Greg Fitzgerald
500aaed48e Merge pull request #211 from garious/add-tx-count
Drop EntryInfo subscriptions
2018-05-14 10:41:09 -06:00
Robert Kelly
4a94da8a94 Don't output panic noise from panic test
P.S. rustfmt 0.4.1-stable (7a807262 2018-04-20)
2018-05-14 10:38:59 -06:00
Greg Fitzgerald
cc447c0fda Drop support for EntryInfo subscriptions 2018-05-14 09:53:57 -06:00
Greg Fitzgerald
0ae69bdcd9 Get transaction_count via GetTransactionCount instead of EntryInfo 2018-05-14 09:45:09 -06:00
Greg Fitzgerald
5ba20a94e8 Panic on error to get same signature as transaction_count() 2018-05-14 09:43:40 -06:00
Greg Fitzgerald
f168c377fd Get last_id via GetLastId instead of EntryInfo 2018-05-14 09:40:29 -06:00
Greg Fitzgerald
dfb754dd13 Revive GetLastId messages 2018-05-14 09:35:10 -06:00
Greg Fitzgerald
455050e19c Expose the server-side transaction count 2018-05-14 07:21:12 -06:00
Greg Fitzgerald
317031f455 Add transaction count to accountant 2018-05-14 06:49:51 -06:00
Greg Fitzgerald
b132ce1944 Merge pull request #210 from aeyakovenko/buildite_coverage
ignore unstable tests
2018-05-13 22:00:32 -06:00
Anatoly Yakovenko
8b226652aa unstable 2018-05-13 20:54:41 -07:00
Anatoly Yakovenko
2c7fe3ed8d unstable 2018-05-13 20:51:07 -07:00
Anatoly Yakovenko
3d5f2b3c28 unstable 2018-05-13 20:45:55 -07:00
Anatoly Yakovenko
7a79afe4a6 unstable 2018-05-13 20:41:54 -07:00
Anatoly Yakovenko
1f7387a39b increase sleep 2018-05-13 20:33:41 -07:00
Greg Fitzgerald
0fc2bee144 Merge pull request #208 from rlkelly/203__remove_old_genkey
removed old keygen
2018-05-13 19:04:23 -06:00
Robert Kelly
791ae852a2 removed old keygen 2018-05-13 18:14:10 -04:00
Greg Fitzgerald
c2fcd876d7 Merge pull request #206 from garious/add-accounting-stage
More modules
2018-05-12 18:05:10 -06:00
Greg Fitzgerald
d239d4a495 Add missing files 2018-05-12 17:57:28 -06:00
Greg Fitzgerald
aec05ef602 Move RequestProcessor into its own module 2018-05-12 17:50:55 -06:00
Greg Fitzgerald
e5d46d998b Move thin client messages into their own module 2018-05-12 17:41:27 -06:00
Greg Fitzgerald
b2e3299539 Only pass accountant write_service 2018-05-12 17:30:15 -06:00
Greg Fitzgerald
c308a6459f cargo fmt 2018-05-12 17:27:15 -06:00
Greg Fitzgerald
4eb1bc08a7 Merge pull request #205 from rlkelly/203__test_key_generation
203  test key generation
2018-05-12 17:26:46 -06:00
Robert Kelly
ff5e1c635f increased iterations 2018-05-12 18:18:18 -04:00
Robert Kelly
6149c2fcb5 added benchmarks for two GenKeys 2018-05-12 18:08:08 -04:00
Greg Fitzgerald
d7cd80dce5 Merge pull request #204 from garious/add-accounting-stage
TPU cleanup
2018-05-12 15:47:37 -06:00
Greg Fitzgerald
6264508f5e Consistent naming of senders and receivers 2018-05-12 15:24:20 -06:00
Greg Fitzgerald
a3869dd4c1 Move entry_receiver to RequestStage
This can move to AccountingStage once RequestStage stops
calling process_events().
2018-05-12 15:14:37 -06:00
Greg Fitzgerald
a3d2831f8c Free up the name 'accounting_stage' 2018-05-12 14:05:57 -06:00
Robert Kelly
4cd1fa8c38 refactored seed generation 2018-05-12 15:42:27 -04:00
Greg Fitzgerald
1511dc43d7 Move RequestProcessor out of Rpu/Tvu state 2018-05-12 11:39:24 -06:00
Greg Fitzgerald
3d82807965 Delete dead code 2018-05-12 11:24:40 -06:00
Greg Fitzgerald
4180571660 Don't pass events_socket to RPU 2018-05-12 11:11:30 -06:00
Greg Fitzgerald
421d9aa501 Free up the name 'tpu' 2018-05-12 10:53:25 -06:00
Greg Fitzgerald
898f4971a2 Free up name 'thin_client_service' 2018-05-12 10:50:22 -06:00
Greg Fitzgerald
7ab3331f01 Move validation processor to its own module 2018-05-12 00:31:32 -06:00
Greg Fitzgerald
b4ca414492 More object-oriented 2018-05-12 00:19:12 -06:00
Greg Fitzgerald
73abea088a No need for TPU dependency 2018-05-11 23:51:35 -06:00
Greg Fitzgerald
2376dfc139 Let thin client own the receiver channel 2018-05-11 23:46:04 -06:00
Greg Fitzgerald
d2f95d5319 Move thin client service thread into thin_client_service.rs 2018-05-11 23:37:44 -06:00
Greg Fitzgerald
cd96843699 Free up name ThinClientService 2018-05-11 23:37:14 -06:00
Greg Fitzgerald
ca80bc33c6 Move the writer stage's utilities to its own module 2018-05-11 22:36:16 -06:00
Greg Fitzgerald
19607886f7 Move sig verification stage into its own module 2018-05-11 21:51:37 -06:00
Greg Fitzgerald
3c11a91f77 Cleanup verifier error handling 2018-05-11 21:01:07 -06:00
Greg Fitzgerald
b781fdbd04 Reorganize 2018-05-11 20:50:50 -06:00
Greg Fitzgerald
765d901530 Better names 2018-05-11 20:18:04 -06:00
Greg Fitzgerald
3cedbc493e Reorder to reflect the pipeline order 2018-05-11 20:11:25 -06:00
Greg Fitzgerald
0488d0a82f Extract sig verify functions 2018-05-11 19:59:40 -06:00
Greg Fitzgerald
f0be595e4c Create function for thin client thread 2018-05-11 17:58:27 -06:00
Greg Fitzgerald
55100854d6 Better names 2018-05-11 16:41:35 -06:00
Greg Fitzgerald
600a1f8866 Initialize thin client with events port 2018-05-11 16:35:53 -06:00
Greg Fitzgerald
95bf68f3f5 Correct some strange naming 2018-05-11 16:24:18 -06:00
Greg Fitzgerald
bcdb058492 cargo fmt 2018-05-11 13:06:05 -06:00
Greg Fitzgerald
7f46aef624 Merge pull request #200 from jackson-sandland/153-panic-cleanup
issue #153 - panic cleanup
2018-05-11 13:05:04 -06:00
Code Cobain
e779496dfb Update signature.rs 2018-05-11 11:49:22 -07:00
Code Cobain
3d77fa5fbc Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:40:20 -07:00
Jackson Sandland
250830ade9 cargo fmt run 2018-05-11 11:38:52 -07:00
Greg Fitzgerald
7b2eb7ccfc Merge pull request #189 from rlkelly/156__remove_user_keys_in_mintdemo
156  remove user keys in mintdemo
2018-05-11 12:19:32 -06:00
Code Cobain
458c27c6e9 Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:18:45 -07:00
Robert Kelly
a49e664e63 Merge branch '156__remove_user_keys_in_mintdemo' of github.com:rlkelly/solana into 156__remove_user_keys_in_mintdemo 2018-05-11 14:07:48 -04:00
Robert Kelly
f20380d6b4 changed RwLock to RefCell 2018-05-11 14:07:41 -04:00
Rob Kelly
05a5e551d6 Merge branch 'master' into 156__remove_user_keys_in_mintdemo 2018-05-11 13:00:44 -04:00
Robert Kelly
d278b71cb2 added tests and utility method for key generation 2018-05-11 12:55:05 -04:00
Greg Fitzgerald
a485c141d5 Merge pull request #199 from garious/add-accounting-stage
Fix race condition in Accountant::apply_payment()
2018-05-11 10:54:32 -06:00
Greg Fitzgerald
8a9f6b9ae3 Merge pull request #201 from CriesofCarrots/master
Generalize next tick functions to carry events
2018-05-11 10:54:14 -06:00
Tyera Eulberg
7144090528 Fix whitespace 2018-05-11 10:40:31 -06:00
Tyera Eulberg
ee0015ac38 Fix whitespace 2018-05-11 10:34:46 -06:00
Tyera Eulberg
8b7f7f1088 Generalize next tick functions to carry events 2018-05-11 09:45:42 -06:00
Jackson Sandland
c95c6a75f8 tpu.rs - panic cleanup 2018-05-10 20:49:58 -07:00
Jackson Sandland
44bf79e35f transaction.rs - panic cleanup 2018-05-10 18:24:33 -07:00
Jackson Sandland
bb654f286c tpu.rs - panic cleanup 2018-05-10 18:21:10 -07:00
Greg Fitzgerald
1acd2aa8cf Fix race condition in Accountant::apply_payment() 2018-05-10 19:07:15 -06:00
Jackson Sandland
18d3659b91 timing.rs - panic cleanup 2018-05-10 17:47:27 -07:00
Jackson Sandland
63a4bafa72 thin_client - panic cleanup 2018-05-10 17:46:10 -07:00
Jackson Sandland
4eb2e84c9f streamer.rs - panic cleanup 2018-05-10 17:38:00 -07:00
Jackson Sandland
73c7fb87e8 signature.rs - panic cleanup 2018-05-10 17:15:53 -07:00
Jackson Sandland
c1496722aa packet.rs - panic cleanup 2018-05-10 17:11:31 -07:00
Jackson Sandland
d9f81b0c8c mint.rs - panic cleanup 2018-05-10 17:06:43 -07:00
Jackson Sandland
d69beaabe1 historian.rs - panic cleanup 2018-05-10 17:00:37 -07:00
Jackson Sandland
b7a0bd6347 event.rs - panic cleanup 2018-05-10 16:59:13 -07:00
Jackson Sandland
882ea6b672 erasure.rs - panic cleanup 2018-05-10 16:54:21 -07:00
Greg Fitzgerald
736d3eabae Merge pull request #198 from garious/add-accounting-stage
Move more code out of TPU
2018-05-10 17:24:22 -06:00
Greg Fitzgerald
af53197c04 cargo +nightly fmt 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
cf186c5762 Better names 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
f384a2ce85 Move streamer-specific utility into streamer module 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
803b76e997 More idiomatic Rust 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
230d7c3dd6 Move all Request processing into thin_client_service 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
4f629dd982 Add events socket instead of modifying the existing socket 2018-05-10 16:54:43 -06:00
Greg Fitzgerald
4fdd891b54 More precise function names 2018-05-10 16:54:43 -06:00
Greg Fitzgerald
64a892321a Merge pull request #197 from sakridge/fixes_for_entry_serialization
Fixes for serializing entries over blobs
2018-05-10 16:53:30 -06:00
Stephen Akridge
a80991f2b3 Fixes for serializing entries over blobs and reorg into ledger 2018-05-10 15:30:30 -07:00
Raj Gokal
c9cd81319a Set theme jekyll-theme-slate 2018-05-10 13:28:29 -07:00
Greg Fitzgerald
521ae21632 Merge pull request #193 from sakridge/serialize_entries_over_multiple_blobs
Serialize entries over multiple blobs
2018-05-10 13:53:48 -06:00
Jackson Sandland
bcd6606a16 ecdsa.rs - panic cleanup 2018-05-09 18:19:23 -07:00
Jackson Sandland
52ebb88205 accountant.rs - simplify error messages 2018-05-09 18:16:37 -07:00
Jackson Sandland
1e91d09be7 crdt.rs - panic cleanup 2018-05-09 18:10:48 -07:00
Jackson Sandland
02c573986b historian / transaction updates 2018-05-09 17:22:14 -07:00
Jackson Sandland
f2de486658 accountant.rs - panic cleanup 2018-05-09 17:19:12 -07:00
Stephen Akridge
900b4f2644 Serialize entries over multiple blobs 2018-05-09 16:03:47 -07:00
Greg Fitzgerald
1cfaa9afb6 Merge pull request #194 from garious/add-accounting-stage
Fix nightly
2018-05-09 16:53:45 -06:00
Greg Fitzgerald
801468d70d Fix nightly 2018-05-09 16:51:34 -06:00
Greg Fitzgerald
0601e05978 Merge pull request #192 from garious/add-accounting-stage
Add accounting stage
2018-05-09 16:47:50 -06:00
Greg Fitzgerald
7ce11b5d1c Cleanup: use full words for field names
and optionally for variable names
2018-05-09 16:19:42 -06:00
Greg Fitzgerald
f2d4799491 Cleanup: field names should be nouns 2018-05-09 16:14:40 -06:00
Greg Fitzgerald
ebc458cd32 Remove redundant Arcs 2018-05-09 15:45:10 -06:00
Greg Fitzgerald
43cd631579 Add thin_client_service 2018-05-09 14:56:34 -06:00
Greg Fitzgerald
bc824c1a6c Reference count the accountant
So that the thin client can reference the AccountingStage's accountant
from separate threads.
2018-05-09 14:33:20 -06:00
Greg Fitzgerald
4223aff840 Remove useless ref counts 2018-05-09 14:25:52 -06:00
Greg Fitzgerald
f107c6c2ca Don't wrap thread-safe objects with mutexes 2018-05-09 14:21:42 -06:00
Greg Fitzgerald
7daf14caa7 Don't depend on client from server 2018-05-09 13:33:33 -06:00
Greg Fitzgerald
ded28c705f Tuck away the Historian
The Historian is now just a utility of the accounting stage.
2018-05-09 12:25:19 -06:00
Greg Fitzgerald
778bec0777 Intercept historian output from accounting stage
We were accessing the accountant from multiple stages just to
register the ID the historian adds to Events.

This change should cause a whole lot of Arcs and Mutexes to go away.
2018-05-09 12:00:37 -06:00
Greg Fitzgerald
6967cf7f86 Boot sync_channel()
This is less useful now that we send Vec<Event> instead of Event.
2018-05-09 11:43:16 -06:00
Greg Fitzgerald
0ee3ec86bd Fix nightly 2018-05-09 10:48:56 -06:00
Greg Fitzgerald
e4c47e8417 Use AccountingStage in Tpu 2018-05-09 10:31:23 -06:00
Greg Fitzgerald
98ae80f4ed Hoist historian 2018-05-09 09:26:58 -06:00
Greg Fitzgerald
876c77d0bc Extract accounting stage code from tpu 2018-05-09 09:22:46 -06:00
Greg Fitzgerald
d44a6f7541 Move Accounting stage functionality into its own object 2018-05-09 09:03:00 -06:00
Greg Fitzgerald
9040c04d27 Remove redundant Tick 2018-05-09 08:18:52 -06:00
Greg Fitzgerald
ebbdef0538 Ignore flakey test 2018-05-09 08:16:59 -06:00
Greg Fitzgerald
bfbee988d0 No longer wait for a Tick signal to record events 2018-05-09 08:15:51 -06:00
Greg Fitzgerald
1d4d0272ca Drop support for logging a single event 2018-05-09 08:12:33 -06:00
Greg Fitzgerald
77a76f0783 Record a batch of events 2018-05-09 08:11:19 -06:00
Greg Fitzgerald
d9079de262 Add a way of sending a batch of events 2018-05-09 08:05:40 -06:00
Greg Fitzgerald
b3d732a1a1 No longer artificially limit the size of entries
Instead, serialize the entries and split them up over multiple
blobs.
2018-05-09 07:59:55 -06:00
Greg Fitzgerald
52f1a02938 Delete historical artifact
This was just to explain Proof of History. We have better explanations
elsewhere. Delete!
2018-05-09 07:53:24 -06:00
Jackson Sandland
fe51669e85 signature.rs - panic cleanup 2018-05-08 23:21:45 -07:00
Jackson Sandland
670a6c50c9 event.rs - panic cleanup 2018-05-08 22:58:48 -07:00
Jackson Sandland
86c1aaf7d8 transaction.rs - panic cleanup 2018-05-08 22:46:22 -07:00
Jackson Sandland
658e787b60 timing.rs panic cleanup 2018-05-08 22:40:07 -07:00
Robert Kelly
40c50aef50 deterministic random wallet generation 2018-05-09 00:07:19 -04:00
Robert Kelly
a24c2bbe73 merge bug 2018-05-09 00:07:03 -04:00
Robert Kelly
bdbe90b891 Merge branch 'master' of github.com:solana-labs/solana 2018-05-08 23:40:54 -04:00
Greg Fitzgerald
3236be7877 Merge pull request #188 from garious/add-tpu
AccountantSkel -> Tpu
2018-05-08 19:50:58 -06:00
Greg Fitzgerald
1dca17fdb4 cargo +nightly fmt 2018-05-08 18:59:01 -06:00
Greg Fitzgerald
785e971698 AccountantSkel -> Tpu
The terms Stub and Skel come from OMG IDL and only made sense while
the Stub was acting as an RPC client for the Accountant object.
Nowadays, the Stub interface looks nothing like the Accountant and
meanwhile we've recognized the multithreaded implementation is more
reminiscent of a pipelined CPU. Thus, we finally bite the bullet and
rename our modules.

AccountantSkel -> Tpu
AccountantStub -> ThinClient

Up next will be moving much of the TPU code into separate modules,
each representing a stage of the pipeline. The interface of each
will follow the precedent set by the Historian object.
2018-05-08 17:40:02 -06:00
Greg Fitzgerald
2bfa20ff85 Merge pull request #182 from garious/split-request
Control port prep
2018-05-08 17:11:34 -06:00
Greg Fitzgerald
474a9af78d Merge pull request #187 from sakridge/fix_blob_size_check
Trust the recorder not to give us more than we can serialize
2018-05-08 17:11:18 -06:00
Greg Fitzgerald
61425eacb8 Merge pull request #185 from sakridge/fix_default_client_port
Fix default client port, server uses 8000-8002 for gossip
2018-05-08 16:58:04 -06:00
Stephen Akridge
4870def1fb Fix default client port, server uses 8000-8002 for gossip. 2018-05-08 15:40:55 -07:00
Stephen Akridge
3e73fb9233 Trust the recorder not to give us more than we can serialize
Also run the client for 10 seconds; 5 is a bit too short
2018-05-08 15:23:41 -07:00
Greg Fitzgerald
5ad6061c3f Merge pull request #184 from sakridge/add_debug_msg_in_readme
Add message about trace debugging
2018-05-08 14:39:09 -06:00
Stephen Akridge
fae019b974 Add message about trace debugging 2018-05-08 13:26:09 -07:00
Greg Fitzgerald
3bb06d8364 Merge pull request #183 from sakridge/verify_thread_rework
Rework sig processing threads and add perf for process/verify
2018-05-08 13:15:41 -06:00
Greg Fitzgerald
c9c9afa472 Remove the note about git-lfs 2018-05-08 12:52:24 -06:00
Stephen Akridge
bd0671e123 Rework sig processing threads and add perf for process/verify 2018-05-08 11:49:29 -07:00
Greg Fitzgerald
6f3ec8d21f Merge pull request #181 from aeyakovenko/link
update link
2018-05-08 08:20:43 -06:00
Anatoly Yakovenko
9a0bf13feb update link 2018-05-08 06:44:24 -07:00
Greg Fitzgerald
9ff1a6f0cd Add a thread to support thin clients 2018-05-07 21:44:44 -06:00
Greg Fitzgerald
a59f64cae1 Merge pull request #179 from garious/update-readme
Update README with proposed way to download the gpu lib
2018-05-07 16:43:20 -06:00
Greg Fitzgerald
a4ecd09723 Delete .gitattributes
This was used by git-lfs.
2018-05-07 16:35:54 -06:00
Greg Fitzgerald
f159dfd15a Update README with proposed way to download the gpu lib
If you checked here yesterday, this was a top-level file in git-lfs,
but that made the developer workflow more painful, so we boot that
file and are making it available via an HTTP endpoint.
2018-05-07 16:33:27 -06:00
Greg Fitzgerald
9e8ec86fa3 Merge pull request #178 from garious/split-request
Refactoring for upcoming thin client port
2018-05-07 16:21:48 -06:00
Greg Fitzgerald
62bb78f58d Prepwork to hoist processing requests 2018-05-07 15:09:08 -06:00
Greg Fitzgerald
893011c3ba Process events instead of processing only transactions
Prep work to allow clients to send any type that can end up in
the ledger.
2018-05-07 14:51:13 -06:00
Greg Fitzgerald
880cb8e7cc Merge pull request #176 from aeyakovenko/multinode
Multinode
2018-05-07 09:05:12 -06:00
Anatoly Yakovenko
85f83f2c74 fmt 2018-05-06 22:29:33 -07:00
Anatoly Yakovenko
4751e459cc fixed! 2018-05-06 22:25:05 -07:00
Anatoly Yakovenko
138efa6cec fixed constant 2018-05-06 22:06:19 -07:00
Anatoly Yakovenko
a68e50935e useless timeouts i think 2018-05-06 21:48:46 -07:00
Stephen Akridge
e8f5fb35ac Multinode fixes and test
* Replace magic numbers for 64k event size
* Fix gossip, don't ping yourself
* Retransmit only to listening nodes
* Multinode test in stub marked unstable
2018-05-06 21:36:06 -07:00
sakridge
6af27669b0 Merge pull request #175 from garious/64k-entries
Limit 256 events per entry
2018-05-04 12:19:25 -07:00
Greg Fitzgerald
e162f24119 Limit 256 events per entry
Attempt to keep blob size under 64kb
2018-05-04 11:52:05 -06:00
Greg Fitzgerald
dbcc462a48 Merge pull request #173 from sakridge/entry_process_cleanup
Factor out entry processing and fix replicate test to call global setup fn
2018-05-04 11:19:28 -06:00
Stephen Akridge
2d5313639a Factor out entry processing and fix replicate test to call global setup fn 2018-05-03 22:24:30 -07:00
Greg Fitzgerald
38af0f436d Merge pull request #174 from sakridge/fix_bind_for_external
Fix bind so we can talk on external interfaces and surface send error
2018-05-03 18:20:00 -06:00
Stephen Akridge
888c2ffb20 Fix bind so we can talk on external interfaces and surface send error 2018-05-03 17:05:02 -07:00
Greg Fitzgerald
588593f619 Merge pull request #172 from sakridge/fix_entry_serialize
Fix entry serialize
2018-05-03 16:12:42 -06:00
Stephen Akridge
2cdd515b12 Compiles/fmt and add assert for forward progress 2018-05-03 14:58:08 -07:00
Anatoly Yakovenko
0aad71d46e fix entry serialize 2018-05-03 14:35:04 -07:00
Greg Fitzgerald
6f9285322d Merge pull request #171 from garious/cleanup-lastid
Cleanup last_id access in stub and skel
2018-05-03 14:57:28 -06:00
Greg Fitzgerald
68c7f992fa Soothe all versions of rustfmt 2018-05-03 13:56:10 -06:00
Greg Fitzgerald
1feff408ff Implement get_last_id() with transaction_count()
This is more precise than the previous implementation because it'll
drain the EntryInfo queue and return the most recent last_id instead
of the first one.
2018-05-03 13:34:57 -06:00
Greg Fitzgerald
f752e02487 Implement GetLastId with EntryInfo subscription 2018-05-03 13:31:43 -06:00
Greg Fitzgerald
c9c7fb0a27 Update comment
The last PR added a thread that logs entries without needing to
be driven by the client.
2018-05-03 13:27:37 -06:00
Greg Fitzgerald
de680c2a8e Remove duplicate state 2018-05-03 13:24:37 -06:00
Greg Fitzgerald
03695ba4c5 Merge pull request #169 from sakridge/broadcast_rebase
Add broadcast impl
2018-05-03 12:22:34 -06:00
Anatoly Yakovenko
c2e2960bf7 Add broadcast impl 2018-05-03 10:34:01 -07:00
Greg Fitzgerald
385d2a580c Merge pull request #168 from aeyakovenko/fix_multi_host_client_demo
multi host client demo
2018-05-03 10:21:41 -06:00
Greg Fitzgerald
7e02652068 Merge pull request #170 from garious/refactor-historian
Fix nightly build
2018-05-03 10:16:05 -06:00
Greg Fitzgerald
ae29c9b4a0 Fix nightly build 2018-05-03 09:38:59 -06:00
Anatoly Yakovenko
078f917e61 useless assert 2018-05-03 08:34:57 -07:00
Anatoly Yakovenko
b65f04d500 multi host client demo
Bind to the same interface as the user supplied client address.
2018-05-03 08:28:11 -07:00
Greg Fitzgerald
6acaffe581 Merge pull request #166 from garious/refactor-historian
TPU-friendly Historian
2018-05-02 18:13:30 -06:00
Greg Fitzgerald
e47ef42a33 Merge pull request #167 from djKooks/readme-version
Add comment about rustc version in README
2018-05-02 18:08:13 -06:00
kwangin
b950e33d81 Remove useless comment 2018-05-03 09:06:41 +09:00
kwangin
ec8cfc77ad Remove component adding part 2018-05-03 09:04:56 +09:00
kwangin
00a16db9cd Add comment about rustc version in README 2018-05-03 08:38:09 +09:00
Greg Fitzgerald
4b9f115586 Hoist Historian input 2018-05-02 16:35:37 -06:00
Greg Fitzgerald
c5cc91443e Rename sender/receiver to input/output 2018-05-02 15:54:53 -06:00
Greg Fitzgerald
48d94143e7 Fix CI 2018-05-02 11:05:11 -06:00
Greg Fitzgerald
8174a05156 Merge pull request #165 from rlkelly/126__atomic_balances
126  atomic balances
2018-05-02 10:43:31 -06:00
Robert Kelly
63cf6363a2 more rustfmt 2018-05-02 12:24:25 -04:00
Robert Kelly
cc6de605ac rustfmt 2018-05-02 12:21:20 -04:00
Robert Kelly
d0151d2b79 restored original test logic 2018-05-02 12:07:42 -04:00
Robert Kelly
6b45d453b8 modified verification map 2018-05-02 10:44:41 -04:00
Robert Kelly
b992a84d67 modified verification to loop until success or failure 2018-05-02 10:15:08 -04:00
Robert Kelly
cb362e9052 rust format 2018-05-01 16:38:15 -04:00
Robert Kelly
ccb478c1f6 improved error handling and atomic transactions 2018-05-01 16:38:15 -04:00
Greg Fitzgerald
6af3680f99 Version bump 2018-04-30 22:38:39 -06:00
Greg Fitzgerald
e6c3c215ab Add note about installing git-lfs 2018-04-30 15:26:31 -06:00
Greg Fitzgerald
5c66bbde01 Add a note about running with GPU optimizations 2018-04-30 15:20:39 -06:00
Anatoly Yakovenko
77dd1bdd4a move CI specific scripts to solana-labs/buildkite repo 2018-04-29 23:43:43 -07:00
Anatoly Yakovenko
6268d540a8 move CI specific scripts to solana-labs/buildkite repo
Former-commit-id: 77dd1bdd4a
2018-04-29 23:43:43 -07:00
Greg Fitzgerald
5918e38747 Version bump 2018-04-27 15:49:48 -07:00
Greg Fitzgerald
5eb80f8027 Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".
2018-04-27 15:47:22 -07:00
94 changed files with 8628 additions and 3312 deletions


@@ -1,2 +1,5 @@
ignore:
- "src/bin"
coverage:
status:
patch: off

.gitattributes vendored

@@ -1 +0,0 @@
*.a filter=lfs diff=lfs merge=lfs -text

.gitignore vendored

@@ -1,3 +1,4 @@
Cargo.lock
/target/
**/*.rs.bk
.cargo


@@ -1,22 +0,0 @@
language: rust
sudo: required
services:
- docker
matrix:
allow_failures:
- rust: nightly
include:
- rust: stable
- rust: nightly
env:
- FEATURES='unstable'
before_script: |
export PATH="$PATH:$HOME/.cargo/bin"
rustup component add rustfmt-preview
script:
- cargo fmt -- --write-mode=diff
- cargo build --verbose --features "$FEATURES"
- cargo test --verbose --features "$FEATURES"
after_success: |
docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
bash <(curl -s https://codecov.io/bash) -s target/cov


@@ -1,28 +1,29 @@
[package]
name = "solana"
description = "High Performance Blockchain"
version = "0.5.0-beta"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.0-alpha"
documentation = "https://docs.rs/solana"
homepage = "http://solana.io/"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <anatoly@solana.io>",
"Greg Fitzgerald <greg@solana.io>",
"Stephen Akridge <stephen@solana.io>",
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
]
license = "Apache-2.0"
[[bin]]
name = "solana-historian-demo"
path = "src/bin/historian-demo.rs"
[[bin]]
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
[[bin]]
name = "solana-testnode"
path = "src/bin/testnode.rs"
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"
[[bin]]
name = "solana-genesis"
@@ -40,6 +41,10 @@ path = "src/bin/mint.rs"
name = "solana-mint-demo"
path = "src/bin/mint-demo.rs"
[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
@@ -52,7 +57,7 @@ erasure = []
[dependencies]
rayon = "1.0.0"
sha2 = "0.7.0"
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
@@ -60,11 +65,15 @@ ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "^0.4.1"
env_logger = "^0.4.1"
matches = "^0.1.6"
byteorder = "^1.2.1"
libc = "^0.2.1"
getopts = "^0.2"
isatty = "0.1"
futures = "0.1"
log = "0.4.2"
env_logger = "0.5.10"
matches = "0.1.6"
byteorder = "1.2.1"
libc = "0.2.1"
getopts = "0.2"
atty = "0.2"
rand = "0.5.1"
pnet_datalink = "0.21.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-io = "0.1"


@@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
Copyright 2018 Solana Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

README.md

@@ -1,27 +1,42 @@
[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
[![Build Status](https://travis-ci.org/solana-labs/solana.svg?branch=master)](https://travis-ci.org/solana-labs/solana)
[![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg?branch=master)](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
Blockchain, Rebuilt for Scale
===
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.
Disclaimer
===
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.
Solana: High Performance Blockchain
===
Solana&trade; is a new architecture for a high performance blockchain. It aims to support
over 700 thousand transactions per second on a gigabit network.
Introduction
===
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
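To make the hash-chain idea concrete, here is a minimal sketch of a SHA-256 chain used as a crude verifiable delay function. It assumes only the `sha2` crate (the 0.7 API used elsewhere in this tree) and is an illustration, not the fullnode's actual recorder:
```rust
extern crate sha2;
use sha2::{Digest, Sha256};

/// Apply SHA-256 `num_hashes` times, starting from `seed`.
fn extend_chain(seed: &[u8; 32], num_hashes: u64) -> [u8; 32] {
    let mut id = *seed;
    for _ in 0..num_hashes {
        let mut hasher = Sha256::default();
        hasher.input(&id);
        id.copy_from_slice(&hasher.result()[..]);
    }
    id
}

fn main() {
    let seed = [0u8; 32];
    // Producing the checkpoint takes num_hashes strictly sequential steps...
    let checkpoint = extend_chain(&seed, 1_000_000);
    // ...and anyone can verify it by redoing exactly the same work.
    assert_eq!(checkpoint, extend_chain(&seed, 1_000_000));
}
```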
Running the demo
Testnet Demos
===
The Solana repo contains all the scripts you might need to spin up your own
local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnet. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:
First, install Rust's package manager Cargo.
```bash
@@ -36,58 +51,107 @@ $ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The testnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint-demo.json file contains private keys that will be
used later in this demo.
The demo code is sometimes broken between releases as we add new low-level
features, so if this is your first time running the demo, you'll improve
your odds of success if you check out the
[latest release](https://github.com/solana-labs/solana/releases)
before proceeding:
```bash
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
$ git checkout v0.6.1
```
Now you can start the server:
Configuration Setup
---
The network is initialized with a genesis ledger and leader/validator configuration files.
These files can be generated by running the following script.
```bash
$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
$ ./multinode-demo/setup.sh
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
to start sending it transactions.
Singlenode Testnet
---
Then, in a separate shell, let's execute some transactions. Note we pass in
Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that UDP ports 8000-10000 are
open on all the machines you want to test with.
Now start the server:
```bash
$ ./multinode-demo/leader.sh
```
To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
it by adding `--features=cuda` to the line that runs `solana-fullnode` in
`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.
```bash
$ ./fetch-perf-libs.sh
$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
Multinode Testnet
---
To run a multinode testnet, after starting a leader node, spin up some validator nodes:
```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
```
As with the leader node, you can run a performance-enhanced validator fullnode by adding
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
```bash
$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
```
Testnet Client Demo
---
Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
the JSON configuration file here, not the genesis ledger.
```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
```
Now kill the server with Ctrl-C, and take a look at the ledger. You should
see something similar to:
```json
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
```
Now restart the server from where we left off. Pass it both the genesis ledger, and
the transaction ledger.
What just happened? The client demo spins up several threads to send 500,000 transactions
to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
floods the network with UDP packets, such that the network will almost certainly drop a
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
demo completes after it has convinced itself the testnet won't process any additional
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.
Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash
$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
$ sudo snap install solana --edge --devmode
```
(`--devmode` flag is required only for `solana.fullnode-cuda`)
Lastly, run the client demo again, and verify that all funds were spent in the
previous round, and so no additional transactions are added.
Once installed, the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
Update to the latest version at any time with
```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
$ snap info solana
$ sudo snap refresh solana --devmode
```
Stop the server again, and verify there are only Tick entries, and no Transaction entries.
Developing
===
@@ -102,6 +166,12 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```
If your rustc version is lower than 1.26.1, please update it:
```bash
$ rustup update
```
Download the source code:
```bash
@@ -115,9 +185,37 @@ Testing
Run the test suite:
```bash
cargo test
$ cargo test
```
To emulate all the tests that will run on a Pull Request, run:
```bash
$ ./ci/run-local.sh
```
Debugging
---
There are some useful debug messages in the code; you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
```bash
$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
```
to see the debug and info sections for streamer and server respectively. Generally
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.
Attaching to a running process with gdb
```
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```
This will dump all the threads' stack traces into gdb.txt.
Benchmarking
---
@@ -136,12 +234,23 @@ $ cargo +nightly bench --features="unstable"
Code coverage
---
To generate code coverage statistics, run kcov via Docker:
To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
on Rust nightly.
```bash
$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
$ cargo +nightly install cargo-cov
```
Run cargo-cov and generate a report:
```bash
$ cargo +nightly cov test
$ cargo +nightly cov report --open
```
The coverage report will be written to `./target/cov/report/index.html`
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running

_config.yml Normal file

@@ -0,0 +1 @@
theme: jekyll-theme-slate


@@ -11,5 +11,6 @@ fn main() {
}
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
println!("cargo:rustc-link-lib=dylib=Jerasure");
println!("cargo:rustc-link-lib=dylib=gf_complete");
}
}

ci/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
/node_modules/
/package-lock.json
/snapcraft.credentials

ci/README.md Normal file

@@ -0,0 +1,88 @@
Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
additional GitHub integration provided by https://github.com/mvines/ci-gate
## Agent Queues
We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
`queue=default` and `queue=cuda`. The `default` queue should be favored and
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
be run on the `default` queue, and the [buildkite artifact
system](https://buildkite.com/docs/builds/artifacts) can be used to transfer build
products over to a GPU instance for testing.
## Buildkite Agent Management
### Buildkite GCP Setup
CI runs on Google Cloud Platform via two Compute Engine Instance groups:
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
VM Instances in each group is manually adjusted.
#### Updating a CI Disk Image
Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
The process to update a disk image is as follows (TODO: make this less manual):
1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk.
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
```
or
```
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
`ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
newly created Disk image.
7. Go to the Instance Groups tab and find the applicable group, `ci-default` or
`ci-cuda`. Edit the Instance Group in two steps: (a) Set the number of
instances to 0 and wait for them all to terminate, (b) Update the Instance
template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
Images.
## Reference
### Buildkite AWS CloudFormation Setup
**AWS CloudFormation is currently inactive, although it may be restored in the
future**
AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, please remain calm during this time.
#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```
Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.

ci/buildkite.yml Normal file

@@ -0,0 +1,31 @@
steps:
- command: "ci/docker-run.sh rust ci/test-stable.sh"
name: "stable [public]"
timeout_in_minutes: 20
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
timeout_in_minutes: 20
- wait
- command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
name: "nightly [public]"
timeout_in_minutes: 20
- command: "ci/test-stable-perf.sh"
name: "stable-perf [public]"
timeout_in_minutes: 20
retry:
automatic:
- exit_status: "*"
limit: 2
agents:
- "queue=cuda"
- command: "ci/snap.sh [public]"
timeout_in_minutes: 20
name: "snap [public]"
- wait
- command: "ci/publish-crate.sh [public]"
timeout_in_minutes: 20
name: "publish crate"
- command: "ci/hoover.sh [public]"
timeout_in_minutes: 20
name: "clean agent"

ci/docker-run.sh Executable file

@@ -0,0 +1,50 @@
#!/bin/bash -e
usage() {
echo "Usage: $0 [docker image name] [command]"
echo
echo Runs command in the specified docker image with
echo a CI-appropriate environment
echo
}
cd "$(dirname "$0")/.."
IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
echo Error: image not defined
exit 1
fi
docker pull "$IMAGE"
shift
ARGS=(
--workdir /solana
--volume "$PWD:/solana"
--env "HOME=/solana"
--rm
)
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")
# Ensure files are created with the current host uid/gid
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
ARGS+=(--user "$(id -u):$(id -g)")
fi
# Environment variables to propagate into the container
ARGS+=(
--env BUILDKITE_BRANCH
--env BUILDKITE_TAG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
--env SNAPCRAFT_CREDENTIALS_KEY
)
set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"


@@ -0,0 +1,7 @@
FROM snapcraft/xenial-amd64
# Update snapcraft to latest version
RUN apt-get update -qq \
&& apt-get install -y snapcraft \
&& rm -rf /var/lib/apt/lists/* \
&& snapcraft --version

ci/docker-snapcraft/build.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash -ex
cd "$(dirname "$0")"
docker build -t solanalabs/snapcraft .
docker push solanalabs/snapcraft

ci/hoover.sh Executable file

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Regular maintenance performed on a buildkite agent to control disk usage
#
echo --- Delete all exited containers first
(
set -x
exited=$(docker ps -aq --no-trunc --filter "status=exited")
if [[ -n "$exited" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$exited"
docker rm $exited
fi
)
echo --- Delete untagged images
(
set -x
untagged=$(docker images | grep '<none>'| awk '{ print $3 }')
if [[ -n "$untagged" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$untagged"
docker rmi $untagged
fi
)
echo --- Delete all dangling images
(
set -x
dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
if [[ -n "$dangling" ]]; then
# shellcheck disable=SC2086 # Don't want to double quote "$dangling"
docker rmi $dangling
fi
)
echo --- Remove unused docker networks
(
set -x
docker network prune -f
)
echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
(
set -x
find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)
echo --- System Status
(
set -x
docker images
docker ps
docker network ls
df -h
)
exit 0

ci/publish-crate.sh Executable file

@@ -0,0 +1,19 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
if [[ -z "$BUILDKITE_TAG" ]]; then
# Skip publish if this is not a tagged release
exit 0
fi
if [[ -z "$CRATES_IO_TOKEN" ]]; then
echo CRATES_IO_TOKEN undefined
exit 1
fi
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
exit 0

ci/run-local.sh Executable file

@@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#
cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun
if [[ ! -x $BKRUN ]]; then
(
set -x
cd ci/
npm install bkrun
)
fi
set -x
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml

ci/shellcheck.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive
cd "$(dirname "$0")/.."
set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
| xargs -0 \
ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
exit 0

ci/snap.sh Executable file

@@ -0,0 +1,40 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
DRYRUN="echo"
fi
if [[ -z "$BUILDKITE_TAG" ]]; then
SNAP_CHANNEL=edge
else
SNAP_CHANNEL=beta
fi
if [[ -z $DRYRUN ]]; then
[[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
echo SNAPCRAFT_CREDENTIALS_KEY not defined
exit 1;
}
(
openssl aes-256-cbc -d \
-in ci/snapcraft.credentials.enc \
-out ci/snapcraft.credentials \
-k "$SNAPCRAFT_CREDENTIALS_KEY"
snapcraft login --with ci/snapcraft.credentials
) || {
rm -f ci/snapcraft.credentials;
exit 1
}
fi
set -x
echo --- build
snapcraft
echo --- publish
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL


ci/test-nightly.sh Executable file

@@ -0,0 +1,32 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
export RUST_BACKTRACE=1
rustc --version
cargo --version
_() {
echo "--- $*"
"$@"
}
_ cargo build --verbose --features unstable
_ cargo test --verbose --features unstable
_ cargo bench --verbose --features unstable
# Coverage ...
_ cargo install --force cargo-cov
_ cargo cov test
_ cargo cov report
echo --- Coverage report:
ls -l target/cov/report/index.html
if [[ -z "$CODECOV_TOKEN" ]]; then
echo CODECOV_TOKEN undefined
else
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
fi

ci/test-stable-perf.sh Executable file

@@ -0,0 +1,12 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
export RUST_BACKTRACE=1
set -x
exec cargo test --features=cuda,erasure

ci/test-stable.sh Executable file

@@ -0,0 +1,18 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
export RUST_BACKTRACE=1
rustc --version
cargo --version
_() {
echo "--- $*"
"$@"
}
_ rustup component add rustfmt-preview
_ cargo fmt -- --write-mode=diff
_ cargo build --verbose
_ cargo test --verbose
_ cargo test -- --ignored


@@ -1,65 +0,0 @@
The Historian
===
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
![historian](https://user-images.githubusercontent.com/55449/36950845-459bdb58-1fb9-11e8-850e-894586f3729b.png)
```rust
extern crate solana;
use solana::historian::Historian;
use solana::ledger::{Block, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
sleep(Duration::from_millis(15));
let tokens = 42;
let keypair = generate_keypair();
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
hist.sender.send(event0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}
```
Running the program should produce a ledger similar to:
```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```
Proof-of-History
---
Take note of the last line:
```rust
assert!(entries[..].verify(&seed));
```
[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
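A hypothetical sketch of that check in Rust (assuming the `sha2` crate; `Entry` here mirrors the prose, not the crate's real types, and the exact order in which event data is mixed in may differ from the implementation):
```rust
extern crate sha2;
use sha2::{Digest, Sha256};

fn sha256(data: &[u8]) -> [u8; 32] {
    let mut hasher = Sha256::default();
    hasher.input(data);
    let mut out = [0u8; 32];
    out.copy_from_slice(&hasher.result()[..]);
    out
}

struct Entry {
    num_hashes: u64,
    id: [u8; 32],
    event_data: Option<Vec<u8>>, // serialized event, None for a Tick
}

/// Re-derive `entry.id` from the previous entry's id. Any reordering of
/// events would change the hashes and fail this check.
fn verify_entry(prev_id: &[u8; 32], entry: &Entry) -> bool {
    let mut id = *prev_id;
    for _ in 0..entry.num_hashes {
        id = sha256(&id);
    }
    if let Some(ref data) = entry.event_data {
        // Mix the event into the chain so events can't be reordered.
        let mut buf = id.to_vec();
        buf.extend_from_slice(data);
        id = sha256(&buf);
    }
    id == entry.id
}

fn main() {
    let seed = [0u8; 32];
    let id = sha256(&sha256(&sha256(&seed)));
    let tick = Entry { num_hashes: 3, id, event_data: None };
    assert!(verify_entry(&seed, &tick));
}
```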


@@ -1,18 +0,0 @@
msc {
client,historian,recorder;
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
client=>historian [ label = "Transaction(d0)" ] ;
historian=>recorder [ label = "Transaction(d0)" ] ;
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "entries.verify(h0)" ] ;
}

fetch-perf-libs.sh Executable file

@@ -0,0 +1,37 @@
#!/bin/bash -e
if [[ $(uname) != Linux ]]; then
echo Performance libraries are only available for Linux
exit 1
fi
if [[ $(uname -m) != x86_64 ]]; then
echo Performance libraries are only available for x86_64 architecture
exit 1
fi
(
set -x
curl -o solana-perf.tgz \
https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
tar zxvf solana-perf.tgz
)
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
echo ==============================================
echo Warning: possible CUDA version mismatch
echo
echo "Expected version: $(cat cuda-version.txt)"
echo "Detected version: $(cat /usr/local/cuda/version.txt)"
echo ==============================================
fi
else
echo ==============================================
echo Warning: unable to validate CUDA version
echo ==============================================
fi
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
exit 0

multinode-demo/client.sh Executable file

@@ -0,0 +1,17 @@
#!/bin/bash
if [[ -z $1 ]]; then
echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
exit 1
fi
LEADER=$1
COUNT=${2:-1}
rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
cargo run --release --bin solana-client-demo -- \
-n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log

multinode-demo/leader.sh Executable file

@@ -0,0 +1,28 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=/dev/null
. "${here}"/myip.sh
myip=$(myip) || exit $?
[[ -f leader-"${myip}".json ]] || {
echo "I can't find a matching leader config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
exit 1
}
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
# this makes a leader.json file available alongside genesis, etc. for
# validators and clients
cp leader-"${myip}".json leader.json
cargo run --release --bin solana-fullnode -- \
-l leader-"${myip}".json \
< genesis.log tx-*.log \
> tx-"$(date -u +%Y%m%d%H%M%S%N)".log

multinode-demo/myip.sh Executable file

@@ -0,0 +1,58 @@
#!/bin/bash
function myip()
{
declare ipaddrs=( )
# query interwebs
mapfile -t ipaddrs < <(curl -s ifconfig.co)
# machine's interfaces
mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
<(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
if (( ! ${#ipaddrs[*]} ))
then
echo "
myip: error: I'm having trouble determining what our IP address is...
Are we connected to a network?
"
return 1
fi
declare prompt="
Please choose the IP address you want to advertise to the network:
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
"
for ((i=1; i < ${#ipaddrs[*]}; i++))
do
prompt+=" $i) ${ipaddrs[i]}
"
done
while read -r -p "${prompt}
please enter a number [0 for default]: " which
do
[[ -z ${which} ]] && break;
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
echo "Ug. invalid entry \"${which}\"...
"
sleep 1
done
which=${which:-0}
echo "${ipaddrs[which]}"
}
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
then
myip "$@"
fi

multinode-demo/setup.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=/dev/null
. "${here}"/myip.sh
myip=$(myip) || exit $?
num_tokens=${1:-1000000000}
cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json

multinode-demo/validator.sh Executable file

@@ -0,0 +1,32 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=/dev/null
. "${here}"/myip.sh
leader=$1
[[ -z ${leader} ]] && {
echo "usage: $0 [network path to solana repo on leader machine]"
exit 1
}
myip=$(myip) || exit $?
[[ -f validator-"$myip".json ]] || {
echo "I can't find a matching validator config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
exit 1
}
rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
cargo run --release --bin solana-fullnode -- \
-l validator-"${myip}".json -v leader.json \
< genesis.log tx-*.log


@@ -0,0 +1,182 @@
# Smart Contracts Engine
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in a blockchain setting is that the OS, here the Solana kernel, does a cryptographic check of memory region ownership before accessing the memory.
## Toolchain Stack
+---------------------+ +---------------------+
| | | |
| +------------+ | | +------------+ |
| | | | | | | |
| | frontend | | | | verifier | |
| | | | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | |
| | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | | | |
| | llvm | | | | loader | |
| | | +------>+ | | |
| +-----+------+ | | +-----+------+ |
| | | | | |
| | | | | |
| +-----+------+ | | +-----+------+ |
| | | | | | | |
| | ELF | | | | runtime | |
| | | | | | | |
| +------------+ | | +------------+ |
| | | |
| client | | solana |
+---------------------+ +---------------------+
[Figure 1. Smart Contracts Stack]
In Figure 1, an untrusted client creates a program in the front-end language of her choice (like C/C++/Rust/Lua) and compiles it with LLVM to a position-independent shared object ELF targeting BPF bytecode. Solana will safely load and execute the ELF.
## Bytecode
Our bytecode is based on the Berkeley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:
1. Deterministic amount of time to execute the code
2. Bytecode that is portable between machine instruction sets
3. Verified memory accesses
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set
For 1, that means that loops are unrolled, and any backward jump is guarded with a check against the number of instructions that have been executed at that point. If the limit is reached, the program yields its execution. This involves saving the stack and the current instruction index.
For 2, the BPF bytecode already maps easily to x86-64, arm64 and other instruction sets.
For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic loads and stores can do a runtime check against available memory; these will be slow and should be avoided.
For 4, we use a fully linked PIC ELF with just a single RX segment. Effectively we link a shared object with `-fpic -target bpf` and use a linker script to collect everything into a single RX segment. Writable globals are not supported.
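As a sketch of what requirement 1 implies, the following hypothetical interpreter loop guards execution with an instruction budget and yields when it is exhausted. All names here are illustrative; the real verifier and loader do not exist yet:
```rust
/// Outcome of executing one instruction (hypothetical).
enum Step {
    Continue,
    Yield { pc: usize }, // budget exhausted: save state, resume later
    Done,
}

struct Vm {
    pc: usize,     // current instruction index
    executed: u64, // instructions executed so far in this slice
    budget: u64,   // deterministic per-slice instruction limit
}

impl Vm {
    /// Execute one instruction, yielding if the instruction budget is hit.
    fn step(&mut self, program_len: usize) -> Step {
        if self.pc >= program_len {
            return Step::Done;
        }
        if self.executed >= self.budget {
            // In real bytecode this guard is only emitted on backward jumps;
            // here we simply check on every instruction.
            return Step::Yield { pc: self.pc };
        }
        self.executed += 1;
        self.pc += 1; // a jump would set `pc` elsewhere
        Step::Continue
    }
}

fn main() {
    let mut vm = Vm { pc: 0, executed: 0, budget: 4 };
    loop {
        match vm.step(100) {
            Step::Continue => {}
            Step::Yield { pc } => {
                println!("yielded at instruction {}", pc);
                break;
            }
            Step::Done => break,
        }
    }
}
```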
## Loader
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.
Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.
A client will create a transaction to create a new loader instance:
`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`
A client will then do a bunch of transactions to load its ELF into the loader instance it created:
`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`
At this point the client can create a new instance of the module with its own instance address:
`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`
Once the instance has been created, the client may need to upload more user data to solana to configure this instance:
`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`
Now clients can `start` the instance:
`Instance_Start(Instance PubKey, proof of key ownership)`
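The lifecycle above could be modeled as plain transaction messages. A hypothetical Rust sketch, with names mirroring the calls above (none of these types exist in the tree):
```rust
type PubKey = [u8; 32];
type Proof = [u8; 64]; // signature proving ownership of the corresponding key

/// The loader/instance lifecycle from the prose above, as message variants.
#[allow(dead_code)]
enum Message {
    NewLoader { loader: PubKey, proof: Proof, elf_space: usize },
    UploadElf { loader: PubKey, proof: Proof, start: usize, end: usize, data: Vec<u8> },
    NewInstance { loader: PubKey, loader_proof: Proof, instance: PubKey, instance_proof: Proof },
    UploadModuleData { instance: PubKey, proof: Proof, start: usize, end: usize, data: Vec<u8> },
    Start { instance: PubKey, proof: Proof },
}

fn main() {}
```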
## Runtime
Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.
### State and Entry Point
State is addressed by an account, which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output set of keys is a subset of the input. Three basic kinds of state exist:
* Instance State
* Participant State
* Caller State
There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract, Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transaction's caller has allocated.
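In Rust terms, the entry point implied by this section might be sketched as follows (hypothetical types; a real contract would compute new state rather than echo its input):
```rust
type PubKey = [u8; 32];
type State = Vec<u8>;

/// A contract is a pure function over keyed state: it takes [(PubKey, State)]
/// and returns updated state for a subset of those keys, allocating nothing.
fn identity_contract(inputs: &[(PubKey, State)]) -> Vec<(PubKey, State)> {
    // Passing or failing must leave no destructive state change behind.
    inputs.to_vec()
}

fn main() {
    let inputs = vec![([0u8; 32], vec![1, 2, 3])];
    assert_eq!(identity_contract(&inputs), inputs);
}
```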
### Call
```
void call(
const struct instance_data *data,
const uint8_t kind[], //instance|participant|caller|read|write
const uint8_t *keys[],
uint8_t *data[],
int num,
uint8_t dirty[], //dirty memory bits
uint8_t *userdata, //current transaction data
);
```
To call this operation, the transaction that is destined for the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client first has to call a function on the contract with the designated address that will own the state.
At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.
* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`
Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:
* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`
Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.
#### Caller State
Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.
#### Instance State
Instance `state` is memory that belongs to this contract instance. We may need module-wide `state` as well.
#### Participant State
Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.
### Reduce
Some operations on the contract will require iteration over all the keys. To make this parallelizable, the iteration is broken up into reduce calls, which are combined.
```
void reduce_m(
const struct instance_data *data,
const uint8_t *keys[],
const uint8_t *data[],
int num,
uint8_t *reduce_data,
);
void reduce_r(
const struct instance_data *data,
const uint8_t *reduce_data[],
int num,
uint8_t *reduce_data,
);
```
### Execution
Transactions are batched and processed in parallel at each stage.
```
+-----------+ +--------------+ +-----------+ +---------------+
| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
+-----------+ | +--------------+ | +-----------+ | +---------------+
| | |
| +---------------+ | | +--------------+
|->| memory verify |->+ +->| debit undo |
+---------------+ | +--------------+
|
| +---------------+
+->| credit commit |
+---------------+
```
The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check whether the caller's key has some state allocated with the contract; the memory is then loaded and executed. After the execution stage, the dirty pages are written back by the contract. Because we know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution; only transfers may be reversed, and fees are committed to solana.
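Since each transaction's key set is known before execution, batching reduces to a conflict check on keys. A hypothetical sketch of that rule (not the actual scheduler):
```rust
use std::collections::HashSet;

type PubKey = [u8; 32];

struct Tx {
    keys: Vec<PubKey>, // every key this transaction will read or write
}

/// Greedily pack transactions into batches whose key sets don't intersect;
/// every transaction within a batch can then execute in parallel.
fn batch(txs: &[Tx]) -> Vec<Vec<&Tx>> {
    let mut batches: Vec<(HashSet<PubKey>, Vec<&Tx>)> = Vec::new();
    'next_tx: for tx in txs {
        for (locked, batch) in batches.iter_mut() {
            if tx.keys.iter().all(|k| !locked.contains(k)) {
                locked.extend(tx.keys.iter().cloned());
                batch.push(tx);
                continue 'next_tx;
            }
        }
        let locked: HashSet<PubKey> = tx.keys.iter().cloned().collect();
        batches.push((locked, vec![tx]));
    }
    batches.into_iter().map(|(_, txs)| txs).collect()
}

fn main() {
    let a = Tx { keys: vec![[1; 32]] };
    let b = Tx { keys: vec![[2; 32]] };
    let c = Tx { keys: vec![[1; 32], [2; 32]] }; // conflicts with both
    let txs = [a, b, c];
    let batches = batch(&txs);
    assert_eq!(batches.len(), 2); // {a, b} run together, then {c}
}
```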
### GPU execution
A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.
## Notes
1. There is no dynamic memory allocation.
2. Persistent memory is allocated to a Key with ownership.
3. Contracts can `call` to update key-owned state.
4. Contracts can `reduce` over the memory to aggregate state.
5. `call` is just a *syscall* that does a cryptographic check of memory ownership.


@@ -1,6 +0,0 @@
#!/bin/bash
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
source $HOME/.cargo/env
export PATH=$PATH:/usr/local/cuda/bin
cp /tmp/libcuda_verify_ed25519.a .
cargo test --features=cuda

scripts/perf-plot.py Executable file

@@ -0,0 +1,43 @@
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import sys
stages_to_counters = {}
stages_to_time = {}
if len(sys.argv) != 2:
print("USAGE: {} <input file>".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as fh:
for line in fh.readlines():
if "COUNTER" in line:
json_part = line[line.find("{"):]
x = json.loads(json_part)
counter = x['name']
if not (counter in stages_to_counters):
stages_to_counters[counter] = []
stages_to_time[counter] = []
stages_to_counters[counter].append(x['counts'])
stages_to_time[counter].append(x['now'])
fig, ax = plt.subplots()
for stage in stages_to_counters.keys():
plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)
plt.xlabel('ms')
plt.ylabel('count')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.locator_params(axis='x', nbins=10)
plt.grid(True)
plt.savefig("perf.pdf")

snap/snapcraft.yaml Normal file

@@ -0,0 +1,69 @@
name: solana
version: git
summary: Blockchain, Rebuilt for Scale
description: |
710,000 tx/s with off-the-shelf hardware and no sharding.
Scales with Moore's Law.
grade: devel
# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
# CUDA dependency, so use 'devmode' confinement for now
confinement: devmode
apps:
drone:
command: solana-drone
plugs:
- network
- network-bind
fullnode:
command: solana-fullnode
plugs:
- network
- network-bind
- home
fullnode-cuda:
command: solana-fullnode-cuda
plugs:
- network
- network-bind
- home
fullnode-config:
command: solana-fullnode-config
plugs:
- network
- network-bind
genesis:
command: solana-genesis
genesis-demo:
command: solana-genesis-demo
mint:
command: solana-mint
mint-demo:
command: solana-mint-demo
client-demo:
command: solana-client-demo
parts:
solana-cuda:
plugin: rust
rust-channel: stable
rust-features:
- erasure
- cuda
prime:
- bin/solana-fullnode-cuda
- usr/lib/libgf_complete.so.1
- usr/lib/libJerasure.so.2
override-build: |
./fetch-perf-libs.sh
snapcraftctl build
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
solana:
plugin: rust
rust-channel: stable


@@ -1,526 +0,0 @@
//! The `accountant` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use transaction::Transaction;
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
AccountNotFound,
InsufficientFunds,
InvalidTransferSignature,
}
pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
if balances.read().unwrap().contains_key(&payment.to) {
let bals = balances.read().unwrap();
*bals[&payment.to].write().unwrap() += payment.tokens;
} else {
let mut bals = balances.write().unwrap();
bals.insert(payment.to, RwLock::new(payment.tokens));
}
}
pub struct Accountant {
balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
last_time: RwLock<DateTime<Utc>>,
}
impl Accountant {
/// Create an Accountant using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new());
apply_payment(&balances, deposit);
Accountant {
balances,
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
}
}
/// Create an Accountant with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let acc = Self::new_from_deposit(&deposit);
acc.register_entry_id(&mint.last_id());
acc
}
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures.read().unwrap().contains(sig) {
return false;
}
signatures.write().unwrap().insert(*sig);
true
}
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
signatures.write().unwrap().remove(sig)
}
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::forget_signature(&entry.1, sig);
}
false
}
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, sig);
}
false
}
/// Tell the accountant which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
the oldest ones once its internal cache is full. Once booted, the
/// accountant will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids.write().unwrap();
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
let bals = self.balances.read().unwrap();
// Hold a write lock before the condition check, so that a debit can't occur
// between checking the balance and the withdrawal.
let option = bals.get(&tr.from);
if option.is_none() {
return Err(AccountingError::AccountNotFound);
}
let mut bal = option.unwrap().write().unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
return Err(AccountingError::InvalidTransferSignature);
}
if *bal < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
}
*bal -= tr.data.tokens;
Ok(())
}
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
let mut plan = tr.data.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
} else {
let mut pending = self.pending.write().unwrap();
pending.insert(tr.sig, plan);
}
}
/// Process a Transaction that has already been verified.
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
self.process_verified_transaction_debits(tr)?;
self.process_verified_transaction_credits(tr);
Ok(())
}
/// Process a batch of verified transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// Run all debits first to filter out any transactions that can't be processed
// in parallel deterministically.
let results: Vec<_> = trs.into_par_iter()
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tr| {
self.process_verified_transaction_credits(&tr);
tr
})
})
.collect()
}
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
let mut trs = vec![];
let mut rest = vec![];
for event in events {
match event {
Event::Transaction(tr) => trs.push(tr),
_ => rest.push(event),
}
}
(trs, rest)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
let (trs, rest) = Self::partition_events(events);
self.process_verified_transactions(trs);
for event in rest {
self.process_verified_event(&event)?;
}
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(ref payment) = e.get().final_payment() {
apply_payment(&self.balances, payment);
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
self.time_sources.write().unwrap().insert(from);
}
if self.time_sources.read().unwrap().contains(&from) {
if dt > *self.last_time.read().unwrap() {
*self.last_time.write().unwrap() = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending.write().unwrap();
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: &Event) -> Result<()> {
match *event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
}
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances.read().unwrap();
bals.get(pubkey).map(|x| *x.read().unwrap())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_accountant() {
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let acc = Accountant::new(&alice);
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let acc = Accountant::new(&mint);
assert_eq!(
acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
Err(AccountingError::AccountNotFound)
);
}
#[test]
fn test_invalid_transfer() {
let alice = Mint::new(11_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
Err(AccountingError::InsufficientFunds)
);
let alice_pubkey = alice.keypair().pubkey();
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, acknowledge that the time specified in the condition has occurred;
// bob's funds are now available.
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
// The accountant's clock is already at `dt`, so this transfer should be processed immediately.
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
acc.process_verified_sig(alice.pubkey(), sig).unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
assert_eq!(acc.get_balance(&bob_pubkey), None);
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
}
#[test]
fn test_duplicate_event_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_forget_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
acc.reserve_signature_with_last_id(&sig, &alice.last_id());
assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_max_entry_ids() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let alice = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1];
assert!(acc.process_verified_transactions(trs)[1].is_err());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn process_verified_event_bench(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
acc.process_verified_transaction(&tr).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for sigs in acc.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
acc.process_verified_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}

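The two-phase split in `process_verified_transactions` above (collect all debits first, then apply credits) is what keeps a parallel batch deterministic: a credit produced inside a batch can never fund a debit in the same batch. A standalone sketch of that ordering rule, using a toy `Transfer` type and string keys rather than this crate's API:

use std::collections::HashMap;

// Toy stand-in for a signed transaction; illustrative only.
struct Transfer {
    from: &'static str,
    to: &'static str,
    tokens: i64,
}

fn process_batch(
    balances: &mut HashMap<&'static str, i64>,
    batch: &[Transfer],
) -> Vec<Result<(), String>> {
    // Phase 1: debits. Every debit is checked before any credit is applied,
    // so a credit from within the batch can never fund a later debit.
    let debited: Vec<Result<&Transfer, String>> = batch
        .iter()
        .map(|t| {
            let bal = balances.get_mut(t.from).ok_or("account not found".to_string())?;
            if *bal < t.tokens {
                return Err("insufficient funds".to_string());
            }
            *bal -= t.tokens;
            Ok(t)
        })
        .collect();
    // Phase 2: credits, applied only for transfers whose debit succeeded.
    debited
        .into_iter()
        .map(|r| r.map(|t| { *balances.entry(t.to).or_insert(0) += t.tokens; }))
        .collect()
}

fn main() {
    let mut balances = HashMap::from([("mint", 2)]);
    let batch = [
        Transfer { from: "mint", to: "alice", tokens: 2 },
        // Fails: alice's credit lands in phase 2, after this debit check,
        // mirroring test_debits_before_credits above.
        Transfer { from: "alice", to: "mint", tokens: 1 },
    ];
    let results = process_batch(&mut balances, &batch);
    assert!(results[0].is_ok());
    assert!(results[1].is_err());
}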
src/accountant_skel.rs

@@ -1,810 +0,0 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use subscribers;
pub struct AccountantSkel<W: Write + Send + 'static> {
acc: Accountant,
last_id: Hash,
writer: W,
historian: Historian,
entry_info_subscribers: Vec<SocketAddr>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetLastId,
Subscribe { subscriptions: Vec<Subscription> },
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
EntryInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
pub id: Hash,
pub num_hashes: u64,
pub num_events: u64,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
EntryInfo(EntryInfo),
LastId { id: Hash },
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
writer,
historian,
entry_info_subscribers: vec![],
}
}
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
// TODO: No need to bind().
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
for addr in &self.entry_info_subscribers {
let entry_info = EntryInfo {
id: entry.id,
num_hashes: entry.num_hashes,
num_events: entry.events.len() as u64,
};
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
let _res = socket.send_to(&data, addr);
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.historian.receiver.try_recv() {
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
self.notify_entry_info_subscribers(&entry);
}
self.last_id
}
/// Process Request items sent by clients.
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some((Response::Balance { key, val }, rsp_addr))
}
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
Request::Subscribe { subscriptions } => {
for subscription in subscriptions {
match subscription {
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
}
}
None
}
}
}
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
batch.push(more);
}
info!("batch len {}", batch.len());
Ok(batch)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
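// Split the batch into at most four roughly equal chunks ((len + 3) / 4 is
// ceiling division) and verify each chunk's signatures in parallel below.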
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
.into_par_iter()
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
}
Ok(())
}
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
fn process_packets(
&mut self,
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> Result<Vec<(Response, SocketAddr)>> {
let (trs, reqs) = Self::partition_requests(req_vers);
// Process the transactions in parallel and then log the successful ones.
for result in self.acc.process_verified_transactions(trs) {
if let Ok(tr) = result {
self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))?;
}
}
// Let validators know they should not attempt to process additional
// transactions in parallel.
self.historian.sender.send(Signal::Tick)?;
// Process the remaining requests serially.
let rsps = reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect();
Ok(rsps)
}
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
// Don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
// Write new entries to the ledger and notify subscribers.
obj.lock().unwrap().sync();
}
Ok(())
}
/// Process verified blobs, already in order
/// Respond with a signed hash of the state
fn replicate_state(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
for msgs in &blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
obj.lock().unwrap().acc.register_entry_id(&entry.id);
obj.lock()
.unwrap()
.acc
.process_verified_events(entry.events)?;
}
// TODO: respond back to the leader with a hash of the state
}
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// This service is the network leader.
/// Set `exit` to shutdown its threads.
pub fn serve(
obj: &Arc<Mutex<AccountantSkel<W>>>,
addr: &str,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
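// Thread layout: receiver -> verifier -> server -> responder, connected by
// channels; all four threads watch `exit` so the caller can shut them down.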
let read = UdpSocket::bind(addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver =
streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
let (blob_sender, blob_receiver) = channel();
let t_responder =
streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
let (verified_sender, verified_receiver) = channel();
let exit_ = exit.clone();
let t_verifier = spawn(move || loop {
let e = Self::verifier(&packet_receiver, &verified_sender);
if e.is_err() && exit_.load(Ordering::Relaxed) {
break;
}
});
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::process(
&skel,
&verified_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
// Assume this was a timeout, so sync any empty entries.
skel.lock().unwrap().sync();
if exit.load(Ordering::Relaxed) {
break;
}
}
});
Ok(vec![t_receiver, t_responder, t_server, t_verifier])
}
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
/// * `obj` - The accountant state.
/// * `rsubs` - The subscribers.
/// * `exit` - The exit signal.
/// # Remarks
/// The pipeline is constructed as follows:
/// 1. receive blobs from the network, these are out of order
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
/// c. ask the network for missing blobs, if erasure coding is insufficient
/// d. make sure that the blobs PoH sequences connect (TODO)
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
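// As a minimal sketch of that pipeline's shape (bare std mpsc channels
// standing in for the streamer stages; names here are illustrative, not this
// crate's API), the stages wire together like this:
//
// use std::sync::mpsc::channel;
// use std::thread::spawn;
//
// let (blob_tx, blob_rx) = channel::<u64>(); // 1. blobs arrive out of order
// let (window_tx, window_rx) = channel::<u64>(); // 3. contiguous window output
// let t_window = spawn(move || {
//     for blob in blob_rx {
//         // 3a-3c: ordering and erasure reconstruction would happen here
//         window_tx.send(blob).unwrap();
//     }
// });
// let t_server = spawn(move || {
//     for entry in window_rx {
//         let _ = entry; // 4. process the transaction state machine
//     }
// });
// blob_tx.send(42).unwrap();
// drop(blob_tx); // closing the channel shuts the pipeline down
// t_window.join().unwrap();
// t_server.join().unwrap();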
pub fn replicate(
obj: &Arc<Mutex<AccountantSkel<W>>>,
rsubs: subscribers::Subscribers,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(rsubs.me.addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
read,
blob_sender.clone(),
)?;
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let subs = Arc::new(RwLock::new(rsubs));
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
subs.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
// TODO: the packets coming out of blob_receiver need to be sent to the GPU
// and verified, then sent to the window, which does the erasure coding
// reconstruction
let t_window = streamer::window(
exit.clone(),
subs,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
}
}
#[cfg(test)]
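/// Serialize `reqs` into batches of `Packets`, at most NUM_PACKETS requests
/// per batch. Test-only helper.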
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
let mut out = vec![];
for rrs in reqs.chunks(packet::NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(rrs.len(), Default::default());
for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
out
}
#[cfg(test)]
mod tests {
use accountant_skel::{to_packets, Request};
use bincode::serialize;
use ecdsa;
use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
use transaction::{memfind, test_tx};
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use accountant_stub::AccountantStub;
use entry::Entry;
use futures::Future;
use historian::Historian;
use mint::Mint;
use plan::Plan;
use recorder::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
use transaction::Transaction;
use subscribers::{Node, Subscribers};
use streamer;
use std::sync::mpsc::channel;
use std::collections::VecDeque;
use hash::{hash, Hash};
use event::Event;
use entry;
use chrono::prelude::*;
#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
fn test_accounting_sequential_consistency() {
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
// Process a batch that includes a transaction that receives two tokens.
let alice = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Process a second batch that spends one of those tokens.
let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Collect the ledger and feed it to a new accountant.
skel.historian.sender.send(Signal::Tick).unwrap();
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
// Assert the user holds one token, not two. If the server only output one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let acc = Accountant::new(&mint);
for entry in entries {
acc.process_verified_events(entry.events).unwrap();
}
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
}
#[test]
fn test_accountant_bad_sig() {
let serve_port = 9002;
let send_port = 9003;
let addr = format!("127.0.0.1:{}", serve_port);
let send_addr = format!("127.0.0.1:{}", send_port);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = acc.transfer_signed(tr).unwrap();
let last_id = acc.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
tr2.data.tokens = 502;
tr2.data.plan = Plan::new_payment(502, bob_pubkey);
let _sig = acc.transfer_signed(tr2).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
fn setup() {
INIT.call_once(|| {
env_logger::init().unwrap();
});
}
#[test]
fn test_replicate() {
setup();
let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let leader_addr = leader_sock.local_addr().unwrap();
let me_addr = "127.0.0.1:9010".parse().unwrap();
let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let target_peer_addr = target_peer_sock.local_addr().unwrap();
let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
let subs = Subscribers::new(node_me, node_leader, &node_subs);
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target_peer_sock,
s_reader,
).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
source_peer_sock,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let alice = Mint::new(starting_balance);
let acc = Accountant::new(&alice);
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Transaction::new(
&alice.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 =
entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&me_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = acc.lock()
.unwrap()
.acc
.get_balance(&alice.keypair().pubkey())
.unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = acc.lock()
.unwrap()
.acc
.get_balance(&bob_keypair.pubkey())
.unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::{Accountant, MAX_ENTRY_IDS};
use accountant_skel::*;
use bincode::serialize;
use hash::hash;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::collections::HashSet;
use std::io::sink;
use std::time::Instant;
use transaction::Transaction;
#[bench]
fn process_packets_bench(_bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
// Create transactions between unrelated parties.
let txs = 100_000;
let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
let transactions: Vec<_> = (0..txs)
.into_par_iter()
.map(|i| {
// Seed the 'to' account and a cell for its signature.
let dummy_id = i % (MAX_ENTRY_IDS as i32);
let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
{
let mut last_ids = last_ids.lock().unwrap();
if !last_ids.contains(&last_id) {
last_ids.insert(last_id);
acc.register_entry_id(&last_id);
}
}
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
acc.process_verified_transaction(&tr).unwrap();
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
let req_vers = transactions
.into_iter()
.map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
.collect();
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
let now = Instant::now();
assert!(skel.process_packets(req_vers).is_ok());
let duration = now.elapsed();
let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
let tps = txs as f64 / sec;
// Ensure that all transactions were successfully logged.
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].events.len(), txs as usize);
println!("{} tps", tps);
}
}

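The skel above frames its wire protocol as one bincode-serialized `Request` or `Response` per UDP datagram, with no additional length prefix. A self-contained sketch of that encoding round trip, assuming the serde (with derive) and bincode 1.x crates, a hypothetical two-variant enum, and a raw `[u8; 32]` standing in for `PublicKey`:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Request {
    GetLastId,
    GetBalance { key: [u8; 32] },
}

fn main() {
    let req = Request::GetBalance { key: [0u8; 32] };
    // One datagram = one bincode-encoded value.
    let datagram = bincode::serialize(&req).expect("serialize");
    // A real client would now call socket.send_to(&datagram, addr).
    let roundtrip: Request = bincode::deserialize(&datagram).expect("deserialize");
    assert_eq!(req, roundtrip);
}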
src/accountant_stub.rs

@@ -1,201 +0,0 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
encoding of its messages is unstable and may change in future releases.
use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;
pub struct AccountantStub {
pub addr: String,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl AccountantStub {
/// Create a new AccountantStub that will interface with AccountantSkel
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
let stub = AccountantStub {
addr: addr.to_string(),
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
stub.init();
stub
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
self.socket.recv_from(&mut buf)?;
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
self.balances.insert(key, val);
}
Response::LastId { id } => {
self.last_id = Some(id);
}
Response::EntryInfo(entry_info) => {
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
ok(self.balances[pubkey].unwrap())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetId");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this stub instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use futures::Future;
use historian::Historian;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
// TODO: Figure out why this test sometimes hangs on TravisCI.
#[test]
fn test_accountant_stub() {
let addr = "127.0.0.1:9000";
let send_addr = "127.0.0.1:9001";
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
}

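`get_balance`, `get_last_id`, and `transaction_count` above all share one loop shape: send a request, then consume responses, applying each one's side effects, until a response of the expected variant arrives. The same pattern in isolation, with a hypothetical `Response` enum using `u64` keys in place of the real types:

use std::collections::HashMap;

#[derive(Debug)]
enum Response {
    Balance { key: u64, val: Option<i64> },
    LastId { id: u64 },
}

// Drain responses until a Balance for `want` shows up, caching everything
// we pass over, just as the stub caches balances and the last entry id.
fn balance_for(responses: impl IntoIterator<Item = Response>, want: u64) -> Option<i64> {
    let mut balances: HashMap<u64, Option<i64>> = HashMap::new();
    let mut _last_id: Option<u64> = None;
    for resp in responses {
        match resp {
            Response::Balance { key, val } => {
                balances.insert(key, val);
                if key == want {
                    return balances[&key];
                }
            }
            Response::LastId { id } => _last_id = Some(id),
        }
    }
    None // a real socket read would block here instead
}

fn main() {
    let wire = vec![
        Response::LastId { id: 1 },
        Response::Balance { key: 9, val: Some(0) },
        Response::Balance { key: 7, val: Some(500) },
    ];
    assert_eq!(balance_for(wire, 7), Some(500));
}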
src/bank.rs Normal file

@@ -0,0 +1,712 @@
//! The `bank` module tracks client balances and the progress of smart
//! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use entry::Entry;
use hash::Hash;
use mint::Mint;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;
use std::time::Instant;
use timing::duration_as_us;
use transaction::{Instruction, Plan, Transaction};
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
but requires clients to update their `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
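// As a sketch, the window this constant sizes behaves like the toy below
// (plain integers in place of `Hash` and `Signature`; see `register_entry_id`):
// evicting the oldest id also forgets every signature reserved under it, so a
// transaction referencing that id can no longer be deduplicated and must be
// rejected outright.
//
// use std::collections::{HashMap, HashSet, VecDeque};
// let mut last_ids: VecDeque<u64> = VecDeque::new();
// let mut last_ids_sigs: HashMap<u64, HashSet<u64>> = HashMap::new();
// for id in 0..=(MAX_ENTRY_IDS as u64) {
//     if last_ids.len() >= MAX_ENTRY_IDS {
//         let old = last_ids.pop_front().unwrap();
//         last_ids_sigs.remove(&old); // id 0 falls out here
//     }
//     last_ids_sigs.insert(id, HashSet::new());
//     last_ids.push_back(id);
// }
// assert!(!last_ids_sigs.contains_key(&0)); // `last_id` 0 is now rejected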
/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
/// Attempt to debit from `PublicKey`, but found no record of a prior credit.
AccountNotFound(PublicKey),
/// The requested debit from `PublicKey` has the potential to draw the balance
/// below zero. This can occur when a debit and credit are processed in parallel.
/// The bank may reject the debit or push it to a future entry.
InsufficientFunds(PublicKey),
/// The bank has seen `Signature` before. This can occur under normal operation
/// when a UDP packet is duplicated, as a user error from a client not updating
/// its `last_id`, or as a double-spend attack.
DuplicateSiganture(Signature),
/// The bank has not seen the given `last_id` or the transaction is too old and
/// the `last_id` has been discarded.
LastIdNotFound(Hash),
/// The transaction is invalid and has requested a debit or credit of negative
/// tokens.
NegativeTokens,
}
pub type Result<T> = result::Result<T, BankError>;
/// The state of all accounts and contracts after processing its entries.
pub struct Bank {
/// A map of account public keys to the balance in that account.
balances: RwLock<HashMap<PublicKey, i64>>,
/// A map of smart contract transaction signatures to what remains of the payment
/// plan. Each transaction that targets the plan should cause it to be reduced.
/// Once it cannot be reduced, final payments are made and it is discarded.
pending: RwLock<HashMap<Signature, Plan>>,
/// A FIFO queue of `last_id` items. A transaction is rejected when its `last_id`
/// is so old that it has been pulled out of the queue.
last_ids: RwLock<VecDeque<Hash>>,
/// Mapping of hashes to signature sets. The bank uses this data to
/// reject transactions with signatures it's seen before.
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
/// outside this set will be discarded. Note that if validators do not have the
/// same set as leaders, they may interpret the ledger differently.
time_sources: RwLock<HashSet<PublicKey>>,
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
/// to every smart contract when it enters the system. If it is waiting on a
/// timestamp witness before that timestamp, the bank will execute it immediately.
last_time: RwLock<DateTime<Utc>>,
/// The number of transactions the bank has processed without error since the
/// start of the ledger.
transaction_count: AtomicUsize,
}
impl Bank {
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Bank {
balances: RwLock::new(HashMap::new()),
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
last_ids_sigs: RwLock::new(HashMap::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
transaction_count: AtomicUsize::new(0),
};
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
bank
}
/// Create a Bank with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let bank = Self::new_from_deposit(&deposit);
bank.register_entry_id(&mint.last_id());
bank
}
/// Commit funds to the `payment.to` party.
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
if balances.contains_key(&payment.to) {
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
} else {
balances.insert(payment.to, payment.tokens);
}
}
/// Return the last entry ID registered.
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
*last_item
}
/// Store the given signature. The bank will reject any transaction with the same signature.
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
if let Some(sig) = signatures.get(sig) {
return Err(BankError::DuplicateSiganture(*sig));
}
signatures.insert(*sig);
Ok(())
}
/// Forget the given `signature` because its transaction was rejected.
fn forget_signature(signatures: &mut HashSet<Signature>, signature: &Signature) {
signatures.remove(signature);
}
/// Forget the given `signature` with `last_id` because the transaction was rejected.
fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
if let Some(entry) = self.last_ids_sigs
.write()
.expect("'last_ids' read lock in forget_signature_with_last_id")
.get_mut(last_id)
{
Self::forget_signature(entry, signature);
}
}
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
if let Some(entry) = self.last_ids_sigs
.write()
.expect("'last_ids' read lock in reserve_signature_with_last_id")
.get_mut(last_id)
{
return Self::reserve_signature(entry, signature);
}
Err(BankError::LastIdNotFound(*last_id))
}
/// Tell the bank which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
the oldest ones once its internal cache is full. Once booted, the
/// bank will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids
.write()
.expect("'last_ids' write lock in register_entry_id");
let mut last_ids_sigs = self.last_ids_sigs
.write()
.expect("last_ids_sigs write lock");
if last_ids.len() >= MAX_ENTRY_IDS {
let id = last_ids.pop_front().unwrap();
last_ids_sigs.remove(&id);
}
last_ids_sigs.insert(*last_id, HashSet::new());
last_ids.push_back(*last_id);
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
let option = bals.get_mut(&tx.from);
if option.is_none() {
return Err(BankError::AccountNotFound(tx.from));
}
let bal = option.unwrap();
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
if let Instruction::NewContract(contract) = &tx.instruction {
if contract.tokens < 0 {
return Err(BankError::NegativeTokens);
}
if *bal < contract.tokens {
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
return Err(BankError::InsufficientFunds(tx.from));
}
*bal -= contract.tokens;
};
Ok(())
}
/// Apply only a transaction's credits. Credits from multiple transactions
/// may safely be applied in parallel.
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
match &tx.instruction {
Instruction::NewContract(contract) => {
let mut plan = contract.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("timestamp creation in apply_credits")));
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment, balances);
} else {
let mut pending = self.pending
.write()
.expect("'pending' write lock in apply_credits");
pending.insert(tx.sig, plan);
}
}
Instruction::ApplyTimestamp(dt) => {
let _ = self.apply_timestamp(tx.from, *dt);
}
Instruction::ApplySignature(tx_sig) => {
let _ = self.apply_signature(tx.from, *tx_sig);
}
}
}
/// Process a Transaction. If it contains a payment plan that requires a witness
/// to progress, the payment plan will be stored in the bank.
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
let bals = &mut self.balances.write().unwrap();
self.apply_debits(tx, bals)?;
self.apply_credits(tx, bals);
self.transaction_count.fetch_add(1, Ordering::Relaxed);
Ok(())
}
/// Process a batch of transactions.
#[must_use]
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
let bals = &mut self.balances.write().unwrap();
debug!("processing Transactions {}", txs.len());
let txs_len = txs.len();
let now = Instant::now();
let results: Vec<_> = txs.into_iter()
.map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
.collect(); // Calling collect() here forces all debits to complete before moving on.
let debits = now.elapsed();
let now = Instant::now();
let res: Vec<_> = results
.into_iter()
.map(|result| {
result.map(|tx| {
self.apply_credits(&tx, bals);
tx
})
})
.collect();
debug!(
"debits: {} us credits: {:?} us tx: {}",
duration_as_us(&debits),
duration_as_us(&now.elapsed()),
txs_len
);
let mut tx_count = 0;
for r in &res {
if r.is_ok() {
tx_count += 1;
} else {
info!("tx error: {:?}", r);
}
}
self.transaction_count
.fetch_add(tx_count, Ordering::Relaxed);
res
}
/// Process an ordered list of entries.
pub fn process_entries<I>(&self, entries: I) -> Result<()>
where
I: IntoIterator<Item = Entry>,
{
for entry in entries {
if !entry.transactions.is_empty() {
for result in self.process_transactions(entry.transactions) {
result?;
}
}
self.register_entry_id(&entry.id);
}
Ok(())
}
/// Process a Witness Signature. Any payment plans waiting on this signature
/// will progress one step.
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending
.write()
.expect("write() in apply_signature")
.entry(tx_sig)
{
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(payment) = e.get().final_payment() {
self.apply_payment(&payment, &mut self.balances.write().unwrap());
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
/// will progress one step.
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time
.read()
.expect("'last_time' read lock on first timestamp check")
== Utc.timestamp(0, 0)
{
self.time_sources
.write()
.expect("'time_sources' write lock on first timestamp")
.insert(from);
}
if self.time_sources
.read()
.expect("'time_sources' read lock")
.contains(&from)
{
if dt > *self.last_time.read().expect("'last_time' read lock") {
*self.last_time.write().expect("'last_time' write lock") = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending
.write()
.expect("'pending' write lock in apply_timestamp");
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("'last_time' read lock when creating timestamp")));
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment, &mut self.balances.write().unwrap());
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tx = Transaction::new(keypair, to, n, last_id);
let sig = tx.sig;
self.process_transaction(&tx).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tx.sig;
self.process_transaction(&tx).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances
.read()
.expect("'balances' read lock in get_balance");
bals.get(pubkey).map(|x| *x)
}
pub fn transaction_count(&self) -> usize {
self.transaction_count.load(Ordering::Relaxed)
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use entry::next_entry;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_two_payments_to_one_party() {
let mint = Mint::new(10_000);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(bank.last_id(), mint.last_id());
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_negative_tokens() {
let mint = Mint::new(1);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(
bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::NegativeTokens)
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
assert_eq!(
bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
Err(BankError::AccountNotFound(keypair.pubkey()))
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_insufficient_funds() {
let mint = Mint::new(11_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(
bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::InsufficientFunds(mint.pubkey()))
);
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint.keypair().pubkey();
assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let mint = Mint::new(10_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// tx count is 1, because debits were applied.
assert_eq!(bank.transaction_count(), 1);
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, acknowledge the time in the condition occurred and
// that pubkey's funds are now available.
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
assert_eq!(bank.get_balance(&pubkey), Some(1));
// tx count is still 1, because we chose not to include timestamp
// transactions in the tx count.
assert_eq!(bank.transaction_count(), 1);
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(bank.get_balance(&pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
// `dt` is already in the past, so this transfer should be processed immediately.
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Assert the debit counts as a transaction.
assert_eq!(bank.transaction_count(), 1);
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
bank.apply_signature(mint.pubkey(), sig).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
assert_eq!(bank.get_balance(&pubkey), None);
// Assert cancel doesn't cause count to go backward.
assert_eq!(bank.transaction_count(), 1);
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
}
#[test]
fn test_duplicate_transaction_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
assert!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::DuplicateSiganture(sig))
);
}
#[test]
fn test_forget_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.unwrap();
bank.forget_signature_with_last_id(&sig, &mint.last_id());
assert!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
}
#[test]
fn test_reject_old_last_id() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::LastIdNotFound(mint.last_id()))
);
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
let txs = vec![tx0, tx1];
let results = bank.process_transactions(txs);
assert!(results[1].is_err());
// Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1);
}
#[test]
fn test_process_empty_entry_is_registered() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
let entry = next_entry(&mint.last_id(), 1, vec![]);
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
// First, ensure the TX is rejected because of the unregistered last ID
assert_eq!(
bank.process_transaction(&tx),
Err(BankError::LastIdNotFound(entry.id))
);
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
bank.process_entries(vec![entry]).unwrap();
assert!(bank.process_transaction(&tx).is_ok());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use bincode::serialize;
use hash::hash;
use rayon::prelude::*;
use signature::KeyPairUtil;
#[bench]
fn bench_process_transaction(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
bank.process_transaction(&tx).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
bank.process_transaction(&tx).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
sigs.clear();
}
assert!(
bank.process_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}
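
The tests above walk the timelocked-payment lifecycle one assertion at a time. As a reading aid, here is a hedged standalone sketch (not part of this diff) that strings the same Bank calls together, assuming only the APIs already shown in this file:

extern crate chrono;
extern crate solana;

use chrono::prelude::*;
use solana::bank::Bank;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    let mint = Mint::new(2);
    let bank = Bank::new(&mint);
    let payee = KeyPair::new().pubkey();
    let dt = Utc::now();

    // Debit the mint now, but lock the token up behind a timestamp witness.
    let sig = bank.transfer_on_date(1, &mint.keypair(), payee, dt, mint.last_id())
        .unwrap();
    assert_eq!(bank.get_balance(&payee), None); // not yet delivered

    // A timestamp at or after `dt` releases the funds to the payee...
    bank.apply_timestamp(mint.pubkey(), dt).unwrap();
    assert_eq!(bank.get_balance(&payee), Some(1));

    // ...whereas before that witness arrives, the sender could instead
    // cancel with the transaction's signature:
    // bank.apply_signature(mint.pubkey(), sig).unwrap();
    let _ = sig;
}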

src/banking_stage.rs (new file, 430 lines)

@@ -0,0 +1,430 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
/// Handle to the stage's thread.
pub thread_hdl: JoinHandle<()>,
/// Output receiver for the following stage.
pub signal_receiver: Receiver<Signal>,
}
impl BankingStage {
/// Create the stage using `bank`. Exit when either `exit` is set or
/// when `verified_receiver` or the stage's output receiver is dropped.
/// Discard input packets using `packet_recycler` to minimize memory
/// allocations in a previous stage such as the `fetch_stage`.
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_recycler: packet::PacketRecycler,
) -> Self {
let (signal_sender, signal_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-banking-stage".to_string())
.spawn(move || loop {
let e = Self::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
})
.unwrap();
BankingStage {
thread_hdl,
signal_receiver,
}
}
/// Convert the transactions from a blob of binary data to a vector of transactions and
/// a currently unused `SocketAddr` that could be used to send a response.
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
/// Discard packets via `packet_recycler`.
fn process_packets(
bank: Arc<Bank>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
signal_sender: &Sender<Signal>,
packet_recycler: &packet::PacketRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
let mms = verified_receiver.recv_timeout(timer)?;
let mut reqs_len = 0;
let mms_len = mms.len();
info!(
"@{:?} process start stalled for: {:?}ms batches: {}",
timing::timestamp(),
timing::duration_as_ms(&recv_start.elapsed()),
mms.len(),
);
let count = mms.iter().map(|x| x.1.len()).sum();
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
let proc_start = Instant::now();
for (msgs, vers) in mms {
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
reqs_len += transactions.len();
let transactions = transactions
.into_iter()
.zip(vers)
.filter_map(|(tx, ver)| match tx {
None => None,
Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
Some(tx)
} else {
None
},
})
.collect();
debug!("process_transactions");
let results = bank.process_transactions(transactions);
let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
signal_sender.send(Signal::Transactions(transactions))?;
debug!("done process_transactions");
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
mms_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
inc_counter!(COUNTER, count, proc_start);
Ok(())
}
}
// TODO: When banking is pulled out of RequestStage, add this test back in.
//use bank::Bank;
//use entry::Entry;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//use transaction::Transaction;
//
//#[cfg(test)]
//mod tests {
// use bank::Bank;
// use mint::Mint;
// use signature::{KeyPair, KeyPairUtil};
// use transaction::Transaction;
//
// #[test]
// // TODO: Move this test to banking_stage. Calling process_transactions() directly
// // defeats the purpose of this test.
// fn test_banking_sequential_consistency() {
// // In this attack we'll demonstrate that a verifier can interpret the ledger
// // differently if either the server doesn't signal the ledger to add an
// // Entry OR if the verifier tries to parallelize across multiple Entries.
// let mint = Mint::new(2);
// let bank = Bank::new(&mint);
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// // Process a batch that includes a transaction that receives two tokens.
// let alice = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
// let transactions = vec![tx];
// let entry0 = banking_stage.process_transactions(transactions).unwrap();
//
// // Process a second batch that spends one of those tokens.
// let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
// let transactions = vec![tx];
// let entry1 = banking_stage.process_transactions(transactions).unwrap();
//
// // Collect the ledger and feed it to a new bank.
// let entries = vec![entry0, entry1];
//
// // Assert the user holds one token, not two. If the server only output one
// // entry, then the second transaction will be rejected, because it drives
// // the account balance below zero before the credit is added.
// let bank = Bank::new(&mint);
// for entry in entries {
// assert!(
// bank
// .process_transactions(entry.transactions)
// .into_iter()
// .all(|x| x.is_ok())
// );
// }
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
// }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
// extern crate test;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// #[bench]
// fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_transaction(&tx).unwrap();
//
// let rando1 = KeyPair::new();
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_transaction(&tx).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(banking_stage.historian_input);
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].transactions.len(), txs as usize);
//
// println!("{} tps", tps);
// }
//}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use banking_stage::BankingStage;
use logger;
use mint::Mint;
use packet::{to_packets_chunked, PacketRecycler};
use rayon::prelude::*;
use record_stage::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use transaction::Transaction;
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
let mut total = 0;
for _ in 0..batches {
let signal = receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
total += transactions.len();
} else {
assert!(false);
}
}
assert_eq!(total, ref_tx_count);
}
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
logger::setup();
let tx = 30_000_usize;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let num_dst_accounts = 8 * 1024;
let num_src_accounts = 8 * 1024;
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
let dstkeys: Vec<_> = (0..num_dst_accounts)
.map(|_| KeyPair::new().pubkey())
.collect();
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
let transactions: Vec<_> = (0..tx)
.map(|i| {
Transaction::new(
&srckeys[i % num_src_accounts],
dstkeys[i % num_dst_accounts],
i as i64,
mint.last_id(),
)
})
.collect();
info!("created transactions");
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let setup_transactions: Vec<_> = (0..num_src_accounts)
.map(|i| {
Transaction::new(
&mint.keypair(),
srckeys[i].pubkey(),
mint_total / num_src_accounts as i64,
mint.last_id(),
)
})
.collect();
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified_setup.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified.len(), &signal_receiver, tx);
});
}
#[bench]
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
logger::setup();
let tx = 20_000_usize;
let mint = Mint::new(1_000_000_000_000);
let mut pubkeys = Vec::new();
let num_keys = 8;
for _ in 0..num_keys {
pubkeys.push(KeyPair::new().pubkey());
}
let transactions: Vec<_> = (0..tx)
.into_par_iter()
.map(|i| {
Transaction::new(
&mint.keypair(),
pubkeys[i % num_keys],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified.len(), &signal_receiver, tx);
});
}
}
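
As a hedged orientation sketch (not part of this diff), this is roughly how a caller such as the server would wire the stage up, using only the constructor signature above; the surrounding plumbing is an assumption:

extern crate solana;

use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::PacketRecycler;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;

fn main() {
    let bank = Arc::new(Bank::new(&Mint::new(1_000)));
    let exit = Arc::new(AtomicBool::new(false));
    // In the real pipeline, `verified_sender` is fed by the signature
    // verification stage with batches of (packets, verification flags).
    let (verified_sender, verified_receiver) = channel();
    let stage = BankingStage::new(bank, exit, verified_receiver, PacketRecycler::default());
    // Signals flow out of `stage.signal_receiver` toward the record stage;
    // `stage.thread_hdl` is joined on shutdown.
    let _ = (verified_sender, stage);
}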


@@ -1,25 +1,34 @@
extern crate futures;
extern crate atty;
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use futures::Future;
use atty::{is, Stream};
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant_stub::AccountantStub;
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
use solana::hash::Hash;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::UdpSocket;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;
use untrusted::Input;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
@@ -30,15 +39,125 @@ fn print_usage(program: &str, opts: Options) {
print!("{}", opts.usage(&brief));
}
fn sample_tx_count(
thread_addr: Arc<RwLock<SocketAddr>>,
exit: Arc<AtomicBool>,
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
first_count: u64,
v: ReplicatedData,
sample_period: u64,
) {
let mut client = mk_client(&thread_addr, &v);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0;
let mut total;
loop {
let tx_count = client.transaction_count();
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("{}: Transactions processed {}", v.transactions_addr, sample);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
if tps > max_tps {
max_tps = tps;
}
println!("{}: {:.2} tps", v.transactions_addr, tps);
total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
v.transactions_addr, total
);
sleep(Duration::new(sample_period, 0));
if exit.load(Ordering::Relaxed) {
println!("exiting validator thread");
maxes.write().unwrap().push((max_tps, total));
break;
}
}
}
fn generate_and_send_txs(
client: &mut ThinClient,
keypair_pairs: &Vec<&[KeyPair]>,
leader: &ReplicatedData,
txs: i64,
last_id: &mut Hash,
threads: usize,
client_addr: Arc<RwLock<SocketAddr>>,
) {
println!(
"Signing transactions... {} {}",
keypair_pairs.len(),
keypair_pairs[0].len()
);
let signing_start = Instant::now();
let transactions: Vec<_> = keypair_pairs
.par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
.collect();
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
bsps * 1_000_000_f64,
nsps / 1_000_f64,
duration_as_ms(&duration),
);
println!("Transfering {} transactions in {} batches", txs, threads);
let transfer_start = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|txs| {
println!(
"Transferring 1 unit {} times... to {:?}",
txs.len(),
leader.transactions_addr
);
let client = mk_client(&client_addr, &leader);
for tx in txs {
client.transfer_signed(tx.clone()).unwrap();
}
});
println!(
"Transfer done. {:?} ms {} tps",
duration_as_ms(&transfer_start.elapsed()),
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
);
*last_id = client.get_last_id();
}
fn main() {
env_logger::init();
let mut threads = 4usize;
let mut addr: String = "127.0.0.1:8000".to_string();
let mut send_addr: String = "127.0.0.1:8001".to_string();
let mut num_nodes = 1usize;
let mut time_sec = 60;
let mut opts = Options::new();
opts.optopt("s", "", "server address", "host:port");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", "4");
opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client port", "port");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optopt(
"s",
"",
"send transactions for this many seconds",
&format!("{}", time_sec),
);
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
@@ -54,17 +173,44 @@ fn main() {
print_usage(&program, opts);
return;
}
if matches.opt_present("s") {
addr = matches.opt_str("s").unwrap();
}
let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
if matches.opt_present("c") {
send_addr = matches.opt_str("c").unwrap();
let port = matches.opt_str("c").unwrap().parse().unwrap();
addr.set_port(port);
}
if matches.opt_present("d") {
addr.set_ip(get_ip_addr().unwrap());
}
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}
if matches.opt_present("s") {
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
}
if stdin_isatty() {
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes,
&mut c_threads,
);
assert_eq!(validators.len(), num_nodes);
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
@@ -81,63 +227,173 @@ fn main() {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let socket = UdpSocket::bind(&send_addr).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let mut client = mk_client(&client_addr, &leader);
println!("Get last ID...");
let last_id = acc.get_last_id().wait().unwrap();
let mut last_id = client.get_last_id();
println!("Got last ID {:?}", last_id);
let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
println!("Creating keypairs...");
let txs = demo.users.len() / 2;
let keypairs: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
.collect();
let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
let first_count = client.transaction_count();
println!("initial count {}", first_count);
println!("Sampling tps every second...",);
// Set up a thread per validator to sample every period and
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
let v_threads: Vec<_> = validators
.into_iter()
.map(|v| {
let exit = signal.clone();
let thread_addr = client_addr.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
})
.unwrap()
})
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
// generate and send transactions for the specified duration
let time = Duration::new(time_sec, 0);
let now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&keypair_pairs,
&leader,
txs,
&mut last_id,
threads,
client_addr.clone(),
);
}
// Stop the sampling threads so they will collect the stats
signal.store(true, Ordering::Relaxed);
for t in v_threads {
t.join().unwrap();
}
// Compute/report stats
let mut max_of_maxes = 0.0;
let mut total_txs = 0;
for (max, txs) in maxes.read().unwrap().iter() {
if *max > max_of_maxes {
max_of_maxes = *max;
}
total_txs += *txs;
}
println!(
"Done. {} thousand signatures per second, {}us per signature",
bsps * 1_000_000_f64,
nsps / 1_000_f64
"\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
max_of_maxes,
sample_period,
total_txs,
maxes.read().unwrap().len()
);
let initial_tx_count = acc.transaction_count();
println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times...", trs.len());
let send_addr = "0.0.0.0:0";
let socket = UdpSocket::bind(send_addr).unwrap();
let acc = AccountantStub::new(&addr, socket);
for tr in trs {
acc.transfer_signed(tr.clone()).unwrap();
// join the crdt client threads
for t in c_threads {
t.join().unwrap();
}
});
println!("Waiting for half the transactions to complete...",);
let mut tx_count = acc.transaction_count();
while tx_count < transactions.len() as u64 / 2 {
tx_count = acc.transaction_count();
}
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("Done. {} tps", tps);
}
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
requests_socket,
r.transactions_addr,
transactions_socket,
)
}
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
daddr,
daddr,
daddr,
daddr,
);
(node, gossip)
}
fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
// let's spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
spy_ref.clone(),
window.clone(),
spy_gossip,
gossip_send_socket,
exit.clone(),
).expect("DataReplicator::new");
let mut rv = vec![];
// wait for the network to converge; 30 seconds should be plenty
for _ in 0..30 {
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.cloned()
.collect();
if v.len() >= num_nodes {
println!("CONVERGED!");
rv.extend(v.into_iter());
break;
}
sleep(Duration::new(1, 0));
}
threads.extend(ncp.thread_hdls.into_iter());
rv
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
}
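
The per-sample math inside sample_tx_count() above reduces to a single expression; here is a minimal standalone restatement (a sketch, not code from this diff) with a worked check:

use std::time::Duration;

// TPS is the transaction-count delta scaled by the elapsed time in nanoseconds.
fn tps(sample: u64, elapsed: Duration) -> f64 {
    let ns = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    (sample * 1_000_000_000) as f64 / ns as f64
}

fn main() {
    // 5_000 transactions observed over a 2-second sample period is 2_500 TPS.
    assert!((tps(5_000, Duration::new(2, 0)) - 2_500.0).abs() < 1e-6);
}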

src/bin/drone.rs (new file, 168 lines)

@@ -0,0 +1,168 @@
extern crate atty;
extern crate bincode;
extern crate env_logger;
extern crate getopts;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;
use atty::{is, Stream as atty_stream};
use bincode::deserialize;
use getopts::Options;
use solana::crdt::{get_ip_addr, ReplicatedData};
use solana::drone::{Drone, DroneRequest};
use solana::mint::MintDemo;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optopt(
"t",
"",
"time",
"time slice over which to limit token requests to drone",
);
opts.optopt("c", "", "cap", "request limit for time slice");
opts.optopt("l", "", "leader", "leader.json");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let time_slice: Option<u64>;
if matches.opt_present("t") {
time_slice = matches
.opt_str("t")
.expect("unexpected string from input")
.parse()
.ok();
} else {
time_slice = None;
}
let request_cap: Option<u64>;
if matches.opt_present("c") {
request_cap = matches
.opt_str("c")
.expect("unexpected string from input")
.parse()
.ok();
} else {
request_cap = None;
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
if is(atty_stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mint_keypair = demo.mint.keypair();
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
drone_addr.set_ip(get_ip_addr().unwrap());
let drone = Arc::new(Mutex::new(Drone::new(
mint_keypair,
drone_addr,
leader.transactions_addr,
leader.requests_addr,
time_slice,
request_cap,
)));
let drone1 = drone.clone();
thread::spawn(move || loop {
let time = drone1.lock().unwrap().time_slice;
thread::sleep(time);
drone1.lock().unwrap().clear_request_count();
});
let socket = TcpListener::bind(&drone_addr).unwrap();
println!("Drone started. Listening on: {}", drone_addr);
let done = socket
.incoming()
.map_err(|e| println!("failed to accept socket; error = {:?}", e))
.for_each(move |socket| {
let drone2 = drone.clone();
// let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
let framed = BytesCodec::new().framed(socket);
let (_writer, reader) = framed.split();
let processor = reader
.for_each(move |bytes| {
let req: DroneRequest =
deserialize(&bytes).expect("deserialize packet in drone");
println!("Airdrop requested...");
// let res = drone2.lock().unwrap().check_rate_limit(client_ip);
let res1 = drone2.lock().unwrap().send_airdrop(req);
match res1 {
Ok(_) => println!("Airdrop sent!"),
Err(_) => println!("Request limit reached for this time slice"),
}
Ok(())
})
.and_then(|()| {
println!("Socket received FIN packet and closed connection");
Ok(())
})
.or_else(|err| {
println!("Socket closed with error: {:?}", err);
Err(err)
})
.then(|result| {
println!("Socket closed with result: {:?}", result);
Ok(())
});
tokio::spawn(processor)
});
tokio::run(done);
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
}
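
For context, a hypothetical client-side counterpart (not in this diff): serialize a DroneRequest with bincode, matching the framed reader above, and write it over TCP to the drone's default port. The address and amount are assumptions:

extern crate bincode;
extern crate solana;

use bincode::serialize;
use solana::drone::DroneRequest;
use solana::signature::{KeyPair, KeyPairUtil};
use std::io::Write;
use std::net::TcpStream;

fn main() {
    let me = KeyPair::new();
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: 50,
        client_public_key: me.pubkey(),
    };
    // 9900 is the default drone port used by the binary above.
    let mut stream = TcpStream::connect("127.0.0.1:9900").expect("connect to drone");
    let bytes = serialize(&req).expect("serialize DroneRequest");
    stream.write_all(&bytes).expect("send airdrop request");
}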


@@ -0,0 +1,52 @@
extern crate getopts;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use std::env;
use std::io;
use std::net::SocketAddr;
use std::process::exit;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: {} [options]\n\n", program);
brief += " Create a solana fullnode config file\n";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut opts = Options::new();
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("d") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
// we need all the receiving sockets to be bound within the expected
// port range that we open on AWS
let repl_data = ReplicatedData::new_leader(&bind_addr);
let stdout = io::stdout();
serde_json::to_writer(stdout, &repl_data).expect("serialize");
}

src/bin/fullnode.rs (new file, 180 lines)

@@ -0,0 +1,180 @@
extern crate atty;
extern crate env_logger;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use getopts::Options;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, Write};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optflag("h", "help", "print help");
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
opts.optopt(
"t",
"",
"testnet; connect to the network at this gossip entry point",
"HOST:PORT",
);
opts.optopt(
"o",
"",
"output log to FILE, defaults to stdout (ignored by validators)",
"FILE",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let stdin = stdin();
let mut entries = stdin.lock().lines().map(|line| {
let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
entry
});
eprintln!("done parsing...");
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().expect("invalid ledger: empty");
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries
.next()
.expect("invalid ledger: need at least 2 entries");
let tx = &entry1.transactions[0];
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
contract.plan.final_payment()
} else {
None
}.expect("invalid ledger, needs to start with a contract");
eprintln!("creating bank...");
let bank = Bank::new_from_deposit(&deposit);
bank.register_entry_id(&entry0.id);
bank.register_entry_id(&entry1.id);
eprintln!("processing entries...");
bank.process_entries(entries).expect("process_entries");
eprintln!("creating networking stack...");
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
if matches.opt_present("l") {
let path = matches.opt_str("l").unwrap();
if let Ok(file) = File::open(path.clone()) {
if let Ok(data) = serde_json::from_reader(file) {
repl_data = data;
} else {
eprintln!("failed to parse {}", path);
exit(1);
}
} else {
eprintln!("failed to read {}", path);
exit(1);
}
}
let exit = Arc::new(AtomicBool::new(false));
let threads = if matches.opt_present("t") {
let testnet_address_string = matches.opt_str("t").unwrap();
eprintln!(
"starting validator... {} connecting to {}",
repl_data.requests_addr, testnet_address_string
);
let testnet_addr = testnet_address_string.parse().unwrap();
let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
let s = Server::new_validator(
bank,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
UdpSocket::bind(repl_data.repair_addr).unwrap(),
network_entry_point,
exit.clone(),
);
s.thread_hdls
} else {
eprintln!("starting leader... {}", repl_data.requests_addr);
repl_data.current_leader_id = repl_data.id.clone();
let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
let path = matches.opt_str("o").unwrap();
Box::new(
File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
)
} else {
Box::new(stdout())
};
let server = Server::new_leader(
bank,
//Some(Duration::from_millis(1000)),
None,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind(repl_data.transactions_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
exit.clone(),
outfile,
);
server.thread_hdls
};
eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
for t in threads {
t.join().expect("join");
}
}


@@ -1,25 +1,22 @@
extern crate isatty;
extern crate atty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use isatty::stdin_isatty;
use atty::{is, Stream};
use rayon::prelude::*;
use solana::accountant::MAX_ENTRY_IDS;
use solana::entry::{create_entry, next_tick};
use solana::event::Event;
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::next_entry;
use solana::ledger::next_entries;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;
use untrusted::Input;
// Generate a ledger with lots and lots of accounts.
fn main() {
if stdin_isatty() {
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
@@ -36,38 +33,50 @@ fn main() {
exit(1);
});
let num_accounts = demo.users.len();
let last_id = demo.mint.last_id();
let mint_keypair = demo.mint.keypair();
let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
let num_accounts = demo.num_accounts;
let tokens_per_user = 500;
eprintln!("Signing {} transactions...", num_accounts);
let events: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, tokens)| {
let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
Event::Transaction(tr)
})
.collect();
let keypairs = rnd.gen_n_keypairs(num_accounts);
let mint_keypair = demo.mint.keypair();
let last_id = demo.mint.last_id();
for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap());
}
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = create_entry(&last_id, 0, events);
println!("{}", serde_json::to_string(&entry).unwrap());
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
let mut last_ids = vec![];
for _ in 0..MAX_ENTRY_IDS {
let entry = next_tick(&last_id, 1);
let entry = next_entry(&last_id, 1, vec![]);
last_id = entry.id;
last_ids.push(last_id);
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
eprintln!("Creating {} transactions...", num_accounts);
let transactions: Vec<_> = keypairs
.into_par_iter()
.enumerate()
.map(|(i, rando)| {
let last_id = last_ids[i % MAX_ENTRY_IDS];
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entries = next_entries(&last_id, 0, transactions);
for entry in entries {
println!("{}", serde_json::to_string(&entry).unwrap());
}
}


@@ -1,16 +1,16 @@
//! A command-line executable for generating the chain's genesis block.
extern crate isatty;
extern crate atty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use atty::{is, Stream};
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::process::exit;
fn main() {
if stdin_isatty() {
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}


@@ -1,37 +0,0 @@
extern crate solana;
use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;
fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
let signal0 = Signal::Event(Event::Transaction(tr));
hist.sender.send(signal0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}


@@ -1,16 +1,21 @@
extern crate atty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
use rayon::prelude::*;
use ring::rand::SystemRandom;
use atty::{is, Stream};
use solana::mint::{Mint, MintDemo};
use solana::signature::KeyPair;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap();
@@ -18,16 +23,7 @@ fn main() {
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let rnd = SystemRandom::new();
let users: Vec<_> = (0..num_accounts)
.into_par_iter()
.map(|_| {
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
(pkcs8, tokens_per_user)
})
.collect();
let demo = MintDemo { mint, users };
let demo = MintDemo { mint, num_accounts };
println!("{}", serde_json::to_string(&demo).unwrap());
}


@@ -1,15 +1,15 @@
extern crate isatty;
extern crate atty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use atty::{is, Stream};
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if stdin_isatty() {
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}


@@ -1,111 +0,0 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
use std::env;
use std::io::{stdin, stdout, Read};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init().unwrap();
let mut port = 8000u16;
let mut opts = Options::new();
opts.optopt("p", "", "port", "port");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("p") {
port = matches.opt_str("p").unwrap().parse().expect("port");
}
let addr = format!("0.0.0.0:{}", port);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let mut entries = buffer.lines().map(|line| {
serde_json::from_str(&line).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
})
});
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1: Entry = entries.next().unwrap();
let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
tr.data.plan.final_payment()
} else {
None
};
let acc = Accountant::new_from_deposit(&deposit.unwrap());
acc.register_entry_id(&entry0.id);
acc.register_entry_id(&entry1.id);
let mut last_id = entry1.id;
for entry in entries {
last_id = entry.id;
acc.process_verified_events(entry.events).unwrap();
acc.register_entry_id(&last_id);
}
let historian = Historian::new(&last_id, Some(1000));
let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(
acc,
last_id,
stdout(),
historian,
)));
let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
eprintln!("Ready. Listening on {}", addr);
for t in threads {
t.join().expect("join");
}
}

src/blob_fetch_stage.rs (new file, 47 lines)

@@ -0,0 +1,47 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends them to a channel.
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
pub struct BlobFetchStage {
pub blob_receiver: streamer::BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl BlobFetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
) -> Self {
Self::new_multi_socket(vec![socket], exit, blob_recycler)
}
pub fn new_multi_socket(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
) -> Self {
let (blob_sender, blob_receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
.map(|socket| {
streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
socket,
blob_sender.clone(),
).expect("blob receiver init")
})
.collect();
BlobFetchStage {
blob_receiver,
thread_hdls,
}
}
}
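
A hedged usage sketch (not part of this diff); BlobRecycler::default() is assumed by analogy with the PacketRecycler used elsewhere in this change:

extern crate solana;

use solana::blob_fetch_stage::BlobFetchStage;
use solana::packet::BlobRecycler;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;

fn main() {
    let socket = UdpSocket::bind("0.0.0.0:0").expect("bind");
    let exit = Arc::new(AtomicBool::new(false));
    let stage = BlobFetchStage::new(socket, exit, BlobRecycler::default());
    // Received blobs arrive on `stage.blob_receiver`; join the handles in
    // `stage.thread_hdls` on shutdown.
    let _ = stage;
}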

src/budget.rs (new file, 175 lines)

@@ -0,0 +1,175 @@
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the budget is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use std::mem;
/// A data type representing a `Witness` that the payment plan is waiting on.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
Timestamp(DateTime<Utc>),
/// Wait for a `Signature` `Witness` from `PublicKey`.
Signature(PublicKey),
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
_ => false,
}
}
}
/// A data type representing a payment plan.
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Budget {
/// Make a payment.
Pay(Payment),
/// Make a payment after some condition.
After(Condition, Payment),
/// Either make a payment after one condition or a different payment after another
/// condition, whichever condition is satisfied first.
Or((Condition, Payment), (Condition, Payment)),
}
impl Budget {
/// Create the simplest budget: one that pays `tokens` to `to`.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Budget::Pay(Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
}
impl PaymentPlan for Budget {
/// Return Payment if the budget requires no additional Witnesses.
fn final_payment(&self) -> Option<Payment> {
match self {
Budget::Pay(payment) => Some(payment.clone()),
_ => None,
}
}
/// Return true if the budget spends exactly `spendable_tokens`.
fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
}
}
/// Apply a witness to the budget to see if the budget can be reduced.
/// If so, modify the budget in-place.
fn apply_witness(&mut self, witness: &Witness) {
let new_payment = match self {
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
_ => None,
}.cloned();
if let Some(payment) = new_payment {
mem::replace(self, Budget::Pay(payment));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
}
#[test]
fn test_verify() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
assert!(Budget::new_payment(42, to).verify(42));
assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, 42, to).verify(42));
assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
#[test]
fn test_authorized_payment() {
let from = PublicKey::default();
let to = PublicKey::default();
let mut budget = Budget::new_authorized_payment(from, 42, to);
budget.apply_witness(&Witness::Signature(from));
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let mut budget = Budget::new_future_payment(dt, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
assert_eq!(budget, Budget::new_payment(42, to));
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Signature(from));
assert_eq!(budget, Budget::new_payment(42, from));
}
}
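A minimal usage sketch (not part of the diff) of the Budget state machine above; the module paths are assumptions based on the `pub mod` list in src/lib.rs later in this diff:
use budget::Budget;
use payment_plan::{PaymentPlan, Witness};
use signature::{KeyPair, KeyPairUtil};
use chrono::prelude::*;
fn cancelable_payment_demo() {
    let from = KeyPair::new().pubkey();
    let to = KeyPair::new().pubkey();
    let dt = Utc.ymd(2018, 6, 24).and_hms(0, 0, 0);
    let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
    assert_eq!(budget.final_payment(), None); // still gated on a witness
    // Either witness collapses the Or(..) into an unconditional payment:
    budget.apply_witness(&Witness::Timestamp(dt));
    assert_eq!(budget, Budget::new_payment(42, to));
}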

69
src/counter.rs Normal file

@@ -0,0 +1,69 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use timing;
pub struct Counter {
pub name: &'static str,
pub counts: AtomicUsize,
pub nanos: AtomicUsize,
pub times: AtomicUsize,
pub lograte: usize,
}
macro_rules! create_counter {
($name:expr, $lograte:expr) => {
Counter {
name: $name,
counts: AtomicUsize::new(0),
nanos: AtomicUsize::new(0),
times: AtomicUsize::new(0),
lograte: $lograte,
}
};
}
macro_rules! inc_counter {
($name:expr, $count:expr, $start:expr) => {
unsafe { $name.inc($count, $start.elapsed()) };
};
}
impl Counter {
pub fn inc(&mut self, events: usize, dur: Duration) {
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
let times = self.times.fetch_add(1, Ordering::Relaxed);
if times % self.lograte == 0 && times > 0 {
info!(
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
self.name,
counts,
nanos,
times,
counts as f64 * 1e9 / nanos as f64,
timing::timestamp(),
);
}
}
}
#[cfg(test)]
mod tests {
use counter::Counter;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
#[test]
fn test_counter() {
static mut COUNTER: Counter = create_counter!("test", 100);
let start = Instant::now();
let count = 1;
inc_counter!(COUNTER, count, start);
unsafe {
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
assert_eq!(COUNTER.lograte, 100);
assert_eq!(COUNTER.name, "test");
}
}
}
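A hypothetical sketch of instrumenting a hot path with the macros above; the `static mut` plus `unsafe` pattern mirrors the test:
use std::time::Instant;
static mut DEMO_COUNTER: Counter = create_counter!("demo", 1000);
fn process_batch(items: &[u8]) {
    let start = Instant::now();
    // ... handle `items` ...
    // Atomically bumps counts/nanos/times; logs a JSON line every 1000th sample.
    inc_counter!(DEMO_COUNTER, items.len(), start);
}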

File diff suppressed because it is too large

312
src/drone.rs Normal file

@@ -0,0 +1,312 @@
//! The `drone` module provides an object for launching a Solana Drone,
//! which is the custodian of any remaining tokens in a mint.
//! The Solana Drone builds and sends airdrop transactions,
//! checking requests against a request cap for a given `time_slice`
//! and (to come) an IP rate limit.
use signature::{KeyPair, PublicKey};
use std::io;
use std::io::{Error, ErrorKind};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::Duration;
use thin_client::ThinClient;
use transaction::Transaction;
pub const TIME_SLICE: u64 = 60;
pub const REQUEST_CAP: u64 = 150_000;
#[derive(Serialize, Deserialize, Debug)]
pub enum DroneRequest {
GetAirdrop {
airdrop_request_amount: u64,
client_public_key: PublicKey,
},
}
pub struct Drone {
mint_keypair: KeyPair,
ip_cache: Vec<IpAddr>,
_airdrop_addr: SocketAddr,
transactions_addr: SocketAddr,
requests_addr: SocketAddr,
pub time_slice: Duration,
request_cap: u64,
pub request_current: u64,
}
impl Drone {
pub fn new(
mint_keypair: KeyPair,
_airdrop_addr: SocketAddr,
transactions_addr: SocketAddr,
requests_addr: SocketAddr,
time_input: Option<u64>,
request_cap_input: Option<u64>,
) -> Drone {
let time_slice = match time_input {
Some(time) => Duration::new(time, 0),
None => Duration::new(TIME_SLICE, 0),
};
let request_cap = match request_cap_input {
Some(cap) => cap,
None => REQUEST_CAP,
};
Drone {
mint_keypair,
ip_cache: Vec::new(),
_airdrop_addr,
transactions_addr,
requests_addr,
time_slice,
request_cap,
request_current: 0,
}
}
pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
(self.request_current + request_amount) <= self.request_cap
}
pub fn clear_request_count(&mut self) {
self.request_current = 0;
}
pub fn add_ip_to_cache(&mut self, ip: IpAddr) {
self.ip_cache.push(ip);
}
pub fn clear_ip_cache(&mut self) {
self.ip_cache.clear();
}
pub fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
// [WIP] This is placeholder code for a proper rate limiter.
// Right now it will only allow one total drone request per IP
if self.ip_cache.contains(&ip) {
// Add proper error handling here
Err(ip)
} else {
self.add_ip_to_cache(ip);
Ok(ip)
}
}
pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
let tx: Transaction;
let request_amount: u64;
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
self.requests_addr,
requests_socket,
self.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id();
match req {
DroneRequest::GetAirdrop {
airdrop_request_amount,
client_public_key,
} => {
request_amount = airdrop_request_amount; // u64 is Copy; no clone needed
tx = Transaction::new(
&self.mint_keypair,
client_public_key,
airdrop_request_amount as i64,
last_id,
);
}
}
if self.check_request_limit(request_amount) {
self.request_current += request_amount;
client.transfer_signed(tx)
} else {
Err(Error::new(ErrorKind::Other, "token limit reached"))
}
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::{get_ip_addr, TestNode};
use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
use logger;
use mint::Mint;
use server::Server;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use thin_client::ThinClient;
#[test]
fn test_check_request_limit() {
let keypair = KeyPair::new();
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
addr.set_ip(get_ip_addr().unwrap());
let transactions_addr = "0.0.0.0:0".parse().unwrap();
let requests_addr = "0.0.0.0:0".parse().unwrap();
let mut drone = Drone::new(
keypair,
addr,
transactions_addr,
requests_addr,
None,
Some(3),
);
assert!(drone.check_request_limit(1));
drone.request_current = 3;
assert!(!drone.check_request_limit(1));
}
#[test]
fn test_clear_request_count() {
let keypair = KeyPair::new();
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
addr.set_ip(get_ip_addr().unwrap());
let transactions_addr = "0.0.0.0:0".parse().unwrap();
let requests_addr = "0.0.0.0:0".parse().unwrap();
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
drone.request_current += 256;
assert_eq!(drone.request_current, 256);
drone.clear_request_count();
assert_eq!(drone.request_current, 0);
}
#[test]
fn test_add_ip_to_cache() {
let keypair = KeyPair::new();
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
addr.set_ip(get_ip_addr().unwrap());
let transactions_addr = "0.0.0.0:0".parse().unwrap();
let requests_addr = "0.0.0.0:0".parse().unwrap();
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
assert_eq!(drone.ip_cache.len(), 0);
drone.add_ip_to_cache(ip);
assert_eq!(drone.ip_cache.len(), 1);
assert!(drone.ip_cache.contains(&ip));
}
#[test]
fn test_clear_ip_cache() {
let keypair = KeyPair::new();
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
addr.set_ip(get_ip_addr().unwrap());
let transactions_addr = "0.0.0.0:0".parse().unwrap();
let requests_addr = "0.0.0.0:0".parse().unwrap();
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
assert_eq!(drone.ip_cache.len(), 0);
drone.add_ip_to_cache(ip);
assert_eq!(drone.ip_cache.len(), 1);
drone.clear_ip_cache();
assert_eq!(drone.ip_cache.len(), 0);
assert!(drone.ip_cache.is_empty());
}
#[test]
fn test_drone_default_init() {
let keypair = KeyPair::new();
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
addr.set_ip(get_ip_addr().unwrap());
let transactions_addr = "0.0.0.0:0".parse().unwrap();
let requests_addr = "0.0.0.0:0".parse().unwrap();
let time_slice: Option<u64> = None;
let request_cap: Option<u64> = None;
let drone = Drone::new(
keypair,
addr,
transactions_addr,
requests_addr,
time_slice,
request_cap,
);
assert_eq!(drone.time_slice, Duration::new(TIME_SLICE, 0));
assert_eq!(drone.request_cap, REQUEST_CAP);
}
#[test]
fn test_send_airdrop() {
const SMALL_BATCH: i64 = 50;
const TPS_BATCH: i64 = 5_000_000;
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let carlos_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(900));
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
addr.set_ip(get_ip_addr().expect("drone get_ip_addr"));
let mut drone = Drone::new(
alice.keypair(),
addr,
leader.data.transactions_addr,
leader.data.requests_addr,
None,
Some(5_000_050),
);
let bob_req = DroneRequest::GetAirdrop {
airdrop_request_amount: 50,
client_public_key: bob_pubkey,
};
let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
assert!(bob_result > 0);
let carlos_req = DroneRequest::GetAirdrop {
airdrop_request_amount: 5_000_000,
client_public_key: carlos_pubkey,
};
let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
assert!(carlos_result > 0);
let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
let transactions_socket =
UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.transactions_addr,
transactions_socket,
);
let bob_balance = client.poll_get_balance(&bob_pubkey);
info!("Small request balance: {:?}", bob_balance);
assert_eq!(bob_balance.unwrap(), SMALL_BATCH);
let carlos_balance = client.poll_get_balance(&carlos_pubkey);
info!("TPS request balance: {:?}", carlos_balance);
assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
}
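A condensed sketch of the airdrop flow exercised by the tests above; the drone address and cap are placeholder values, and the returned usize is whatever the underlying socket send reports:
fn request_airdrop(mint_keypair: KeyPair, client_public_key: PublicKey,
                   transactions_addr: SocketAddr, requests_addr: SocketAddr) {
    let drone_addr: SocketAddr = "127.0.0.1:9900".parse().unwrap();
    let mut drone = Drone::new(mint_keypair, drone_addr, transactions_addr,
                               requests_addr, Some(TIME_SLICE), Some(1_000));
    let req = DroneRequest::GetAirdrop { airdrop_request_amount: 50, client_public_key };
    match drone.send_airdrop(req) {
        Ok(n) => println!("airdrop submitted, {} bytes sent", n),
        Err(e) => println!("request cap reached or send failed: {}", e),
    }
}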


@@ -2,77 +2,105 @@
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use event::Event;
use bincode::serialized_size;
use hash::{extend_and_hash, hash, Hash};
use packet::BLOB_DATA_SIZE;
use rayon::prelude::*;
use transaction::Transaction;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `events`
/// field points to Events that took place shortly after `id` was generated.
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
/// The number of hashes since the previous Entry ID.
pub num_hashes: u64,
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
pub id: Hash,
pub events: Vec<Event>,
/// An unordered list of transactions that were observed before the Entry ID was
/// generated. They may have been observed before a previous Entry ID but were
/// pushed back into this list to ensure deterministic interpretation of the ledger.
pub transactions: Vec<Transaction>,
}
impl Entry {
/// Creates an Entry from the number of hashes `num_hashes` since the previous event
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &transactions);
let entry = Entry {
num_hashes,
id,
transactions,
};
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
entry
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Self {
let entry = Self::new(start_hash, *cur_hashes, transactions);
*start_hash = entry.id;
*cur_hashes = 0;
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
entry
}
/// Creates an Entry from the number of hashes `num_hashes` since the previous transaction
/// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
Entry {
num_hashes,
id: *id,
events: vec![],
transactions: vec![],
}
}
/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
/// If the event is not a Tick, then hash that as well.
/// If the transaction is not a Tick, then hash that as well.
pub fn verify(&self, start_hash: &Hash) -> bool {
self.events.par_iter().all(|event| event.verify())
&& self.id == next_hash(start_hash, self.num_hashes, &self.events)
self.transactions.par_iter().all(|tx| tx.verify_plan())
&& self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
}
}
fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
match *event {
Event::Transaction(ref tr) => {
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
hash_data.push(0u8);
hash_data.extend_from_slice(&tr.sig);
}
Event::Signature { ref sig, .. } => {
hash_data.push(1u8);
hash_data.extend_from_slice(sig);
}
Event::Timestamp { ref sig, .. } => {
hash_data.push(2u8);
hash_data.extend_from_slice(sig);
}
}
hash_data.extend_from_slice(&tx.sig);
}
/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
/// the signature. If num_hashes is zero and there's no transaction data,
/// start_hash is returned.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
let mut id = *start_hash;
for _ in 1..num_hashes {
id = hash(&id);
}
// Hash all the event data
// Hash all the transaction data
let mut hash_data = vec![];
for event in events {
add_event_data(&mut hash_data, event);
for tx in transactions {
add_transaction_data(&mut hash_data, tx);
}
if !hash_data.is_empty() {
@@ -84,31 +112,13 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
}
}
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
assert!(num_hashes > 0 || transactions.is_empty());
Entry {
num_hashes,
id,
events,
}
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
let entry = create_entry(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &[]),
events: vec![],
id: next_hash(start_hash, num_hashes, &transactions),
transactions,
}
}
@@ -116,8 +126,7 @@ pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
mod tests {
use super::*;
use chrono::prelude::*;
use entry::create_entry;
use event::Event;
use entry::Entry;
use hash::hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
@@ -128,24 +137,24 @@ mod tests {
let one = hash(&zero);
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
}
#[test]
fn test_event_reorder_attack() {
fn test_transaction_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero));
let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero));
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two events and ensure verification fails.
e0.events[0] = tr1; // <-- attack
e0.events[1] = tr0;
// Next, swap two transactions and ensure verification fails.
e0.transactions[0] = tx1; // <-- attack
e0.transactions[1] = tx0;
assert!(!e0.verify(&zero));
}
@@ -155,22 +164,41 @@ mod tests {
// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::new_timestamp(&keypair, Utc::now());
let tr1 = Event::new_signature(&keypair, Default::default());
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two witness events and ensure verification fails.
e0.events[0] = tr1; // <-- attack
e0.events[1] = tr0;
// Next, swap two witness transactions and ensure verification fails.
e0.transactions[0] = tx1; // <-- attack
e0.transactions[1] = tx0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_next_tick() {
fn test_next_entry() {
let zero = Hash::default();
let tick = next_tick(&zero, 1);
let tick = next_entry(&zero, 1, vec![]);
assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero);
let tick = next_entry(&zero, 0, vec![]);
assert_eq!(tick.num_hashes, 0);
assert_eq!(tick.id, zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
assert_eq!(entry0.num_hashes, 1);
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
}
#[test]
#[should_panic]
fn test_next_entry_panic() {
let zero = Hash::default();
let keypair = KeyPair::new();
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
next_entry(&zero, 0, vec![tx]);
}
}
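A short sketch of chaining and verifying entries with the API above, following the inductive pattern of test_entry_verify:
fn chain_demo() {
    let zero = Hash::default();
    let e0 = next_entry(&zero, 1, vec![]);  // one hash past the seed
    let e1 = next_entry(&e0.id, 1, vec![]); // chained off e0.id
    assert!(e0.verify(&zero) && e1.verify(&e0.id)); // each link checks its parent
}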

82
src/entry_writer.rs Normal file

@@ -0,0 +1,82 @@
//! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank;
use entry::Entry;
use ledger::Block;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer;
pub struct EntryWriter<'a> {
bank: &'a Bank,
}
impl<'a> EntryWriter<'a> {
/// Create a new Tpu that wraps the given Bank.
pub fn new(bank: &'a Bank) -> Self {
EntryWriter { bank }
}
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
trace!("write_entry entry");
self.bank.register_entry_id(&entry.id);
writeln!(
writer.lock().expect("'writer' lock in fn write_entry"),
"{}",
serde_json::to_string(&entry).expect("'entry' to_string in fn write_entry")
).expect("writeln! in fn write_entry");
}
fn write_entries<W: Write>(
&self,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<Vec<Entry>> {
//TODO implement a serialize for channel that does this without allocations
let mut l = vec![];
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
self.write_entry(writer, &entry);
l.push(entry);
while let Ok(entry) = entry_receiver.try_recv() {
self.write_entry(writer, &entry);
l.push(entry);
}
Ok(l)
}
/// Process any Entry items that have been published by the Historian and
/// continuously broadcast blobs of entries out.
pub fn write_and_send_entries<W: Write>(
&self,
broadcast: &streamer::BlobSender,
blob_recycler: &packet::BlobRecycler,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<()> {
let mut q = VecDeque::new();
let list = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", list.len());
list.to_blobs(blob_recycler, &mut q);
if !q.is_empty() {
trace!("broadcasting {}", q.len());
broadcast.send(q)?;
}
Ok(())
}
/// Process any Entry items that have been published by the Historian,
/// discarding the written output.
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
Ok(())
}
}
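Sketch of feeding the writer from a channel, as the write stage does; the mint/bank setup is borrowed from this diff's other tests:
fn drain_demo(mint: &Mint) {
    let bank = Bank::new(mint);
    let entry_writer = EntryWriter::new(&bank);
    let (entry_sender, entry_receiver) = channel();
    for entry in mint.create_entries() {
        entry_sender.send(entry).unwrap();
    }
    // Registers each entry id with the bank, discarding the JSON output.
    entry_writer.drain_entries(&entry_receiver).unwrap();
}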


@@ -1,17 +1,18 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob};
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use std::result;
//TODO(sakridge) pick these values
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
pub const NUM_CODED: usize = 20;
pub const MAX_MISSING: usize = 4;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
NotEnoughBlocksToDecode,
DecodeError,
EncodeError,
InvalidBlockSize,
}
@@ -73,12 +74,22 @@ pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Resul
let mut data_arg = Vec::new();
for block in data {
if block_len != block.len() {
trace!(
"data block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(block.as_ptr());
}
for mut block in coding {
if block_len != block.len() {
trace!(
"coding block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(block.as_mut_ptr());
@@ -150,48 +161,128 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
Ok(())
}
// Generate coding blocks in window from consumed to consumed+NUM_DATA
// Allocate some coding blobs and insert into the blobs array
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
let mut added = 0;
let blobs_len = blobs.len() as u64;
for i in consumed..consumed + blobs_len {
let is = i as usize;
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
for _ in 0..MAX_MISSING {
trace!("putting coding at {}", (i - consumed));
let new_blob = recycler.allocate();
let new_blob_clone = new_blob.clone();
let mut new_blob_l = new_blob_clone.write().unwrap();
new_blob_l.set_size(0);
new_blob_l.set_coding().unwrap();
drop(new_blob_l);
blobs.insert((i - consumed) as usize, new_blob);
added += 1;
}
}
}
info!(
"add_coding consumed: {} blobs.len(): {} added: {}",
consumed,
blobs.len(),
added
);
}
// Generate coding blocks in window starting from consumed
pub fn generate_coding(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
num_blobs: usize,
) -> Result<()> {
let mut block_start = consumed - (consumed % NUM_CODED);
for i in consumed..consumed + num_blobs {
if (i % NUM_CODED) == (NUM_CODED - 1) {
let mut data_blobs = Vec::new();
let mut coding_blobs = Vec::new();
let mut data_locks = Vec::new();
let mut data_ptrs: Vec<&[u8]> = Vec::new();
let mut coding_locks = Vec::new();
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
for i in consumed..consumed + NUM_DATA {
info!(
"generate_coding start: {} end: {} consumed: {} num_blobs: {}",
block_start,
block_start + NUM_DATA,
consumed,
num_blobs
);
for i in block_start..block_start + NUM_DATA {
let n = i % window.len();
data_blobs.push(window[n].clone().unwrap());
trace!("window[{}] = {:?}", n, window[n]);
if window[n].is_none() {
trace!("data block is null @ {}", n);
return Ok(());
}
data_blobs.push(
window[n]
.clone()
.expect("'data_blobs' arr in pub fn generate_coding"),
);
}
let mut max_data_size = 0;
for b in &data_blobs {
data_locks.push(b.write().unwrap());
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
if lck.meta.size > max_data_size {
max_data_size = lck.meta.size;
}
data_locks.push(lck);
}
trace!("max_data_size: {}", max_data_size);
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data);
data_ptrs.push(&l.data[..max_data_size]);
}
// generate coding ptr array
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
window[n] = Some(re.allocate());
coding_blobs.push(window[n].clone().unwrap());
if window[n].is_none() {
trace!("coding block is null @ {}", n);
return Ok(());
}
let w_l = window[n].clone().unwrap();
w_l.write().unwrap().set_size(max_data_size);
if w_l.write().unwrap().set_coding().is_err() {
return Err(ErasureError::EncodeError);
}
coding_blobs.push(
window[n]
.clone()
.expect("'coding_blobs' arr in pub fn generate_coding"),
);
}
for b in &coding_blobs {
coding_locks.push(b.write().unwrap());
coding_locks.push(
b.write()
.expect("'coding_locks' arr in pub fn generate_coding"),
);
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
coding_ptrs.push(&mut l.data);
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
trace!("consumed: {}", consumed);
debug!(
"consumed: {} data: {}:{} coding: {}:{}",
consumed,
block_start,
block_start + NUM_DATA,
coding_start,
coding_end
);
block_start += NUM_CODED;
}
}
Ok(())
}
@@ -203,13 +294,37 @@ pub fn recover(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
received: usize,
) -> Result<()> {
//recover with erasure coding
if received <= consumed {
return Ok(());
}
let num_blocks = (received - consumed) / NUM_CODED;
let mut block_start = consumed - (consumed % NUM_CODED);
if num_blocks > 0 {
debug!(
"num_blocks: {} received: {} consumed: {}",
num_blocks, received, consumed
);
}
for i in 0..num_blocks {
if i > 100 {
break;
}
let mut data_missing = 0;
let mut coded_missing = 0;
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in consumed..coding_end {
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
trace!(
"recover: block_start: {} coding_start: {} coding_end: {}",
block_start,
coding_start,
coding_end
);
for i in block_start..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
@@ -219,40 +334,65 @@ pub fn recover(
}
}
}
trace!("missing: data: {} coding: {}", data_missing, coded_missing);
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
debug!(
"1: start: {} recovering: data: {} coding: {}",
block_start, data_missing, coded_missing
);
}
if data_missing > 0 {
if (data_missing + coded_missing) <= MAX_MISSING {
debug!(
"2: recovering: data: {} coding: {}",
data_missing, coded_missing
);
let mut blobs: Vec<SharedBlob> = Vec::new();
let mut locks = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut erasures: Vec<i32> = Vec::new();
for i in consumed..coding_end {
let mut meta = None;
let mut size = None;
for i in block_start..coding_end {
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
blobs.push(b.clone().unwrap());
if i >= NUM_DATA && size.is_none() {
let bl = b.clone().unwrap();
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
}
if meta.is_none() {
let bl = b.clone().unwrap();
meta = Some(bl.read().unwrap().meta.clone());
}
blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - consumed) as i32);
erasures.push((i - block_start) as i32);
}
erasures.push(-1);
trace!("erasures: {:?}", erasures);
trace!(
"erasures: {:?} data_size: {} header_size: {}",
erasures,
size.unwrap(),
BLOB_HEADER_SIZE
);
//lock everything
for b in &blobs {
locks.push(b.write().unwrap());
locks.push(b.write().expect("'locks' arr in pub fn recover"));
}
{
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data);
coding_ptrs.push(&l.data()[..size.unwrap()]);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data);
data_ptrs.push(&mut l.data[..size.unwrap()]);
}
}
trace!(
@@ -261,18 +401,35 @@ pub fn recover(
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
} else {
return Err(ErasureError::NotEnoughBlocksToDecode);
}
for i in &erasures[..erasures.len() - 1] {
let idx = *i as usize;
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
locks[idx].meta = meta.clone().unwrap();
locks[idx].set_size(data_size as usize);
trace!(
"erasures[{}] size: {} data[0]: {}",
*i,
data_size,
locks[idx].data()[0]
);
}
}
}
block_start += NUM_CODED;
}
Ok(())
}
#[cfg(test)]
mod test {
use crdt;
use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
extern crate env_logger;
use logger;
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::sync::{Arc, RwLock};
#[test]
pub fn test_coding() {
@@ -328,10 +485,15 @@ mod test {
for (i, w) in window.iter().enumerate() {
print!("window({}): ", i);
if w.is_some() {
let window_lock = w.clone().unwrap();
let window_data = window_lock.read().unwrap().data;
let window_l1 = w.clone().unwrap();
let window_l2 = window_l1.read().unwrap();
print!(
"index: {:?} meta.size: {} data: ",
window_l2.get_index(),
window_l2.meta.size
);
for i in 0..8 {
print!("{} ", window_data[i]);
print!("{} ", window_l2.data()[i]);
}
} else {
print!("null");
@@ -340,45 +502,102 @@ mod test {
}
}
#[test]
pub fn test_window_recover() {
let mut window = Vec::new();
let blob_recycler = BlobRecycler::default();
let offset = 4;
for i in 0..(4 * erasure::NUM_CODED + 1) {
fn generate_window(
data_len: usize,
blob_recycler: &BlobRecycler,
offset: usize,
num_blobs: usize,
) -> (Vec<Option<SharedBlob>>, usize) {
let mut window = vec![None; 32];
let mut blobs = Vec::new();
for i in 0..num_blobs {
let b = blob_recycler.allocate();
let b_ = b.clone();
let data_len = b.read().unwrap().data.len();
let mut w = b.write().unwrap();
w.set_index(i as u64).unwrap();
assert_eq!(i as u64, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.set_size(data_len);
for k in 0..data_len {
w.data[k] = (k + i) as u8;
w.data_mut()[k] = (k + i) as u8;
}
window.push(Some(b_));
blobs.push(b_);
}
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
let blobs_len = blobs.len();
let d = crdt::ReplicatedData::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
"127.0.0.1:1237".parse().unwrap(),
"127.0.0.1:1238".parse().unwrap(),
);
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize;
window[idx] = Some(b);
}
(window, blobs_len)
}
#[test]
pub fn test_window_recover_basic() {
logger::setup();
let data_len = 16;
let blob_recycler = BlobRecycler::default();
// Generate a window
let offset = 1;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
println!("** after-gen-window:");
print_window(&window);
// Generate the coding blocks
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
println!("** after-gen-coding:");
print_window(&window);
let erase_offset = offset;
// Create a hole in the window
let refwindow = window[erase_offset].clone();
window[erase_offset] = None;
// Recover it from coding
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
println!("** after-recover:");
print_window(&window);
// Check the result
let window_l = window[erase_offset].clone().unwrap();
let window_l2 = window_l.read().unwrap();
let ref_l = refwindow.clone().unwrap();
let ref_l2 = ref_l.read().unwrap();
assert_eq!(
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
);
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
}
//TODO This needs to be reworked
#[test]
#[ignore]
pub fn test_window_recover() {
logger::setup();
let blob_recycler = BlobRecycler::default();
let offset = 4;
let data_len = 16;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
println!("** after-gen:");
print_window(&window);
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
assert!(
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
.is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
println!("** after-coding:");
print_window(&window);
let refwindow = window[offset + 1].clone();
@@ -392,29 +611,14 @@ mod test {
window_l0.write().unwrap().data[0] = 55;
println!("** after-nulling:");
print_window(&window);
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_err()
);
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
println!("** after-restore:");
print_window(&window);
let window_l = window[offset + 1].clone().unwrap();
let ref_l = refwindow.clone().unwrap();
assert_eq!(
window_l.read().unwrap().data.to_vec(),
ref_l.read().unwrap().data.to_vec()
window_l.read().unwrap().data()[..data_len],
ref_l.read().unwrap().data()[..data_len]
);
}
}
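Back-of-the-envelope check of the layout constants above: each NUM_CODED-blob block is NUM_DATA data blobs followed by MAX_MISSING coding blobs, so any MAX_MISSING losses per block are recoverable. A standalone illustration:
const NUM_CODED: usize = 20;
const MAX_MISSING: usize = 4;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
fn is_coding_index(i: usize) -> bool {
    (i % NUM_CODED) >= NUM_DATA // the tail of each block carries parity
}
fn main() {
    assert!(!is_coding_index(NUM_DATA - 1));     // last data slot in block 0
    assert!(is_coding_index(NUM_DATA));          // first coding slot in block 0
    assert!(is_coding_index(2 * NUM_CODED - 1)); // last coding slot in block 1
}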


@@ -1,67 +0,0 @@
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
//! Transaction.
use bincode::serialize;
use chrono::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
use transaction::Transaction;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
Transaction(Transaction),
Signature {
from: PublicKey,
tx_sig: Signature,
sig: Signature,
},
Timestamp {
from: PublicKey,
dt: DateTime<Utc>,
sig: Signature,
},
}
impl Event {
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
let sign_data = serialize(&dt).unwrap();
let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
Event::Timestamp {
from: from.pubkey(),
dt,
sig,
}
}
/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
Event::Signature {
from: from.pubkey(),
tx_sig,
sig,
}
}
/// Verify the Event's signature's are valid and if a transaction, that its
/// spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify_sig(),
Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};
#[test]
fn test_event_verify() {
assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
}
}

47
src/fetch_stage.rs Normal file

@@ -0,0 +1,47 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
pub struct FetchStage {
pub packet_receiver: streamer::PacketReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl FetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
packet_recycler: packet::PacketRecycler,
) -> Self {
Self::new_multi_socket(vec![socket], exit, packet_recycler)
}
pub fn new_multi_socket(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
packet_recycler: packet::PacketRecycler,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
.map(|socket| {
streamer::receiver(
socket,
exit.clone(),
packet_recycler.clone(),
packet_sender.clone(),
)
})
.collect();
FetchStage {
packet_receiver,
thread_hdls,
}
}
}


@@ -10,7 +10,10 @@ pub type Hash = GenericArray<u8, U32>;
pub fn hash(val: &[u8]) -> Hash {
let mut hasher = Sha256::default();
hasher.input(val);
hasher.result()
// At the time of this writing, the sha2 library is stuck on an old version
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
GenericArray::clone_from_slice(hasher.result().as_slice())
}
/// Return the hash of the given hash extended with the given value.


@@ -1,113 +0,0 @@
//! The `historian` module provides a microservice for generating a Proof of History.
//! It manages a thread containing a Proof of History Recorder.
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
pub struct Historian {
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<ExitReason>,
}
impl Historian {
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
Historian {
sender,
receiver,
thread_hdl,
}
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
start_hash: Hash,
ms_per_tick: Option<u64>,
receiver: Receiver<Signal>,
sender: SyncSender<Entry>,
) -> JoinHandle<ExitReason> {
spawn(move || {
let mut recorder = Recorder::new(receiver, sender, start_hash);
let now = Instant::now();
loop {
if let Err(err) = recorder.process_events(now, ms_per_tick) {
return err;
}
if ms_per_tick.is_some() {
recorder.hash();
}
}
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use std::thread::sleep;
use std::time::Duration;
#[test]
fn test_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(hist.sender);
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);
drop(hist.receiver);
hist.sender.send(Signal::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::SendDisconnected
);
}
#[test]
fn test_ticking_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}


@@ -1,13 +1,21 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.
use entry::{next_tick, Entry};
use bincode::{self, deserialize, serialize_into, serialized_size};
use entry::Entry;
use hash::Hash;
use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*;
use std::collections::VecDeque;
use std::io::Cursor;
use transaction::Transaction;
// a Block is a slice of Entries
pub trait Block {
/// Verifies the hashes and counts of a slice of events are all consistent.
/// Verifies the hashes and counts of a slice of transactions are all consistent.
fn verify(&self, start_hash: &Hash) -> bool;
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
}
impl Block for [Entry] {
@@ -16,24 +24,116 @@ impl Block for [Entry] {
let entry_pairs = genesis.par_iter().chain(self).zip(self);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
for entry in self {
let blob = blob_recycler.allocate();
let pos = {
let mut bd = blob.write().unwrap();
let mut out = Cursor::new(bd.data_mut());
serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize
};
assert!(pos < BLOB_SIZE);
blob.write().unwrap().set_size(pos);
q.push_back(blob);
}
}
}
/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
let mut id = *start_hash;
let mut ticks = vec![];
for _ in 0..len {
let entry = next_tick(&id, num_hashes);
id = entry.id;
ticks.push(entry);
pub fn reconstruct_entries_from_blobs(
blobs: VecDeque<SharedBlob>,
blob_recycler: &packet::BlobRecycler,
) -> bincode::Result<Vec<Entry>> {
let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
for blob in blobs {
let entry = {
let msg = blob.read().unwrap();
deserialize(&msg.data()[..msg.meta.size])
};
blob_recycler.recycle(blob);
match entry {
Ok(entry) => entries.push(entry),
Err(err) => {
trace!("reconstruct_entry_from_blobs: {}", err);
return Err(err);
}
ticks
}
}
Ok(entries)
}
/// Creates the next entries for given transactions, outputs
/// updates start_hash to id of last Entry, sets cur_hashes to 0
pub fn next_entries_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
if transactions.is_empty() {
vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
} else {
let mut chunk_len = transactions.len();
// check for fit, make sure they can be serialized
while serialized_size(&Entry {
num_hashes: 0,
id: Hash::default(),
transactions: transactions[0..chunk_len].to_vec(),
}).unwrap() > BLOB_DATA_SIZE as u64
{
chunk_len /= 2;
}
let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
for chunk in transactions.chunks(chunk_len) {
entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
}
entries
}
}
/// Creates the next Entries for given transactions
pub fn next_entries(
start_hash: &Hash,
cur_hashes: u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut num_hashes = cur_hashes;
next_entries_mut(&mut id, &mut num_hashes, transactions)
}
#[cfg(test)]
mod tests {
use super::*;
use entry::{next_entry, Entry};
use hash::hash;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use transaction::Transaction;
/// Create a vector of Entries of length `transaction_batches.len()`
/// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
fn next_entries_batched(
start_hash: &Hash,
cur_hashes: u64,
transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut entries = vec![];
let mut num_hashes = cur_hashes;
for transactions in transaction_batches {
let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
entries.append(&mut entry_batch);
}
entries
}
#[test]
fn test_verify_slice() {
@@ -42,26 +142,92 @@ mod tests {
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step
assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
let mut bad_ticks = next_ticks(&zero, 0, 2);
let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
#[test]
fn test_entries_to_blobs() {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10_000];
let entries = next_entries(&zero, 0, transactions);
let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
}
#[test]
fn test_bad_blobs_attack() {
let blob_recycler = BlobRecycler::default();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
}
#[test]
fn test_next_entries_batched() {
// This also tests next_entries; ugly, but an easy way to exercise vec-of-vec batches.
let mut id = Hash::default();
let next_id = hash(&id);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
let transactions = vec![tx0; 5];
let transaction_batches = vec![transactions.clone(); 5];
let entries0 = next_entries_batched(&id, 0, transaction_batches);
assert_eq!(entries0.len(), 5);
let mut entries1 = vec![];
for _ in 0..5 {
let entry = next_entry(&id, 1, transactions.clone());
id = entry.id;
entries1.push(entry);
}
assert_eq!(entries0, entries1);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use hash::hash;
use ledger::*;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = Hash::default();
let entries = next_ticks(&start_hash, 10_000, 8);
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
let blob_recycler = BlobRecycler::default();
bencher.iter(|| {
assert!(entries.verify(&start_hash));
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
});
}
}
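Round-trip sketch using the API above (it mirrors test_entries_to_blobs):
fn round_trip_demo() {
    let keypair = KeyPair::new();
    let tx = Transaction::new(&keypair, keypair.pubkey(), 1, Hash::default());
    // next_entries splits the batch so each Entry serializes under BLOB_DATA_SIZE.
    let entries = next_entries(&Hash::default(), 0, vec![tx; 1_000]);
    let recycler = BlobRecycler::default();
    let mut blobs = VecDeque::new();
    entries.to_blobs(&recycler, &mut blobs);
    assert_eq!(reconstruct_entries_from_blobs(blobs, &recycler).unwrap(), entries);
}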


@@ -1,25 +1,52 @@
//! The `solana` library implements the Solana high-performance blockchain architecture.
//! It includes a full Rust implementation of the architecture (see
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
//! command-line tools to spin up fullnodes and a Rust library
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
//!
#![cfg_attr(feature = "unstable", feature(test))]
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
#[macro_use]
pub mod counter;
pub mod bank;
pub mod banking_stage;
pub mod blob_fetch_stage;
pub mod budget;
pub mod crdt;
pub mod ecdsa;
pub mod drone;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod fetch_stage;
pub mod hash;
pub mod historian;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod ncp;
pub mod packet;
pub mod plan;
pub mod payment_plan;
pub mod record_stage;
pub mod recorder;
pub mod replicate_stage;
pub mod request;
pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
pub mod streamer;
pub mod subscribers;
pub mod thin_client;
pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod window_stage;
pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
@@ -32,12 +59,13 @@ extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate pnet_datalink;
extern crate serde_json;
extern crate sha2;
extern crate untrusted;
extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;
extern crate rand;

14
src/logger.rs Normal file

@@ -0,0 +1,14 @@
//! The `logger` module provides a setup function for `env_logger`. Its only function,
//! `setup()`, may be called multiple times.
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
});
}


@@ -1,8 +1,6 @@
//! The `mint` module is a library for generating the chain's genesis block.
use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
@@ -19,8 +17,11 @@ pub struct Mint {
impl Mint {
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
.expect("generate_pkcs8 in mint pub fn new")
.to_vec();
let keypair =
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
let pubkey = keypair.pubkey();
Mint {
pkcs8,
@@ -38,22 +39,22 @@ impl Mint {
}
pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
}
pub fn pubkey(&self) -> PublicKey {
self.pubkey
}
pub fn create_events(&self) -> Vec<Event> {
pub fn create_transactions(&self) -> Vec<Transaction> {
let keypair = self.keypair();
let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![Event::Transaction(tr)]
let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![tx]
}
pub fn create_entries(&self) -> Vec<Entry> {
let e0 = create_entry(&self.seed(), 0, vec![]);
let e1 = create_entry(&e0.id, 0, self.create_events());
let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_transactions());
vec![e0, e1]
}
}
@@ -61,24 +62,26 @@ impl Mint {
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub users: Vec<(Vec<u8>, i64)>,
pub num_accounts: i64,
}
#[cfg(test)]
mod tests {
use super::*;
use budget::Budget;
use ledger::Block;
use plan::Plan;
use transaction::{Instruction, Plan};
#[test]
fn test_create_events() {
let mut events = Mint::new(100).create_events().into_iter();
if let Event::Transaction(tr) = events.next().unwrap() {
if let Plan::Pay(payment) = tr.data.plan {
assert_eq!(tr.from, payment.to);
fn test_create_transactions() {
let mut transactions = Mint::new(100).create_transactions().into_iter();
let tx = transactions.next().unwrap();
if let Instruction::NewContract(contract) = tx.instruction {
if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
assert_eq!(tx.from, payment.to);
}
}
assert_eq!(events.next(), None);
assert_eq!(transactions.next(), None);
}
#[test]

89
src/ncp.rs Normal file

@@ -0,0 +1,89 @@
//! The `ncp` module implements the network control plane.
use crdt;
use packet;
use result::Result;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
pub struct Ncp {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Ncp {
pub fn new(
crdt: Arc<RwLock<crdt::Crdt>>,
window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
gossip_listen_socket: UdpSocket,
gossip_send_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<Ncp> {
let blob_recycler = packet::BlobRecycler::default();
let (request_sender, request_receiver) = channel();
trace!(
"Ncp: id: {:?}, listening on: {:?}",
&crdt.read().unwrap().me[..4],
gossip_listen_socket.local_addr().unwrap()
);
let t_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
gossip_listen_socket,
request_sender,
)?;
let (response_sender, response_receiver) = channel();
let t_responder = streamer::responder(
gossip_send_socket,
exit.clone(),
blob_recycler.clone(),
response_receiver,
);
let t_listen = crdt::Crdt::listen(
crdt.clone(),
window,
blob_recycler.clone(),
request_receiver,
response_sender.clone(),
exit.clone(),
);
let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
Ok(Ncp { thread_hdls })
}
}
#[cfg(test)]
mod tests {
use crdt::{Crdt, TestNode};
use ncp::Ncp;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
#[test]
#[ignore]
// test that stage will exit when flag is set
// TODO: Troubleshoot Docker-based coverage build and re-enabled
// this test. It is probably failing due to too many threads.
fn test_exit() {
let exit = Arc::new(AtomicBool::new(false));
let tn = TestNode::new();
let crdt = Crdt::new(tn.data.clone());
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
let d = Ncp::new(
c.clone(),
w,
tn.sockets.gossip,
tn.sockets.gossip_send,
exit.clone(),
).unwrap();
exit.store(true, Ordering::Relaxed);
for t in d.thread_hdls {
t.join().expect("thread join");
}
}
}
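Lifecycle sketch, mirroring the (ignored) test above: construct the control plane, flip the exit flag, then join the four service threads it spawned:
fn ncp_lifecycle() {
    let exit = Arc::new(AtomicBool::new(false));
    let tn = TestNode::new();
    let crdt = Arc::new(RwLock::new(Crdt::new(tn.data.clone())));
    let window = Arc::new(RwLock::new(vec![]));
    let ncp = Ncp::new(crdt, window, tn.sockets.gossip, tn.sockets.gossip_send,
                       exit.clone()).unwrap();
    exit.store(true, Ordering::Relaxed); // all four loops watch this flag
    for t in ncp.thread_hdls {
        t.join().unwrap();
    }
}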


@@ -1,12 +1,18 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use counter::Counter;
use result::{Error, Result};
use serde::Serialize;
use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Instant;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -14,7 +20,8 @@ pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
@@ -22,6 +29,7 @@ pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[repr(C)]
pub struct Meta {
pub size: usize,
pub num_retransmits: u64,
pub addr: [u16; 8],
pub port: u16,
pub v6: bool,
@@ -153,42 +161,47 @@ impl<T: Default> Clone for Recycler<T> {
impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock");
let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock");
let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
gc.push(msgs);
}
}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
static mut COUNTER: Counter = create_counter!("packets", 10);
self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until its readable
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
let mut start = Instant::now();
for p in &mut self.packets {
p.meta.size = 0;
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
inc_counter!(COUNTER, i, start);
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
start = Instant::now();
socket.set_nonblocking(true)?;
}
}
@@ -200,6 +213,7 @@ impl Packets {
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
debug!("recv_from: {}", sz);
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
@@ -211,34 +225,152 @@ impl Packets {
}
}
const BLOB_INDEX_SIZE: usize = size_of::<u64>();
pub fn to_packets_chunked<T: Serialize>(
r: &PacketRecycler,
xs: Vec<T>,
chunks: usize,
) -> Vec<SharedPackets> {
let mut out = vec![];
for x in xs.chunks(chunks) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(x.len(), Default::default());
for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
return out;
}
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
to_packets_chunked(r, xs, NUM_PACKETS)
}
pub fn to_blob<T: Serialize>(
resp: T,
rsp_addr: SocketAddr,
blob_recycler: &BlobRecycler,
) -> Result<SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
assert!(len < BLOB_SIZE);
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
pub fn to_blobs<T: Serialize>(
rsps: Vec<(T, SocketAddr)>,
blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
macro_rules! align {
($x:expr, $align:expr) => {
$x + ($align - 1) & !($align - 1)
};
}
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
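// Worked layout, assuming a 64-bit target (8-byte u64/usize) and a 32-byte
// PublicKey (GenericArray<u8, U32>):
//   BLOB_INDEX_END   = 8
//   BLOB_ID_END      = 8 + 8 + 32 = 48
//   BLOB_FLAGS_END   = 48 + 4 = 52
//   BLOB_SIZE_END    = 52 + 8 = 60
//   BLOB_HEADER_SIZE = align!(60, 64) = 64
// align! relies on `+` binding tighter than `&` in Rust, so it expands to
// ($x + ($align - 1)) & !($align - 1), i.e. $x rounded up to a multiple of $align.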
impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
Ok(())
}
/// Sender id. We use this to identify blobs from the leader that we should
/// retransmit. Eventually blobs should carry a signature that we can use for spam filtering.
pub fn get_id(&self) -> Result<PublicKey> {
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
Ok(e)
}
pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
let wtr = serialize(&id)?;
self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
Ok(())
}
pub fn get_flags(&self) -> Result<u32> {
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
let r = rdr.read_u32::<LittleEndian>()?;
Ok(r)
}
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
let mut wtr = vec![];
wtr.write_u32::<LittleEndian>(ix)?;
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
Ok(())
}
pub fn is_coding(&self) -> bool {
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
}
pub fn set_coding(&mut self) -> Result<()> {
let flags = self.get_flags().unwrap();
self.set_flags(flags | BLOB_FLAG_IS_CODING)
}
pub fn get_data_size(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] {
&self.data[BLOB_INDEX_SIZE..]
&self.data[BLOB_HEADER_SIZE..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_INDEX_SIZE..]
&mut self.data[BLOB_HEADER_SIZE..]
}
pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_INDEX_SIZE;
let new_size = size + BLOB_HEADER_SIZE;
self.meta.size = new_size;
self.set_data_size(new_size as u64).unwrap();
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until its readable
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
@@ -246,14 +378,17 @@ impl Blob {
for i in 0..NUM_BLOBS {
let r = re.allocate();
{
let mut p = r.write().unwrap();
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break;
}
Err(e) => {
if e.kind() != io::ErrorKind::WouldBlock {
info!("recv_from err {:?}", e);
}
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
@@ -276,7 +411,7 @@ impl Blob {
) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().unwrap();
let p = r.read().expect("'r' read lock in pub fn send_to");
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
@@ -288,11 +423,13 @@ impl Blob {
#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
use request::Request;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
#[test]
pub fn packet_recycler_test() {
let r = PacketRecycler::default();
@@ -334,6 +471,24 @@ mod test {
r.recycle(p);
}
#[test]
fn test_to_packets() {
let tx = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tx.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
pub fn blob_send_recv() {
trace!("start");

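A minimal sketch of the batched-read pattern described by the comment in run_read_from above, using only std types (the name read_batch and the fixed 256-byte buffers are illustrative, not part of this crate):

use std::io::ErrorKind;
use std::net::UdpSocket;

fn read_batch(socket: &UdpSocket, bufs: &mut [[u8; 256]]) -> std::io::Result<usize> {
    socket.set_nonblocking(false)?; // block until the first datagram arrives
    let mut n = 0;
    while n < bufs.len() {
        match socket.recv_from(&mut bufs[n]) {
            Ok(_) => {
                n += 1;
                if n == 1 {
                    // after the first read, drain the socket without blocking
                    socket.set_nonblocking(true)?;
                }
            }
            // WouldBlock after at least one read simply ends the batch
            Err(ref e) if e.kind() == ErrorKind::WouldBlock && n > 0 => break,
            Err(e) => return Err(e),
        }
    }
    socket.set_nonblocking(false)?; // restore blocking mode before returning
    Ok(n)
}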
src/payment_plan.rs (new file, 40 lines)

@@ -0,0 +1,40 @@
//! The `payment_plan` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
/// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
/// The current time.
Timestamp(DateTime<Utc>),
/// A signature from PublicKey.
Signature(PublicKey),
}
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
/// Amount to be paid.
pub tokens: i64,
/// The `PublicKey` that `tokens` should be paid to.
pub to: PublicKey,
}
/// Interface to smart contracts.
pub trait PaymentPlan {
/// Return Payment if the payment plan requires no additional Witnesses.
fn final_payment(&self) -> Option<Payment>;
/// Return true if the plan spends exactly `spendable_tokens`.
fn verify(&self, spendable_tokens: i64) -> bool;
/// Apply a witness to the payment plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
fn apply_witness(&mut self, witness: &Witness);
}
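To make the trait's contract concrete, here is a minimal hypothetical implementor that is already reduced to a single Payment (SimplePlan is illustrative only; Payment, PaymentPlan, and Witness are the types defined above):

struct SimplePlan(Payment);

impl PaymentPlan for SimplePlan {
    fn final_payment(&self) -> Option<Payment> {
        // no further witnesses are required
        Some(self.0.clone())
    }
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.0.tokens == spendable_tokens
    }
    fn apply_witness(&mut self, _witness: &Witness) {
        // nothing left to reduce; the plan is already a payment
    }
}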

src/plan.rs (deleted)

@@ -1,176 +0,0 @@
//! The `plan` module provides a domain-specific language for payment plans. Users create Plan objects that
//! are given to an interpreter. The interpreter listens for `Witness` events,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
use std::mem;
pub enum Witness {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
(&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
_ => false,
}
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
pub tokens: i64,
pub to: PublicKey,
}
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
Pay(Payment),
After(Condition, Payment),
Race((Condition, Payment), (Condition, Payment)),
}
impl Plan {
/// Create the simplest spending plan - one that pays `tokens` to PublicKey.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Plan::Pay(Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Plan::Race(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
/// Return Payment if the spending plan requires no additional Witnesses.
pub fn final_payment(&self) -> Option<Payment> {
match *self {
Plan::Pay(ref payment) => Some(payment.clone()),
_ => None,
}
}
/// Return true if the plan spends exactly `spendable_tokens`.
pub fn verify(&self, spendable_tokens: i64) -> bool {
match *self {
Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
payment.tokens == spendable_tokens
}
Plan::Race(ref a, ref b) => {
a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
}
}
}
/// Apply a witness to the spending plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
pub fn apply_witness(&mut self, witness: &Witness) {
let new_payment = match *self {
Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
_ => None,
}.cloned();
if let Some(payment) = new_payment {
mem::replace(self, Plan::Pay(payment));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
}
#[test]
fn test_verify_plan() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
assert!(Plan::new_payment(42, to).verify(42));
assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
assert!(Plan::new_future_payment(dt, 42, to).verify(42));
assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
#[test]
fn test_authorized_payment() {
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_authorized_payment(from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let mut plan = Plan::new_future_payment(dt, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, from));
}
}

src/record_stage.rs (new file, 213 lines)

@@ -0,0 +1,213 @@
//! The `record_stage` module provides an object for generating a Proof of History.
//! It records Transaction items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent a Transaction item. It
//! tags each Transaction with an Entry, and sends it back. The Entry includes the
//! Transaction, the latest hash, and the number of hashes since the last transaction.
//! The resulting stream of entries represents ordered transactions in time.
use entry::Entry;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
use transaction::Transaction;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Transactions(Vec<Transaction>),
}
pub struct RecordStage {
pub entry_receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<()>,
}
impl RecordStage {
/// A background thread that will continue tagging received Transaction messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
.spawn(move || {
let mut recorder = Recorder::new(start_hash);
let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
}
/// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
pub fn new_with_clock(
signal_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Duration,
) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
.spawn(move || {
let mut recorder = Recorder::new(start_hash);
let start_time = Instant::now();
loop {
if let Err(_) = Self::try_process_signals(
&mut recorder,
start_time,
tick_duration,
&signal_receiver,
&entry_sender,
) {
return;
}
recorder.hash();
}
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
}
fn process_signal(
signal: Signal,
recorder: &mut Recorder,
sender: &Sender<Entry>,
) -> Result<(), ()> {
let txs = if let Signal::Transactions(txs) = signal {
txs
} else {
vec![]
};
let entries = recorder.record(txs);
let mut result = Ok(());
for entry in entries {
result = sender.send(entry).map_err(|_| ());
if result.is_err() {
break;
}
}
result
}
fn process_signals(
recorder: &mut Recorder,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
match receiver.recv() {
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
Err(RecvError) => return Err(()),
}
}
}
fn try_process_signals(
recorder: &mut Recorder,
start_time: Instant,
tick_duration: Duration,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
}
match receiver.try_recv() {
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(()),
};
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use signature::{KeyPair, KeyPairUtil};
use std::sync::mpsc::channel;
use std::thread::sleep;
#[test]
fn test_historian() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
tx_sender.send(Signal::Tick).unwrap();
let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
let entry2 = record_stage.entry_receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(tx_sender);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
drop(record_stage.entry_receiver);
tx_sender.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}
#[test]
fn test_transactions() {
let (tx_sender, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
tx_sender
.send(Signal::Transactions(vec![tx0, tx1]))
.unwrap();
drop(tx_sender);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}
#[test]
fn test_clock() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage =
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
sleep(Duration::from_millis(900));
tx_sender.send(Signal::Tick).unwrap();
drop(tx_sender);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}
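A usage sketch distilled from the tests above, assuming record_stage::{RecordStage, Signal} and hash::Hash are in scope: the stage consumes Signals from a channel, emits Entries on entry_receiver, and exits once the signal channel is dropped.

use std::sync::mpsc::channel;

fn demo() {
    let (signal_sender, signal_receiver) = channel();
    let start = Hash::default();
    let stage = RecordStage::new(signal_receiver, &start);
    // a Tick with no transactions still yields an Entry extending the PoH chain
    signal_sender.send(Signal::Tick).unwrap();
    let entry = stage.entry_receiver.recv().unwrap();
    assert_eq!(entry.num_hashes, 0);
    drop(signal_sender); // closing the channel lets the stage thread finish
    stage.thread_hdl.join().unwrap();
}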

src/recorder.rs

@@ -1,45 +1,22 @@
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
//! It records Transaction items on behalf of its users.
use entry::{create_entry_mut, Entry};
use event::Event;
use entry::Entry;
use hash::{hash, Hash};
use std::mem;
use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use ledger::next_entries_mut;
use std::time::{Duration, Instant};
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Event(Event),
}
#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
RecvDisconnected,
SendDisconnected,
}
use transaction::Transaction;
pub struct Recorder {
sender: SyncSender<Entry>,
receiver: Receiver<Signal>,
last_hash: Hash,
events: Vec<Event>,
num_hashes: u64,
num_ticks: u64,
num_ticks: u32,
}
impl Recorder {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
pub fn new(last_hash: Hash) -> Self {
Recorder {
receiver,
sender,
last_hash,
events: vec![],
num_hashes: 0,
num_ticks: 0,
}
@@ -50,40 +27,21 @@ impl Recorder {
self.num_hashes += 1;
}
pub fn record_entry(&mut self) -> Result<(), ExitReason> {
let events = mem::replace(&mut self.events, vec![]);
let entry = create_entry_mut(&mut self.last_hash, &mut self.num_hashes, events);
self.sender
.send(entry)
.or(Err(ExitReason::SendDisconnected))?;
Ok(())
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
}
pub fn process_events(
&mut self,
epoch: Instant,
ms_per_tick: Option<u64>,
) -> Result<(), ExitReason> {
loop {
if let Some(ms) = ms_per_tick {
if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
self.record_entry()?;
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
// TODO: don't let this overflow u32
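// num_ticks is u32 rather than u64 because std::time::Duration implements
// Mul<u32> (used in `tick_duration * (self.num_ticks + 1)` above) but not Mul<u64>.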
self.num_ticks += 1;
}
}
match self.receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
self.record_entry()?;
}
Signal::Event(event) => {
self.events.push(event);
}
},
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(ExitReason::RecvDisconnected),
};
Some(Entry::new_mut(
&mut self.last_hash,
&mut self.num_hashes,
vec![],
))
} else {
None
}
}
}

src/replicate_stage.rs (new file, 53 lines)

@@ -0,0 +1,53 @@
//! The `replicate_stage` replicates transactions broadcast by the leader.
use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use streamer;
pub struct ReplicateStage {
pub thread_hdl: JoinHandle<()>,
}
impl ReplicateStage {
/// Process entry blobs, already in order
fn replicate_requests(
bank: &Arc<Bank>,
blob_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = blob_receiver.recv_timeout(timer)?;
let blobs_len = blobs.len();
let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
let res = bank.process_entries(entries);
if res.is_err() {
error!("process_entries {} {:?}", blobs_len, res);
}
res?;
Ok(())
}
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler,
) -> Self {
let thread_hdl = Builder::new()
.name("solana-replicate-stage".to_string())
.spawn(move || loop {
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
.unwrap();
ReplicateStage { thread_hdl }
}
}

src/request.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
//! The `request` module defines the messages for the thin client.
use hash::Hash;
use signature::PublicKey;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
true
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
LastId { id: Hash },
TransactionCount { transaction_count: u64 },
}
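These messages travel over the wire as bincode; a round-trip sketch using the serialize/deserialize calls this codebase already relies on (the function name demo is illustrative):

use bincode::{deserialize, serialize};

fn demo() {
    let req = Request::GetTransactionCount;
    let bytes = serialize(&req).expect("serialize request");
    let back: Request = deserialize(&bytes).expect("deserialize request");
    assert!(back.verify()); // always true today; a placeholder for real validation
}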

src/request_processor.rs (new file, 54 lines)

@@ -0,0 +1,54 @@
//! The `request_processor` processes thin client Request messages.
use bank::Bank;
use request::{Request, Response};
use std::net::SocketAddr;
use std::sync::Arc;
pub struct RequestProcessor {
bank: Arc<Bank>,
}
impl RequestProcessor {
/// Create a new Tpu that wraps the given Bank.
pub fn new(bank: Arc<Bank>) -> Self {
RequestProcessor { bank }
}
/// Process Request items sent by clients.
fn process_request(
&self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.bank.get_balance(&key);
let rsp = (Response::Balance { key, val }, rsp_addr);
info!("Response::Balance {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
}
}
pub fn process_requests(
&self,
reqs: Vec<(Request, SocketAddr)>,
) -> Vec<(Response, SocketAddr)> {
reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect()
}
}
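A usage sketch, assuming bank::Bank and mint::Mint constructed the same way as in the server tests later in this diff (the address is arbitrary):

use std::net::SocketAddr;
use std::sync::Arc;

fn demo() {
    let mint = Mint::new(10_000);
    let bank = Arc::new(Bank::new(&mint));
    let processor = RequestProcessor::new(bank);
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let rsps = processor.process_requests(vec![(Request::GetTransactionCount, addr)]);
    assert_eq!(rsps.len(), 1); // each request here maps to exactly one response
}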

src/request_stage.rs (new file, 116 lines)

@@ -0,0 +1,116 @@
//! The `request_stage` processes thin client Request messages.
use bincode::deserialize;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Instant;
use streamer;
use timing;
pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
pub request_processor: Arc<RequestProcessor>,
}
impl RequestStage {
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
pub fn process_request_packets(
request_processor: &RequestProcessor,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
debug!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);
let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();
let rsps = request_processor.process_requests(reqs);
let blobs = packet::to_blobs(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
debug!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-request-stage".to_string())
.spawn(move || loop {
let e = Self::process_request_packets(
&request_processor_,
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
})
.unwrap();
RequestStage {
thread_hdl,
blob_receiver,
request_processor,
}
}
}

src/result.rs

@@ -1,10 +1,10 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use bank;
use bincode;
use serde_json;
use std;
use std::any::Any;
use accountant;
#[derive(Debug)]
pub enum Error {
@@ -15,9 +15,11 @@ pub enum Error {
RecvError(std::sync::mpsc::RecvError),
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
AccountingError(accountant::AccountingError),
BankError(bank::BankError),
SendError,
Services,
CrdtTooSmall,
GenericError,
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -32,9 +34,9 @@ impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
Error::RecvTimeoutError(e)
}
}
impl std::convert::From<accountant::AccountingError> for Error {
fn from(e: accountant::AccountingError) -> Error {
Error::AccountingError(e)
impl std::convert::From<bank::BankError> for Error {
fn from(e: bank::BankError) -> Error {
Error::BankError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
@@ -77,6 +79,7 @@ mod tests {
use std::io;
use std::io::Write;
use std::net::SocketAddr;
use std::panic;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
@@ -88,6 +91,7 @@ mod tests {
}
fn join_error() -> Result<()> {
panic::set_hook(Box::new(|_info| {}));
let r = thread::spawn(|| panic!("hi")).join()?;
Ok(r)
}
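The From impls in this module exist so `?` can lift each library error into Error; a minimal sketch that assumes the module's remaining From impls (such as one for Box<bincode::ErrorKind>) beyond the lines shown here:

fn send_serialized() -> Result<()> {
    let bytes = bincode::serialize(&42u64)?; // Box<bincode::ErrorKind> -> Error::Serialize
    let (sender, receiver) = std::sync::mpsc::channel();
    sender.send(bytes)?; // SendError<T> -> Error::SendError
    let _ = receiver.recv()?; // RecvError -> Error::RecvError
    Ok(())
}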

src/rpu.rs (new file, 77 lines)

@@ -0,0 +1,77 @@
//! The `rpu` module implements the Request Processing Unit, a
//! 3-stage transaction processing pipeline in software. It listens
//! for `Request` messages from clients and replies with `Response`
//! messages.
//!
//! ```text
//! .------.
//! | Bank |
//! `---+--`
//! |
//! .------------------|-------------------.
//! | RPU | |
//! | v |
//! .---------. | .-------. .---------. .---------. | .---------.
//! | Alice |--->| | | | | +---->| Alice |
//! `---------` | | Fetch | | Request | | Respond | | `---------`
//! | | Stage |->| Stage |->| Stage | |
//! .---------. | | | | | | | | .---------.
//! | Bob |--->| | | | | +---->| Bob |
//! `---------` | `-------` `---------` `---------` | `---------`
//! | |
//! | |
//! `--------------------------------------`
//! ```
use bank::Bank;
use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
pub struct Rpu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Rpu {
pub fn new(
bank: Arc<Bank>,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let blob_recycler = packet::BlobRecycler::default();
let request_processor = RequestProcessor::new(bank.clone());
let request_stage = RequestStage::new(
request_processor,
exit.clone(),
packet_receiver,
packet_recycler.clone(),
blob_recycler.clone(),
);
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
request_stage.blob_receiver,
);
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
Rpu { thread_hdls }
}
}

src/server.rs (new file, 204 lines)

@@ -0,0 +1,204 @@
//! The `server` module hosts all the server microservices.
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use ncp::Ncp;
use packet;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;
pub struct Server {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Server {
/// Create a server instance acting as a leader.
///
/// ```text
/// .---------------------.
/// | Leader |
/// | |
/// .--------. | .-----. |
/// | |---->| | |
/// | Client | | | RPU | |
/// | |<----| | |
/// `----+---` | `-----` |
/// | | ^ |
/// | | | |
/// | | .--+---. |
/// | | | Bank | |
/// | | `------` |
/// | | ^ |
/// | | | | .------------.
/// | | .--+--. .-----. | | |
/// `-------->| TPU +-->| NCP +------>| Validators |
/// | `-----` `-----` | | |
/// | | `------------`
/// `---------------------`
/// ```
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
transactions_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let blob_recycler = packet::BlobRecycler::default();
let tpu = Tpu::new(
bank.clone(),
tick_duration,
transactions_socket,
blob_recycler.clone(),
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
thread_hdls.extend(ncp.thread_hdls);
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt,
window,
blob_recycler.clone(),
tpu.blob_receiver,
);
thread_hdls.extend(vec![t_broadcast]);
Server { thread_hdls }
}
/// Create a server instance acting as a validator.
///
/// ```text
/// .-------------------------------.
/// | Validator |
/// | |
/// .--------. | .-----. |
/// | |-------------->| | |
/// | Client | | | RPU | |
/// | |<--------------| | |
/// `--------` | `-----` |
/// | ^ |
/// | | |
/// | .--+---. |
/// | | Bank | |
/// | `------` |
/// | ^ |
/// .--------. | | | .------------.
/// | | | .--+--. | | |
/// | Leader |<------------->| TVU +<--------------->| |
/// | | | `-----` | | Validators |
/// | | | ^ | | |
/// | | | | | | |
/// | | | .--+--. | | |
/// | |<------------->| NCP +<--------------->| |
/// | | | `-----` | | |
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
pub fn new_validator(
bank: Bank,
me: ReplicatedData,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
replicate_socket: UdpSocket,
gossip_listen_socket: UdpSocket,
repair_socket: UdpSocket,
entry_point: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&entry_point);
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_listen_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
let tvu = Tvu::new(
bank.clone(),
crdt.clone(),
window.clone(),
replicate_socket,
repair_socket,
retransmit_socket,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls);
thread_hdls.extend(ncp.thread_hdls);
Server { thread_hdls }
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::TestNode;
use mint::Mint;
use server::Server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[test]
fn validator_exit() {
let tn = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let v = Server::new_validator(
bank,
tn.data.clone(),
tn.sockets.requests,
tn.sockets.respond,
tn.sockets.replicate,
tn.sockets.gossip,
tn.sockets.repair,
tn.data,
exit.clone(),
);
exit.store(true, Ordering::Relaxed);
for t in v.thread_hdls {
t.join().unwrap();
}
}
}

src/signature.rs

@@ -2,8 +2,13 @@
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use std::cell::RefCell;
use untrusted;
pub type KeyPair = Ed25519KeyPair;
@@ -19,8 +24,10 @@ impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
.expect("generate_pkcs8 in signature pb fn new");
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
.expect("from_pcks8 in signature pb fn new")
}
/// Return the public key for the given keypair
@@ -41,3 +48,90 @@ impl SignatureUtil for GenericArray<u8, U64> {
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}
pub struct GenKeys {
// This is necessary because the rng needs to mutate its state to remain
// deterministic, and the fill trait requires an immutable reference to self
generator: RefCell<ChaChaRng>,
}
impl GenKeys {
pub fn new(seed: [u8; 32]) -> GenKeys {
let rng = ChaChaRng::from_seed(seed);
GenKeys {
generator: RefCell::new(rng),
}
}
pub fn new_key(&self) -> Vec<u8> {
KeyPair::generate_pkcs8(self).unwrap().to_vec()
}
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
let mut rng = self.generator.borrow_mut();
(0..n).map(|_| rng.gen()).collect()
}
pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
self.gen_n_seeds(n)
.into_par_iter()
.map(|seed| {
let pkcs8 = GenKeys::new(seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
})
.collect()
}
}
impl SecureRandom for GenKeys {
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
let mut rng = self.generator.borrow_mut();
rng.fill(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
#[test]
fn test_new_key_is_deterministic() {
let seed = [0u8; 32];
let rng0 = GenKeys::new(seed);
let rng1 = GenKeys::new(seed);
for _ in 0..100 {
assert_eq!(rng0.new_key(), rng1.new_key());
}
}
fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
GenKeys::new(seed)
.gen_n_keypairs(n)
.into_iter()
.map(|x| x.pubkey())
.collect()
}
#[test]
fn test_gen_n_pubkeys_deterministic() {
let seed = [0u8; 32];
assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use super::*;
#[bench]
fn bench_gen_keys(b: &mut Bencher) {
let rnd = GenKeys::new([0u8; 32]);
b.iter(|| rnd.gen_n_keypairs(1000));
}
}
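A sketch of deterministic derivation with GenKeys, assuming signature::{GenKeys, KeyPairUtil} are in scope (the seed value is arbitrary): the same seed always yields the same keypairs, which is what the tests above rely on.

fn demo() {
    let seed = [42u8; 32];
    let first = GenKeys::new(seed).gen_n_keypairs(4);
    let second = GenKeys::new(seed).gen_n_keypairs(4);
    for (a, b) in first.iter().zip(second.iter()) {
        assert_eq!(a.pubkey(), b.pubkey());
    }
}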

src/sigverify.rs

@@ -1,8 +1,17 @@
//! The `sigverify` module provides digital signature verification functions.
//! By default, signatures are verified in parallel using all available CPU
//! cores. When `--features=cuda` is enabled, signature verification is
//! offloaded to the GPU.
//!
use counter::Counter;
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 4;
pub const TX_OFFSET: usize = 0;
#[cfg(feature = "cuda")]
#[repr(C)]
@@ -51,34 +60,53 @@ fn verify_packet(packet: &Packet) -> u8 {
).is_ok() as u8
}
fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.sum()
}
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;
batches
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CPU ECDSA for {}", batch_size(batches));
let rv = batches
.into_par_iter()
.map(|p| {
p.read()
.unwrap()
.expect("'p' read lock in ed25519_verify")
.packets
.par_iter()
.map(verify_packet)
.collect()
})
.collect()
.collect();
inc_counter!(COUNTER, count, start);
rv
}
#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CUDA ECDSA for {}", batch_size(batches));
let mut out = Vec::new();
let mut elems = Vec::new();
let mut locks = Vec::new();
let mut rvs = Vec::new();
for packets in batches {
locks.push(packets.read().unwrap());
locks.push(
packets
.read()
.expect("'packets' read lock in pub fn ed25519_verify"),
);
}
let mut num = 0;
for p in locks {
@@ -125,30 +153,39 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
num += 1;
}
}
inc_counter!(COUNTER, count, start);
rvs
}
#[cfg(test)]
mod tests {
use accountant_skel::Request;
use bincode::serialize;
use ecdsa;
use packet::{Packet, Packets, SharedPackets};
use sigverify;
use std::sync::RwLock;
use transaction::test_tx;
use transaction::Transaction;
use transaction::{memfind, test_tx};
fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Request::Transaction(tr)).unwrap();
#[test]
fn test_layout() {
let tx = test_tx();
let tx_bytes = serialize(&tx).unwrap();
let packet = serialize(&tx).unwrap();
assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
fn make_packet_from_transaction(tx: Transaction) -> Packet {
let tx_bytes = serialize(&tx).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
packet.meta.size = tx_bytes.len();
packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
return packet;
}
fn test_verify_n(n: usize, modify_data: bool) {
let tr = test_tx();
let mut packet = make_packet_from_transaction(tr);
let tx = test_tx();
let mut packet = make_packet_from_transaction(tx);
// jumble some data to test failure
if modify_data {
@@ -165,7 +202,7 @@ mod tests {
let batches = vec![shared_packets.clone(), shared_packets.clone()];
// verify packets
let ans = ecdsa::ed25519_verify(&batches);
let ans = sigverify::ed25519_verify(&batches);
// check result
let ref_ans = if modify_data { 0u8 } else { 1u8 };

src/sigverify_stage.rs (new file, 101 lines)

@@ -0,0 +1,101 @@
//! The `sigverify_stage` implements the signature verification stage of the TPU. It
//! receives a list of lists of packets and outputs the same list, but tags each
//! top-level list with a list of booleans, telling the next stage whether the
//! signature in that packet is valid. It assumes each packet contains one
//! transaction. All processing is done on the CPU by default and on a GPU
//! if the `cuda` feature is enabled with `--features=cuda`.
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;
pub struct SigVerifyStage {
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl SigVerifyStage {
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
let (verified_sender, verified_receiver) = channel();
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
SigVerifyStage {
thread_hdls,
verified_receiver,
}
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = sigverify::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}
fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> Result<()> {
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
let now = Instant::now();
let batch_len = batch.len();
let rand_id = thread_rng().gen_range(0, 100);
info!(
"@{:?} verifier: verifying: {} id: {}",
timing::timestamp(),
batch.len(),
rand_id
);
let verified_batch = Self::verify_batch(batch);
sendr
.lock()
.expect("lock in fn verify_batch in tpu")
.send(verified_batch)?;
let total_time_ms = timing::duration_as_ms(&now.elapsed());
let total_time_s = timing::duration_as_s(&now.elapsed());
info!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
batch_len,
total_time_ms,
rand_id,
len,
(len as f32 / total_time_s)
);
Ok(())
}
fn verifier_service(
exit: Arc<AtomicBool>,
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}
fn verifier_services(
exit: Arc<AtomicBool>,
packet_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packet_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.collect()
}
}

src/streamer.rs

@@ -1,19 +1,24 @@
//! The `streamer` module defines a set of services for effecently pulling data from udp sockets.
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
use result::{Error, Result};
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use subscribers::Subscribers;
pub const WINDOW_SIZE: usize = 2 * 1024;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
fn recv_loop(
sock: &UdpSocket,
@@ -25,7 +30,10 @@ fn recv_loop(
let msgs = re.allocate();
let msgs_ = msgs.clone();
loop {
match msgs.write().unwrap().recv_from(sock) {
match msgs.write()
.expect("write lock in fn recv_loop")
.recv_from(sock)
{
Ok(()) => {
channel.send(msgs_)?;
break;
@@ -45,14 +53,19 @@ pub fn receiver(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: PacketRecycler,
channel: PacketSender,
) -> Result<JoinHandle<()>> {
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
Ok(spawn(move || {
let _ = recv_loop(&sock, &exit, &recycler, &channel);
packet_sender: PacketSender,
) -> JoinHandle<()> {
let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
if res.is_err() {
panic!("streamer::receiver set_read_timeout error");
}
Builder::new()
.name("solana-receiver".to_string())
.spawn(move || {
let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
()
}))
})
.unwrap()
}
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
@@ -62,22 +75,45 @@ fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Res
Ok(())
}
pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut len = msgs.read().unwrap().packets.len();
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
len += more.read().unwrap().packets.len();
batch.push(more);
if len > 100_000 {
break;
}
}
debug!("batch len {}", batch.len());
Ok((batch, len))
}
pub fn responder(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
Builder::new()
.name("solana-responder".to_string())
.spawn(move || loop {
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
.unwrap()
}
//TODO: we will need block authentication in place before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
trace!("receiving on {}", sock.local_addr().unwrap());
let dq = Blob::recv_from(recycler, sock)?;
if !dq.is_empty() {
s.send(dq)?;
@@ -95,57 +131,130 @@ pub fn blob_receiver(
//1 second timeout on socket read
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
let t = spawn(move || loop {
let t = Builder::new()
.name("solana-blob_receiver".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let ret = recv_blobs(&recycler, &sock, &s);
if ret.is_err() {
break;
}
});
let _ = recv_blobs(&recycler, &sock, &s);
})
.unwrap();
Ok(t)
}
fn find_next_missing(
locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>,
consumed: &mut usize,
received: &mut usize,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
if *received <= *consumed {
return Err(Error::GenericError);
}
let window = locked_window.read().unwrap();
let reqs: Vec<_> = (*consumed..*received)
.filter_map(|pix| {
let i = pix % WINDOW_SIZE;
if let &None = &window[i] {
let val = crdt.read().unwrap().window_index_request(pix as u64);
if let Ok((to, req)) = val {
return Some((to, req));
}
}
None
})
.collect();
Ok(reqs)
}
fn repair_window(
locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>,
_recycler: &BlobRecycler,
last: &mut usize,
times: &mut usize,
consumed: &mut usize,
received: &mut usize,
) -> Result<()> {
#[cfg(feature = "erasure")]
{
if erasure::recover(
_recycler,
&mut locked_window.write().unwrap(),
*consumed,
*received,
).is_err()
{
trace!("erasure::recover failed");
}
}
//exponential backoff
if *last != *consumed {
*times = 0;
}
*last = *consumed;
*times += 1;
//retry only when `times` hits a power of two (7 -> 8, 15 -> 16, ...); otherwise return Ok
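// (*times & (*times - 1)) == 0 exactly when *times is a power of two,
// e.g. 8 (0b1000) & 7 (0b0111) == 0, so repair requests go out below,
// while 9 & 8 != 0, 10 & 9 != 0, ... skip this round early.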
if *times & (*times - 1) != 0 {
trace!("repair_window counter {} {}", *times, *consumed);
return Ok(());
}
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
let sock = UdpSocket::bind("0.0.0.0:0")?;
for (to, req) in reqs {
//todo cache socket
info!("repair_window request {} {} {}", *consumed, *received, to);
assert!(req.len() < BLOB_SIZE);
sock.send_to(&req, to)?;
}
Ok(())
}
fn recv_window(
window: &mut Vec<Option<SharedBlob>>,
subs: &Arc<RwLock<Subscribers>>,
locked_window: &Window,
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
consumed: &mut usize,
received: &mut usize,
r: &BlobReceiver,
s: &BlobSender,
retransmit: &BlobSender,
) -> Result<()> {
let timer = Duration::new(1, 0);
let timer = Duration::from_millis(200);
let mut dq = r.recv_timeout(timer)?;
let leader_id = crdt.read()
.expect("'crdt' read lock in fn recv_window")
.leader_data()
.id;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
let rsubs = subs.read().unwrap();
for b in &dq {
let p = b.read().unwrap();
let p = b.read().expect("'b' read lock in fn recv_window");
//TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
"idx: {} addr: {:?} leader: {:?}",
p.get_index().unwrap(),
"idx: {} addr: {:?} id: {:?} leader: {:?}",
p.get_index().expect("get_index in fn recv_window"),
p.get_id().expect("get_id in trace! fn recv_window"),
p.meta.addr(),
rsubs.leader.addr
leader_id
);
if p.meta.addr() == rsubs.leader.addr {
if p.get_id().expect("get_id in fn recv_window") == leader_id {
//TODO
//need to copy the retransmited blob
//need to copy the retransmitted blob
//otherwise we get into races with which thread
//should do the recycling
//
//a better absraction would be to recycle when the blob
//a better abstraction would be to recycle when the blob
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
let mut mnv = nv.write().unwrap();
let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
@@ -161,68 +270,283 @@ fn recv_window(
let mut contq = VecDeque::new();
while let Some(b) = dq.pop_front() {
let b_ = b.clone();
let p = b.write().unwrap();
let p = b.write().expect("'b' write lock in fn recv_window");
let pix = p.get_index()? as usize;
let w = pix % NUM_BLOBS;
if pix > *received {
*received = pix;
}
// Got a blob which has already been consumed, skip it
// probably from a repair window request
if pix < *consumed {
debug!(
"received: {} but older than consumed: {} skipping..",
pix, *consumed
);
continue;
}
let w = pix % WINDOW_SIZE;
//TODO, after the blocks are authenticated
//if we get different blocks at the same index
//that is a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size);
drop(p);
{
let mut window = locked_window.write().unwrap();
if window[w].is_none() {
window[w] = Some(b_);
} else if let Some(cblob) = &window[w] {
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
warn!("overrun blob at index {:}", w);
} else {
debug!("duplicate blob at index {:}", w);
}
}
loop {
let k = *consumed % NUM_BLOBS;
let k = *consumed % WINDOW_SIZE;
trace!("k: {} consumed: {}", k, *consumed);
if window[k].is_none() {
break;
}
contq.push_back(window[k].clone().unwrap());
window[k] = None;
let mut is_coding = false;
if let &Some(ref cblob) = &window[k] {
if cblob
.read()
.expect("blob read lock for flags streamer::window")
.is_coding()
{
is_coding = true;
}
}
if !is_coding {
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
*consumed += 1;
#[cfg(not(feature = "erasure"))]
{
window[k] = None;
}
} else {
#[cfg(feature = "erasure")]
{
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
let coding_end = block_start + erasure::NUM_CODED;
// We've received all this block's data blobs, go and null out the window now
for j in block_start..coding_end {
window[j % WINDOW_SIZE] = None;
}
*consumed += erasure::MAX_MISSING;
debug!(
"skipping processing coding blob k: {} consumed: {}",
k, *consumed
);
}
}
}
}
}
print_window(locked_window, *consumed);
trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() {
trace!("sending contq.len: {}", contq.len());
s.send(contq)?;
}
Ok(())
}
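For reference, the bookkeeping in the erasure branch above, reduced to a self-contained sketch. The constant values here are illustrative assumptions, not the real NUM_CODED and MAX_MISSING from the erasure module.

// Sketch of the erasure-block arithmetic in recv_window; constants are
// illustrative assumptions, not the real values from the erasure module.
const NUM_CODED: usize = 4; // blobs (data + coding) per erasure block
const MAX_MISSING: usize = 2; // coding blobs per erasure block
const WINDOW_SIZE: usize = 16;

fn main() {
    // Suppose consumption has reached the first coding blob of a block.
    let mut consumed = 6;
    let block_start = consumed - (consumed % NUM_CODED); // 4
    let coding_end = block_start + NUM_CODED; // 8
    // All of the block's data blobs arrived, so its window slots are cleared...
    let mut window: Vec<Option<u64>> = (0..WINDOW_SIZE).map(|i| Some(i as u64)).collect();
    for j in block_start..coding_end {
        window[j % WINDOW_SIZE] = None;
    }
    assert!(window[block_start % WINDOW_SIZE].is_none());
    // ...and consumption skips the coding blobs to resume at the next block.
    consumed += MAX_MISSING;
    assert_eq!(consumed, 8);
}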
fn print_window(locked_window: &Window, consumed: usize) {
{
let buf: Vec<_> = locked_window
.read()
.unwrap()
.iter()
.enumerate()
.map(|(i, v)| {
if i == (consumed % WINDOW_SIZE) {
"_"
} else if v.is_none() {
"0"
} else {
if let &Some(ref cblob) = &v {
if cblob.read().unwrap().is_coding() {
"C"
} else {
"1"
}
} else {
"0"
}
}
})
.collect();
debug!("WINDOW ({}): {}", consumed, buf.join(""));
}
}
pub fn default_window() -> Window {
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
}
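The window itself is just a fixed ring: slots are addressed by blob index modulo WINDOW_SIZE, out-of-order arrivals park in their slots, and `consumed` only advances across a contiguous run. A self-contained sketch (the WINDOW_SIZE value here is illustrative):

const WINDOW_SIZE: usize = 16; // illustrative, not the real constant

fn main() {
    let mut window: Vec<Option<u64>> = vec![None; WINDOW_SIZE];
    let mut consumed = 0usize;
    // Blobs 1, 0, 2 arrive out of order and land at index % WINDOW_SIZE.
    for pix in [1u64, 0, 2].iter().cloned() {
        window[pix as usize % WINDOW_SIZE] = Some(pix);
    }
    // Consumption drains the contiguous run starting at `consumed`.
    while let Some(pix) = window[consumed % WINDOW_SIZE].take() {
        assert_eq!(pix as usize, consumed);
        consumed += 1;
    }
    assert_eq!(consumed, 3);
}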
pub fn window(
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
window: Window,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
) -> JoinHandle<()> {
Builder::new()
.name("solana-window".to_string())
.spawn(move || {
let mut consumed = 0;
let mut received = 0;
let mut last = 0;
let mut times = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
&window,
&crdt,
&recycler,
&mut consumed,
&mut received,
&r,
&s,
&retransmit,
);
let _ = repair_window(
&window,
&crdt,
&recycler,
&mut last,
&mut times,
&mut consumed,
&mut received,
);
}
})
.unwrap()
}
fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
window: &Window,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
transmit_index: &mut u64,
receive_index: &mut u64,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
let mut blobs: Vec<_> = dq.into_iter().collect();
print_window(window, *receive_index as usize);
// Insert the coding blobs into the blob stream
#[cfg(feature = "erasure")]
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
let blobs_len = blobs.len();
info!("broadcast blobs.len: {}", blobs_len);
// Index the blobs
Crdt::index_blobs(crdt, &blobs, receive_index)?;
// keep the cache of blobs that are broadcast
{
let mut win = window.write().unwrap();
for b in &blobs {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
if let Some(x) = &win[pos] {
trace!(
"popped {} at {}",
x.read().unwrap().get_index().unwrap(),
pos
);
recycler.recycle(x.clone());
}
trace!("null {}", pos);
win[pos] = None;
assert!(win[pos].is_none());
}
while let Some(b) = blobs.pop() {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
trace!("caching {} at {}", ix, pos);
assert!(win[pos].is_none());
win[pos] = Some(b);
}
}
// Fill in the coding blob data from the window data blobs
#[cfg(feature = "erasure")]
{
if erasure::generate_coding(
&mut window.write().unwrap(),
*receive_index as usize,
blobs_len,
).is_err()
{
return Err(Error::GenericError);
}
}
*receive_index += blobs_len as u64;
// Send blobs out from the window
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
Ok(())
}
/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
window: Window,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
let mut transmit_index = 0;
let mut receive_index = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = broadcast(
&crdt,
&window,
&recycler,
&r,
&sock,
&mut transmit_index,
&mut receive_index,
);
}
})
.unwrap()
}
fn retransmit(
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
@@ -233,10 +557,8 @@ fn retransmit(
dq.append(&mut nq);
}
{
for b in &dq {
Crdt::retransmit(&crdt, b, sock)?;
}
}
while let Some(b) = dq.pop_front() {
@@ -246,34 +568,41 @@ fn retransmit(
}
/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
if exit.load(Ordering::Relaxed) {
break;
}
// TODO: handle this error
let _ = retransmit(&crdt, &recycler, &r, &sock);
}
trace!("exiting retransmitter");
})
.unwrap()
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use result::Result;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
@@ -305,6 +634,7 @@ mod bench {
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
@@ -333,14 +663,17 @@ mod bench {
}
})
}
fn bench_streamer_with_result() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
read.set_read_timeout(Some(Duration::new(1, 0)))?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
@@ -356,7 +689,7 @@ mod bench {
let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
let ftime = (time as f64) / 10000000000f64;
let fcount = (end_val - start_val) as f64;
println!("performance: {:?}", fcount / ftime);
trace!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
@@ -366,13 +699,14 @@ mod bench {
Ok(())
}
#[bench]
pub fn bench_streamer(_bench: &mut Bencher) {
bench_streamer_with_result().unwrap();
}
}
#[cfg(test)]
mod test {
use crdt::{Crdt, TestNode};
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use std::collections::VecDeque;
use std::io;
@@ -382,16 +716,15 @@ mod test {
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer::{blob_receiver, receiver, responder, window};
use streamer::{default_window, BlobReceiver, PacketReceiver};
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
e => info!("error {:?}", e),
}
if *num == 10 {
break;
@@ -407,13 +740,15 @@ mod test {
#[test]
pub fn streamer_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
@@ -445,7 +780,7 @@ mod test {
}
*num += m.len();
}
e => info!("error {:?}", e),
}
if *num == 10 {
break;
@@ -455,31 +790,40 @@ mod test {
#[test]
pub fn window_send_test() {
let tn = TestNode::new();
let exit = Arc::new(AtomicBool::new(false));
let mut crdt_me = Crdt::new(tn.data.clone());
let me_id = crdt_me.my_data().id;
crdt_me.set_leader(me_id);
let subs = Arc::new(RwLock::new(crdt_me));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = blob_receiver(
exit.clone(),
resp_recycler.clone(),
tn.sockets.gossip,
s_reader,
).unwrap();
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let win = default_window();
let t_window = window(
exit.clone(),
subs,
win,
resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
);
let (s_responder, r_responder) = channel();
let t_responder = responder(
tn.sockets.replicate,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let mut msgs = VecDeque::new();
for v in 0..10 {
let i = 9 - v;
@@ -487,9 +831,10 @@ mod test {
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&tn.data.gossip_addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
@@ -506,44 +851,4 @@ mod test {
t_responder.join().expect("join");
t_window.join().expect("join");
}
#[test]
pub fn retransmit() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::default(),
&[Node::new([0; 8], 1, read.local_addr().unwrap())],
)));
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
let saddr = send.local_addr().unwrap();
let t_retransmit = retransmitter(
send,
exit.clone(),
subs,
blob_recycler.clone(),
r_retransmit,
);
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver =
blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
let mut oq = r_blob_receiver.recv().unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_retransmit.join().expect("join");
}
}

src/subscribers.rs (deleted file)

@@ -1,149 +0,0 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};
use std::fmt;
#[derive(Clone, PartialEq)]
pub struct Node {
pub id: [u64; 8],
pub weight: u64,
pub addr: SocketAddr,
}
//sockaddr doesn't implement default
impl Default for Node {
fn default() -> Node {
Node {
id: [0; 8],
weight: 0,
addr: "0.0.0.0:0".parse().unwrap(),
}
}
}
impl Node {
pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
Node { id, weight, addr }
}
fn key(&self) -> i64 {
(self.weight as i64).checked_neg().unwrap()
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
}
}
pub struct Subscribers {
data: Vec<Node>,
pub me: Node,
pub leader: Node,
}
impl Subscribers {
pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
let mut h = Subscribers {
data: vec![],
me: me.clone(),
leader: leader.clone(),
};
h.insert(&[me, leader]);
h.insert(network);
h
}
/// retransmit messages from the leader to layer 1 nodes
pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
let errs: Vec<_> = self.data
.par_iter()
.map(|i| {
if self.me == *i {
return Ok(0);
}
if self.leader == *i {
return Ok(0);
}
trace!("retransmit blob to {}", i.addr);
s.send_to(&blob.data[..blob.meta.size], &i.addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
pub fn insert(&mut self, ns: &[Node]) {
self.data.extend_from_slice(ns);
self.data.sort_by_key(Node::key);
}
}
#[cfg(test)]
mod test {
use packet::Blob;
use rayon::prelude::*;
use std::net::UdpSocket;
use std::time::Duration;
use subscribers::{Node, Subscribers};
#[test]
pub fn subscriber() {
let mut me = Node::default();
me.weight = 10;
let mut leader = Node::default();
leader.weight = 11;
let mut s = Subscribers::new(me, leader, &[]);
assert_eq!(s.data.len(), 2);
assert_eq!(s.data[0].weight, 11);
assert_eq!(s.data[1].weight, 10);
let mut n = Node::default();
n.weight = 12;
s.insert(&[n]);
assert_eq!(s.data.len(), 3);
assert_eq!(s.data[0].weight, 12);
}
#[test]
pub fn retransmit() {
let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
s.insert(&[n3]);
let mut b = Blob::default();
b.meta.size = 10;
let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
s.retransmit(&mut b, &s4).unwrap();
let res: Vec<_> = [s1, s2, s3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
s.recv_from(&mut b.data).is_err()
})
.collect();
assert_eq!(res, [true, true, false]);
let mut n4 = Node::default();
n4.addr = "255.255.255.255:1".parse().unwrap();
s.insert(&[n4]);
assert!(s.retransmit(&mut b, &s4).is_err());
}
}

src/thin_client.rs (new file)

@@ -0,0 +1,297 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages are
//! unstable and may change in future releases.
use bincode::{deserialize, serialize};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
requests_addr: SocketAddr,
requests_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
last_id: Option<Hash>,
transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl ThinClient {
/// Create a new ThinClient that will interface with Rpu
/// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind `requests_socket`
/// to a public address before invoking ThinClient methods.
pub fn new(
requests_addr: SocketAddr,
requests_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
) -> Self {
let client = ThinClient {
requests_addr,
requests_socket,
transactions_addr,
transactions_socket,
last_id: None,
transaction_count: 0,
balances: HashMap::new(),
};
client
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
trace!("start recv_from");
self.requests_socket.recv_from(&mut buf)?;
trace!("end recv_from");
let resp = deserialize(&buf).expect("deserialize balance in thin_client");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
trace!("Response balance {:?} {:?}", key, val);
self.balances.insert(key, val);
}
Response::LastId { id } => {
info!("Response last_id {:?}", id);
self.last_id = Some(id);
}
Response::TransactionCount { transaction_count } => {
info!("Response transaction count {:?}", transaction_count);
self.transaction_count = transaction_count;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&data, &self.transactions_addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tx = Transaction::new(keypair, to, n, *last_id);
let sig = tx.sig;
self.transfer_signed(tx).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
trace!("get_balance");
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_balance");
let mut done = false;
while !done {
let resp = self.recv_response()?;
trace!("recv_response {:?}", resp);
if let Response::Balance { key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will hang.
pub fn transaction_count(&mut self) -> u64 {
info!("transaction_count");
let req = Request::GetTransactionCount;
let data =
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
let mut done = false;
while !done {
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn transaction_count");
if let Ok(resp) = self.recv_response() {
info!("recv_response {:?}", resp);
if let &Response::TransactionCount { .. } = &resp {
done = true;
}
self.process_response(resp);
}
}
self.transaction_count
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response.
pub fn get_last_id(&mut self) -> Hash {
info!("get_last_id");
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
let mut done = false;
while !done {
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
if let Ok(resp) = self.recv_response() {
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
}
self.last_id.expect("some last_id")
}
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
balance
}
}
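A hedged usage sketch of the client above. The leader addresses are parameters here because they depend on a running leader, and the commented lines show the intended call sequence with a funded keypair.

// Usage sketch, assuming a leader is reachable at the given addresses.
use std::net::{SocketAddr, UdpSocket};

#[allow(unused)]
fn demo(requests_addr: SocketAddr, transactions_addr: SocketAddr) {
    // Any local port will do; ThinClient takes ownership of the sockets.
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut client = ThinClient::new(
        requests_addr,
        requests_socket,
        transactions_addr,
        transactions_socket,
    );
    // With a funded `alice: KeyPair` and a recipient `bob: PublicKey`:
    // let last_id = client.get_last_id();
    // let _sig = client.transfer(500, &alice, bob, &last_id).unwrap();
    // let balance = client.poll_get_balance(&bob);
}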
#[cfg(test)]
mod tests {
use super::*;
use bank::Bank;
use budget::Budget;
use crdt::TestNode;
use logger;
use mint::Mint;
use server::Server;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use transaction::{Instruction, Plan};
#[test]
fn test_thin_client() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(900));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id();
let _sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_bad_sig() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(300));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id();
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = client.transfer_signed(tx).unwrap();
let last_id = client.get_last_id();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
}
let _sig = client.transfer_signed(tr2).unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
}

src/timing.rs (new file)

@@ -0,0 +1,22 @@
//! The `timing` module provides std::time utility functions.
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_us(d: &Duration) -> u64 {
return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
}
pub fn duration_as_ms(d: &Duration) -> u64 {
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
}
pub fn duration_as_s(d: &Duration) -> f32 {
return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0);
}
pub fn timestamp() -> u64 {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("create timestamp in timing");
return duration_as_ms(&now);
}
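A quick worked check of the conversions above (an illustrative test, not part of the original file): 2.5 seconds is 2,500 ms, 2,500,000 us, and 2.5 s.

#[test]
fn test_duration_conversions() {
    let d = Duration::new(2, 500_000_000); // 2.5 seconds
    assert_eq!(duration_as_ms(&d), 2_500);
    assert_eq!(duration_as_us(&d), 2_500_000);
    assert_eq!(duration_as_s(&d), 2.5);
}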

src/tpu.rs (new file)

@@ -0,0 +1,99 @@
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.
//!
//! ```text
//! .---------------------------------------------------------------.
//! | TPU .-----. |
//! | | PoH | |
//! | `--+--` |
//! | | |
//! | v |
//! | .-------. .-----------. .---------. .--------. .-------. |
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
//! `---------` | | | | | | | | | | | | `------------`
//! | `-------` `-----------` `----+----` `--------` `---+---` |
//! | | | |
//! | | | |
//! | | | |
//! | | | |
//! `---------------------------------|-----------------------|-----`
//! | |
//! v v
//! .------. .--------.
//! | Bank | | Ledger |
//! `------` `--------`
//! ```
use bank::Bank;
use banking_stage::BankingStage;
use fetch_stage::FetchStage;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer::BlobReceiver;
use write_stage::WriteStage;
pub struct Tpu {
pub blob_receiver: BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tpu {
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
tick_duration: Option<Duration>,
transactions_socket: UdpSocket,
blob_recycler: BlobRecycler,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let packet_recycler = PacketRecycler::default();
let fetch_stage =
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);
let banking_stage = BankingStage::new(
bank.clone(),
exit.clone(),
sigverify_stage.verified_receiver,
packet_recycler.clone(),
);
let record_stage = match tick_duration {
Some(tick_duration) => RecordStage::new_with_clock(
banking_stage.signal_receiver,
&bank.last_id(),
tick_duration,
),
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
};
let write_stage = WriteStage::new(
bank.clone(),
exit.clone(),
blob_recycler.clone(),
Mutex::new(writer),
record_stage.entry_receiver,
);
let mut thread_hdls = vec![
banking_stage.thread_hdl,
record_stage.thread_hdl,
write_stage.thread_hdl,
];
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
Tpu {
blob_receiver: write_stage.blob_receiver,
thread_hdls,
}
}
}
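The stages above communicate only through mpsc channels: each stage owns a thread, drains its input Receiver, and feeds a Sender wired into the next stage. A minimal self-contained sketch of that chaining pattern, with plain integers standing in for packets and entries:

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};

// One pipeline stage: consume from `input`, forward to `output`.
fn stage(name: &'static str, input: Receiver<u64>, output: Sender<u64>) -> JoinHandle<()> {
    spawn(move || {
        for item in input {
            // a real stage would verify/bank/record here
            println!("{} processed {}", name, item);
            if output.send(item).is_err() {
                break; // downstream hung up
            }
        }
    })
}

fn main() {
    let (s0, r0) = channel();
    let (s1, r1) = channel();
    let (s2, r2) = channel();
    let h1 = stage("sigverify", r0, s1);
    let h2 = stage("banking", r1, s2);
    for tx in 0..3u64 {
        s0.send(tx).unwrap();
    }
    drop(s0); // closing the head drains and shuts down the whole chain
    for item in r2 {
        println!("output {}", item);
    }
    h1.join().unwrap();
    h2.join().unwrap();
}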

src/transaction.rs

@@ -1,46 +1,138 @@
//! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use budget::{Budget, Condition};
use chrono::prelude::*;
use hash::Hash;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;
/// The type of payment plan. Each item must implement the PaymentPlan trait.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
/// The builtin contract language Budget.
Budget(Budget),
}
// A proxy for the underlying DSL.
impl PaymentPlan for Plan {
fn final_payment(&self) -> Option<Payment> {
match self {
Plan::Budget(budget) => budget.final_payment(),
}
}
fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Plan::Budget(budget) => budget.verify(spendable_tokens),
}
}
fn apply_witness(&mut self, witness: &Witness) {
match self {
Plan::Budget(budget) => budget.apply_witness(witness),
}
}
}
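Plan is a thin dispatcher over the PaymentPlan trait, which keeps adding a second contract language mechanical: one new variant, one new arm per method. A self-contained toy of the pattern; `Script` is hypothetical and these types are simplified stand-ins for the real Budget and Payment.

// Toy dispatcher; `Script` and these simplified types are hypothetical.
trait PaymentPlan {
    fn verify(&self, spendable_tokens: i64) -> bool;
}

struct Budget { tokens: i64 }
impl PaymentPlan for Budget {
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.tokens == spendable_tokens // exact spend
    }
}

struct Script { cost: i64 } // hypothetical second DSL
impl PaymentPlan for Script {
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.cost <= spendable_tokens
    }
}

enum Plan {
    Budget(Budget),
    Script(Script),
}

// The proxy adds exactly one match arm per method for the new variant.
impl PaymentPlan for Plan {
    fn verify(&self, spendable_tokens: i64) -> bool {
        match self {
            Plan::Budget(b) => b.verify(spendable_tokens),
            Plan::Script(s) => s.verify(spendable_tokens),
        }
    }
}

fn main() {
    assert!(Plan::Budget(Budget { tokens: 5 }).verify(5));
    assert!(Plan::Script(Script { cost: 3 }).verify(5));
}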
/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
/// The number of tokens allocated to the `Plan` and any transaction fees.
pub tokens: i64,
pub plan: Plan,
}
/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
/// Declare and instantiate `Contract`.
NewContract(Contract),
/// Tell a payment plan to acknowledge that the given `DateTime` has passed.
ApplyTimestamp(DateTime<Utc>),
/// Tell the payment plan that the `NewContract` with `Signature` has been
/// signed by the containing transaction's `PublicKey`.
ApplySignature(Signature),
}
/// An instruction signed by a client with `PublicKey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
pub sig: Signature,
/// The `PublicKey` of the entity that signed the transaction data.
pub from: PublicKey,
/// The action the server should take.
pub instruction: Instruction,
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: i64,
}
impl Transaction {
/// Create a signed transaction from the given `Instruction`.
fn new_from_instruction(
from_keypair: &KeyPair,
instruction: Instruction,
last_id: Hash,
fee: i64,
) -> Self {
let from = from_keypair.pubkey();
let mut tx = Transaction {
sig: Signature::default(),
instruction,
last_id,
from,
fee,
};
tx.sign(from_keypair);
tx
}
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new_taxed(
from_keypair: &KeyPair,
to: PublicKey,
tokens: i64,
fee: i64,
last_id: Hash,
) -> Self {
let payment = Payment {
tokens: tokens - fee,
to,
};
let budget = Budget::Pay(payment);
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id, fee)
}
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
Self::new_taxed(from_keypair, to, tokens, 0, last_id)
}
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
let instruction = Instruction::ApplyTimestamp(dt);
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
let instruction = Instruction::ApplySignature(tx_sig);
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Create and sign a postdated Transaction. Used for unit-testing.
@@ -52,25 +144,25 @@ impl Transaction {
last_id: Hash,
) -> Self {
let from = from_keypair.pubkey();
let budget = Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
);
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Get the transaction data to sign.
fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
data.extend_from_slice(&last_id_data);
let fee_data = serialize(&(&self.fee)).expect("serialize last_id");
data.extend_from_slice(&fee_data);
data
}
/// Sign this transaction.
@@ -79,12 +171,21 @@ impl Transaction {
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
}
/// Verify only the transaction signature.
pub fn verify_sig(&self) -> bool {
warn!("transaction signature verification called");
self.sig.verify(&self.from, &self.get_sign_data())
}
/// Verify only the payment plan.
pub fn verify_plan(&self) -> bool {
if let Instruction::NewContract(contract) = &self.instruction {
self.fee >= 0
&& self.fee <= contract.tokens
&& contract.plan.verify(contract.tokens - self.fee)
} else {
true
}
}
}
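The fee rule in verify_plan, worked with toy numbers. This assumes, as the overspend tests below suggest, that Budget::Pay verifies the plan spends exactly the spendable amount (tokens minus fee).

// Worked instances of: fee >= 0 && fee <= tokens && plan.verify(tokens - fee)
fn fee_rule(tokens: i64, fee: i64, plan_spends: i64) -> bool {
    fee >= 0 && fee <= tokens && plan_spends == tokens - fee
}

fn main() {
    assert!(fee_rule(1, 1, 0)); // like new_taxed(.., tokens: 1, fee: 1, ..)
    assert!(!fee_rule(1, 2, -1)); // fee exceeds tokens
    assert!(!fee_rule(1, -1, 2)); // negative fee
}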
@@ -108,21 +209,6 @@ pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
None
}
/// Verify a batch of signatures.
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_sig())
}
/// Verify a batch of spending plans.
pub fn verify_plans(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_plan())
}
/// Verify a batch of transactions.
pub fn verify_transactions(transactions: &[Transaction]) -> bool {
verify_signatures(transactions) && verify_plans(transactions)
}
#[cfg(test)]
mod tests {
use super::*;
@@ -132,8 +218,8 @@ mod tests {
fn test_claim() {
let keypair = KeyPair::new();
let zero = Hash::default();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
assert!(tx0.verify_plan());
}
#[test]
@@ -142,24 +228,34 @@ mod tests {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
assert!(tx0.verify_plan());
}
#[test]
fn test_transfer_with_fee() {
let zero = Hash::default();
let keypair0 = KeyPair::new();
let pubkey1 = KeyPair::new().pubkey();
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
}
#[test]
fn test_serialize_claim() {
let budget = Budget::Pay(Payment {
tokens: 0,
to: Default::default(),
});
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
let claim0 = Transaction {
instruction,
from: Default::default(),
last_id: Default::default(),
sig: Default::default(),
fee: 0,
};
let buf = serialize(&claim0).unwrap();
let claim1: Transaction = deserialize(&buf).unwrap();
@@ -171,13 +267,15 @@ mod tests {
let zero = Hash::default();
let keypair = KeyPair::new();
let pubkey = keypair.pubkey();
let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
contract.tokens = 1_000_000; // <-- attack, part 1!
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = contract.tokens; // <-- attack, part 2!
}
}
assert!(tx.verify_plan());
assert!(!tx.verify_sig());
}
#[test]
@@ -187,21 +285,23 @@ mod tests {
let thief_keypair = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.to = thief_keypair.pubkey(); // <-- attack!
}
}
assert!(tx.verify_plan());
assert!(!tx.verify_sig());
}
#[test]
fn test_layout() {
let tx = test_tx();
let sign_data = tx.get_sign_data();
let tx_bytes = serialize(&tx).unwrap();
assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
}
#[test]
@@ -209,51 +309,20 @@ mod tests {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let zero = Hash::default();
let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 2; // <-- attack!
}
}
assert!(!tx.verify_plan());
// Also, ensure all branches of the plan spend all tokens
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 0; // <-- whoops!
}
}
assert!(!tx.verify_plan());
}
}

src/tvu.rs (new file)

@@ -0,0 +1,279 @@
//! The `tvu` module implements the Transaction Validation Unit, a
//! 3-stage transaction validation pipeline in software.
//!
//! ```text
//! .------------------------------------------.
//! | TVU |
//! | |
//! | | .------------.
//! | .------------------------>| Validators |
//! | .-------. | | `------------`
//! .--------. | | | .----+---. .-----------. |
//! | Leader |--------->| Blob | | Window | | Replicate | |
//! `--------` | | Fetch |-->| Stage |-->| Stage | |
//! .------------. | | Stage | | | | | |
//! | Validators |----->| | `--------` `----+------` |
//! `------------` | `-------` | |
//! | | |
//! | | |
//! | | |
//! `--------------------------------|---------`
//! |
//! v
//! .------.
//! | Bank |
//! `------`
//! ```
//!
//! 1. Fetch Stage
//! - Incoming blobs are picked up from the replicate socket and repair socket.
//! 2. Window Stage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. Replicate Stage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
use bank::Bank;
use blob_fetch_stage::BlobFetchStage;
use crdt::Crdt;
use packet;
use replicate_stage::ReplicateStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
use window_stage::WindowStage;
pub struct Tvu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `bank` - The bank state.
/// * `crdt` - The crdt state.
/// * `window` - The window state.
/// * `replicate_socket` - my replicate socket
/// * `repair_socket` - my repair socket
/// * `retransmit_socket` - my retransmit socket
/// * `exit` - The exit signal.
pub fn new(
bank: Arc<Bank>,
crdt: Arc<RwLock<Crdt>>,
window: streamer::Window,
replicate_socket: UdpSocket,
repair_socket: UdpSocket,
retransmit_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let blob_recycler = packet::BlobRecycler::default();
let fetch_stage = BlobFetchStage::new_multi_socket(
vec![replicate_socket, repair_socket],
exit.clone(),
blob_recycler.clone(),
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let window_stage = WindowStage::new(
crdt,
window,
retransmit_socket,
exit.clone(),
blob_recycler.clone(),
fetch_stage.blob_receiver,
);
let replicate_stage =
ReplicateStage::new(bank, exit, window_stage.blob_receiver, blob_recycler);
let mut threads = vec![replicate_stage.thread_hdl];
threads.extend(fetch_stage.thread_hdls.into_iter());
threads.extend(window_stage.thread_hdls.into_iter());
Tvu {
thread_hdls: threads,
}
}
}
#[cfg(test)]
pub mod tests {
use bank::Bank;
use bincode::serialize;
use crdt::{Crdt, TestNode};
use entry::Entry;
use hash::{hash, Hash};
use logger;
use mint::Mint;
use ncp::Ncp;
use packet::BlobRecycler;
use result::Result;
use signature::{KeyPair, KeyPairUtil};
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use tvu::Tvu;
fn new_ncp(
crdt: Arc<RwLock<Crdt>>,
listen: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<(Ncp, streamer::Window)> {
let window = streamer::default_window();
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
Ok((ncp, window))
}
/// Test that a message sent from the leader to target1 is replicated to target2
#[test]
fn test_replicate() {
logger::setup();
let leader = TestNode::new();
let target1 = TestNode::new();
let target2 = TestNode::new();
let exit = Arc::new(AtomicBool::new(false));
//start crdt_leader
let mut crdt_l = Crdt::new(leader.data.clone());
crdt_l.set_leader(leader.data.id);
let cref_l = Arc::new(RwLock::new(crdt_l));
let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
//start crdt2
let mut crdt2 = Crdt::new(target2.data.clone());
crdt2.insert(&leader.data);
crdt2.set_leader(leader.data.id);
let leader_id = leader.data.id;
let cref2 = Arc::new(RwLock::new(crdt2));
let dr_2 = new_ncp(cref2, target2.sockets.gossip, exit.clone()).unwrap();
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target2.sockets.replicate,
s_reader,
).unwrap();
// simulate leader sending messages
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
leader.sockets.requests,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let mint = Mint::new(starting_balance);
let replicate_addr = target1.data.replicate_addr;
let bank = Arc::new(Bank::new(&mint));
//start crdt1
let mut crdt1 = Crdt::new(target1.data.clone());
crdt1.insert(&leader.data);
crdt1.set_leader(leader.data.id);
let cref1 = Arc::new(RwLock::new(crdt1));
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();
let tvu = Tvu::new(
bank.clone(),
cref1,
dr_1.1,
target1.sockets.replicate,
target1.sockets.repair,
target1.sockets.retransmit,
exit.clone(),
);
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let mut blob_id = 0;
let num_transfers = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_transfers {
let entry0 = Entry::new(&cur_hash, i, vec![]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tx0 = Transaction::new(
&mint.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
for entry in vec![entry0, entry1] {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(blob_id).unwrap();
blob_id += 1;
w.set_id(leader_id).unwrap();
let serialized_entry = serialize(&entry).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&replicate_addr);
drop(w);
msgs.push_back(b_);
}
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
}
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
for t in tvu.thread_hdls {
t.join().expect("join");
}
for t in dr_l.0.thread_hdls {
t.join().expect("join");
}
for t in dr_2.0.thread_hdls {
t.join().expect("join");
}
for t in dr_1.0.thread_hdls {
t.join().expect("join");
}
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}

src/window_stage.rs (new file)

@@ -0,0 +1,52 @@
//! The `window_stage` maintains the blob window
use crdt::Crdt;
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
pub struct WindowStage {
pub blob_receiver: streamer::BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl WindowStage {
pub fn new(
crdt: Arc<RwLock<Crdt>>,
window: streamer::Window,
retransmit_socket: UdpSocket,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
fetch_stage_receiver: streamer::BlobReceiver,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = streamer::retransmitter(
retransmit_socket,
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
let (blob_sender, blob_receiver) = channel();
let t_window = streamer::window(
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
fetch_stage_receiver,
blob_sender,
retransmit_sender,
);
let thread_hdls = vec![t_retransmit, t_window];
WindowStage {
blob_receiver,
thread_hdls,
}
}
}

src/write_stage.rs (new file)

@@ -0,0 +1,79 @@
//! The `write_stage` module implements the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use packet;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread::{Builder, JoinHandle};
use streamer;
pub struct WriteStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
}
impl WriteStage {
/// Create a new Rpu that wraps the given Bank.
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
writer: Mutex<W>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-writer".to_string())
.spawn(move || loop {
let entry_writer = EntryWriter::new(&bank);
let _ = entry_writer.write_and_send_entries(
&blob_sender,
&blob_recycler,
&writer,
&entry_receiver,
);
if exit.load(Ordering::Relaxed) {
info!("broadcat_service exiting");
break;
}
})
.unwrap();
WriteStage {
thread_hdl,
blob_receiver,
}
}
pub fn new_drain(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (_blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-drain".to_string())
.spawn(move || {
let entry_writer = EntryWriter::new(&bank);
loop {
let _ = entry_writer.drain_entries(&entry_receiver);
if exit.load(Ordering::Relaxed) {
info!("drain_service exiting");
break;
}
}
})
.unwrap();
WriteStage {
thread_hdl,
blob_receiver,
}
}
}

tests/data_replicator.rs (new file)

@@ -0,0 +1,185 @@
#[macro_use]
extern crate log;
extern crate rayon;
extern crate solana;
use rayon::iter::*;
use solana::crdt::{Crdt, TestNode};
use solana::logger;
use solana::ncp::Ncp;
use solana::packet::Blob;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
let tn = TestNode::new();
let crdt = Crdt::new(tn.data.clone());
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
let d = Ncp::new(
c.clone(),
w,
tn.sockets.gossip,
tn.sockets.gossip_send,
exit,
).unwrap();
(c, d, tn.sockets.replicate)
}
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(topo: F)
where
F: Fn(&Vec<(Arc<RwLock<Crdt>>, Ncp, UdpSocket)>) -> (),
{
let num: usize = 5;
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = false;
trace!("round {}", i);
for (c, _, _) in &listen {
if num == c.read().unwrap().convergence() as usize {
done = true;
break;
}
}
//at least 1 node converged
if done {
break;
}
sleep(Duration::new(1, 0));
}
exit.store(true, Ordering::Relaxed);
for (c, dr, _) in listen.into_iter() {
for j in dr.thread_hdls.into_iter() {
j.join().unwrap();
}
// make it clear what failed
// protocol is too chatty, updates should stop after everyone receives `num`
assert!(c.read().unwrap().update_index <= num as u64);
// protocol is not chatty enough, everyone should get `num` entries
assert_eq!(c.read().unwrap().table.len(), num);
}
assert!(done);
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut yd = yv.table[&yv.me].clone();
yd.version = 0;
xv.insert(&yd);
trace!("star leader {:?}", &xv.me[..4]);
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
let xd = {
let xv = listen[0].0.read().unwrap();
xv.table[&xv.me].clone()
};
trace!("rstar leader {:?}", &xd.id[..4]);
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let mut yv = listen[y].0.write().unwrap();
yv.insert(&xd);
trace!("rstar insert {:?} into {:?}", &xd.id[..4], &yv.me[..4]);
}
});
}
#[test]
pub fn crdt_retransmit() {
logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_data = c1.read().unwrap().my_data().clone();
c1.write().unwrap().set_leader(c1_data.id);
c2.write().unwrap().insert(&c1_data);
c3.write().unwrap().insert(&c1_data);
c2.write().unwrap().set_leader(c1_data.id);
c3.write().unwrap().set_leader(c1_data.id);
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.read().unwrap().table.len() == 3
&& c2.read().unwrap().table.len() == 3
&& c3.read().unwrap().table.len() == 3;
if done {
break;
}
sleep(Duration::new(1, 0));
}
assert!(done);
let mut b = Blob::default();
b.meta.size = 10;
Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1).unwrap();
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if failed receive the retransmit packet, r2, and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
let mut threads = vec![];
threads.extend(dr1.thread_hdls.into_iter());
threads.extend(dr2.thread_hdls.into_iter());
threads.extend(dr3.thread_hdls.into_iter());
for t in threads.into_iter() {
t.join().unwrap();
}
}

tests/multinode.rs (new file)

@@ -0,0 +1,175 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate solana;
use solana::bank::Bank;
use solana::crdt::TestNode;
use solana::crdt::{Crdt, ReplicatedData};
use solana::logger;
use solana::mint::Mint;
use solana::ncp::Ncp;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use std::io;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;
fn validator(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
alice: &Mint,
threads: &mut Vec<JoinHandle<()>>,
) {
let validator = TestNode::new();
let replicant_bank = Bank::new(&alice);
let mut ts = Server::new_validator(
replicant_bank,
validator.data.clone(),
validator.sockets.requests,
validator.sockets.respond,
validator.sockets.replicate,
validator.sockets.gossip,
validator.sockets.repair,
leader.clone(),
exit.clone(),
);
threads.append(&mut ts.thread_hdls);
}
fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//let's spy on the network
let mut spy = TestNode::new();
let daddr = "0.0.0.0:0".parse().unwrap();
let me = spy.data.id.clone();
spy.data.replicate_addr = daddr;
spy.data.requests_addr = daddr;
let mut spy_crdt = Crdt::new(spy.data);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let dr = Ncp::new(
spy_ref.clone(),
spy_window,
spy.sockets.gossip,
spy.sockets.gossip_send,
exit,
).unwrap();
//wait for the network to converge
let mut converged = false;
for _ in 0..30 {
let num = spy_ref.read().unwrap().convergence();
if num == num_nodes as u64 {
converged = true;
break;
}
sleep(Duration::new(1, 0));
}
assert!(converged);
threads.extend(dr.thread_hdls.into_iter());
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.id != me)
.map(|x| x.clone())
.collect();
v.clone()
}
#[test]
fn test_multi_node() {
logger::setup();
const N: usize = 5;
trace!("test_multi_accountant_stub");
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_bank = Bank::new(&alice);
let server = Server::new_leader(
leader_bank,
None,
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
let mut threads = server.thread_hdls;
for _ in 0..N {
validator(&leader.data, exit.clone(), &alice, &mut threads);
}
let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
//contains the leader addr as well
assert_eq!(servers.len(), N + 1);
//verify leader can do transfer
let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
assert_eq!(leader_balance, 500);
//verify validator has the same balance
let mut success = 0usize;
for server in servers.iter() {
let mut client = mk_client(server);
if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
trace!("validator balance {}", bal);
if bal == leader_balance {
success += 1;
}
}
}
assert_eq!(success, servers.len());
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}
fn mk_client(leader: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
ThinClient::new(
leader.requests_addr,
requests_socket,
leader.transactions_addr,
transactions_socket,
)
}
fn tx_and_retry_get_balance(
leader: &ReplicatedData,
alice: &Mint,
bob_pubkey: &PublicKey,
) -> io::Result<i64> {
let mut client = mk_client(leader);
trace!("getting leader last_id");
let last_id = client.get_last_id();
info!("executing leader transer");
let _sig = client
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
.unwrap();
client.poll_get_balance(bob_pubkey)
}