Compare commits

...

342 Commits

Author SHA1 Message Date
Greg Fitzgerald
b43ae748c3 Update publish.sh 2018-05-25 16:08:14 -06:00
Greg Fitzgerald
02ddd89653 Version bump
And solana.io -> solana.com
2018-05-25 15:37:07 -06:00
Anatoly Yakovenko
bbe6eccefe log 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
6677a7b66a verify plan not sig 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
75c37fcc73 names 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
5be71a8a9d logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
b9ae7d1ebb logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
8b02e0f57c logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
342cc7350a poll both endpoints in client 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
2335a51ced logs 2018-05-25 07:02:39 -06:00
Anatoly Yakovenko
868df1824c fmt 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
83c11f0f9d logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
1022f1b0c6 logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
c2c80232e3 logs 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
115f4e54b8 update 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
669b1694b8 exponential backoff for retransmit 2018-05-24 17:40:33 -06:00
Anatoly Yakovenko
2128c58fbe logs and tps counting 2018-05-24 10:35:23 -06:00
Greg Fitzgerald
e12e154877 Boot Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
73d3c17507 Migrate from Event to Transaction Timestamp/Signature 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
7f647a93da Add last_id to Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
ecb3dbbb60 Add witness tx constructors 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
cc907ba69d Add Instruction type 2018-05-24 10:10:41 -06:00
Greg Fitzgerald
5a45eef1dc Exit cleanup (#252)
* Ignore record_stage exit reason. We only really care about panic exit versus graceful exit.
* Ignore coverage build in CI
2018-05-24 10:03:17 -06:00
Greg Fitzgerald
0d980e89bc cargo fmt
@aeyakovenko: https://github.com/rust-lang/rust.vim#formatting-with-rustfmt
2018-05-23 20:05:08 -06:00
Anatoly Yakovenko
ef87832bff fixed 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
94507d1aca cuda 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
89924a38ff cuda 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
7faa2b8698 fixed demo 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
65352ce8e7 fix 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
f1988ee1e3 help 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
82ac8eb731 use client ports 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
ae47e34fa5 fix 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
28e781efc3 break early 2018-05-23 17:24:58 -06:00
Anatoly Yakovenko
5c3ceb8355 aws demo2 2018-05-23 17:24:58 -06:00
Greg Fitzgerald
c9113b381d Pull channel functionality into record_stage
This makes record_stage consistent with the other stages. The stage
manages the channels. Anything else is in a standalone object. In
the case of the record_stage, that leaves almost nothing!
2018-05-23 17:15:28 -06:00
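As an illustration of the stage convention this commit describes, here is a minimal sketch (all names hypothetical, not the actual record_stage API): the stage constructor creates the channels and spawns the worker thread, while the pure logic lives in a standalone function.

```rust
use std::sync::mpsc::{channel, Receiver};
use std::thread::{spawn, JoinHandle};

// Hypothetical stand-in for the real Signal/Entry types.
struct Entry(u64);

// The standalone logic: no channels, no threads.
fn record(signal: u64) -> Entry {
    Entry(signal)
}

// The stage owns its channels and worker thread, as described above.
struct RecordStage {
    entry_receiver: Receiver<Entry>,
    thread_hdl: JoinHandle<()>,
}

impl RecordStage {
    fn new(signal_receiver: Receiver<u64>) -> Self {
        let (entry_sender, entry_receiver) = channel();
        let thread_hdl = spawn(move || {
            // Exit gracefully when either side of the pipeline hangs up.
            while let Ok(signal) = signal_receiver.recv() {
                if entry_sender.send(record(signal)).is_err() {
                    break;
                }
            }
        });
        RecordStage { entry_receiver, thread_hdl }
    }
}

fn main() {
    let (signal_sender, signal_receiver) = channel();
    let stage = RecordStage::new(signal_receiver);
    signal_sender.send(42).unwrap();
    drop(signal_sender); // hang up so the worker thread exits
    assert_eq!(stage.entry_receiver.recv().unwrap().0, 42);
    stage.thread_hdl.join().unwrap();
}
```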
Stephen Akridge
75e69eecfa Fix nightly bench 2018-05-23 17:15:03 -06:00
Anatoly Yakovenko
f3c4acc723 cleanup multi node test 2018-05-23 16:59:17 -06:00
Stephen Akridge
2a0095e322 Remove unused variable in multinode-demo fix compiler warning 2018-05-23 16:55:45 -06:00
anatoly yakovenko
9ad5f3c65b fix option (#246) 2018-05-23 14:48:00 -07:00
Greg Fitzgerald
579de64d49 Delete binary again 2018-05-23 14:15:59 -06:00
Greg Fitzgerald
d4200a7b1e Fix build
GenKeys() fix and new multinode module crossed in flight.
2018-05-23 14:10:26 -06:00
Greg Fitzgerald
84477835dc Fix nondeterministic key generation (#243)
Our one and only unsafe operation was ...unsafe.
2018-05-23 14:04:07 -06:00
anatoly yakovenko
504b318ef1 Hooks for binaries to run as leader or replicator and attach to network (#221) 2018-05-23 14:03:19 -06:00
Greg Fitzgerald
f154c8c490 Add data to errors to ease debugging 2018-05-23 12:50:23 -06:00
Greg Fitzgerald
d4959bc157 Test cleanup
GenKey unit tests were in the benchmark suite.
2018-05-23 12:50:23 -06:00
Anatoly Yakovenko
87e025fe22 fmt 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
8049323ca8 @garious review 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
b38c7ea2ff fmt 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
239b925fb3 woop 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
60da7f7aaf wip 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
8646ff4927 refactor wip 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
59be94a81f cleanup 2018-05-23 12:07:44 -06:00
Anatoly Yakovenko
437c485e5c cleanup 2018-05-23 12:07:44 -06:00
Michael Vines
79a58da6a9 Merge pull request #240 from mvines/master
CI overhaul follow-ups
2018-05-22 23:27:19 -07:00
Michael Vines
ae29641a18 Run most CI steps in docker 2018-05-22 23:16:25 -07:00
Michael Vines
9c3f65bca9 Update build status badge 2018-05-22 22:59:19 -07:00
Greg Fitzgerald
086365b4c4 Merge pull request #237 from garious/hoist-lastid
Hoist last_id
2018-05-22 17:48:25 -06:00
Greg Fitzgerald
64044da49c Merge pull request #239 from sakridge/fix_bad_sig_mac
Fix test_bad_sig on mac
2018-05-22 17:48:01 -06:00
Stephen Akridge
7b5b7feb63 Fix test_bad_sig on mac 2018-05-22 16:40:01 -07:00
Greg Fitzgerald
2e059f8504 Rename TransactionData to Contract
No longer a single place to get all the data that was signed.
2018-05-22 17:00:40 -06:00
Greg Fitzgerald
207b6686d1 Hoist last_id
First step in unifying Witness processing and Transaction processing
2018-05-22 17:00:40 -06:00
Greg Fitzgerald
abfd7d6951 Merge pull request #234 from sakridge/fix_events_addr
Send events to the right address
2018-05-22 16:59:28 -06:00
anatoly yakovenko
7fc166b5ba Merge pull request #238 from aeyakovenko/tvu_cleanup
tvu cleanup
2018-05-22 15:41:33 -07:00
Anatoly Yakovenko
021953d59a cleanup 2018-05-22 15:30:46 -07:00
Anatoly Yakovenko
bbe89df2ff fmt 2018-05-22 15:18:07 -07:00
Anatoly Yakovenko
a638ec5911 builds 2018-05-22 15:17:59 -07:00
Anatoly Yakovenko
26272a3600 split out stages 2018-05-22 14:26:28 -07:00
Stephen Akridge
8454eb79d0 Send events to the right address and set recv socket timeout 2018-05-22 13:52:50 -07:00
Michael Vines
796f4b981b Merge pull request #233 from mvines/ci
Add in-tree buildkite pipeline
2018-05-22 13:06:24 -07:00
Michael Vines
34514d65bc Add in-tree buildkite pipeline 2018-05-21 23:43:27 -07:00
Greg Fitzgerald
2786357082 Merge pull request #230 from garious/generalize-topackets
Benchmark the banking stage
2018-05-18 19:47:26 -07:00
anatoly yakovenko
4badeacd1d Merge pull request #226 from aeyakovenko/converge_test
check convergence
2018-05-16 23:44:23 -07:00
Anatoly Yakovenko
63a0ba6ec8 fixed 2018-05-16 23:28:03 -07:00
Anatoly Yakovenko
9a4ce6d70e fmt 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
35ee2d0ce1 cleanup 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
b04716d40d fmt 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
051fa6f1f1 cleanup 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
8dc1b07e75 docs 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
bee1e7ebaf compute convergence maximum 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
f3f0b9f0c5 update 2018-05-16 23:27:26 -07:00
Anatoly Yakovenko
a5cf745e1c check convergence 2018-05-16 23:27:26 -07:00
Greg Fitzgerald
273b800047 Benchmark the banking stage 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
6c1f1c2a7a Promote create_entry() to Entry::new() 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
9c62f8d81f Add Event::Transaction constructor 2018-05-16 23:18:58 -07:00
Greg Fitzgerald
82aef7ebe2 Merge pull request #225 from mvines/deploy
Auto deploy tagged versions of solana to crates.io
2018-05-16 23:36:15 -06:00
Michael Vines
57636d3d5f Auto deploy tagged versions of solana to crates.io 2018-05-16 21:38:14 -07:00
Greg Fitzgerald
dc87effc0a Merge pull request #229 from garious/fix-bench
Fix the benchmark build
2018-05-16 16:37:56 -06:00
Greg Fitzgerald
f0c9823e9f Merge pull request #228 from garious/generalize-topackets
request::to_request_packets -> packet::to_packets
2018-05-16 16:37:29 -06:00
Greg Fitzgerald
0b91dd6163 Fix the benchmark build 2018-05-16 16:35:50 -06:00
Greg Fitzgerald
4955c6f13a request::to_request_packets -> packet::to_packets 2018-05-16 16:11:53 -06:00
Greg Fitzgerald
2e7beca9ba Generalize to_request_packets 2018-05-16 16:01:19 -06:00
Greg Fitzgerald
59c1b9983d Merge pull request #220 from garious/add-tpu
Add tpu
2018-05-16 12:21:07 -06:00
Greg Fitzgerald
f7083e0923 Remove transaction processing from RPU and request processing from TVU 2018-05-15 12:15:29 -06:00
Greg Fitzgerald
6d4defdf96 Offload event processing to the TPU 2018-05-15 11:33:43 -06:00
Greg Fitzgerald
b826f837f8 First attempt to pull TPU into the server 2018-05-15 11:25:55 -06:00
Greg Fitzgerald
5855e18a4e Let server own the bank, not TPU/RPU 2018-05-15 11:21:48 -06:00
Greg Fitzgerald
3f38c0a245 Feed events socket into the server 2018-05-15 11:19:58 -06:00
Greg Fitzgerald
cfe8b3fc55 Wrap the RPU with new object Server 2018-05-15 11:00:01 -06:00
Greg Fitzgerald
e9ee020b5f Rename constructors 2018-05-15 10:45:36 -06:00
Greg Fitzgerald
1bcf3891b4 New TPU/RPU constructors 2018-05-15 10:44:47 -06:00
Greg Fitzgerald
5456de63e9 Less state 2018-05-15 10:38:17 -06:00
Greg Fitzgerald
9026c70952 Inline Rpu::new 2018-05-15 10:33:16 -06:00
Greg Fitzgerald
99dc4ea4a9 Spin up threads from Rpu/Tpu constructors 2018-05-15 10:30:52 -06:00
Greg Fitzgerald
0aaa500f7c Rpu/Tpu serve() functions now only spin up threads 2018-05-15 10:10:45 -06:00
Greg Fitzgerald
5f5be83a17 Hoist socket creation/configuration
TODO: Add a library for socket configuration.
2018-05-15 10:05:23 -06:00
Greg Fitzgerald
7e44005a0f Don't do error-prone things in functions that spawn threads 2018-05-15 09:53:51 -06:00
Greg Fitzgerald
ee3fb985ea Hoist set_timeout 2018-05-15 09:42:28 -06:00
Greg Fitzgerald
2a268aa528 Reorder to reflect dependencies 2018-05-15 09:17:48 -06:00
Greg Fitzgerald
cd262cf860 Merge pull request #223 from rlkelly/202__rust_refactor
202  rust refactor
2018-05-15 08:44:47 -06:00
Robert Kelly
a1889c32d4 fixed CrdtToSmall typo 2018-05-15 10:29:56 -04:00
Robert Kelly
d42d024d9c minor changes 2018-05-15 10:23:11 -04:00
anatoly yakovenko
7b88b8d159 Merge pull request #222 from aeyakovenko/fixed_ignore_tests
fix ignore tests
2018-05-14 22:18:38 -07:00
Anatoly Yakovenko
4131071b9a fix ignore tests 2018-05-14 22:06:42 -07:00
Greg Fitzgerald
ef6bd7e3b8 Add TPU 2018-05-14 17:36:19 -06:00
Greg Fitzgerald
374bff6550 Extract event processing from request_stage 2018-05-14 17:31:27 -06:00
Greg Fitzgerald
0a46bbe4f9 Merge pull request #219 from garious/add-write-stage
Move write_service and drain_service into new write_stage module
2018-05-14 17:18:04 -06:00
Greg Fitzgerald
f4971be236 Merge pull request #218 from aeyakovenko/multitest-rebase
multinode test
2018-05-14 17:17:34 -06:00
Anatoly Yakovenko
421273f862 disable tests that fail with kcov 2018-05-14 16:07:21 -07:00
Anatoly Yakovenko
2c7f229883 wait longer 2018-05-14 15:48:43 -07:00
Anatoly Yakovenko
904eabad2f wait longer 2018-05-14 15:48:24 -07:00
Anatoly Yakovenko
8b233f6be4 update 2018-05-14 15:43:26 -07:00
Anatoly Yakovenko
08fc821ca9 rebase 2018-05-14 15:35:54 -07:00
Greg Fitzgerald
81706f2d75 Move write_service and drain_service into new write_stage module 2018-05-14 16:31:31 -06:00
Anatoly Yakovenko
7b50c3910f fmt 2018-05-14 15:21:41 -07:00
Anatoly Yakovenko
2d635386af rebased 2018-05-14 15:20:41 -07:00
Greg Fitzgerald
a604dcb4c4 Merge pull request #217 from garious/add-historian-stage
Add record_stage to pipeline
2018-05-14 16:01:45 -06:00
Greg Fitzgerald
7736b9cac6 Boot Alice and Bob from the unit tests 2018-05-14 15:39:34 -06:00
Greg Fitzgerald
d2dd005a59 accountant -> bank 2018-05-14 15:33:11 -06:00
Greg Fitzgerald
6e8f99d9b2 Purge EventProcessor 2018-05-14 14:45:29 -06:00
Greg Fitzgerald
685de30047 Purge EventProcessor from RPU 2018-05-14 14:35:25 -06:00
Greg Fitzgerald
17cc9ab07f Rename Historian to RecordStage
Historian was a legacy name. The new name reflects the new pipelined
architecture.
2018-05-14 14:19:19 -06:00
Greg Fitzgerald
3f10bf44db Config recorder with any kind of Duration, not just milliseconds 2018-05-14 14:12:36 -06:00
Greg Fitzgerald
27984e469a Multiply duration, not milliseconds 2018-05-14 13:58:42 -06:00
Greg Fitzgerald
a2c05b112e Add historian to pipeline
No longer intercept entries to register_entry_id(). Instead,
register the ID in the Write stage.

EventProcessor is now just being used as a place to store data.

Fixes #216
2018-05-14 12:43:40 -06:00
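A minimal sketch of the Write-stage behavior described above, with hypothetical stand-in types: the entry is written to the ledger first, and only then is its ID registered, so clients cannot cite an ID that was never written.

```rust
use std::sync::mpsc::{channel, Receiver};

struct Entry { id: u64 }

struct Bank;
impl Bank {
    fn register_entry_id(&self, _id: &u64) {
        // Accept this ID as a valid last_id on future transactions.
    }
}

fn write_stage(bank: &Bank, entry_receiver: &Receiver<Entry>, ledger: &mut Vec<Entry>) {
    while let Ok(entry) = entry_receiver.recv() {
        let id = entry.id;
        ledger.push(entry);          // persist the entry first...
        bank.register_entry_id(&id); // ...then register its ID
    }
}

fn main() {
    let (entry_sender, entry_receiver) = channel();
    entry_sender.send(Entry { id: 1 }).unwrap();
    drop(entry_sender); // hang up so write_stage returns
    let mut ledger = Vec::new();
    write_stage(&Bank, &entry_receiver, &mut ledger);
    assert_eq!(ledger.len(), 1);
}
```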
Greg Fitzgerald
a578c1a5e3 Merge pull request #215 from garious/suppress_panic_message_in_tests
Don't output panic noise from panic test
2018-05-14 11:46:22 -06:00
Greg Fitzgerald
500aaed48e Merge pull request #211 from garious/add-tx-count
Drop EntryInfo subscriptions
2018-05-14 10:41:09 -06:00
Robert Kelly
4a94da8a94 Don't output panic noise from panic test
P.S. rustfmt 0.4.1-stable (7a807262 2018-04-20)
2018-05-14 10:38:59 -06:00
Greg Fitzgerald
cc447c0fda Drop support for EntryInfo subscriptions 2018-05-14 09:53:57 -06:00
Greg Fitzgerald
0ae69bdcd9 Get transaction_count via GetTransactionCount instead of EntryInfo 2018-05-14 09:45:09 -06:00
Greg Fitzgerald
5ba20a94e8 Panic on error to get same signature as transaction_count() 2018-05-14 09:43:40 -06:00
Greg Fitzgerald
f168c377fd Get last_id via GetLastId instead of EntryInfo 2018-05-14 09:40:29 -06:00
Greg Fitzgerald
dfb754dd13 Revive GetLastId messages 2018-05-14 09:35:10 -06:00
Greg Fitzgerald
455050e19c Expose the server-side transaction count 2018-05-14 07:21:12 -06:00
Greg Fitzgerald
317031f455 Add transaction count to accountant 2018-05-14 06:49:51 -06:00
Greg Fitzgerald
b132ce1944 Merge pull request #210 from aeyakovenko/buildite_coverage
ignore unstable tests
2018-05-13 22:00:32 -06:00
Anatoly Yakovenko
8b226652aa unstable 2018-05-13 20:54:41 -07:00
Anatoly Yakovenko
2c7fe3ed8d unstable 2018-05-13 20:51:07 -07:00
Anatoly Yakovenko
3d5f2b3c28 unstable 2018-05-13 20:45:55 -07:00
Anatoly Yakovenko
7a79afe4a6 unstable 2018-05-13 20:41:54 -07:00
Anatoly Yakovenko
1f7387a39b increase sleep 2018-05-13 20:33:41 -07:00
Greg Fitzgerald
0fc2bee144 Merge pull request #208 from rlkelly/203__remove_old_genkey
removed old keygen
2018-05-13 19:04:23 -06:00
Robert Kelly
791ae852a2 removed old keygen 2018-05-13 18:14:10 -04:00
Greg Fitzgerald
c2fcd876d7 Merge pull request #206 from garious/add-accounting-stage
More modules
2018-05-12 18:05:10 -06:00
Greg Fitzgerald
d239d4a495 Add missing files 2018-05-12 17:57:28 -06:00
Greg Fitzgerald
aec05ef602 Move RequestProcessor into its own module 2018-05-12 17:50:55 -06:00
Greg Fitzgerald
e5d46d998b Move thin client messages into their own module 2018-05-12 17:41:27 -06:00
Greg Fitzgerald
b2e3299539 Only pass accountant write_service 2018-05-12 17:30:15 -06:00
Greg Fitzgerald
c308a6459f cargo fmt 2018-05-12 17:27:15 -06:00
Greg Fitzgerald
4eb1bc08a7 Merge pull request #205 from rlkelly/203__test_key_generation
203  test key generation
2018-05-12 17:26:46 -06:00
Robert Kelly
ff5e1c635f increased iterations 2018-05-12 18:18:18 -04:00
Robert Kelly
6149c2fcb5 added benchmarks for two GenKeys 2018-05-12 18:08:08 -04:00
Greg Fitzgerald
d7cd80dce5 Merge pull request #204 from garious/add-accounting-stage
TPU cleanup
2018-05-12 15:47:37 -06:00
Greg Fitzgerald
6264508f5e Consistent naming of senders and receivers 2018-05-12 15:24:20 -06:00
Greg Fitzgerald
a3869dd4c1 Move entry_receiver to RequestStage
This can move to AccountingStage once RequestStage stops
calling process_events().
2018-05-12 15:14:37 -06:00
Greg Fitzgerald
a3d2831f8c Free up the name 'accounting_stage' 2018-05-12 14:05:57 -06:00
Robert Kelly
4cd1fa8c38 refactored seed generation 2018-05-12 15:42:27 -04:00
Greg Fitzgerald
1511dc43d7 Move RequestProcessor out of Rpu/Tvu state 2018-05-12 11:39:24 -06:00
Greg Fitzgerald
3d82807965 Delete dead code 2018-05-12 11:24:40 -06:00
Greg Fitzgerald
4180571660 Don't pass events_socket to RPU 2018-05-12 11:11:30 -06:00
Greg Fitzgerald
421d9aa501 Free up the name 'tpu' 2018-05-12 10:53:25 -06:00
Greg Fitzgerald
898f4971a2 Free up name 'thin_client_service' 2018-05-12 10:50:22 -06:00
Greg Fitzgerald
7ab3331f01 Move validation processor to its own module 2018-05-12 00:31:32 -06:00
Greg Fitzgerald
b4ca414492 More object-oriented 2018-05-12 00:19:12 -06:00
Greg Fitzgerald
73abea088a No need for TPU dependency 2018-05-11 23:51:35 -06:00
Greg Fitzgerald
2376dfc139 Let thin client own the receiver channel 2018-05-11 23:46:04 -06:00
Greg Fitzgerald
d2f95d5319 Move thin client service thread into thin_client_service.rs 2018-05-11 23:37:44 -06:00
Greg Fitzgerald
cd96843699 Free up name ThinClientService 2018-05-11 23:37:14 -06:00
Greg Fitzgerald
ca80bc33c6 Move the writer stage's utilities to its own module 2018-05-11 22:36:16 -06:00
Greg Fitzgerald
19607886f7 Move sig verification stage into its own module 2018-05-11 21:51:37 -06:00
Greg Fitzgerald
3c11a91f77 Cleanup verifier error handling 2018-05-11 21:01:07 -06:00
Greg Fitzgerald
b781fdbd04 Reorganize 2018-05-11 20:50:50 -06:00
Greg Fitzgerald
765d901530 Better names 2018-05-11 20:18:04 -06:00
Greg Fitzgerald
3cedbc493e Reorder to reflect the pipeline order 2018-05-11 20:11:25 -06:00
Greg Fitzgerald
0488d0a82f Extract sig verify functions 2018-05-11 19:59:40 -06:00
Greg Fitzgerald
f0be595e4c Create function for thin client thread 2018-05-11 17:58:27 -06:00
Greg Fitzgerald
55100854d6 Better names 2018-05-11 16:41:35 -06:00
Greg Fitzgerald
600a1f8866 Initialize thin client with events port 2018-05-11 16:35:53 -06:00
Greg Fitzgerald
95bf68f3f5 Correct some strange naming 2018-05-11 16:24:18 -06:00
Greg Fitzgerald
bcdb058492 cargo fmt 2018-05-11 13:06:05 -06:00
Greg Fitzgerald
7f46aef624 Merge pull request #200 from jackson-sandland/153-panic-cleanup
issue #153 - panic cleanup
2018-05-11 13:05:04 -06:00
Code Cobain
e779496dfb Update signature.rs 2018-05-11 11:49:22 -07:00
Code Cobain
3d77fa5fbc Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:40:20 -07:00
Jackson Sandland
250830ade9 cargo fmt run 2018-05-11 11:38:52 -07:00
Greg Fitzgerald
7b2eb7ccfc Merge pull request #189 from rlkelly/156__remove_user_keys_in_mintdemo
156  remove user keys in mintdemo
2018-05-11 12:19:32 -06:00
Code Cobain
458c27c6e9 Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:18:45 -07:00
Robert Kelly
a49e664e63 Merge branch '156__remove_user_keys_in_mintdemo' of github.com:rlkelly/solana into 156__remove_user_keys_in_mintdemo 2018-05-11 14:07:48 -04:00
Robert Kelly
f20380d6b4 changed RwLock to RefCell 2018-05-11 14:07:41 -04:00
Rob Kelly
05a5e551d6 Merge branch 'master' into 156__remove_user_keys_in_mintdemo 2018-05-11 13:00:44 -04:00
Robert Kelly
d278b71cb2 added tests and utility method for key generation 2018-05-11 12:55:05 -04:00
Greg Fitzgerald
a485c141d5 Merge pull request #199 from garious/add-accounting-stage
Fix race condition in Accountant::apply_payment()
2018-05-11 10:54:32 -06:00
Greg Fitzgerald
8a9f6b9ae3 Merge pull request #201 from CriesofCarrots/master
Generalize next tick functions to carry events
2018-05-11 10:54:14 -06:00
Tyera Eulberg
7144090528 Fix whitespace 2018-05-11 10:40:31 -06:00
Tyera Eulberg
ee0015ac38 Fix whitespace 2018-05-11 10:34:46 -06:00
Tyera Eulberg
8b7f7f1088 Generalize next tick functions to carry events 2018-05-11 09:45:42 -06:00
Jackson Sandland
c95c6a75f8 tpu.rs - panic cleanup 2018-05-10 20:49:58 -07:00
Jackson Sandland
44bf79e35f transaction.rs - panic cleanup 2018-05-10 18:24:33 -07:00
Jackson Sandland
bb654f286c tpu.rs - panic cleanup 2018-05-10 18:21:10 -07:00
Greg Fitzgerald
1acd2aa8cf Fix race condition in Accountant::apply_payment() 2018-05-10 19:07:15 -06:00
Jackson Sandland
18d3659b91 timing.rs - panic cleanup 2018-05-10 17:47:27 -07:00
Jackson Sandland
63a4bafa72 thin_client - panic cleanup 2018-05-10 17:46:10 -07:00
Jackson Sandland
4eb2e84c9f streamer.rs - panic cleanup 2018-05-10 17:38:00 -07:00
Jackson Sandland
73c7fb87e8 signature.rs - panic cleanup 2018-05-10 17:15:53 -07:00
Jackson Sandland
c1496722aa packet.rs - panic cleanup 2018-05-10 17:11:31 -07:00
Jackson Sandland
d9f81b0c8c mint.rs - panic cleanup 2018-05-10 17:06:43 -07:00
Jackson Sandland
d69beaabe1 historian.rs - panic cleanup 2018-05-10 17:00:37 -07:00
Jackson Sandland
b7a0bd6347 event.rs - panic cleanup 2018-05-10 16:59:13 -07:00
Jackson Sandland
882ea6b672 erasure.rs - panic cleanup 2018-05-10 16:54:21 -07:00
Greg Fitzgerald
736d3eabae Merge pull request #198 from garious/add-accounting-stage
Move more code out of TPU
2018-05-10 17:24:22 -06:00
Greg Fitzgerald
af53197c04 cargo +nightly fmt 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
cf186c5762 Better names 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
f384a2ce85 Move streamer-specific utility into streamer module 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
803b76e997 More idiomatic Rust 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
230d7c3dd6 Move all Request processing into thin_client_service 2018-05-10 16:58:37 -06:00
Greg Fitzgerald
4f629dd982 Add events socket instead of modifying the existing socket 2018-05-10 16:54:43 -06:00
Greg Fitzgerald
4fdd891b54 More precise function names 2018-05-10 16:54:43 -06:00
Greg Fitzgerald
64a892321a Merge pull request #197 from sakridge/fixes_for_entry_serialization
Fixes for serializing entries over blobs
2018-05-10 16:53:30 -06:00
Stephen Akridge
a80991f2b3 Fixes for serializing entries over blobs and reorg into ledger 2018-05-10 15:30:30 -07:00
Raj Gokal
c9cd81319a Set theme jekyll-theme-slate 2018-05-10 13:28:29 -07:00
Greg Fitzgerald
521ae21632 Merge pull request #193 from sakridge/serialize_entries_over_multiple_blobs
Serialize entries over multiple blobs
2018-05-10 13:53:48 -06:00
Jackson Sandland
bcd6606a16 ecdsa.rs - panic cleanup 2018-05-09 18:19:23 -07:00
Jackson Sandland
52ebb88205 accountant.rs - simplify error messages 2018-05-09 18:16:37 -07:00
Jackson Sandland
1e91d09be7 crdt.rs - panic cleanup 2018-05-09 18:10:48 -07:00
Jackson Sandland
02c573986b historian / transaction updates 2018-05-09 17:22:14 -07:00
Jackson Sandland
f2de486658 accountant.rs - panic cleanup 2018-05-09 17:19:12 -07:00
Stephen Akridge
900b4f2644 Serialize entries over multiple blobs 2018-05-09 16:03:47 -07:00
Greg Fitzgerald
1cfaa9afb6 Merge pull request #194 from garious/add-accounting-stage
Fix nightly
2018-05-09 16:53:45 -06:00
Greg Fitzgerald
801468d70d Fix nightly 2018-05-09 16:51:34 -06:00
Greg Fitzgerald
0601e05978 Merge pull request #192 from garious/add-accounting-stage
Add accounting stage
2018-05-09 16:47:50 -06:00
Greg Fitzgerald
7ce11b5d1c Cleanup: use full words for field names
and optionally for variable names
2018-05-09 16:19:42 -06:00
Greg Fitzgerald
f2d4799491 Cleanup: field names should be nouns 2018-05-09 16:14:40 -06:00
Greg Fitzgerald
ebc458cd32 Remove redundant Arcs 2018-05-09 15:45:10 -06:00
Greg Fitzgerald
43cd631579 Add thin_client_service 2018-05-09 14:56:34 -06:00
Greg Fitzgerald
bc824c1a6c Reference count the accountant
So that the thin client can reference the AccountingStage's accountant
from separate threads.
2018-05-09 14:33:20 -06:00
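The reference counting mentioned here is plain std::sync::Arc. A minimal sketch (Accountant is a stand-in, not the real type) of sharing one accountant between the accounting stage and a thin-client thread:

```rust
use std::sync::Arc;
use std::thread;

struct Accountant; // stand-in for the real accountant
impl Accountant {
    fn get_balance(&self) -> i64 {
        42
    }
}

fn main() {
    let accountant = Arc::new(Accountant);
    // The thin-client service gets its own handle; both point at one accountant.
    let client_handle = accountant.clone();
    let client_thread = thread::spawn(move || client_handle.get_balance());
    assert_eq!(client_thread.join().unwrap(), 42);
}
```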
Greg Fitzgerald
4223aff840 Remove useless ref counts 2018-05-09 14:25:52 -06:00
Greg Fitzgerald
f107c6c2ca Don't wrap thread-safe objects with mutexes 2018-05-09 14:21:42 -06:00
Greg Fitzgerald
7daf14caa7 Don't depend on client from server 2018-05-09 13:33:33 -06:00
Greg Fitzgerald
ded28c705f Tuck away the Historian
The Historian is now just a utility of the accounting stage.
2018-05-09 12:25:19 -06:00
Greg Fitzgerald
778bec0777 Intercept historian output from accounting stage
We were accessing the accountant from multiple stages just to
register the ID the historian adds to Events.

This change should cause a whole lot of Arcs and Mutexes to go away.
2018-05-09 12:00:37 -06:00
Greg Fitzgerald
6967cf7f86 Boot sync_channel()
This is less useful now that we send Vec<Event> instead of Event.
2018-05-09 11:43:16 -06:00
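A sketch of why batching makes the bounded channel less useful (Event is a placeholder type): once each send carries a whole Vec&lt;Event&gt;, message counts drop by orders of magnitude, so the backpressure a sync_channel provided is no longer worth its blocking behavior.

```rust
use std::sync::mpsc::channel;

struct Event(u32); // placeholder payload

fn main() {
    // An unbounded channel(), as in this commit, instead of sync_channel(n).
    let (sender, receiver) = channel();

    // One send for 1024 events, rather than 1024 sends of one event each.
    let batch: Vec<Event> = (0..1024).map(Event).collect();
    sender.send(batch).unwrap();

    assert_eq!(receiver.recv().unwrap().len(), 1024);
}
```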
Greg Fitzgerald
0ee3ec86bd Fix nightly 2018-05-09 10:48:56 -06:00
Greg Fitzgerald
e4c47e8417 Use AccountingStage in Tpu 2018-05-09 10:31:23 -06:00
Greg Fitzgerald
98ae80f4ed Hoist historian 2018-05-09 09:26:58 -06:00
Greg Fitzgerald
876c77d0bc Extract accounting stage code from tpu 2018-05-09 09:22:46 -06:00
Greg Fitzgerald
d44a6f7541 Move Accounting stage functionality into its own object 2018-05-09 09:03:00 -06:00
Greg Fitzgerald
9040c04d27 Remove redundant Tick 2018-05-09 08:18:52 -06:00
Greg Fitzgerald
ebbdef0538 Ignore flakey test 2018-05-09 08:16:59 -06:00
Greg Fitzgerald
bfbee988d0 No longer wait for a Tick signal to record events 2018-05-09 08:15:51 -06:00
Greg Fitzgerald
1d4d0272ca Drop support for logging a single event 2018-05-09 08:12:33 -06:00
Greg Fitzgerald
77a76f0783 Record a batch of events 2018-05-09 08:11:19 -06:00
Greg Fitzgerald
d9079de262 Add a way of sending a batch of events 2018-05-09 08:05:40 -06:00
Greg Fitzgerald
b3d732a1a1 No longer artificially limit the size of entries
Instead, serialize the entries and split them up over multiple
blobs.
2018-05-09 07:59:55 -06:00
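A sketch of the serialize-then-split approach described here, assuming bincode 1.x and serde with derive (the tree already uses bincode); Entry's fields and BLOB_DATA_SIZE are illustrative stand-ins, not the real definitions.

```rust
// Cargo.toml (assumed): bincode = "1", serde = { version = "1", features = ["derive"] }
use serde::Serialize;

const BLOB_DATA_SIZE: usize = 64 * 1024; // hypothetical blob payload limit

#[derive(Serialize)]
struct Entry {
    num_hashes: u64,
    id: [u8; 32],
}

// Serialize the whole batch, then split the byte stream across fixed-size
// blobs, instead of capping entry size up front.
fn entries_to_blobs(entries: &[Entry]) -> Vec<Vec<u8>> {
    let bytes = bincode::serialize(entries).expect("serialize entries");
    bytes.chunks(BLOB_DATA_SIZE).map(|chunk| chunk.to_vec()).collect()
}

fn main() {
    let entries: Vec<Entry> = (0..10_000u64)
        .map(|i| Entry { num_hashes: i, id: [0u8; 32] })
        .collect();
    let blobs = entries_to_blobs(&entries);
    println!("{} entries -> {} blobs", entries.len(), blobs.len());
}
```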
Greg Fitzgerald
52f1a02938 Delete historical artifact
This was just to explain Proof of History. We have better explanations
elsewhere. Delete!
2018-05-09 07:53:24 -06:00
Jackson Sandland
fe51669e85 signature.rs - panic cleanup 2018-05-08 23:21:45 -07:00
Jackson Sandland
670a6c50c9 event.rs - panic cleanup 2018-05-08 22:58:48 -07:00
Jackson Sandland
86c1aaf7d8 transaction.rs - panic cleanup 2018-05-08 22:46:22 -07:00
Jackson Sandland
658e787b60 timing.rs panic cleanup 2018-05-08 22:40:07 -07:00
Robert Kelly
40c50aef50 deterministic random wallet generation 2018-05-09 00:07:19 -04:00
Robert Kelly
a24c2bbe73 merge bug 2018-05-09 00:07:03 -04:00
Robert Kelly
bdbe90b891 Merge branch 'master' of github.com:solana-labs/solana 2018-05-08 23:40:54 -04:00
Greg Fitzgerald
3236be7877 Merge pull request #188 from garious/add-tpu
AccountantSkel -> Tpu
2018-05-08 19:50:58 -06:00
Greg Fitzgerald
1dca17fdb4 cargo +nightly fmt 2018-05-08 18:59:01 -06:00
Greg Fitzgerald
785e971698 AccountantSkel -> Tpu
The terms Stub and Skel come from OMG IDL and only made sense while
the Stub was acting as an RPC client for the Accountant object.
Nowadays, the Stub interface looks nothing like the Accountant and
meanwhile we've recognized the multithreaded implementation is more
reminiscent of a pipelined CPU. Thus, we finally bite the bullet and
rename our modules.

AccountantSkel -> Tpu
AccountantStub -> ThinClient

Up next will be moving much of the TPU code into separate modules,
each representing a stage of the pipeline. The interface of each
will follow the precedent set by the Historian object.
2018-05-08 17:40:02 -06:00
Greg Fitzgerald
2bfa20ff85 Merge pull request #182 from garious/split-request
Control port prep
2018-05-08 17:11:34 -06:00
Greg Fitzgerald
474a9af78d Merge pull request #187 from sakridge/fix_blob_size_check
Trust the recorder not to give us more than we can serialize
2018-05-08 17:11:18 -06:00
Greg Fitzgerald
61425eacb8 Merge pull request #185 from sakridge/fix_default_client_port
Fix default client port, server uses 8000-8002 for gossip
2018-05-08 16:58:04 -06:00
Stephen Akridge
4870def1fb Fix default client port, server uses 8000-8002 for gossip. 2018-05-08 15:40:55 -07:00
Stephen Akridge
3e73fb9233 Trust the recorder not to give us more than we can serialize
Also run the client for 10 seconds; 5 is a bit too short
2018-05-08 15:23:41 -07:00
Greg Fitzgerald
5ad6061c3f Merge pull request #184 from sakridge/add_debug_msg_in_readme
Add message about trace debugging
2018-05-08 14:39:09 -06:00
Stephen Akridge
fae019b974 Add message about trace debugging 2018-05-08 13:26:09 -07:00
Greg Fitzgerald
3bb06d8364 Merge pull request #183 from sakridge/verify_thread_rework
Rework sig processing threads and add perf for process/verify
2018-05-08 13:15:41 -06:00
Greg Fitzgerald
c9c9afa472 Remove the note about git-lfs 2018-05-08 12:52:24 -06:00
Stephen Akridge
bd0671e123 Rework sig processing threads and add perf for process/verify 2018-05-08 11:49:29 -07:00
Greg Fitzgerald
6f3ec8d21f Merge pull request #181 from aeyakovenko/link
update link
2018-05-08 08:20:43 -06:00
Anatoly Yakovenko
9a0bf13feb update link 2018-05-08 06:44:24 -07:00
Greg Fitzgerald
9ff1a6f0cd Add a thread to support thin clients 2018-05-07 21:44:44 -06:00
Greg Fitzgerald
a59f64cae1 Merge pull request #179 from garious/update-readme
Update README with proposed way to download the gpu lib
2018-05-07 16:43:20 -06:00
Greg Fitzgerald
a4ecd09723 Delete .gitattributes
This was used by git-lfs.
2018-05-07 16:35:54 -06:00
Greg Fitzgerald
f159dfd15a Update README with proposed way to download the gpu lib
If you checked here yesterday, this was a top-level file in git-lfs,
but that made the developer workflow more painful, so we boot that
file and are making it available via an http endpoint.
2018-05-07 16:33:27 -06:00
Greg Fitzgerald
9e8ec86fa3 Merge pull request #178 from garious/split-request
Refactoring for upcoming thin client port
2018-05-07 16:21:48 -06:00
Greg Fitzgerald
62bb78f58d Prepwork to hoist processing requests 2018-05-07 15:09:08 -06:00
Greg Fitzgerald
893011c3ba Process events instead of processing only transactions
Prep work to allow clients to send any type that can end up in
the ledger.
2018-05-07 14:51:13 -06:00
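The generalization here matches the Event enum visible in the deleted accountant.rs at the bottom of this diff: the server dispatches on the event kind rather than assuming everything is a Transaction. A compressed sketch with placeholder payloads:

```rust
enum Event {
    Transaction(u64),                   // placeholder for the signed transfer
    Signature { from: u8, tx_sig: u8 }, // witness: cancel or complete a plan
    Timestamp { from: u8, dt: u64 },    // witness: unlock postdated payments
}

fn process_event(event: &Event) {
    match *event {
        Event::Transaction(ref _tr) => { /* debit/credit path */ }
        Event::Signature { .. } => { /* apply the signature witness */ }
        Event::Timestamp { .. } => { /* apply the timestamp witness */ }
    }
}

fn main() {
    process_event(&Event::Transaction(1));
    process_event(&Event::Timestamp { from: 0, dt: 0 });
    process_event(&Event::Signature { from: 0, tx_sig: 0 });
}
```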
Greg Fitzgerald
880cb8e7cc Merge pull request #176 from aeyakovenko/multinode
Multinode
2018-05-07 09:05:12 -06:00
Anatoly Yakovenko
85f83f2c74 fmt 2018-05-06 22:29:33 -07:00
Anatoly Yakovenko
4751e459cc fixed! 2018-05-06 22:25:05 -07:00
Anatoly Yakovenko
138efa6cec fixed constant 2018-05-06 22:06:19 -07:00
Anatoly Yakovenko
a68e50935e useless timeouts i think 2018-05-06 21:48:46 -07:00
Stephen Akridge
e8f5fb35ac Multinode fixes and test
* Replace magic numbers for 64k event size
* Fix gossip, don't ping yourself
* Retransmit only to listening nodes
* Multinode test in stub marked unstable
2018-05-06 21:36:06 -07:00
sakridge
6af27669b0 Merge pull request #175 from garious/64k-entries
Limit 256 events per entry
2018-05-04 12:19:25 -07:00
Greg Fitzgerald
e162f24119 Limit 256 events per entry
Attempt to keep blob size under 64kb
2018-05-04 11:52:05 -06:00
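Back-of-envelope for the cap (the per-event size is an assumption for illustration, not a measured figure): if a serialized event is on the order of 256 bytes, 256 events fill a 64 KiB blob.

```rust
const APPROX_EVENT_SIZE: usize = 256; // assumed serialized size per event
const EVENTS_PER_ENTRY: usize = 256;  // the cap in this commit

fn main() {
    // 256 events * ~256 bytes = 65,536 bytes = 64 KiB
    assert_eq!(APPROX_EVENT_SIZE * EVENTS_PER_ENTRY, 64 * 1024);
}
```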
Greg Fitzgerald
dbcc462a48 Merge pull request #173 from sakridge/entry_process_cleanup
Factor out entry processing and fix replicate test to call global setup fn
2018-05-04 11:19:28 -06:00
Stephen Akridge
2d5313639a Factor out entry processing and fix replicate test to call global setup fn 2018-05-03 22:24:30 -07:00
Greg Fitzgerald
38af0f436d Merge pull request #174 from sakridge/fix_bind_for_external
Fix bind so we can talk on external interfaces and surface send error
2018-05-03 18:20:00 -06:00
Stephen Akridge
888c2ffb20 Fix bind so we can talk on external interfaces and surface send error 2018-05-03 17:05:02 -07:00
Greg Fitzgerald
588593f619 Merge pull request #172 from sakridge/fix_entry_serialize
Fix entry serialize
2018-05-03 16:12:42 -06:00
Stephen Akridge
2cdd515b12 Compiles/fmt and add assert for forward progress 2018-05-03 14:58:08 -07:00
Anatoly Yakovenko
0aad71d46e fix entry serialize 2018-05-03 14:35:04 -07:00
Greg Fitzgerald
6f9285322d Merge pull request #171 from garious/cleanup-lastid
Cleanup last_id access in stub and skel
2018-05-03 14:57:28 -06:00
Greg Fitzgerald
68c7f992fa Soothe all versions of rustfmt 2018-05-03 13:56:10 -06:00
Greg Fitzgerald
1feff408ff Implement get_last_id() with transaction_count()
This is more precise than the previous implementation because it'll
drain the EntryInfo queue and return the most recent last_id instead
of the first one.
2018-05-03 13:34:57 -06:00
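A sketch of the draining behavior described above (EntryInfo is a stand-in type): pull everything queued and keep only the newest id, instead of returning the first.

```rust
use std::sync::mpsc::{channel, Receiver};

struct EntryInfo { id: u64 } // stand-in for the real EntryInfo

fn get_last_id(entry_info_receiver: &Receiver<EntryInfo>) -> Option<u64> {
    let mut last_id = None;
    // try_recv() returns Err once the queue is empty, which ends the drain.
    while let Ok(info) = entry_info_receiver.try_recv() {
        last_id = Some(info.id);
    }
    last_id
}

fn main() {
    let (sender, receiver) = channel();
    for id in 1..4u64 {
        sender.send(EntryInfo { id }).unwrap();
    }
    assert_eq!(get_last_id(&receiver), Some(3)); // the most recent, not the first
}
```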
Greg Fitzgerald
f752e02487 Implement GetLastId with EntryInfo subscription 2018-05-03 13:31:43 -06:00
Greg Fitzgerald
c9c7fb0a27 Update comment
The last PR added a thread that logs entries without needing to
be driven by the client.
2018-05-03 13:27:37 -06:00
Greg Fitzgerald
de680c2a8e Remove duplicate state 2018-05-03 13:24:37 -06:00
Greg Fitzgerald
03695ba4c5 Merge pull request #169 from sakridge/broadcast_rebase
Add broadcast impl
2018-05-03 12:22:34 -06:00
Anatoly Yakovenko
c2e2960bf7 Add broadcast impl 2018-05-03 10:34:01 -07:00
Greg Fitzgerald
385d2a580c Merge pull request #168 from aeyakovenko/fix_multi_host_client_demo
multi host client demo
2018-05-03 10:21:41 -06:00
Greg Fitzgerald
7e02652068 Merge pull request #170 from garious/refactor-historian
Fix nightly build
2018-05-03 10:16:05 -06:00
Greg Fitzgerald
ae29c9b4a0 Fix nightly build 2018-05-03 09:38:59 -06:00
Anatoly Yakovenko
078f917e61 useless assert 2018-05-03 08:34:57 -07:00
Anatoly Yakovenko
b65f04d500 multi host client demo
Bind to the same interface as the user supplied client address.
2018-05-03 08:28:11 -07:00
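A sketch of the bind rule in this commit (addresses are placeholders): take the interface portion of the user-supplied client address and bind the local socket there, so traffic on a multi-homed host uses the interface the user chose.

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    let client_addr = "127.0.0.1:8100"; // the user-supplied -c address (placeholder)
    let interface = client_addr.split(':').next().unwrap();
    // Bind to the same interface, letting the OS pick an ephemeral port.
    let socket = UdpSocket::bind(format!("{}:0", interface))?;
    println!("bound to {}", socket.local_addr()?);
    Ok(())
}
```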
Greg Fitzgerald
6acaffe581 Merge pull request #166 from garious/refactor-historian
TPU-friendly Historian
2018-05-02 18:13:30 -06:00
Greg Fitzgerald
e47ef42a33 Merge pull request #167 from djKooks/readme-version
Add comment about rustc version in README
2018-05-02 18:08:13 -06:00
kwangin
b950e33d81 Remove useless comment 2018-05-03 09:06:41 +09:00
kwangin
ec8cfc77ad Remove component adding part 2018-05-03 09:04:56 +09:00
kwangin
00a16db9cd Add comment about rustc version in README 2018-05-03 08:38:09 +09:00
Greg Fitzgerald
4b9f115586 Hoist Historian input 2018-05-02 16:35:37 -06:00
Greg Fitzgerald
c5cc91443e Rename sender/receiver to input/output 2018-05-02 15:54:53 -06:00
Greg Fitzgerald
48d94143e7 Fix CI 2018-05-02 11:05:11 -06:00
Greg Fitzgerald
8174a05156 Merge pull request #165 from rlkelly/126__atomic_balances
126  atomic balances
2018-05-02 10:43:31 -06:00
Robert Kelly
63cf6363a2 more rustfmt 2018-05-02 12:24:25 -04:00
Robert Kelly
cc6de605ac rustfmt 2018-05-02 12:21:20 -04:00
Robert Kelly
d0151d2b79 restored original test logic 2018-05-02 12:07:42 -04:00
Robert Kelly
6b45d453b8 modified verification map 2018-05-02 10:44:41 -04:00
Robert Kelly
b992a84d67 modified verification to loop until success or failure 2018-05-02 10:15:08 -04:00
Robert Kelly
cb362e9052 rust format 2018-05-01 16:38:15 -04:00
Robert Kelly
ccb478c1f6 improved error handling and atomic transactions 2018-05-01 16:38:15 -04:00
Greg Fitzgerald
6af3680f99 Version bump 2018-04-30 22:38:39 -06:00
Greg Fitzgerald
e6c3c215ab Add note about installing git-lfs 2018-04-30 15:26:31 -06:00
Greg Fitzgerald
5c66bbde01 Add a note about running with GPU optimizations 2018-04-30 15:20:39 -06:00
Anatoly Yakovenko
77dd1bdd4a move CI specific scripts to solana-labs/buildkite repo 2018-04-29 23:43:43 -07:00
Anatoly Yakovenko
6268d540a8 move CI specific scripts to solana-labs/buildkite repo
Former-commit-id: 77dd1bdd4a
2018-04-29 23:43:43 -07:00
Greg Fitzgerald
5918e38747 Version bump 2018-04-27 15:49:48 -07:00
Greg Fitzgerald
5eb80f8027 Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".
2018-04-27 15:47:22 -07:00
61 changed files with 4777 additions and 2345 deletions

.gitattributes (vendored, deleted)

@@ -1 +0,0 @@
*.a filter=lfs diff=lfs merge=lfs -text

.gitignore (vendored, 1 line added)

@@ -1,3 +1,4 @@
 Cargo.lock
 /target/
 **/*.rs.bk
+.cargo

.travis.yml

@@ -20,3 +20,17 @@ script:
 after_success: |
   docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
   bash <(curl -s https://codecov.io/bash) -s target/cov
+before_deploy:
+  - cargo package
+deploy:
+  provider: releases
+  api-key:
+    secure: j3cPAbOuGjXuSl+j+JL/4GWxD6dA0/f5NQ0Od4LBVewPmnKiqimGOJ1xj3eKth+ZzwuCpcHwBIIR54NEDSJgHaYDXiukc05qCeToIPqOc0wGJ+GcUrWAy8M7Wo981I/0SVYDAnLv4+ivvJxYE7b2Jr3pHsQAzH7ClY8g2xu9HlNkScEsc4cizA9Sf3zIqtIoi480vxtQ5ghGOUCkwZuG3+Dg+IGnnjvE4qQOYey1del+KIDkmbHjry7iFWPF6fWK2187JNt6XiO2/2tZt6BkMEmdRnkw1r/wL9tj0AbqLgyBjzlI4QQfkBwsuX3ZFeNGArn71s7WmAUGyVOl0DJXfwN/BEUxMTd+lkMjuMNUxaU/hxVZ7zAWH55KJK+qf6B95DLVWr7ypjfJLLBcds+JfkBNoReWLM1XoDUKAU+wBf1b+PKiywNfNascjZTcz6QGe94sa7l/T4PxtHDSREmflFgu1Hysg61WuODDwTTHGrsg9ZuvlINnqQhXsJo9r9+TMIGwwWHcvLQDNz2TPALCfcLtd+RsevdOeXItYa0KD3D4gKGv36bwAVDpCIoZnSeiaT/PUyjilFtJjBpKz9BbOKgOtQhHGrHucn0WOF+bu/t3SFaJKQf/W+hLwO3NV8yiL5LQyHVm/TPY62nBfne2KEqi/LOFxgKG35aACouP0ig=
+  file: target/package/solana-$TRAVIS_TAG.crate
+  skip_cleanup: true
+  on:
+    tags: true
+    condition: "$TRAVIS_RUST_VERSION = stable"
+after_deploy:
+  - cargo publish --token "$CRATES_IO_TOKEN"

Cargo.toml

@@ -1,25 +1,25 @@
 [package]
 name = "solana"
-description = "High Performance Blockchain"
-version = "0.5.0-beta"
+description = "The World's Fastest Blockchain"
+version = "0.6.0-alpha"
 documentation = "https://docs.rs/solana"
-homepage = "http://solana.io/"
+homepage = "http://solana.com/"
 repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <anatoly@solana.io>",
-    "Greg Fitzgerald <greg@solana.io>",
-    "Stephen Akridge <stephen@solana.io>",
+    "Anatoly Yakovenko <anatoly@solana.com>",
+    "Greg Fitzgerald <greg@solana.com>",
+    "Stephen Akridge <stephen@solana.com>",
 ]
 license = "Apache-2.0"

-[[bin]]
-name = "solana-historian-demo"
-path = "src/bin/historian-demo.rs"
-
 [[bin]]
 name = "solana-client-demo"
 path = "src/bin/client-demo.rs"

+[[bin]]
+name = "solana-multinode-demo"
+path = "src/bin/multinode-demo.rs"
+
 [[bin]]
 name = "solana-testnode"
 path = "src/bin/testnode.rs"

@@ -68,3 +68,5 @@ libc = "^0.2.1"
 getopts = "^0.2"
 isatty = "0.1"
 futures = "0.1"
+rand = "0.4.2"
+pnet = "^0.21.0"

README.md

@@ -1,6 +1,6 @@
 [![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
 [![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
-[![Build Status](https://travis-ci.org/solana-labs/solana.svg?branch=master)](https://travis-ci.org/solana-labs/solana)
+[![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg)](https://buildkite.com/solana-labs/solana)
 [![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)

 Disclaimer

@@ -102,6 +102,12 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt-preview
 ```

+If your rustc version is lower than 1.25.0, please update it:
+```bash
+$ rustup update
+```
+
 Download the source code:
 ```bash

@@ -118,6 +124,18 @@ Run the test suite:
 cargo test
 ```

+Debugging
+---
+
+There are some useful debug messages in the code; you can enable them on a per-module and per-level
+basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax:
+```bash
+$ cat genesis.log | RUST_LOG=solana::streamer=debug,solana::accountant_skel=info ./target/release/solana-testnode > transactions0.log
+```
+to see the debug and info sections for streamer and accountant\_skel respectively. Generally
+we are using debug for infrequent debug messages, trace for potentially frequent messages and
+info for performance-related logging.
+
 Benchmarking
 ---

@@ -133,6 +151,13 @@ Run the benchmarks:
 $ cargo +nightly bench --features="unstable"
 ```

+To run the benchmarks on Linux with GPU optimizations enabled:
+```bash
+$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
+$ cargo +nightly bench --features="unstable,cuda"
+```
+
 Code coverage
 ---

_config.yml (new file, 1 line)

@@ -0,0 +1 @@
theme: jekyll-theme-slate

ci/buildkite.yml (new file, 37 lines)

@@ -0,0 +1,37 @@
steps:
  - command: "ci/coverage.sh || true"
    label: "coverage"
    # TODO: Run coverage in a docker image rather than assuming kcov/cargo-kcov
    # is installed on the build agent...
    #plugins:
    #  docker#v1.1.1:
    #    image: "rust"
    #    user: "998:997" # buildkite-agent:buildkite-agent
    #    environment:
    #      - CODECOV_TOKEN=$CODECOV_TOKEN
  - command: "ci/test-stable.sh"
    label: "stable [public]"
    plugins:
      docker#v1.1.1:
        image: "rust"
        user: "998:997" # buildkite-agent:buildkite-agent
  - command: "ci/test-nightly.sh || true"
    label: "nightly - FAILURES IGNORED [public]"
    plugins:
      docker#v1.1.1:
        image: "rustlang/rust:nightly"
        user: "998:997" # buildkite-agent:buildkite-agent
  - command: "ci/test-ignored.sh || true"
    label: "ignored - FAILURES IGNORED [public]"
  - command: "ci/test-cuda.sh"
    label: "cuda"
  - wait
  - command: "ci/publish.sh"
    label: "publish release artifacts"
    plugins:
      docker#v1.1.1:
        image: "rust"
        user: "998:997" # buildkite-agent:buildkite-agent
        environment:
          - BUILDKITE_TAG=$BUILDKITE_TAG
          - CRATES_IO_TOKEN=$CRATES_IO_TOKEN

ci/coverage.sh (new executable file, 25 lines)

@@ -0,0 +1,25 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -r ~/.cargo/env ]]; then
  # Pick up local install of kcov/cargo-kcov
  source ~/.cargo/env
fi

rustc --version
cargo --version
kcov --version
cargo-kcov --version

export RUST_BACKTRACE=1
cargo build
cargo kcov

if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
  exit 1
fi

bash <(curl -s https://codecov.io/bash)

exit 0

ci/publish.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -z "$BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

if [[ -z "$CRATES_IO_TOKEN" ]]; then
  echo CRATES_IO_TOKEN undefined
  exit 1
fi

cargo package
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
cargo publish --token "$CRATES_IO_TOKEN"

exit 0

ci/test-cuda.sh (new executable file, 17 lines)

@@ -0,0 +1,17 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -z "$libcuda_verify_ed25519_URL" ]]; then
  echo libcuda_verify_ed25519_URL undefined
  exit 1
fi

export LD_LIBRARY_PATH=/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
curl -X GET -o libcuda_verify_ed25519.a "$libcuda_verify_ed25519_URL"

source $HOME/.cargo/env
cargo test --features=cuda

exit 0

ci/test-ignored.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/bin/bash -e
cd $(dirname $0)/..
rustc --version
cargo --version
export RUST_BACKTRACE=1
cargo test -- --ignored

ci/test-nightly.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash -e
cd $(dirname $0)/..
rustc --version
cargo --version
rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose --features unstable
cargo test --verbose --features unstable
exit 0

ci/test-stable.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash -e
cd $(dirname $0)/..
rustc --version
cargo --version
rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose
cargo test --verbose
exit 0

multinode-demo/client.sh (new executable file, 7 lines)

@@ -0,0 +1,7 @@
#!/bin/bash
cd /home/ubuntu/solana
#git pull
export RUST_LOG=solana::crdt=trace
# scp ubuntu@18.206.1.146:~/solana/leader.json .
# scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
cat mint-demo.json | cargo run --release --bin solana-multinode-demo -- -l leader.json -c 10.0.5.179:8100 -n 3

multinode-demo/leader.sh (new executable file, 6 lines)

@@ -0,0 +1,6 @@
#!/bin/bash
cd /home/ubuntu/solana
git pull
export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO
#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d

multinode-demo/validator.sh (new executable file, 10 lines)

@@ -0,0 +1,10 @@
#!/bin/bash
cd /home/ubuntu/solana
git pull
scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
scp ubuntu@18.206.1.146:~/solana/leader.json .
scp ubuntu@18.206.1.146:~/solana/genesis.log .
scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d 2>&1 | tee validator.log

(deleted file)

@@ -1,6 +0,0 @@
#!/bin/bash
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
source $HOME/.cargo/env
export PATH=$PATH:/usr/local/cuda/bin
cp /tmp/libcuda_verify_ed25519.a .
cargo test --features=cuda

src/accountant.rs (deleted, 526 lines)

@@ -1,526 +0,0 @@
//! The `accountant` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use transaction::Transaction;
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
AccountNotFound,
InsufficientFunds,
InvalidTransferSignature,
}
pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
if balances.read().unwrap().contains_key(&payment.to) {
let bals = balances.read().unwrap();
*bals[&payment.to].write().unwrap() += payment.tokens;
} else {
let mut bals = balances.write().unwrap();
bals.insert(payment.to, RwLock::new(payment.tokens));
}
}
pub struct Accountant {
balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
last_time: RwLock<DateTime<Utc>>,
}
impl Accountant {
/// Create an Accountant using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new());
apply_payment(&balances, deposit);
Accountant {
balances,
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
}
}
/// Create an Accountant with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let acc = Self::new_from_deposit(&deposit);
acc.register_entry_id(&mint.last_id());
acc
}
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures.read().unwrap().contains(sig) {
return false;
}
signatures.write().unwrap().insert(*sig);
true
}
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
signatures.write().unwrap().remove(sig)
}
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::forget_signature(&entry.1, sig);
}
return false;
}
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, sig);
}
false
}
/// Tell the accountant which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted, the
/// accountant will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids.write().unwrap();
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
let bals = self.balances.read().unwrap();
// Hold a write lock before the condition check, so that a debit can't occur
// between checking the balance and the withdrawal.
let option = bals.get(&tr.from);
if option.is_none() {
return Err(AccountingError::AccountNotFound);
}
let mut bal = option.unwrap().write().unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
return Err(AccountingError::InvalidTransferSignature);
}
if *bal < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
}
*bal -= tr.data.tokens;
Ok(())
}
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
let mut plan = tr.data.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
} else {
let mut pending = self.pending.write().unwrap();
pending.insert(tr.sig, plan);
}
}
/// Process a Transaction that has already been verified.
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
self.process_verified_transaction_debits(tr)?;
self.process_verified_transaction_credits(tr);
Ok(())
}
/// Process a batch of verified transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// Run all debits first to filter out any transactions that can't be processed
// in parallel deterministically.
let results: Vec<_> = trs.into_par_iter()
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tr| {
self.process_verified_transaction_credits(&tr);
tr
})
})
.collect()
}
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
let mut trs = vec![];
let mut rest = vec![];
for event in events {
match event {
Event::Transaction(tr) => trs.push(tr),
_ => rest.push(event),
}
}
(trs, rest)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
let (trs, rest) = Self::partition_events(events);
self.process_verified_transactions(trs);
for event in rest {
self.process_verified_event(&event)?;
}
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(ref payment) = e.get().final_payment() {
apply_payment(&self.balances, payment);
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
self.time_sources.write().unwrap().insert(from);
}
if self.time_sources.read().unwrap().contains(&from) {
if dt > *self.last_time.read().unwrap() {
*self.last_time.write().unwrap() = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending.write().unwrap();
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: &Event) -> Result<()> {
match *event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
}
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances.read().unwrap();
bals.get(pubkey).map(|x| *x.read().unwrap())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_accountant() {
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let acc = Accountant::new(&alice);
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let acc = Accountant::new(&mint);
assert_eq!(
acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
Err(AccountingError::AccountNotFound)
);
}
#[test]
fn test_invalid_transfer() {
let alice = Mint::new(11_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
Err(AccountingError::InsufficientFunds)
);
let alice_pubkey = alice.keypair().pubkey();
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, acknowledge that the time specified in the condition has occurred
// and that Bob's funds are now available.
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
// The current time is already past `dt`, so this transfer should be processed immediately.
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
acc.process_verified_sig(alice.pubkey(), sig).unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
assert_eq!(acc.get_balance(&bob_pubkey), None);
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
}
#[test]
fn test_duplicate_event_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_forget_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
acc.reserve_signature_with_last_id(&sig, &alice.last_id());
assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_max_entry_ids() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let alice = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1];
assert!(acc.process_verified_transactions(trs)[1].is_err());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn process_verified_event_bench(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
acc.process_verified_transaction(&tr).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for sigs in acc.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
acc.process_verified_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}


@@ -1,810 +0,0 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use subscribers;
pub struct AccountantSkel<W: Write + Send + 'static> {
acc: Accountant,
last_id: Hash,
writer: W,
historian: Historian,
entry_info_subscribers: Vec<SocketAddr>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetLastId,
Subscribe { subscriptions: Vec<Subscription> },
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
EntryInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
pub id: Hash,
pub num_hashes: u64,
pub num_events: u64,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
EntryInfo(EntryInfo),
LastId { id: Hash },
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
writer,
historian,
entry_info_subscribers: vec![],
}
}
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
// TODO: No need to bind().
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
for addr in &self.entry_info_subscribers {
let entry_info = EntryInfo {
id: entry.id,
num_hashes: entry.num_hashes,
num_events: entry.events.len() as u64,
};
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
let _res = socket.send_to(&data, addr);
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.historian.receiver.try_recv() {
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
self.notify_entry_info_subscribers(&entry);
}
self.last_id
}
/// Process Request items sent by clients.
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some((Response::Balance { key, val }, rsp_addr))
}
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
Request::Subscribe { subscriptions } => {
for subscription in subscriptions {
match subscription {
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
}
}
None
}
}
}
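/// Block for up to one second waiting for the first batch of packets, then
/// drain any additional batches that are already queued.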
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
batch.push(more);
}
info!("batch len {}", batch.len());
Ok(batch)
}
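/// Split the batch into up to four chunks and run ed25519 signature
/// verification over them in parallel, pairing each packet set with its
/// per-packet verification results (one byte per packet, nonzero = valid).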
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
.into_par_iter()
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
}
Ok(())
}
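/// Deserialize each packet into a Request paired with its sender's address.
/// Packets that fail to deserialize yield None.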
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
fn process_packets(
&mut self,
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> Result<Vec<(Response, SocketAddr)>> {
let (trs, reqs) = Self::partition_requests(req_vers);
// Process the transactions in parallel and then log the successful ones.
for result in self.acc.process_verified_transactions(trs) {
if let Ok(tr) = result {
self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))?;
}
}
// Let validators know they should not attempt to process additional
// transactions in parallel.
self.historian.sender.send(Signal::Tick)?;
// Process the remaining requests serially.
let rsps = reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect();
Ok(rsps)
}
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
// Don't wake up the other side if there is nothing to send.
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
// Write new entries to the ledger and notify subscribers.
obj.lock().unwrap().sync();
}
Ok(())
}
/// Process verified blobs, already in order.
/// Respond with a signed hash of the state.
fn replicate_state(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
for msgs in &blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
obj.lock().unwrap().acc.register_entry_id(&entry.id);
obj.lock()
.unwrap()
.acc
.process_verified_events(entry.events)?;
}
//TODO respond back to leader with hash of the state
}
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// This service acts as the network leader.
/// Set `exit` to shut down its threads.
pub fn serve(
obj: &Arc<Mutex<AccountantSkel<W>>>,
addr: &str,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver =
streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
let (blob_sender, blob_receiver) = channel();
let t_responder =
streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
let (verified_sender, verified_receiver) = channel();
let exit_ = exit.clone();
let t_verifier = spawn(move || loop {
let e = Self::verifier(&packet_receiver, &verified_sender);
if e.is_err() && exit_.load(Ordering::Relaxed) {
break;
}
});
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::process(
&skel,
&verified_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
// Assume this was a timeout, so sync any empty entries.
skel.lock().unwrap().sync();
if exit.load(Ordering::Relaxed) {
break;
}
}
});
Ok(vec![t_receiver, t_responder, t_server, t_verifier])
}
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
/// * `obj` - The accountant state.
/// * `rsubs` - The subscribers.
/// * `exit` - The exit signal.
/// # Remarks
/// The pipeline is constructed as follows:
/// 1. receive blobs from the network, these are out of order
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
/// c. ask the network for missing blobs, if erasure coding is insufficient
/// d. make sure that the blobs PoH sequences connect (TODO)
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
pub fn replicate(
obj: &Arc<Mutex<AccountantSkel<W>>>,
rsubs: subscribers::Subscribers,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(rsubs.me.addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
read,
blob_sender.clone(),
)?;
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let subs = Arc::new(RwLock::new(rsubs));
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
subs.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
subs,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
}
}
#[cfg(test)]
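/// Chunk `reqs` into packet buffers, at most NUM_PACKETS requests per buffer.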
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
let mut out = vec![];
for rrs in reqs.chunks(packet::NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(rrs.len(), Default::default());
for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
out
}
#[cfg(test)]
mod tests {
use accountant_skel::{to_packets, Request};
use bincode::serialize;
use ecdsa;
use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
use transaction::{memfind, test_tx};
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use accountant_stub::AccountantStub;
use entry::Entry;
use futures::Future;
use historian::Historian;
use mint::Mint;
use plan::Plan;
use recorder::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
use transaction::Transaction;
use subscribers::{Node, Subscribers};
use streamer;
use std::sync::mpsc::channel;
use std::collections::VecDeque;
use hash::{hash, Hash};
use event::Event;
use entry;
use chrono::prelude::*;
#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
fn test_accounting_sequential_consistency() {
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
// Process a batch that includes a transaction that receives two tokens.
let alice = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Process a second batch that spends one of those tokens.
let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Collect the ledger and feed it to a new accountant.
skel.historian.sender.send(Signal::Tick).unwrap();
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
// Assert the user holds one token, not two. If the server only output one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let acc = Accountant::new(&mint);
for entry in entries {
acc.process_verified_events(entry.events).unwrap();
}
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
}
#[test]
fn test_accountant_bad_sig() {
let serve_port = 9002;
let send_port = 9003;
let addr = format!("127.0.0.1:{}", serve_port);
let send_addr = format!("127.0.0.1:{}", send_port);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = acc.transfer_signed(tr).unwrap();
let last_id = acc.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
tr2.data.tokens = 502;
tr2.data.plan = Plan::new_payment(502, bob_pubkey);
let _sig = acc.transfer_signed(tr2).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
fn setup() {
INIT.call_once(|| {
env_logger::init().unwrap();
});
}
#[test]
fn test_replicate() {
setup();
let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let leader_addr = leader_sock.local_addr().unwrap();
let me_addr = "127.0.0.1:9010".parse().unwrap();
let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let target_peer_addr = target_peer_sock.local_addr().unwrap();
let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
let subs = Subscribers::new(node_me, node_leader, &node_subs);
// Set up blob services that send blobs into the socket to simulate the
// source peer, and read blobs out of the socket to simulate the target
// peer.
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target_peer_sock,
s_reader,
).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
source_peer_sock,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let alice = Mint::new(starting_balance);
let acc = Accountant::new(&alice);
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Transaction::new(
&alice.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 =
entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&me_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = acc.lock()
.unwrap()
.acc
.get_balance(&alice.keypair().pubkey())
.unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = acc.lock()
.unwrap()
.acc
.get_balance(&bob_keypair.pubkey())
.unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::{Accountant, MAX_ENTRY_IDS};
use accountant_skel::*;
use bincode::serialize;
use hash::hash;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::collections::HashSet;
use std::io::sink;
use std::time::Instant;
use transaction::Transaction;
#[bench]
fn process_packets_bench(_bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
// Create transactions between unrelated parties.
let txs = 100_000;
let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
let transactions: Vec<_> = (0..txs)
.into_par_iter()
.map(|i| {
// Seed the 'to' account and a cell for its signature.
let dummy_id = i % (MAX_ENTRY_IDS as i32);
let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
{
let mut last_ids = last_ids.lock().unwrap();
if !last_ids.contains(&last_id) {
last_ids.insert(last_id);
acc.register_entry_id(&last_id);
}
}
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
acc.process_verified_transaction(&tr).unwrap();
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
let req_vers = transactions
.into_iter()
.map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
.collect();
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
let now = Instant::now();
assert!(skel.process_packets(req_vers).is_ok());
let duration = now.elapsed();
let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
let tps = txs as f64 / sec;
// Ensure that all transactions were successfully logged.
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].events.len(), txs as usize);
println!("{} tps", tps);
}
}


@@ -1,201 +0,0 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
encoding of its messages is unstable and may change in future releases.
use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;
pub struct AccountantStub {
pub addr: String,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl AccountantStub {
/// Create a new AccountantStub that will interface with AccountantSkel
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
let stub = AccountantStub {
addr: addr.to_string(),
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
stub.init();
stub
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
self.socket.recv_from(&mut buf)?;
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
self.balances.insert(key, val);
}
Response::LastId { id } => {
self.last_id = Some(id);
}
Response::EntryInfo(entry_info) => {
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
ok(self.balances[pubkey].unwrap())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetId");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this stub instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use futures::Future;
use historian::Historian;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
// TODO: Figure out why this test sometimes hangs on TravisCI.
#[test]
fn test_accountant_stub() {
let addr = "127.0.0.1:9000";
let send_addr = "127.0.0.1:9001";
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
}

src/bank.rs (new file, 654 lines)

@@ -0,0 +1,654 @@
//! The `bank` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use entry::Entry;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
use transaction::{Instruction, Transaction};
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
AccountNotFound(PublicKey),
InsufficientFunds(PublicKey),
InvalidTransferSignature(Signature),
}
pub type Result<T> = result::Result<T, BankError>;
/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
// First we check balances with a read lock to maximize potential parallelization.
if balances
.read()
.expect("'balances' read lock in apply_payment")
.contains_key(&payment.to)
{
let bals = balances.read().expect("'balances' read lock");
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
// Now we know the key wasn't present a nanosecond ago, but it might be there
// by the time we acquire a write lock, so we'll have to check again.
let mut bals = balances.write().expect("'balances' write lock");
if bals.contains_key(&payment.to) {
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
}
}
}
pub struct Bank {
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
last_time: RwLock<DateTime<Utc>>,
transaction_count: AtomicUsize,
}
impl Bank {
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new());
apply_payment(&balances, deposit);
Bank {
balances,
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
transaction_count: AtomicUsize::new(0),
}
}
/// Create a Bank with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let bank = Self::new_from_deposit(&deposit);
bank.register_entry_id(&mint.last_id());
bank
}
/// Return the last entry ID registered
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
last_item.0
}
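/// Record `sig` in the given signature set. Returns false if it was
/// already present, marking the transaction as a duplicate.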
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures
.read()
.expect("'signatures' read lock")
.contains(sig)
{
return false;
}
signatures
.write()
.expect("'signatures' write lock")
.insert(*sig);
true
}
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
signatures
.write()
.expect("'signatures' write lock in forget_signature")
.remove(sig)
}
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.expect("'last_ids' read lock in forget_signature_with_last_id")
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::forget_signature(&entry.1, sig);
}
false
}
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.expect("'last_ids' read lock in reserve_signature_with_last_id")
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, sig);
}
false
}
/// Tell the bank which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted, the
/// bank will reject transactions that use a booted `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids
.write()
.expect("'last_ids' write lock in register_entry_id");
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
if let Instruction::NewContract(contract) = &tr.instruction {
trace!("Transaction {}", contract.tokens);
}
let bals = self.balances
.read()
.expect("'balances' read lock in process_verified_transaction_debits");
let option = bals.get(&tr.from);
if option.is_none() {
return Err(BankError::AccountNotFound(tr.from));
}
if !self.reserve_signature_with_last_id(&tr.sig, &tr.last_id) {
return Err(BankError::InvalidTransferSignature(tr.sig));
}
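// Loop until the compare-and-swap of the debited balance succeeds;
// a concurrent debit against the same account forces a retry.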
loop {
let result = if let Instruction::NewContract(contract) = &tr.instruction {
let bal = option.expect("assignment of option to bal");
let current = bal.load(Ordering::Relaxed) as i64;
if current < contract.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.last_id);
return Err(BankError::InsufficientFunds(tr.from));
}
bal.compare_exchange(
current as isize,
(current - contract.tokens) as isize,
Ordering::Relaxed,
Ordering::Relaxed,
)
} else {
Ok(0)
};
match result {
Ok(_) => {
self.transaction_count.fetch_add(1, Ordering::Relaxed);
return Ok(());
}
Err(_) => continue,
};
}
}
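/// Apply the credit side of a verified transaction: pay out immediately if
/// the spending plan is already complete, otherwise park the plan in
/// 'pending' until a timestamp or signature witness completes it. Witness
/// instructions are routed to their respective handlers.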
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
match &tr.instruction {
Instruction::NewContract(contract) => {
let mut plan = contract.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("timestamp creation in process_verified_transaction_credits")));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
} else {
let mut pending = self.pending
.write()
.expect("'pending' write lock in process_verified_transaction_credits");
pending.insert(tr.sig, plan);
}
}
Instruction::ApplyTimestamp(dt) => {
let _ = self.process_verified_timestamp(tr.from, *dt);
}
Instruction::ApplySignature(tx_sig) => {
let _ = self.process_verified_sig(tr.from, *tx_sig);
}
}
}
/// Process a Transaction that has already been verified.
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
self.process_verified_transaction_debits(tr)?;
self.process_verified_transaction_credits(tr);
Ok(())
}
/// Process a batch of verified transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// Run all debits first to filter out any transactions that can't be processed
// in parallel deterministically.
info!("processing Transactions {}", trs.len());
let results: Vec<_> = trs.into_par_iter()
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tr| {
self.process_verified_transaction_credits(&tr);
tr
})
})
.collect()
}
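/// Split events into transactions and the remainder. Event currently has
/// only a Transaction variant, so the remainder is always empty.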
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
(
events
.into_iter()
.map(|Event::Transaction(tr)| tr)
.collect(),
vec![],
)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
let (trs, rest) = Self::partition_events(events);
let mut results: Vec<_> = self.process_verified_transactions(trs)
.into_iter()
.map(|x| x.map(Event::Transaction))
.collect();
for event in rest {
results.push(self.process_verified_event(event));
}
results
}
pub fn process_verified_entries(&self, entries: Vec<Entry>) -> Result<()> {
for entry in entries {
self.register_entry_id(&entry.id);
for result in self.process_verified_events(entry.events) {
result?;
}
}
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending
.write()
.expect("write() in process_verified_sig")
.entry(tx_sig)
{
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(payment) = e.get().final_payment() {
apply_payment(&self.balances, &payment);
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time
.read()
.expect("'last_time' read lock on first timestamp check")
== Utc.timestamp(0, 0)
{
self.time_sources
.write()
.expect("'time_sources' write lock on first timestamp")
.insert(from);
}
if self.time_sources
.read()
.expect("'time_sources' read lock")
.contains(&from)
{
if dt > *self.last_time.read().expect("'last_time' read lock") {
*self.last_time.write().expect("'last_time' write lock") = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending
.write()
.expect("'pending' write lock in process_verified_timestamp");
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("'last_time' read lock when creating timestamp")));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: Event) -> Result<Event> {
match event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
}?;
Ok(event)
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances
.read()
.expect("'balances' read lock in get_balance");
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
}
pub fn transaction_count(&self) -> usize {
self.transaction_count.load(Ordering::Relaxed)
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_bank() {
let mint = Mint::new(10_000);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(bank.last_id(), mint.last_id());
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
assert_eq!(
bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
Err(BankError::AccountNotFound(keypair.pubkey()))
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_invalid_transfer() {
let mint = Mint::new(11_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(
bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::InsufficientFunds(mint.pubkey()))
);
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint.keypair().pubkey();
assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let mint = Mint::new(10_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// tx count is 1, because debits were applied.
assert_eq!(bank.transaction_count(), 1);
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, acknowledge that the time specified in the condition has
// occurred, making pubkey's funds available.
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
assert_eq!(bank.get_balance(&pubkey), Some(1));
// tx count is still 1, because we chose not to count timestamp events
// toward the tx count.
assert_eq!(bank.transaction_count(), 1);
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(bank.get_balance(&pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
// The bank's clock now reads `dt`, so this transfer should be processed immediately.
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Assert the debit counts as a transaction.
assert_eq!(bank.transaction_count(), 1);
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, cancel the transaction. The mint gets its funds back, pubkey never sees them.
bank.process_verified_sig(mint.pubkey(), sig).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
assert_eq!(bank.get_balance(&pubkey), None);
// Assert cancel doesn't cause count to go backward.
assert_eq!(bank.transaction_count(), 1);
bank.process_verified_sig(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
}
#[test]
fn test_duplicate_event_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
assert!(bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
}
#[test]
fn test_forget_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
bank.reserve_signature_with_last_id(&sig, &mint.last_id());
assert!(bank.forget_signature_with_last_id(&sig, &mint.last_id()));
assert!(!bank.forget_signature_with_last_id(&sig, &mint.last_id()));
}
#[test]
fn test_max_entry_ids() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1];
let results = bank.process_verified_transactions(trs);
assert!(results[1].is_err());
// Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn process_verified_event_bench(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
bank.process_verified_transaction(&tr).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
bank.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for sigs in bank.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
bank.process_verified_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}
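The tests above exercise the signing `transfer` API; for the ledger-replay path, here is a minimal sketch (illustrative only — it assumes these modules are re-exported at the crate root as `solana::…`, and it reuses the `entry::create_entry` helper seen elsewhere in this changeset) of feeding entries through `process_verified_entries`:

extern crate solana;

use solana::bank::Bank;
use solana::entry;
use solana::event::Event;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;

fn main() {
    let mint = Mint::new(10_000);
    let alice = KeyPair::new();

    // Build a one-entry ledger containing a single signed transfer.
    let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 500, mint.last_id());
    let entry = entry::create_entry(&mint.last_id(), 1, vec![Event::Transaction(tr)]);

    // Replay it into a fresh Bank; each entry's id is registered before
    // its events are processed.
    let bank = Bank::new(&mint);
    bank.process_verified_entries(vec![entry]).unwrap();
    assert_eq!(bank.get_balance(&alice.pubkey()), Some(500));
}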

src/banking_stage.rs (new file, 308 lines)

@@ -0,0 +1,308 @@
//! The `banking_stage` processes Event messages.
use bank::Bank;
use bincode::deserialize;
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
pub struct BankingStage {
pub thread_hdl: JoinHandle<()>,
pub signal_receiver: Receiver<Signal>,
}
impl BankingStage {
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_recycler: packet::PacketRecycler,
) -> Self {
let (signal_sender, signal_receiver) = channel();
let thread_hdl = spawn(move || loop {
let e = Self::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
BankingStage {
thread_hdl,
signal_receiver,
}
}
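/// Deserialize each packet into an Event paired with its sender's address;
/// packets that fail to deserialize yield None.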
fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
fn process_packets(
bank: Arc<Bank>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
signal_sender: &Sender<Signal>,
packet_recycler: &packet::PacketRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
let mms = verified_receiver.recv_timeout(timer)?;
let mut reqs_len = 0;
let mms_len = mms.len();
info!(
"@{:?} process start stalled for: {:?}ms batches: {}",
timing::timestamp(),
timing::duration_as_ms(&recv_start.elapsed()),
mms.len(),
);
let proc_start = Instant::now();
for (msgs, vers) in mms {
let events = Self::deserialize_events(&msgs.read().unwrap());
reqs_len += events.len();
let events = events
.into_iter()
.zip(vers)
.filter_map(|(event, ver)| match event {
None => None,
Some((event, _addr)) => if event.verify() && ver != 0 {
Some(event)
} else {
None
},
})
.collect();
debug!("process_events");
let results = bank.process_verified_events(events);
let events = results.into_iter().filter_map(|x| x.ok()).collect();
signal_sender.send(Signal::Events(events))?;
debug!("done process_events");
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done processing event batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
mms_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
}
// TODO: When banking is pulled out of RequestStage, add this test back in.
//use bank::Bank;
//use entry::Entry;
//use event::Event;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//
//#[cfg(test)]
//mod tests {
// use bank::Bank;
// use event::Event;
// use event_processor::EventProcessor;
// use mint::Mint;
// use signature::{KeyPair, KeyPairUtil};
// use transaction::Transaction;
//
// #[test]
// // TODO: Move this test banking_stage. Calling process_events() directly
// // defeats the purpose of this test.
// fn test_banking_sequential_consistency() {
// // In this attack we'll demonstrate that a verifier can interpret the ledger
// // differently if either the server doesn't signal the ledger to add an
// // Entry OR if the verifier tries to parallelize across multiple Entries.
// let mint = Mint::new(2);
// let bank = Bank::new(&mint);
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
// // Process a batch that includes a transaction that receives two tokens.
// let alice = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
// let events = vec![Event::Transaction(tr)];
// let entry0 = event_processor.process_events(events).unwrap();
//
// // Process a second batch that spends one of those tokens.
// let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
// let events = vec![Event::Transaction(tr)];
// let entry1 = event_processor.process_events(events).unwrap();
//
// // Collect the ledger and feed it to a new bank.
// let entries = vec![entry0, entry1];
//
// // Assert the user holds one token, not two. If the server only output one
// // entry, then the second transaction will be rejected, because it drives
// // the account balance below zero before the credit is added.
// let bank = Bank::new(&mint);
// for entry in entries {
// assert!(
// bank
// .process_verified_events(entry.events)
// .into_iter()
// .all(|x| x.is_ok())
// );
// }
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
// }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
// extern crate test;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use event_processor::*;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// #[bench]
// fn process_events_bench(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_verified_transaction(&tr).unwrap();
//
// let rando1 = KeyPair::new();
// let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_verified_transaction(&tr).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let events: Vec<_> = transactions
// .into_iter()
// .map(|tr| Event::Transaction(tr))
// .collect();
//
// let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(event_processor.process_events(events).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(event_processor.historian_input);
// let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].events.len(), txs as usize);
//
// println!("{} tps", tps);
// }
//}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use banking_stage::BankingStage;
use event::Event;
use mint::Mint;
use packet::{to_packets, PacketRecycler};
use record_stage::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::iter;
use std::sync::Arc;
use std::sync::mpsc::channel;
#[bench]
fn stage_bench(bencher: &mut Bencher) {
let tx = 100_usize;
let mint = Mint::new(1_000_000_000);
let pubkey = KeyPair::new().pubkey();
let events: Vec<_> = (0..tx)
.map(|i| Event::new_transaction(&mint.keypair(), pubkey, i as i64, mint.last_id()))
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets(&packet_recycler, events)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
let signal = signal_receiver.recv().unwrap();
if let Signal::Events(ref events) = signal {
assert_eq!(events.len(), tx);
} else {
assert!(false);
}
});
}
}
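Note: the bench above feeds BankingStage::process_packets pre-verified packets as (packets, flags) pairs, with one flag byte per packet (here all 1s via iter::repeat). A minimal sketch, under those assumptions, of how such flags gate which items survive verification:

use std::iter;

// Keep only the items whose flag byte is nonzero (signature verified).
fn filter_verified<T: Clone>(items: &[T], flags: &[u8]) -> Vec<T> {
    items
        .iter()
        .zip(flags)
        .filter(|&(_, &flag)| flag != 0)
        .map(|(item, _)| item.clone())
        .collect()
}

fn main() {
    let events = vec!["tx0", "tx1", "tx2"];
    // The bench marks everything verified, like iter::repeat(1) above:
    let all_ok: Vec<u8> = iter::repeat(1).take(events.len()).collect();
    assert_eq!(filter_verified(&events, &all_ok).len(), 3);
    // With a failed signature in the middle, only the other two survive:
    assert_eq!(filter_verified(&events, &[1u8, 0, 1]), vec!["tx0", "tx2"]);
}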

View File

@@ -4,22 +4,22 @@ extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
-extern crate untrusted;
use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
-use solana::accountant_stub::AccountantStub;
use solana::mint::MintDemo;
-use solana::signature::{KeyPair, KeyPairUtil};
+use solana::signature::{GenKeys, KeyPairUtil};
+use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::io::{stdin, Read};
-use std::net::UdpSocket;
+use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
+use std::thread::sleep;
+use std::time::Duration;
use std::time::Instant;
-use untrusted::Input;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
@@ -32,13 +32,13 @@ fn print_usage(program: &str, opts: Options) {
fn main() {
    let mut threads = 4usize;
-   let mut addr: String = "127.0.0.1:8000".to_string();
-   let mut send_addr: String = "127.0.0.1:8001".to_string();
+   let mut server_addr: String = "127.0.0.1:8000".to_string();
+   let mut requests_addr: String = "127.0.0.1:8010".to_string();
    let mut opts = Options::new();
    opts.optopt("s", "", "server address", "host:port");
    opts.optopt("c", "", "client address", "host:port");
-   opts.optopt("t", "", "number of threads", "4");
+   opts.optopt("t", "", "number of threads", &format!("{}", threads));
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
@@ -55,15 +55,19 @@
        return;
    }
    if matches.opt_present("s") {
-       addr = matches.opt_str("s").unwrap();
+       server_addr = matches.opt_str("s").unwrap();
    }
    if matches.opt_present("c") {
-       send_addr = matches.opt_str("c").unwrap();
+       requests_addr = matches.opt_str("c").unwrap();
    }
    if matches.opt_present("t") {
        threads = matches.opt_str("t").unwrap().parse().expect("integer");
    }
+   let mut events_addr: SocketAddr = requests_addr.parse().unwrap();
+   let requests_port = events_addr.port();
+   events_addr.set_port(requests_port + 1);
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
@@ -82,18 +86,32 @@
        exit(1);
    });
-   let socket = UdpSocket::bind(&send_addr).unwrap();
-   let mut acc = AccountantStub::new(&addr, socket);
+   println!("Binding to {}", requests_addr);
+   let requests_socket = UdpSocket::bind(&requests_addr).unwrap();
+   requests_socket
+       .set_read_timeout(Some(Duration::new(5, 0)))
+       .unwrap();
+   let events_socket = UdpSocket::bind(&events_addr).unwrap();
+   let requests_addr: SocketAddr = server_addr.parse().unwrap();
+   let requests_port = requests_addr.port();
+   let mut events_server_addr = requests_addr.clone();
+   events_server_addr.set_port(requests_port + 3);
+   let mut client = ThinClient::new(
+       requests_addr,
+       requests_socket,
+       events_server_addr,
+       events_socket,
+   );
    println!("Get last ID...");
-   let last_id = acc.get_last_id().wait().unwrap();
+   let last_id = client.get_last_id().wait().unwrap();
+   println!("Got last ID {:?}", last_id);
+   let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
    println!("Creating keypairs...");
-   let txs = demo.users.len() / 2;
-   let keypairs: Vec<_> = demo.users
-       .into_par_iter()
-       .map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
-       .collect();
+   let txs = demo.num_accounts / 2;
+   let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
    println!("Signing transactions...");
@@ -102,7 +120,7 @@
        .into_par_iter()
        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
        .collect();
-   let duration = now.elapsed();
+   let mut duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
@@ -112,32 +130,46 @@
        nsps / 1_000_f64
    );
-   let initial_tx_count = acc.transaction_count();
+   let initial_tx_count = client.transaction_count();
+   println!("initial count {}", initial_tx_count);
    println!("Transfering {} transactions in {} batches", txs, threads);
    let now = Instant::now();
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks.into_par_iter().for_each(|trs| {
-       println!("Transferring 1 unit {} times...", trs.len());
-       let send_addr = "0.0.0.0:0";
-       let socket = UdpSocket::bind(send_addr).unwrap();
-       let acc = AccountantStub::new(&addr, socket);
+       println!("Transferring 1 unit {} times... to", trs.len());
+       let requests_addr: SocketAddr = server_addr.parse().unwrap();
+       let mut requests_cb_addr = requests_addr.clone();
+       requests_cb_addr.set_port(0);
+       let requests_socket = UdpSocket::bind(requests_cb_addr).unwrap();
+       requests_socket
+           .set_read_timeout(Some(Duration::new(5, 0)))
+           .unwrap();
+       let mut events_addr: SocketAddr = requests_addr.clone();
+       events_addr.set_port(0);
+       let events_socket = UdpSocket::bind(&events_addr).unwrap();
+       let client = ThinClient::new(
+           requests_addr,
+           requests_socket,
+           events_server_addr,
+           events_socket,
+       );
        for tr in trs {
-           acc.transfer_signed(tr.clone()).unwrap();
+           client.transfer_signed(tr.clone()).unwrap();
        }
    });
-   println!("Waiting for half the transactions to complete...",);
-   let mut tx_count = acc.transaction_count();
-   while tx_count < transactions.len() as u64 / 2 {
-       tx_count = acc.transaction_count();
-   }
-   let txs = tx_count - initial_tx_count;
-   println!("Transactions processed {}", txs);
-   let duration = now.elapsed();
-   let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
-   let tps = (txs * 1_000_000_000) as f64 / ns as f64;
-   println!("Done. {} tps", tps);
+   println!("Waiting for transactions to complete...",);
+   let mut tx_count;
+   for _ in 0..10 {
+       tx_count = client.transaction_count();
+       duration = now.elapsed();
+       let txs = tx_count - initial_tx_count;
+       println!("Transactions processed {}", txs);
+       let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
+       let tps = (txs * 1_000_000_000) as f64 / ns as f64;
+       println!("{} tps", tps);
+       sleep(Duration::new(1, 0));
+   }
}
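Note: the sampling loop above converts elapsed time to nanoseconds before dividing, so sub-second runs are not truncated to zero. The same arithmetic as a standalone helper (illustrative only):

use std::time::Duration;

// tps = transactions * 1e9 / elapsed_nanoseconds, computed in u64 then f64.
fn tps(tx_count: u64, elapsed: Duration) -> f64 {
    let ns = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    (tx_count * 1_000_000_000) as f64 / ns as f64
}

fn main() {
    // 500 transactions in 2 seconds comes out to 250 tps.
    assert_eq!(tps(500, Duration::new(2, 0)), 250.0);
    // Sub-second runs still work because nothing truncates to whole seconds.
    assert_eq!(tps(500, Duration::new(0, 500_000_000)), 1000.0);
}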

View File

@@ -1,21 +1,17 @@
extern crate isatty;
extern crate rayon;
-extern crate ring;
extern crate serde_json;
extern crate solana;
-extern crate untrusted;
use isatty::stdin_isatty;
use rayon::prelude::*;
-use solana::accountant::MAX_ENTRY_IDS;
-use solana::entry::{create_entry, next_tick};
+use solana::bank::MAX_ENTRY_IDS;
+use solana::entry::{next_entry, Entry};
use solana::event::Event;
use solana::mint::MintDemo;
-use solana::signature::{KeyPair, KeyPairUtil};
-use solana::transaction::Transaction;
+use solana::signature::{GenKeys, KeyPairUtil};
use std::io::{stdin, Read};
use std::process::exit;
-use untrusted::Input;

// Generate a ledger with lots and lots of accounts.
fn main() {
@@ -36,17 +32,21 @@
        exit(1);
    });
-   let num_accounts = demo.users.len();
-   let last_id = demo.mint.last_id();
+   let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
+   let num_accounts = demo.num_accounts;
+   let tokens_per_user = 1_000;
+   let keypairs = rnd.gen_n_keypairs(num_accounts);
    let mint_keypair = demo.mint.keypair();
+   let last_id = demo.mint.last_id();
    eprintln!("Signing {} transactions...", num_accounts);
-   let events: Vec<_> = demo.users
+   let events: Vec<_> = keypairs
        .into_par_iter()
-       .map(|(pkcs8, tokens)| {
-           let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
-           let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
-           Event::Transaction(tr)
+       .map(|rando| {
+           let last_id = demo.mint.last_id();
+           Event::new_transaction(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
        })
        .collect();
@@ -55,14 +55,14 @@
    }
    eprintln!("Logging the creation of {} accounts...", num_accounts);
-   let entry = create_entry(&last_id, 0, events);
+   let entry = Entry::new(&last_id, 0, events);
    println!("{}", serde_json::to_string(&entry).unwrap());
    eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
    // Offer client lots of entry IDs to use for each transaction's last_id.
    let mut last_id = last_id;
    for _ in 0..MAX_ENTRY_IDS {
-       let entry = next_tick(&last_id, 1);
+       let entry = next_entry(&last_id, 1, vec![]);
        last_id = entry.id;
        let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
            eprintln!("failed to serialize: {}", e);

View File

@@ -1,37 +0,0 @@
extern crate solana;
use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;
fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
let signal0 = Signal::Event(Event::Transaction(tr));
hist.sender.send(signal0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}
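Note: the deleted historian demo above exercised the core Proof-of-History property: every entry id must be reproducible by hashing forward from the previous id, so the order of recorded events is externally verifiable. A self-contained sketch of that check, with a toy stand-in for the hash chain (the real verifier uses SHA-256 and also mixes event signatures into the hash):

// Toy stand-in for iterated hashing: fold a permutation num_hashes times.
fn next_id(prev: u64, num_hashes: u64) -> u64 {
    (0..num_hashes).fold(prev, |h, _| {
        h.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(1)
    })
}

struct Entry {
    num_hashes: u64,
    id: u64,
}

// Verify each entry's id is reachable by hashing forward from the last id.
fn verify(seed: u64, entries: &[Entry]) -> bool {
    let mut prev = seed;
    for entry in entries {
        if next_id(prev, entry.num_hashes) != entry.id {
            return false;
        }
        prev = entry.id;
    }
    true
}

fn main() {
    let seed = 42;
    let e1 = Entry { num_hashes: 3, id: next_id(seed, 3) };
    let e2 = Entry { num_hashes: 5, id: next_id(e1.id, 5) };
    assert!(verify(seed, &[e1, e2]));
    // A reordered or forged entry breaks the chain.
    let bogus = Entry { num_hashes: 5, id: 0xdead };
    assert!(!verify(seed, &[bogus]));
}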

View File

@@ -3,10 +3,7 @@ extern crate ring;
extern crate serde_json;
extern crate solana;
-use rayon::prelude::*;
-use ring::rand::SystemRandom;
use solana::mint::{Mint, MintDemo};
-use solana::signature::KeyPair;
use std::io;

fn main() {
@@ -18,16 +15,7 @@
    let mint = Mint::new(tokens);
    let tokens_per_user = 1_000;
    let num_accounts = tokens / tokens_per_user;
-   let rnd = SystemRandom::new();
-   let users: Vec<_> = (0..num_accounts)
-       .into_par_iter()
-       .map(|_| {
-           let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
-           (pkcs8, tokens_per_user)
-       })
-       .collect();
-   let demo = MintDemo { mint, users };
+   let demo = MintDemo { mint, num_accounts };
    println!("{}", serde_json::to_string(&demo).unwrap());
}
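Note: MintDemo can drop its per-user pkcs8 list because keypair generation is now deterministic: gen-demo and client-demo both seed GenKeys with the mint's public key bytes and regenerate identical keypairs, so only num_accounts needs to be serialized. A toy model of that property (xorshift stand-in; the real GenKeys derives actual ed25519 keypairs from a seeded RNG):

// Toy deterministic generator; only the determinism property matters here.
struct GenKeys {
    state: u64,
}

impl GenKeys {
    fn new(seed: u64) -> Self {
        GenKeys { state: seed }
    }
    fn gen_keypair(&mut self) -> u64 {
        // xorshift64 stand-in for key derivation (seed must be nonzero)
        self.state ^= self.state << 13;
        self.state ^= self.state >> 7;
        self.state ^= self.state << 17;
        self.state
    }
    fn gen_n_keypairs(&mut self, n: u64) -> Vec<u64> {
        (0..n).map(|_| self.gen_keypair()).collect()
    }
}

fn main() {
    // Same seed on the genesis side and the client side -> same "keys".
    let a = GenKeys::new(0xdead_beef).gen_n_keypairs(4);
    let b = GenKeys::new(0xdead_beef).gen_n_keypairs(4);
    assert_eq!(a, b);
}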

src/bin/multinode-demo.rs (new file, 261 lines)
View File

@@ -0,0 +1,261 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut threads = 4usize;
let mut num_nodes = 10usize;
let mut leader = "leader.json".to_string();
let mut opts = Options::new();
opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("l") {
leader = matches.opt_str("l").unwrap();
}
let client_addr: Arc<RwLock<SocketAddr>> = if matches.opt_present("c") {
let addr = matches.opt_str("c").unwrap().parse().unwrap();
Arc::new(RwLock::new(addr))
} else {
Arc::new(RwLock::new("127.0.0.1:8010".parse().unwrap()))
};
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}
let leader: ReplicatedData = read_leader(leader);
let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes + 2,
&mut c_threads,
);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mut client = mk_client(&client_addr, &leader);
println!("Get last ID...");
let last_id = client.get_last_id().wait().unwrap();
println!("Got last ID {:?}", last_id);
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
println!("Creating keypairs...");
let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {} thousand signatures per second, {}us per signature",
bsps * 1_000_000_f64,
nsps / 1_000_f64
);
let first_count = client.transaction_count();
println!("initial count {}", first_count);
println!("Transfering {} transactions in {} batches", txs, threads);
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times... to", trs.len());
let client = mk_client(&client_addr, &leader);
for tr in trs {
client.transfer_signed(tr.clone()).unwrap();
}
});
println!("Sampling tps every second...",);
validators.into_par_iter().for_each(|val| {
let mut client = mk_client(&client_addr, &val);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
for i in 0..100 {
let tx_count = client.transaction_count();
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("{}: Transactions processed {}", val.events_addr, sample);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
println!("{}: {} tps", val.events_addr, tps);
let total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
val.events_addr, total
);
if total == transactions.len() as u64 {
break;
}
if i > 20 && sample == 0 {
break;
}
sleep(Duration::new(1, 0));
}
});
signal.store(true, Ordering::Relaxed);
for t in c_threads {
t.join().unwrap();
}
}
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let events_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
requests_socket,
r.events_addr,
events_socket,
)
}
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
(node, gossip)
}
fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
for _ in 0..30 {
let min = spy_ref.read().unwrap().convergence();
if num_nodes as u64 == min {
println!("converged!");
break;
}
sleep(Duration::new(1, 0));
}
threads.push(t_spy_listen);
threads.push(t_spy_gossip);
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.map(|x| x.clone())
.collect();
v.clone()
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path).expect("file");
serde_json::from_reader(file).expect("parse")
}
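Note: converge() above is a polling barrier: it plants a passive spy node in the gossip network, then polls the CRDT's convergence() estimate until the expected node count is seen or ~30 seconds elapse. The polling pattern in miniature (a shared counter standing in for the CRDT):

use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;

// Poll a shared view once per second until it reports the expected size.
fn wait_for_convergence(view: &Arc<RwLock<u64>>, expected: u64, max_secs: u64) -> bool {
    for _ in 0..max_secs {
        if *view.read().unwrap() >= expected {
            return true;
        }
        sleep(Duration::new(1, 0));
    }
    false
}

fn main() {
    let view = Arc::new(RwLock::new(0u64));
    *view.write().unwrap() = 3; // a gossip thread would drive this up
    assert!(wait_for_convergence(&view, 3, 30));
}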

View File

@@ -1,21 +1,28 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
+extern crate pnet;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
-use solana::accountant::Accountant;
-use solana::accountant_skel::AccountantSkel;
+use pnet::datalink;
+use solana::bank::Bank;
+use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
-use solana::historian::Historian;
+use solana::server::Server;
+use solana::signature::{KeyPair, KeyPairUtil};
+use solana::transaction::Instruction;
use std::env;
+use std::fs::File;
use std::io::{stdin, stdout, Read};
+use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
+use std::sync::Arc;
use std::sync::atomic::AtomicBool;
-use std::sync::{Arc, Mutex};
+use std::time::Duration;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@@ -28,10 +35,17 @@
fn main() {
    env_logger::init().unwrap();
-   let mut port = 8000u16;
    let mut opts = Options::new();
-   opts.optopt("p", "", "port", "port");
+   opts.optopt("b", "", "bind", "bind to port or address");
+   opts.optflag("d", "dyn", "detect network address dynamically");
+   opts.optopt("s", "", "save", "save my identity to path.json");
    opts.optflag("h", "help", "print help");
+   opts.optopt(
+       "v",
+       "",
+       "validator",
+       "run as replicate with path to leader.json",
+   );
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
@@ -45,11 +59,14 @@
        print_usage(&program, opts);
        return;
    }
-   if matches.opt_present("p") {
-       port = matches.opt_str("p").unwrap().parse().expect("port");
-   }
-   let addr = format!("0.0.0.0:{}", port);
+   let bind_addr: SocketAddr = {
+       let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
+       if matches.opt_present("d") {
+           let ip = get_ip_addr().unwrap();
+           bind_addr.set_ip(ip);
+       }
+       bind_addr
+   };
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a log file");
        exit(1);
@@ -70,6 +87,8 @@
        })
    });
+   eprintln!("done parsing...");
    // The first item in the ledger is required to be an entry with zero num_hashes,
    // which implies its id can be used as the ledger's seed.
    let entry0 = entries.next().unwrap();
@@ -78,34 +97,141 @@
    // fields are the same. That entry should be treated as a deposit, not a
    // transfer to oneself.
    let entry1: Entry = entries.next().unwrap();
-   let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
-       tr.data.plan.final_payment()
+   let Event::Transaction(ref tr) = entry1.events[0];
+   let deposit = if let Instruction::NewContract(contract) = &tr.instruction {
+       contract.plan.final_payment()
    } else {
        None
    };
-   let acc = Accountant::new_from_deposit(&deposit.unwrap());
-   acc.register_entry_id(&entry0.id);
-   acc.register_entry_id(&entry1.id);
+   eprintln!("creating bank...");
+   let bank = Bank::new_from_deposit(&deposit.unwrap());
+   bank.register_entry_id(&entry0.id);
+   bank.register_entry_id(&entry1.id);
+   eprintln!("processing entries...");
    let mut last_id = entry1.id;
    for entry in entries {
        last_id = entry.id;
-       acc.process_verified_events(entry.events).unwrap();
-       acc.register_entry_id(&last_id);
+       let results = bank.process_verified_events(entry.events);
+       for result in results {
+           if let Err(e) = result {
+               eprintln!("failed to process event {:?}", e);
+               exit(1);
+           }
+       }
+       bank.register_entry_id(&last_id);
    }
-   let historian = Historian::new(&last_id, Some(1000));
+   eprintln!("creating networking stack...");
    let exit = Arc::new(AtomicBool::new(false));
-   let skel = Arc::new(Mutex::new(AccountantSkel::new(
-       acc,
-       last_id,
-       stdout(),
-       historian,
-   )));
-   let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
-   eprintln!("Ready. Listening on {}", addr);
+   // we need all the receiving sockets to be bound within the expected
+   // port range that we open on aws
+   let mut repl_data = make_repl_data(&bind_addr);
+   let threads = if matches.opt_present("v") {
+       eprintln!("starting validator... {}", repl_data.requests_addr);
+       let path = matches.opt_str("v").unwrap();
+       let file = File::open(path).expect("file");
+       let leader = serde_json::from_reader(file).expect("parse");
+       let s = Server::new_validator(
+           bank,
+           repl_data.clone(),
+           UdpSocket::bind(repl_data.requests_addr).unwrap(),
+           UdpSocket::bind("0.0.0.0:0").unwrap(),
+           UdpSocket::bind(repl_data.replicate_addr).unwrap(),
+           UdpSocket::bind(repl_data.gossip_addr).unwrap(),
+           leader,
+           exit.clone(),
+       );
+       s.thread_hdls
+   } else {
+       eprintln!("starting leader... {}", repl_data.requests_addr);
+       repl_data.current_leader_id = repl_data.id.clone();
+       let server = Server::new_leader(
+           bank,
+           last_id,
+           Some(Duration::from_millis(1000)),
+           repl_data.clone(),
+           UdpSocket::bind(repl_data.requests_addr).unwrap(),
+           UdpSocket::bind(repl_data.events_addr).unwrap(),
+           UdpSocket::bind("0.0.0.0:0").unwrap(),
+           UdpSocket::bind("0.0.0.0:0").unwrap(),
+           UdpSocket::bind(repl_data.gossip_addr).unwrap(),
+           exit.clone(),
+           stdout(),
+       );
+       server.thread_hdls
+   };
+   if matches.opt_present("s") {
+       let path = matches.opt_str("s").unwrap();
+       let file = File::create(path).expect("file");
+       serde_json::to_writer(file, &repl_data).expect("serialize");
+   }
+   eprintln!("Ready. Listening on {}", bind_addr);
    for t in threads {
        t.join().expect("join");
    }
}
fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut gossip_addr = server_addr.clone();
gossip_addr.set_port(server_addr.port() + nxt);
gossip_addr
}
fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
let events_addr = bind_addr.clone();
let gossip_addr = next_port(&bind_addr, 1);
let replicate_addr = next_port(&bind_addr, 2);
let requests_addr = next_port(&bind_addr, 3);
let pubkey = KeyPair::new().pubkey();
ReplicatedData::new(
pubkey,
gossip_addr,
replicate_addr,
requests_addr,
events_addr,
)
}
fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
if let Some(addrstr) = optstr {
if let Ok(port) = addrstr.parse() {
let mut addr = daddr.clone();
addr.set_port(port);
addr
} else if let Ok(addr) = addrstr.parse() {
addr
} else {
daddr
}
} else {
daddr
}
}
fn get_ip_addr() -> Option<IpAddr> {
for iface in datalink::interfaces() {
for p in iface.ips {
if !p.ip().is_loopback() && !p.ip().is_multicast() {
return Some(p.ip());
}
}
}
None
}
#[test]
fn test_parse_port_or_addr() {
let p1 = parse_port_or_addr(Some("9000".to_string()));
assert_eq!(p1.port(), 9000);
let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
assert_eq!(p2.port(), 7000);
let p3 = parse_port_or_addr(None);
assert_eq!(p3.port(), 8000);
}
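Note: make_repl_data() assigns all four service sockets at fixed offsets from the bind address (events on the bind port itself, then gossip, replicate, and requests behind it), so a firewall only needs one small contiguous port range open. Spelled out with the same next_port arithmetic:

use std::net::SocketAddr;

fn next_port(base: &SocketAddr, nxt: u16) -> SocketAddr {
    let mut addr = base.clone();
    addr.set_port(base.port() + nxt);
    addr
}

fn main() {
    let bind: SocketAddr = "10.0.0.1:8000".parse().unwrap();
    assert_eq!(bind.port(), 8000); // events_addr
    assert_eq!(next_port(&bind, 1).port(), 8001); // gossip_addr
    assert_eq!(next_port(&bind, 2).port(), 8002); // replicate_addr
    assert_eq!(next_port(&bind, 3).port(), 8003); // requests_addr
}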

View File

@@ -1,16 +1,27 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
-//! a gossip control plane. The goal is to share small bits of of-chain information and detect and
+//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
-//! The last version is always picked durring an update.
+//! The last version is always picked during an update.
+//!
+//! The network is arranged in layers:
+//!
+//! * layer 0 - Leader.
+//! * layer 1 - As many nodes as we can fit
+//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
+//!
+//! Bank needs to provide an interface for us to query the stake weight
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
-use result::Result;
+use packet::SharedBlob;
+use rayon::prelude::*;
+use result::{Error, Result};
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
+use std;
use std::collections::HashMap;
use std::io::Cursor;
use std::net::{SocketAddr, UdpSocket};
@@ -20,20 +31,22 @@
use std::time::Duration;

/// Structure to be replicated by the network
-#[derive(Serialize, Deserialize, Clone)]
+#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ReplicatedData {
-   id: PublicKey,
+   pub id: PublicKey,
    sig: Signature,
    /// should always be increasing
    version: u64,
    /// address to connect to for gossip
-   gossip_addr: SocketAddr,
+   pub gossip_addr: SocketAddr,
    /// address to connect to for replication
-   replicate_addr: SocketAddr,
+   pub replicate_addr: SocketAddr,
    /// address to connect to when this node is leader
-   lead_addr: SocketAddr,
+   pub requests_addr: SocketAddr,
+   /// events address
+   pub events_addr: SocketAddr,
    /// current leader identity
-   current_leader_id: PublicKey,
+   pub current_leader_id: PublicKey,
    /// last verified hash that was submitted to the leader
    last_verified_hash: Hash,
    /// last verified count, always increasing
@@ -41,15 +54,21 @@
}

impl ReplicatedData {
-   pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
-       let daddr = "0.0.0.0:0".parse().unwrap();
+   pub fn new(
+       id: PublicKey,
+       gossip_addr: SocketAddr,
+       replicate_addr: SocketAddr,
+       requests_addr: SocketAddr,
+       events_addr: SocketAddr,
+   ) -> ReplicatedData {
        ReplicatedData {
            id,
            sig: Signature::default(),
            version: 0,
            gossip_addr,
-           replicate_addr: daddr,
-           lead_addr: daddr,
+           replicate_addr,
+           requests_addr,
+           events_addr,
            current_leader_id: PublicKey::default(),
            last_verified_hash: Hash::default(),
            last_verified_count: 0,
@@ -69,17 +88,17 @@
/// * `listen` - listen for requests and responses
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
pub struct Crdt {
-   table: HashMap<PublicKey, ReplicatedData>,
+   pub table: HashMap<PublicKey, ReplicatedData>,
    /// Value of my update index when entry in table was updated.
    /// Nodes will ask for updates since `update_index`, and this node
    /// should respond with all the identities that are greater then the
    /// request's `update_index` in this list
    local: HashMap<PublicKey, u64>,
-   /// The value of the remote update index that i have last seen
+   /// The value of the remote update index that I have last seen
    /// This Node will ask external nodes for updates since the value in this list
-   remote: HashMap<PublicKey, u64>,
-   update_index: u64,
-   me: PublicKey,
+   pub remote: HashMap<PublicKey, u64>,
+   pub update_index: u64,
+   pub me: PublicKey,
    timeout: Duration,
}

// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
@@ -92,6 +111,8 @@
    //TODO might need a since?
    /// from id, form's last update index, ReplicatedData
    ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
+   /// ask for a missing index
+   RequestWindowIndex(ReplicatedData, u64),
}

impl Crdt {
@@ -103,38 +124,198 @@
            remote: HashMap::new(),
            me: me.id,
            update_index: 1,
-           timeout: Duration::new(0, 100_000),
+           timeout: Duration::from_millis(100),
        };
        g.local.insert(me.id, g.update_index);
        g.table.insert(me.id, me);
        g
    }
-   pub fn import(&mut self, v: &ReplicatedData) {
-       // TODO check that last_verified types are always increasing
-       // TODO probably an error or attack
-       if self.me != v.id {
-           self.insert(v);
-       }
+   pub fn my_data(&self) -> &ReplicatedData {
+       &self.table[&self.me]
    }
+   pub fn leader_data(&self) -> &ReplicatedData {
+       &self.table[&self.table[&self.me].current_leader_id]
+   }
+   pub fn set_leader(&mut self, key: PublicKey) -> () {
+       let mut me = self.my_data().clone();
+       me.current_leader_id = key;
+       me.version += 1;
+       self.insert(&me);
+   }
    pub fn insert(&mut self, v: &ReplicatedData) {
+       // TODO check that last_verified types are always increasing
        if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
+           //somehow we signed a message for our own identity with a higher version that
+           // we have stored ourselves
+           trace!("me: {:?}", self.me[0]);
+           trace!("v.id: {:?}", v.id[0]);
            trace!("insert! {}", v.version);
            self.update_index += 1;
-           let _ = self.table.insert(v.id, v.clone());
+           let _ = self.table.insert(v.id.clone(), v.clone());
            let _ = self.local.insert(v.id, self.update_index);
        } else {
-           trace!("INSERT FAILED {}", v.version);
+           trace!(
+               "INSERT FAILED new.version: {} me.version: {}",
+               v.version,
+               self.table[&v.id].version
+           );
        }
    }
/// broadcast messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn broadcast(
obj: &Arc<RwLock<Self>>,
blobs: &Vec<SharedBlob>,
s: &UdpSocket,
transmit_index: &mut u64,
) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let robj = obj.read().expect("'obj' read lock in pub fn broadcast");
trace!("broadcast table {}", robj.table.len());
let cloned_table: Vec<ReplicatedData> = robj.table.values().cloned().collect();
(robj.table[&robj.me].clone(), cloned_table)
};
let daddr = "0.0.0.0:0".parse().unwrap();
let nodes: Vec<&ReplicatedData> = table
.iter()
.filter(|v| {
if me.id == v.id {
//filter myself
false
} else if v.replicate_addr == daddr {
//filter nodes that are not listening
false
} else {
trace!("broadcast node {}", v.replicate_addr);
true
}
})
.collect();
if nodes.len() < 1 {
warn!("crdt too small");
return Err(Error::CrdtTooSmall);
}
trace!("nodes table {}", nodes.len());
trace!("blobs table {}", blobs.len());
// enumerate all the blobs, those are the indices
// transmit them to nodes, starting from a different node
let orders: Vec<_> = blobs
.iter()
.enumerate()
.zip(
nodes
.iter()
.cycle()
.skip((*transmit_index as usize) % nodes.len()),
)
.collect();
trace!("orders table {}", orders.len());
let errs: Vec<_> = orders
.into_iter()
.map(|((i, b), v)| {
// only leader should be broadcasting
assert!(me.current_leader_id != v.id);
let mut blob = b.write().expect("'b' write lock in pub fn broadcast");
blob.set_id(me.id).expect("set_id in pub fn broadcast");
blob.set_index(*transmit_index + i as u64)
.expect("set_index in pub fn broadcast");
//TODO profile this, may need multiple sockets for par_iter
trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
e
})
.collect();
trace!("broadcast results {}", errs.len());
for e in errs {
match e {
Err(e) => {
error!("broadcast result {:?}", e);
return Err(Error::IO(e));
}
_ => (),
}
*transmit_index += 1;
}
Ok(())
}
/// retransmit messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn retransmit(obj: &Arc<RwLock<Self>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let s = obj.read().expect("'obj' read lock in pub fn retransmit");
(s.table[&s.me].clone(), s.table.values().cloned().collect())
};
blob.write()
.unwrap()
.set_id(me.id)
.expect("set_id in pub fn retransmit");
let rblob = blob.read().unwrap();
let daddr = "0.0.0.0:0".parse().unwrap();
let orders: Vec<_> = table
.iter()
.filter(|v| {
if me.id == v.id {
false
} else if me.current_leader_id == v.id {
trace!("skip retransmit to leader {:?}", v.id);
false
} else if v.replicate_addr == daddr {
trace!("skip nodes that are not listening {:?}", v.id);
false
} else {
true
}
})
.collect();
let errs: Vec<_> = orders
.par_iter()
.map(|v| {
trace!(
"retransmit blob {} to {}",
rblob.get_index().unwrap(),
v.replicate_addr
);
//TODO profile this, may need multiple sockets for par_iter
s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
})
.collect();
for e in errs {
match e {
Err(e) => {
info!("retransmit error {:?}", e);
return Err(Error::IO(e));
}
_ => (),
}
}
Ok(())
}
// max number of nodes that we could be converged to
pub fn convergence(&self) -> u64 {
let max = self.remote.values().len() as u64 + 1;
self.remote.values().fold(max, |a, b| std::cmp::min(a, *b))
}
    fn random() -> u64 {
        let rnd = SystemRandom::new();
        let mut buf = [0u8; 8];
-       rnd.fill(&mut buf).unwrap();
+       rnd.fill(&mut buf).expect("rnd.fill in pub fn random");
        let mut rdr = Cursor::new(&buf);
-       rdr.read_u64::<LittleEndian>().unwrap()
+       rdr.read_u64::<LittleEndian>()
+           .expect("rdr.read_u64 in fn random")
    }
    fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
-       trace!("get updates since {}", v);
+       //trace!("get updates since {}", v);
        let data = self.table
            .values()
            .filter(|x| self.local[&x.id] > v)
@@ -145,19 +326,36 @@
        (id, ups, data)
    }
+   pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
+       if self.table.len() <= 1 {
+           return Err(Error::CrdtTooSmall);
+       }
+       let mut n = (Self::random() as usize) % self.table.len();
+       while self.table.values().nth(n).unwrap().id == self.me {
+           n = (Self::random() as usize) % self.table.len();
+       }
+       let addr = self.table.values().nth(n).unwrap().gossip_addr.clone();
+       let req = Protocol::RequestWindowIndex(self.table[&self.me].clone(), ix);
+       let out = serialize(&req)?;
+       Ok((addr, out))
+   }
    /// Create a random gossip request
    /// # Returns
-   /// (A,B,C)
-   /// * A - Remote gossip address
-   /// * B - My gossip address
-   /// * C - Remote update index to request updates since
-   fn gossip_request(&self) -> (SocketAddr, Protocol) {
-       let n = (Self::random() as usize) % self.table.len();
-       trace!("random {:?} {}", &self.me[0..1], n);
-       let v = self.table.values().nth(n).unwrap().clone();
+   /// (A,B)
+   /// * A - Address to send to
+   /// * B - RequestUpdates protocol message
+   fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
+       let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
+       if options.len() < 1 {
+           trace!("crdt too small for gossip");
+           return Err(Error::CrdtTooSmall);
+       }
+       let n = (Self::random() as usize) % options.len();
+       let v = options[n].clone();
        let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
        let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
-       (v.gossip_addr, req)
+       Ok((v.gossip_addr, req))
    }
    /// At random pick a node and try to get updated changes from them
@@ -167,11 +365,14 @@
        // Lock the object only to do this operation and not for any longer
        // especially not when doing the `sock.send_to`
-       let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
+       let (remote_gossip_addr, req) = obj.read()
+           .expect("'obj' read lock in fn run_gossip")
+           .gossip_request()?;
        let sock = UdpSocket::bind("0.0.0.0:0")?;
        // TODO this will get chatty, so we need to first ask for number of updates since
        // then only ask for specific data that we dont have
        let r = serialize(&req)?;
+       trace!("sending gossip request to {}", remote_gossip_addr);
        sock.send_to(&r, remote_gossip_addr)?;
        Ok(())
    }
@@ -186,7 +387,7 @@
        // TODO we need to punish/spam resist here
        // sig verify the whole update and slash anyone who sends a bad update
        for v in data {
-           self.import(&v);
+           self.insert(&v);
        }
        *self.remote.entry(from).or_insert(update_index) = update_index;
    }
@@ -199,14 +400,50 @@
                return;
            }
            //TODO this should be a tuned parameter
-           sleep(obj.read().unwrap().timeout);
+           sleep(
+               obj.read()
+                   .expect("'obj' read lock in pub fn gossip")
+                   .timeout,
+           );
        })
    }
fn run_window_request(
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
sock: &UdpSocket,
from: &ReplicatedData,
ix: u64,
) -> Result<()> {
let pos = (ix as usize) % window.read().unwrap().len();
let mut outblob = vec![];
if let &Some(ref blob) = &window.read().unwrap()[pos] {
let rblob = blob.read().unwrap();
let blob_ix = rblob.get_index().expect("run_window_request get_index");
if blob_ix == ix {
// copy to avoid doing IO inside the lock
outblob.extend(&rblob.data[..rblob.meta.size]);
}
} else {
assert!(window.read().unwrap()[pos].is_none());
info!("failed RequestWindowIndex {} {}", ix, from.replicate_addr);
}
if outblob.len() > 0 {
info!(
"responding RequestWindowIndex {} {}",
ix, from.replicate_addr
);
sock.send_to(&outblob, from.replicate_addr)?;
}
Ok(())
}
    /// Process messages from the network
-   fn run_listen(obj: &Arc<RwLock<Self>>, sock: &UdpSocket) -> Result<()> {
+   fn run_listen(
+       obj: &Arc<RwLock<Self>>,
+       window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
+       sock: &UdpSocket,
+   ) -> Result<()> {
        //TODO cache connections
        let mut buf = vec![0u8; 1024 * 64];
+       trace!("recv_from on {}", sock.local_addr().unwrap());
        let (amt, src) = sock.recv_from(&mut buf)?;
        trace!("got request from {}", src);
        buf.resize(amt, 0);
@@ -216,31 +453,59 @@
            Protocol::RequestUpdates(v, reqdata) => {
                trace!("RequestUpdates {}", v);
                let addr = reqdata.gossip_addr;
-               // only lock for this call, dont lock durring IO `sock.send_to` or `sock.recv_from`
-               let (from, ups, data) = obj.read().unwrap().get_updates_since(v);
+               // only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
+               let (from, ups, data) = obj.read()
+                   .expect("'obj' read lock in RequestUpdates")
+                   .get_updates_since(v);
                trace!("get updates since response {} {}", v, data.len());
                let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
                trace!("send_to {}", addr);
                //TODO verify reqdata belongs to sender
-               obj.write().unwrap().import(&reqdata);
-               sock.send_to(&rsp, addr).unwrap();
+               obj.write()
+                   .expect("'obj' write lock in RequestUpdates")
+                   .insert(&reqdata);
+               sock.send_to(&rsp, addr)
+                   .expect("'sock.send_to' in RequestUpdates");
                trace!("send_to done!");
            }
            Protocol::ReceiveUpdates(from, ups, data) => {
                trace!("ReceivedUpdates");
-               obj.write().unwrap().apply_updates(from, ups, &data);
+               obj.write()
+                   .expect("'obj' write lock in ReceiveUpdates")
+                   .apply_updates(from, ups, &data);
+           }
+           Protocol::RequestWindowIndex(from, ix) => {
+               //TODO verify from is signed
+               obj.write().unwrap().insert(&from);
+               let me = obj.read().unwrap().my_data().clone();
+               trace!(
+                   "received RequestWindowIndex {} {} myaddr {}",
+                   ix,
+                   from.replicate_addr,
+                   me.replicate_addr
+               );
+               assert_ne!(from.replicate_addr, me.replicate_addr);
+               let _ = Self::run_window_request(window, sock, &from, ix);
            }
        }
        Ok(())
    }
    pub fn listen(
        obj: Arc<RwLock<Self>>,
+       window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
        sock: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
-       sock.set_read_timeout(Some(Duration::new(2, 0))).unwrap();
+       sock.set_read_timeout(Some(Duration::new(2, 0)))
+           .expect("'sock.set_read_timeout' in crdt.rs");
        spawn(move || loop {
-           let _ = Self::run_listen(&obj, &sock);
+           let e = Self::run_listen(&obj, &window, &sock);
+           if e.is_err() {
+               info!(
+                   "run_listen timeout, table size: {}",
+                   obj.read().unwrap().table.len()
+               );
+           }
            if exit.load(Ordering::Relaxed) {
                return;
            }
@@ -249,8 +514,11 @@
}

#[cfg(test)]
-mod test {
+mod tests {
    use crdt::{Crdt, ReplicatedData};
+   use logger;
+   use packet::Blob;
+   use rayon::iter::*;
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::net::UdpSocket;
@@ -259,6 +527,30 @@
    use std::thread::{sleep, JoinHandle};
    use std::time::Duration;
fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
let events = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
events.local_addr().unwrap(),
);
let crdt = Crdt::new(d);
trace!(
"id: {} gossip: {} replicate: {} serve: {}",
crdt.my_data().id[0],
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
(crdt, gossip, replicate, serve)
}
    /// Test that the network converges.
    /// Run until every node in the network has a full ReplicatedData set.
    /// Check that nodes stop sending updates after all the ReplicatedData has been shared.
@@ -271,12 +563,10 @@
        let exit = Arc::new(AtomicBool::new(false));
        let listen: Vec<_> = (0..num)
            .map(|_| {
-               let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
-               let pubkey = KeyPair::new().pubkey();
-               let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
-               let crdt = Crdt::new(d);
+               let (crdt, gossip, _, _) = test_node();
                let c = Arc::new(RwLock::new(crdt));
-               let l = Crdt::listen(c.clone(), listener, exit.clone());
+               let w = Arc::new(RwLock::new(vec![]));
+               let l = Crdt::listen(c.clone(), w, gossip, exit.clone());
                (c, l)
            })
            .collect();
@@ -286,21 +576,16 @@
            .map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
            .collect();
        let mut done = true;
-       for _ in 0..(num * 16) {
-           done = true;
+       for i in 0..(num * 32) {
+           done = false;
+           trace!("round {}", i);
            for &(ref c, _) in listen.iter() {
-               trace!(
-                   "done updates {} {}",
-                   c.read().unwrap().table.len(),
-                   c.read().unwrap().update_index
-               );
-               //make sure the number of updates doesn't grow unbounded
-               assert!(c.read().unwrap().update_index <= num as u64);
-               //make sure we got all the updates
-               if c.read().unwrap().table.len() != num {
-                   done = false;
+               if num == c.read().unwrap().convergence() as usize {
+                   done = true;
+                   break;
                }
            }
+           //at least 1 node converged
            if done == true {
                break;
            }
@@ -322,7 +607,9 @@
    }
    /// ring a -> b -> c -> d -> e -> a
    #[test]
+   #[ignore]
    fn gossip_ring_test() {
+       logger::setup();
        run_gossip_topo(|listen| {
            let num = listen.len();
            for n in 0..num {
@@ -339,6 +626,7 @@
    /// star (b,c,d,e) -> a
    #[test]
+   #[ignore]
    fn gossip_star_test() {
        run_gossip_topo(|listen| {
            let num = listen.len();
@@ -357,7 +645,13 @@
    /// Test that insert drops messages that are older
    #[test]
    fn insert_test() {
-       let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
+       let mut d = ReplicatedData::new(
+           KeyPair::new().pubkey(),
+           "127.0.0.1:1234".parse().unwrap(),
+           "127.0.0.1:1235".parse().unwrap(),
+           "127.0.0.1:1236".parse().unwrap(),
+           "127.0.0.1:1237".parse().unwrap(),
+       );
        assert_eq!(d.version, 0);
        let mut crdt = Crdt::new(d.clone());
        assert_eq!(crdt.table[&d.id].version, 0);
@@ -369,4 +663,76 @@
        assert_eq!(crdt.table[&d.id].version, 2);
    }
#[test]
#[ignore]
pub fn test_crdt_retransmit() {
logger::setup();
trace!("c1:");
let (mut c1, s1, r1, e1) = test_node();
trace!("c2:");
let (mut c2, s2, r2, _) = test_node();
trace!("c3:");
let (mut c3, s3, r3, _) = test_node();
let c1_id = c1.my_data().id;
c1.set_leader(c1_id);
c2.insert(&c1.my_data());
c3.insert(&c1.my_data());
c2.set_leader(c1.my_data().id);
c3.set_leader(c1.my_data().id);
let exit = Arc::new(AtomicBool::new(false));
// Create listen threads
let win1 = Arc::new(RwLock::new(vec![]));
let a1 = Arc::new(RwLock::new(c1));
let t1 = Crdt::listen(a1.clone(), win1, s1, exit.clone());
let a2 = Arc::new(RwLock::new(c2));
let win2 = Arc::new(RwLock::new(vec![]));
let t2 = Crdt::listen(a2.clone(), win2, s2, exit.clone());
let a3 = Arc::new(RwLock::new(c3));
let win3 = Arc::new(RwLock::new(vec![]));
let t3 = Crdt::listen(a3.clone(), win3, s3, exit.clone());
// Create gossip threads
let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());
//wait to converge
trace!("waitng to converge:");
let mut done = false;
for _ in 0..30 {
done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
&& a3.read().unwrap().table.len() == 3;
if done {
break;
}
sleep(Duration::new(1, 0));
}
assert!(done);
let mut b = Blob::default();
b.meta.size = 10;
Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
let res: Vec<_> = [r1, r2, r3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if failed receive the retransmit packet, r2, and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
for t in threads.into_iter() {
t.join().unwrap();
}
}
}
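Note: Crdt::broadcast above assigns blobs to peers by zipping the enumerated blob list against the node list, cycled and offset by the running transmit index, so consecutive broadcast rounds start at different nodes. The assignment logic in isolation (toy &str stand-ins for blobs and nodes):

// Pair each (index, blob) with a node, starting transmit_index deep into
// the cycled node list so successive rounds rotate the starting node.
fn assignments(
    blobs: &[&'static str],
    nodes: &[&'static str],
    transmit_index: usize,
) -> Vec<((usize, &'static str), &'static str)> {
    blobs
        .iter()
        .cloned()
        .enumerate()
        .zip(nodes.iter().cloned().cycle().skip(transmit_index % nodes.len()))
        .collect()
}

fn main() {
    let blobs = ["b0", "b1", "b2"];
    let nodes = ["n0", "n1"];
    // With transmit_index = 1: b0 -> n1, b1 -> n0, b2 -> n1.
    assert_eq!(
        assignments(&blobs, &nodes, 1),
        vec![((0, "b0"), "n1"), ((1, "b1"), "n0"), ((2, "b2"), "n1")]
    );
}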

View File

@@ -51,15 +51,22 @@ fn verify_packet(packet: &Packet) -> u8 {
).is_ok() as u8 ).is_ok() as u8
} }
fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.fold(0, |x, y| x + y)
}
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use rayon::prelude::*;
+    info!("CPU ECDSA for {}", batch_size(batches));
    batches
        .into_par_iter()
        .map(|p| {
            p.read()
-                .unwrap()
+                .expect("'p' read lock in ed25519_verify")
                .packets
                .par_iter()
                .map(verify_packet)
@@ -72,13 +79,18 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use packet::PACKET_DATA_SIZE;
+    info!("CUDA ECDSA for {}", batch_size(batches));
    let mut out = Vec::new();
    let mut elems = Vec::new();
    let mut locks = Vec::new();
    let mut rvs = Vec::new();

    for packets in batches {
-        locks.push(packets.read().unwrap());
+        locks.push(
+            packets
+                .read()
+                .expect("'packets' read lock in pub fn ed25519_verify"),
+        );
    }
    let mut num = 0;
    for p in locks {
@@ -130,16 +142,25 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
#[cfg(test)]
mod tests {
-    use accountant_skel::Request;
    use bincode::serialize;
    use ecdsa;
+    use event::Event;
    use packet::{Packet, Packets, SharedPackets};
    use std::sync::RwLock;
-    use transaction::test_tx;
    use transaction::Transaction;
+    use transaction::{memfind, test_tx};

+    #[test]
+    fn test_layout() {
+        let tr = test_tx();
+        let tx = serialize(&tr).unwrap();
+        let packet = serialize(&Event::Transaction(tr)).unwrap();
+        assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
+        assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
+    }

    fn make_packet_from_transaction(tr: Transaction) -> Packet {
-        let tx = serialize(&Request::Transaction(tr)).unwrap();
+        let tx = serialize(&Event::Transaction(tr)).unwrap();
        let mut packet = Packet::default();
        packet.meta.size = tx.len();
        packet.data[..packet.meta.size].copy_from_slice(&tx);
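A note on the shape of these results: ed25519_verify returns one Vec<u8> per input batch, with a 0/1 flag per packet in the same order. A downstream consumer might filter on those flags roughly like this (a sketch, not part of this change; discard_failed is a hypothetical helper):

use packet::SharedPackets;

// Keep only the packets whose verify flag is 1.
fn discard_failed(batches: Vec<(SharedPackets, Vec<u8>)>) -> Vec<SharedPackets> {
    batches
        .into_iter()
        .map(|(batch, flags)| {
            {
                let mut p = batch.write().expect("batch write lock");
                let mut keep = flags.iter().map(|f| *f == 1);
                // retain() visits packets in order, so it pairs up with the flags.
                p.packets.retain(|_| keep.next().unwrap_or(false));
            }
            batch
        })
        .collect()
}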

src/entry.rs

@@ -25,6 +25,25 @@ pub struct Entry {
}

impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Self {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
Entry {
num_hashes,
id,
events,
}
}
    /// Creates the next Entry `num_hashes` after `start_hash`, updating `start_hash` and `cur_hashes` in place.
pub fn new_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Self {
let entry = Self::new(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}
    /// Creates an Entry from the number of hashes `num_hashes` since the previous event
    /// and that resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
@@ -49,14 +68,6 @@ fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
            hash_data.push(0u8);
            hash_data.extend_from_slice(&tr.sig);
        }
-        Event::Signature { ref sig, .. } => {
-            hash_data.push(1u8);
-            hash_data.extend_from_slice(sig);
-        }
-        Event::Timestamp { ref sig, .. } => {
-            hash_data.push(2u8);
-            hash_data.extend_from_slice(sig);
-        }
    }
}
@@ -84,31 +95,12 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
    }
}
-/// Creates the next Entry `num_hashes` after `start_hash`.
-pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
-    let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
-    let id = next_hash(start_hash, 0, &events);
-    Entry {
-        num_hashes,
-        id,
-        events,
-    }
-}
-
-/// Creates the next Tick Entry `num_hashes` after `start_hash`.
-pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
-    let entry = create_entry(start_hash, *cur_hashes, events);
-    *start_hash = entry.id;
-    *cur_hashes = 0;
-    entry
-}
-
-/// Creates the next Tick Entry `num_hashes` after `start_hash`.
-pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
-    Entry {
-        num_hashes,
-        id: next_hash(start_hash, num_hashes, &[]),
-        events: vec![],
-    }
-}
+/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
+pub fn next_entry(start_hash: &Hash, num_hashes: u64, events: Vec<Event>) -> Entry {
+    Entry {
+        num_hashes,
+        id: next_hash(start_hash, num_hashes, &events),
+        events: events,
+    }
+}
@@ -116,7 +108,7 @@ pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
mod tests {
    use super::*;
    use chrono::prelude::*;
-    use entry::create_entry;
+    use entry::Entry;
    use event::Event;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
@@ -128,8 +120,8 @@ mod tests {
    let one = hash(&zero);
    assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
    assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
-    assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
-    assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
+    assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
+    assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
}
#[test]
@@ -138,9 +130,9 @@ mod tests {
    // First, verify entries
    let keypair = KeyPair::new();
-    let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero));
-    let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero));
-    let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
+    let tr0 = Event::new_transaction(&keypair, keypair.pubkey(), 0, zero);
+    let tr1 = Event::new_transaction(&keypair, keypair.pubkey(), 1, zero);
+    let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
    assert!(e0.verify(&zero));

    // Next, swap two events and ensure verification fails.
@@ -155,9 +147,13 @@ mod tests {
    // First, verify entries
    let keypair = KeyPair::new();
-    let tr0 = Event::new_timestamp(&keypair, Utc::now());
-    let tr1 = Event::new_signature(&keypair, Default::default());
-    let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
+    let tr0 = Event::Transaction(Transaction::new_timestamp(&keypair, Utc::now(), zero));
+    let tr1 = Event::Transaction(Transaction::new_signature(
+        &keypair,
+        Default::default(),
+        zero,
+    ));
+    let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
    assert!(e0.verify(&zero));

    // Next, swap two witness events and ensure verification fails.
@@ -167,9 +163,9 @@ mod tests {
}

#[test]
-fn test_next_tick() {
+fn test_next_entry() {
    let zero = Hash::default();
-    let tick = next_tick(&zero, 1);
+    let tick = next_entry(&zero, 1, vec![]);
    assert_eq!(tick.num_hashes, 1);
    assert_ne!(tick.id, zero);
}
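For orientation, a minimal sketch (not in this diff) of how the new constructors chain entries: new_mut advances the caller's hash state, so successive calls build a slice that verifies as a Proof of History chain.

use entry::Entry;
use hash::Hash;
use ledger::Block;

fn build_chain() {
    let mut id = Hash::default();
    let mut num_hashes = 0;
    // Three empty entries, each built on top of the previous id.
    let entries: Vec<Entry> = (0..3)
        .map(|_| Entry::new_mut(&mut id, &mut num_hashes, vec![]))
        .collect();
    assert!(entries.verify(&Hash::default()));
}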

src/entry_writer.rs (new file, 80 lines)

@@ -0,0 +1,80 @@
//! The `entry_writer` module helps implement the TPU's write stage.
use bank::Bank;
use entry::Entry;
use ledger;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::Write;
use std::io::sink;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer;
pub struct EntryWriter<'a> {
bank: &'a Bank,
}
impl<'a> EntryWriter<'a> {
    /// Create a new EntryWriter that wraps the given Bank.
pub fn new(bank: &'a Bank) -> Self {
EntryWriter { bank }
}
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
trace!("write_entry entry");
self.bank.register_entry_id(&entry.id);
writeln!(
            writer.lock().expect("'writer' lock in fn write_entry"),
"{}",
            serde_json::to_string(&entry).expect("'entry' to_string in fn write_entry")
).expect("writeln! in fn write_entry");
}
fn write_entries<W: Write>(
&self,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<Vec<Entry>> {
//TODO implement a serialize for channel that does this without allocations
let mut l = vec![];
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
self.write_entry(writer, &entry);
l.push(entry);
while let Ok(entry) = entry_receiver.try_recv() {
self.write_entry(writer, &entry);
l.push(entry);
}
Ok(l)
}
/// Process any Entry items that have been published by the Historian.
    /// continuously broadcast blobs of entries out
pub fn write_and_send_entries<W: Write>(
&self,
broadcast: &streamer::BlobSender,
blob_recycler: &packet::BlobRecycler,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<()> {
let mut q = VecDeque::new();
let list = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", list.len());
ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q);
if !q.is_empty() {
trace!("broadcasting {}", q.len());
broadcast.send(q)?;
}
Ok(())
}
/// Process any Entry items that have been published by the Historian.
    /// continuously broadcast blobs of entries out
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
Ok(())
}
}
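The write side above implies a simple read side: each ledger line is one JSON-encoded Entry. A reader sketch under that assumption (read_ledger is hypothetical, not part of this change):

use entry::Entry;
use std::io::BufRead;

fn read_ledger<R: BufRead>(reader: R) -> Vec<Entry> {
    reader
        .lines()
        .map(|line| {
            let line = line.expect("ledger line");
            // One serde_json document per line, exactly as write_entry emits them.
            serde_json::from_str(&line).expect("valid Entry JSON")
        })
        .collect()
}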

src/erasure.rs

@@ -153,7 +153,7 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
    re: &BlobRecycler,
-    window: &mut Vec<Option<SharedBlob>>,
+    window: &mut Vec<SharedBlob>,
    consumed: usize,
) -> Result<()> {
    let mut data_blobs = Vec::new();
@@ -164,10 +164,14 @@ pub fn generate_coding(
    let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
    for i in consumed..consumed + NUM_DATA {
        let n = i % window.len();
-        data_blobs.push(window[n].clone().unwrap());
+        data_blobs.push(
+            window[n]
+                .clone()
+                .expect("'data_blobs' arr in pub fn generate_coding"),
+        );
    }
    for b in &data_blobs {
-        data_locks.push(b.write().unwrap());
+        data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
    }
    for (i, l) in data_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
@@ -179,11 +183,18 @@ pub fn generate_coding(
    let coding_end = consumed + NUM_CODED;
    for i in coding_start..coding_end {
        let n = i % window.len();
-        window[n] = Some(re.allocate());
-        coding_blobs.push(window[n].clone().unwrap());
+        window[n] = re.allocate();
+        coding_blobs.push(
+            window[n]
+                .clone()
+                .expect("'coding_blobs' arr in pub fn generate_coding"),
+        );
    }
    for b in &coding_blobs {
-        coding_locks.push(b.write().unwrap());
+        coding_locks.push(
+            b.write()
+                .expect("'coding_locks' arr in pub fn generate_coding"),
+        );
    }
    for (i, l) in coding_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
@@ -231,7 +242,7 @@ pub fn recover(
        let j = i % window.len();
        let mut b = &mut window[j];
        if b.is_some() {
-            blobs.push(b.clone().unwrap());
+            blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
            continue;
        }
        let n = re.allocate();
@@ -244,7 +255,7 @@ pub fn recover(
trace!("erasures: {:?}", erasures); trace!("erasures: {:?}", erasures);
//lock everything //lock everything
for b in &blobs { for b in &blobs {
locks.push(b.write().unwrap()); locks.push(b.write().expect("'locks' arr in pb fn recover"));
} }
for (i, l) in locks.iter_mut().enumerate() { for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA { if i >= NUM_DATA {
@@ -272,7 +283,6 @@ pub fn recover(
mod test {
    use erasure;
    use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
-    extern crate env_logger;

    #[test]
    pub fn test_coding() {

src/event.rs

@@ -1,67 +1,31 @@
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
//! Transaction.

-use bincode::serialize;
-use chrono::prelude::*;
-use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
+use hash::Hash;
+use signature::{KeyPair, PublicKey};
use transaction::Transaction;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
    Transaction(Transaction),
-    Signature {
-        from: PublicKey,
-        tx_sig: Signature,
-        sig: Signature,
-    },
-    Timestamp {
-        from: PublicKey,
-        dt: DateTime<Utc>,
-        sig: Signature,
-    },
}

impl Event {
-    /// Create and sign a new Witness Timestamp. Used for unit-testing.
-    pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
-        let sign_data = serialize(&dt).unwrap();
-        let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
-        Event::Timestamp {
-            from: from.pubkey(),
-            dt,
-            sig,
-        }
-    }
-
-    /// Create and sign a new Witness Signature. Used for unit-testing.
-    pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
-        let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
-        Event::Signature {
-            from: from.pubkey(),
-            tx_sig,
-            sig,
-        }
-    }
+    pub fn new_transaction(
+        from_keypair: &KeyPair,
+        to: PublicKey,
+        tokens: i64,
+        last_id: Hash,
+    ) -> Self {
+        let tr = Transaction::new(from_keypair, to, tokens, last_id);
+        Event::Transaction(tr)
+    }

    /// Verify the Event's signatures are valid and, if a transaction, that its
    /// spending plan is valid.
    pub fn verify(&self) -> bool {
        match *self {
-            Event::Transaction(ref tr) => tr.verify_sig(),
-            Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
-            Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
+            Event::Transaction(ref tr) => tr.verify_plan(),
        }
    }
}

-#[cfg(test)]
-mod tests {
-    use super::*;
-    use signature::{KeyPair, KeyPairUtil};
-
-    #[test]
-    fn test_event_verify() {
-        assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
-        assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
-    }
-}
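Usage now reduces to a single constructor plus verify (a small sketch mirroring the tests elsewhere in this change):

use event::Event;
use hash::Hash;
use signature::{KeyPair, KeyPairUtil};

fn demo() {
    let alice = KeyPair::new();
    let bob = KeyPair::new().pubkey();
    let event = Event::new_transaction(&alice, bob, 42, Hash::default());
    assert!(event.verify()); // delegates to Transaction::verify_plan
}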

src/hash.rs

@@ -1,7 +1,7 @@
//! The `hash` module provides functions for creating SHA-256 hashes.
-use generic_array::typenum::U32;
use generic_array::GenericArray;
+use generic_array::typenum::U32;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;

src/historian.rs (deleted)

@@ -1,113 +0,0 @@
//! The `historian` module provides a microservice for generating a Proof of History.
//! It manages a thread containing a Proof of History Recorder.
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
pub struct Historian {
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<ExitReason>,
}
impl Historian {
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
Historian {
sender,
receiver,
thread_hdl,
}
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
start_hash: Hash,
ms_per_tick: Option<u64>,
receiver: Receiver<Signal>,
sender: SyncSender<Entry>,
) -> JoinHandle<ExitReason> {
spawn(move || {
let mut recorder = Recorder::new(receiver, sender, start_hash);
let now = Instant::now();
loop {
if let Err(err) = recorder.process_events(now, ms_per_tick) {
return err;
}
if ms_per_tick.is_some() {
recorder.hash();
}
}
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use std::thread::sleep;
use std::time::Duration;
#[test]
fn test_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(hist.sender);
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);
drop(hist.receiver);
hist.sender.send(Signal::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::SendDisconnected
);
}
#[test]
fn test_ticking_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}

src/ledger.rs

@@ -1,9 +1,17 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.

-use entry::{next_tick, Entry};
+use bincode::{deserialize, serialize_into};
+use entry::{next_entry, Entry};
+use event::Event;
use hash::Hash;
+use packet;
+use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*;
+use std::cmp::min;
+use std::collections::VecDeque;
+use std::io::Cursor;
+use std::mem::size_of;

pub trait Block {
    /// Verifies the hashes and counts of a slice of events are all consistent.
@@ -18,22 +26,107 @@ impl Block for [Entry] {
    }
}

-/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
-pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
+/// Create a vector of Entries of length `event_set.len()` from `start_hash` hash, `num_hashes`, and `event_set`.
+pub fn next_entries(start_hash: &Hash, num_hashes: u64, event_set: Vec<Vec<Event>>) -> Vec<Entry> {
    let mut id = *start_hash;
-    let mut ticks = vec![];
-    for _ in 0..len {
-        let entry = next_tick(&id, num_hashes);
+    let mut entries = vec![];
+    for event_list in &event_set {
+        let events = event_list.clone();
+        let entry = next_entry(&id, num_hashes, events);
        id = entry.id;
-        ticks.push(entry);
+        entries.push(entry);
    }
-    ticks
+    entries
}
pub fn process_entry_list_into_blobs(
list: &Vec<Entry>,
blob_recycler: &packet::BlobRecycler,
q: &mut VecDeque<SharedBlob>,
) {
let mut start = 0;
let mut end = 0;
while start < list.len() {
let mut entries: Vec<Vec<Entry>> = Vec::new();
let mut total = 0;
for i in &list[start..] {
total += size_of::<Event>() * i.events.len();
total += size_of::<Entry>();
if total >= BLOB_DATA_SIZE {
break;
}
end += 1;
}
// See if we need to split the events
if end <= start {
let mut event_start = 0;
let num_events_per_blob = BLOB_DATA_SIZE / size_of::<Event>();
let total_entry_chunks =
(list[end].events.len() + num_events_per_blob - 1) / num_events_per_blob;
trace!(
"splitting events end: {} total_chunks: {}",
end,
total_entry_chunks
);
for _ in 0..total_entry_chunks {
let event_end = min(event_start + num_events_per_blob, list[end].events.len());
let mut entry = Entry {
num_hashes: list[end].num_hashes,
id: list[end].id,
events: list[end].events[event_start..event_end].to_vec(),
};
entries.push(vec![entry]);
event_start = event_end;
}
end += 1;
} else {
entries.push(list[start..end].to_vec());
}
for entry in entries {
let b = blob_recycler.allocate();
let pos = {
let mut bd = b.write().unwrap();
let mut out = Cursor::new(bd.data_mut());
serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize
};
assert!(pos < BLOB_SIZE);
b.write().unwrap().set_size(pos);
q.push_back(b);
}
start = end;
}
}
pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
let mut entries_to_apply: Vec<Entry> = Vec::new();
let mut last_id = Hash::default();
for msgs in blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
if entry.id == last_id {
if let Some(last_entry) = entries_to_apply.last_mut() {
last_entry.events.extend(entry.events);
}
} else {
last_id = entry.id;
entries_to_apply.push(entry);
}
}
//TODO respond back to leader with hash of the state
}
entries_to_apply
}

#[cfg(test)]
mod tests {
    use super::*;
    use hash::hash;
+    use packet::BlobRecycler;
+    use signature::{KeyPair, KeyPairUtil};
+    use transaction::Transaction;

    #[test]
    fn test_verify_slice() {
@@ -42,12 +135,51 @@ mod tests {
    assert!(vec![][..].verify(&zero)); // base case
    assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
    assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
-    assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step
-    let mut bad_ticks = next_ticks(&zero, 0, 2);
+    assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
+    let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
    bad_ticks[1].id = one;
    assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
#[test]
fn test_entry_to_blobs() {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, one));
let events = vec![tr0.clone(); 10000];
let e0 = Entry::new(&zero, 0, events);
let entry_list = vec![e0.clone(); 1];
let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new();
process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q);
let entries = reconstruct_entries_from_blobs(&blob_q);
assert_eq!(entry_list, entries);
}
#[test]
fn test_next_entries() {
let mut id = Hash::default();
let next_id = hash(&id);
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, next_id));
let events = vec![tr0.clone(); 5];
let event_set = vec![events.clone(); 5];
let entries0 = next_entries(&id, 0, event_set);
assert_eq!(entries0.len(), 5);
let mut entries1 = vec![];
for _ in 0..5 {
let entry = next_entry(&id, 0, events.clone());
id = entry.id;
entries1.push(entry);
}
assert_eq!(entries0, entries1);
}
}

#[cfg(all(feature = "unstable", test))]
@@ -59,7 +191,7 @@ mod bench {
    #[bench]
    fn event_bench(bencher: &mut Bencher) {
        let start_hash = Hash::default();
-        let entries = next_ticks(&start_hash, 10_000, 8);
+        let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
        bencher.iter(|| {
            assert!(entries.verify(&start_hash));
        });
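The packing loop above is worth a worked example. It packs whole entries until the size estimate reaches BLOB_DATA_SIZE, and only splits when a single entry is too big on its own. Illustrative arithmetic (the 256-byte Event and 65,488-byte BLOB_DATA_SIZE are assumptions for the example, not measured sizes):

// ceil-divide: how many chunk entries a single oversized entry becomes
fn chunks_needed(num_events: usize, events_per_blob: usize) -> usize {
    (num_events + events_per_blob - 1) / events_per_blob
}

fn main() {
    let events_per_blob = 65_488 / 256; // = 255 events per blob
    assert_eq!(events_per_blob, 255);
    // An entry with 10,000 events splits into ceil(10_000 / 255) = 40 chunks,
    // each carrying the original entry's num_hashes and id.
    assert_eq!(chunks_needed(10_000, events_per_blob), 40);
}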

src/lib.rs

@@ -1,25 +1,37 @@
#![cfg_attr(feature = "unstable", feature(test))]
-pub mod accountant;
-pub mod accountant_skel;
-pub mod accountant_stub;
+pub mod bank;
+pub mod banking_stage;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
+pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod hash;
-pub mod historian;
pub mod ledger;
+pub mod logger;
pub mod mint;
pub mod packet;
pub mod plan;
+pub mod record_stage;
pub mod recorder;
+pub mod replicate_stage;
+pub mod request;
+pub mod request_processor;
+pub mod request_stage;
pub mod result;
+pub mod rpu;
+pub mod server;
+pub mod sig_verify_stage;
pub mod signature;
pub mod streamer;
-pub mod subscribers;
+pub mod thin_client;
+pub mod timing;
+pub mod tpu;
pub mod transaction;
+pub mod tvu;
+pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
@@ -41,3 +53,5 @@ extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;
+extern crate rand;

src/logger.rs (new file, 11 lines)

@@ -0,0 +1,11 @@
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
});
}
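Intended use (an assumption drawn from the tests in this change, not documented here): call logger::setup() at the top of any test that logs, then enable output per run via the RUST_LOG environment variable, e.g. RUST_LOG=solana=trace cargo test. Because setup() is guarded by Once, every test can call it unconditionally and env_logger still initializes exactly once.

#[test]
fn logs_something() {
    logger::setup(); // idempotent; safe to call from every test
    debug!("visible only when RUST_LOG enables this target");
}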

src/mint.rs

@@ -1,6 +1,5 @@
//! The `mint` module is a library for generating the chain's genesis block.

-use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
@@ -19,8 +18,11 @@ pub struct Mint {
impl Mint {
    pub fn new(tokens: i64) -> Self {
        let rnd = SystemRandom::new();
-        let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
-        let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
+        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
+            .expect("generate_pkcs8 in mint pub fn new")
+            .to_vec();
+        let keypair =
+            KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
        let pubkey = keypair.pubkey();
        Mint {
            pkcs8,
@@ -38,7 +40,7 @@ impl Mint {
    }

    pub fn keypair(&self) -> KeyPair {
-        KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
+        KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
    }

    pub fn pubkey(&self) -> PublicKey {
@@ -52,8 +54,8 @@ impl Mint {
    }

    pub fn create_entries(&self) -> Vec<Entry> {
-        let e0 = create_entry(&self.seed(), 0, vec![]);
-        let e1 = create_entry(&e0.id, 0, self.create_events());
+        let e0 = Entry::new(&self.seed(), 0, vec![]);
+        let e1 = Entry::new(&e0.id, 0, self.create_events());
        vec![e0, e1]
    }
}
@@ -61,7 +63,7 @@ impl Mint {
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
    pub mint: Mint,
-    pub users: Vec<(Vec<u8>, i64)>,
+    pub num_accounts: i64,
}

#[cfg(test)]
@@ -69,12 +71,14 @@ mod tests {
    use super::*;
    use ledger::Block;
    use plan::Plan;
+    use transaction::Instruction;

    #[test]
    fn test_create_events() {
        let mut events = Mint::new(100).create_events().into_iter();
-        if let Event::Transaction(tr) = events.next().unwrap() {
-            if let Plan::Pay(payment) = tr.data.plan {
+        let Event::Transaction(tr) = events.next().unwrap();
+        if let Instruction::NewContract(contract) = tr.instruction {
+            if let Plan::Pay(payment) = contract.plan {
                assert_eq!(tr.from, payment.to);
            }
        }

src/packet.rs

@@ -1,12 +1,15 @@
//! The `packet` module defines data structures and methods to pull data from the network.
+use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
+use serde::Serialize;
+use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
+use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
-use std::mem::size_of;

pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -14,7 +17,8 @@ pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

pub const NUM_PACKETS: usize = 1024 * 8;
-const BLOB_SIZE: usize = 64 * 1024;
+pub const BLOB_SIZE: usize = 64 * 1024;
+pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
@@ -153,12 +157,12 @@ impl<T: Default> Clone for Recycler<T> {
impl<T: Default> Recycler<T> {
    pub fn allocate(&self) -> Arc<RwLock<T>> {
-        let mut gc = self.gc.lock().expect("recycler lock");
+        let mut gc = self.gc.lock().expect("recycler lock in pub fn allocate");
        gc.pop()
            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
    }

    pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
-        let mut gc = self.gc.lock().expect("recycler lock");
+        let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
        gc.push(msgs);
    }
}
@@ -176,13 +180,14 @@ impl Packets {
        socket.set_nonblocking(false)?;
        for p in &mut self.packets {
            p.meta.size = 0;
+            trace!("receiving");
            match socket.recv_from(&mut p.data) {
                Err(_) if i > 0 => {
-                    trace!("got {:?} messages", i);
+                    debug!("got {:?} messages", i);
                    break;
                }
                Err(e) => {
-                    info!("recv_from err {:?}", e);
+                    trace!("recv_from err {:?}", e);
                    return Err(Error::IO(e));
                }
                Ok((nrecv, from)) => {
@@ -200,6 +205,7 @@ impl Packets {
    pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
        let sz = self.run_read_from(socket)?;
        self.packets.resize(sz, Packet::default());
+        debug!("recv_from: {}", sz);
        Ok(())
    }

    pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
@@ -211,28 +217,60 @@ impl Packets {
    }
}

-const BLOB_INDEX_SIZE: usize = size_of::<u64>();
+pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
let mut out = vec![];
for x in xs.chunks(NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(x.len(), Default::default());
for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
return out;
}
const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
impl Blob {
    pub fn get_index(&self) -> Result<u64> {
-        let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
+        let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
        let r = rdr.read_u64::<LittleEndian>()?;
        Ok(r)
    }
    pub fn set_index(&mut self, ix: u64) -> Result<()> {
        let mut wtr = vec![];
        wtr.write_u64::<LittleEndian>(ix)?;
-        self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
+        self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
        Ok(())
    }
+    pub fn get_id(&self) -> Result<PublicKey> {
+        let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
+        Ok(e)
+    }
+    pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
+        let wtr = serialize(&id)?;
+        self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
+        Ok(())
+    }
    pub fn data(&self) -> &[u8] {
-        &self.data[BLOB_INDEX_SIZE..]
+        &self.data[BLOB_ID_END..]
    }
    pub fn data_mut(&mut self) -> &mut [u8] {
-        &mut self.data[BLOB_INDEX_SIZE..]
+        &mut self.data[BLOB_ID_END..]
    }
    pub fn set_size(&mut self, size: usize) {
-        self.meta.size = size + BLOB_INDEX_SIZE;
+        self.meta.size = size + BLOB_ID_END;
    }
    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
        let mut v = VecDeque::new();
@@ -246,14 +284,16 @@ impl Blob {
        for i in 0..NUM_BLOBS {
            let r = re.allocate();
            {
-                let mut p = r.write().unwrap();
+                let mut p = r.write().expect("'r' write lock in pub fn recv_from");
                match socket.recv_from(&mut p.data) {
                    Err(_) if i > 0 => {
                        trace!("got {:?} messages", i);
                        break;
                    }
                    Err(e) => {
-                        info!("recv_from err {:?}", e);
+                        if e.kind() != io::ErrorKind::WouldBlock {
+                            info!("recv_from err {:?}", e);
+                        }
                        return Err(Error::IO(e));
                    }
                    Ok((nrecv, from)) => {
@@ -276,7 +316,7 @@ impl Blob {
    ) -> Result<()> {
        while let Some(r) = v.pop_front() {
            {
-                let p = r.read().unwrap();
+                let p = r.read().expect("'r' read lock in pub fn send_to");
                let a = p.meta.addr();
                socket.send_to(&p.data[..p.meta.size], &a)?;
            }
@@ -288,11 +328,13 @@ impl Blob {
#[cfg(test)]
mod test {
-    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
+    use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
+    use request::Request;
    use std::collections::VecDeque;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;

    #[test]
    pub fn packet_recycler_test() {
        let r = PacketRecycler::default();
@@ -334,6 +376,24 @@ mod test {
        r.recycle(p);
    }
#[test]
fn test_to_packets() {
let tr = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
    #[test]
    pub fn blob_send_recv() {
        trace!("start");

src/plan.rs

@@ -7,6 +7,7 @@ use chrono::prelude::*;
use signature::PublicKey;
use std::mem;

+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),

src/record_stage.rs (new file, 166 lines)

@@ -0,0 +1,166 @@
//! The `record_stage` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
use entry::Entry;
use event::Event;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread::{spawn, JoinHandle};
use std::time::{Duration, Instant};
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Events(Vec<Event>),
}
pub struct RecordStage {
pub entry_receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<()>,
}
impl RecordStage {
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(
event_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Option<Duration>,
) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = spawn(move || {
let mut recorder = Recorder::new(start_hash);
let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
loop {
if let Err(_) = Self::process_events(
&mut recorder,
duration_data,
&event_receiver,
&entry_sender,
) {
return;
}
if duration_data.is_some() {
recorder.hash();
}
}
});
RecordStage {
entry_receiver,
thread_hdl,
}
}
pub fn process_events(
recorder: &mut Recorder,
duration_data: Option<(Instant, Duration)>,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
if let Some((start_time, tick_duration)) = duration_data {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
}
}
match receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
let entry = recorder.record(vec![]);
sender.send(entry).or(Err(()))?;
}
Signal::Events(events) => {
let entry = recorder.record(events);
sender.send(entry).or(Err(()))?;
}
},
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(()),
};
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use signature::{KeyPair, KeyPairUtil};
use std::sync::mpsc::channel;
use std::thread::sleep;
#[test]
fn test_historian() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);
input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();
let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
let entry2 = record_stage.entry_receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(input);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);
drop(record_stage.entry_receiver);
input.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}
#[test]
fn test_events() {
let (input, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero, None);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let event0 = Event::new_transaction(&alice_keypair, bob_pubkey, 1, zero);
let event1 = Event::new_transaction(&alice_keypair, bob_pubkey, 2, zero);
input.send(Signal::Events(vec![event0, event1])).unwrap();
drop(input);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}
#[test]
#[ignore]
fn test_ticking_historian() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, Some(Duration::from_millis(20)));
sleep(Duration::from_millis(900));
input.send(Signal::Tick).unwrap();
drop(input);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}
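A minimal driver for the stage (the same shape as test_events above): feed Signals in, read Entries out, and close the input channel to stop the thread.

use hash::Hash;
use record_stage::{RecordStage, Signal};
use std::sync::mpsc::channel;

fn drive() {
    let (signal_sender, signal_receiver) = channel();
    let stage = RecordStage::new(signal_receiver, &Hash::default(), None);
    signal_sender.send(Signal::Tick).unwrap();
    drop(signal_sender); // disconnect ends the stage's loop
    let entries: Vec<_> = stage.entry_receiver.iter().collect();
    assert_eq!(entries.len(), 1);
    stage.thread_hdl.join().unwrap();
}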

src/recorder.rs

@@ -1,45 +1,21 @@
//! The `recorder` module provides an object for generating a Proof of History.
-//! It records Event items on behalf of its users. It continuously generates
-//! new hashes, only stopping to check if it has been sent an Event item. It
-//! tags each Event with an Entry, and sends it back. The Entry includes the
-//! Event, the latest hash, and the number of hashes since the last event.
-//! The resulting stream of entries represents ordered events in time.
-use entry::{create_entry_mut, Entry};
+//! It records Event items on behalf of its users.
+use entry::Entry;
use event::Event;
use hash::{hash, Hash};
-use std::mem;
-use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use std::time::{Duration, Instant};

-#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
-pub enum Signal {
-    Tick,
-    Event(Event),
-}
-
-#[derive(Debug, PartialEq, Eq)]
-pub enum ExitReason {
-    RecvDisconnected,
-    SendDisconnected,
-}
-
pub struct Recorder {
-    sender: SyncSender<Entry>,
-    receiver: Receiver<Signal>,
    last_hash: Hash,
-    events: Vec<Event>,
    num_hashes: u64,
-    num_ticks: u64,
+    num_ticks: u32,
}

impl Recorder {
-    pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
+    pub fn new(last_hash: Hash) -> Self {
        Recorder {
-            receiver,
-            sender,
            last_hash,
-            events: vec![],
            num_hashes: 0,
            num_ticks: 0,
        }
@@ -50,40 +26,17 @@ impl Recorder {
        self.num_hashes += 1;
    }

-    pub fn record_entry(&mut self) -> Result<(), ExitReason> {
-        let events = mem::replace(&mut self.events, vec![]);
-        let entry = create_entry_mut(&mut self.last_hash, &mut self.num_hashes, events);
-        self.sender
-            .send(entry)
-            .or(Err(ExitReason::SendDisconnected))?;
-        Ok(())
-    }
-
-    pub fn process_events(
-        &mut self,
-        epoch: Instant,
-        ms_per_tick: Option<u64>,
-    ) -> Result<(), ExitReason> {
-        loop {
-            if let Some(ms) = ms_per_tick {
-                if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
-                    self.record_entry()?;
-                    self.num_ticks += 1;
-                }
-            }
-            match self.receiver.try_recv() {
-                Ok(signal) => match signal {
-                    Signal::Tick => {
-                        self.record_entry()?;
-                    }
-                    Signal::Event(event) => {
-                        self.events.push(event);
-                    }
-                },
-                Err(TryRecvError::Empty) => return Ok(()),
-                Err(TryRecvError::Disconnected) => return Err(ExitReason::RecvDisconnected),
-            };
-        }
-    }
-}
+    pub fn record(&mut self, events: Vec<Event>) -> Entry {
+        Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, events)
+    }
+
+    pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
+        if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
+            // TODO: don't let this overflow u32
+            self.num_ticks += 1;
+            Some(self.record(vec![]))
+        } else {
+            None
+        }
+    }
+}
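The tick() contract in isolation: with period d, the n-th tick fires only once total elapsed time exceeds d * (n + 1), so a caller that falls behind drains the backlog one call at a time. A small sketch (timings illustrative):

use hash::Hash;
use recorder::Recorder;
use std::thread::sleep;
use std::time::{Duration, Instant};

fn demo() {
    let mut recorder = Recorder::new(Hash::default());
    let start = Instant::now();
    let period = Duration::from_millis(20);
    sleep(Duration::from_millis(50));
    // ~50ms elapsed: the 20ms and 40ms boundaries have both passed,
    // so two consecutive calls each produce a tick entry.
    assert!(recorder.tick(start, period).is_some());
    assert!(recorder.tick(start, period).is_some());
}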

src/replicate_stage.rs (new file, 52 lines)

@@ -0,0 +1,52 @@
//! The `replicate_stage` replicates transactions broadcast by the leader.
use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
pub struct ReplicateStage {
pub thread_hdl: JoinHandle<()>,
}
impl ReplicateStage {
/// Process verified blobs, already in order
fn replicate_requests(
bank: &Arc<Bank>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
let res = bank.process_verified_entries(entries);
if res.is_err() {
error!("process_verified_entries {} {:?}", blobs.len(), res);
}
res?;
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler,
) -> Self {
let thread_hdl = spawn(move || loop {
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
ReplicateStage { thread_hdl }
}
}

src/request.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
//! The `request` module defines the messages for the thin client.
use hash::Hash;
use signature::PublicKey;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
true
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
LastId { id: Hash },
TransactionCount { transaction_count: u64 },
}
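On the wire (an assumption drawn from this change, not stated in it): a request travels as a bincode-serialized Request in a single UDP packet, answered by a bincode-serialized Response. A thin-client sketch under that assumption:

use bincode::{deserialize, serialize};
use request::{Request, Response};
use std::net::UdpSocket;

fn query_transaction_count(socket: &UdpSocket, server: &str) -> u64 {
    let req = serialize(&Request::GetTransactionCount).expect("serialize request");
    socket.send_to(&req, server).expect("send request");
    let mut buf = [0u8; 256]; // responses assumed to fit in PACKET_DATA_SIZE
    let (n, _from) = socket.recv_from(&mut buf).expect("recv response");
    match deserialize(&buf[..n]).expect("deserialize response") {
        Response::TransactionCount { transaction_count } => transaction_count,
        other => panic!("unexpected response: {:?}", other),
    }
}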

src/request_processor.rs (new file, 165 lines)

@@ -0,0 +1,165 @@
//! The `request_processor` module processes thin client Request messages.
use bank::Bank;
use bincode::{deserialize, serialize};
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::{Request, Response};
use result::Result;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::mpsc::Receiver;
use std::time::Instant;
use streamer;
use timing;
pub struct RequestProcessor {
bank: Arc<Bank>,
}
impl RequestProcessor {
    /// Create a new RequestProcessor that wraps the given Bank.
pub fn new(bank: Arc<Bank>) -> Self {
RequestProcessor { bank }
}
/// Process Request items sent by clients.
fn process_request(
&self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.bank.get_balance(&key);
let rsp = (Response::Balance { key, val }, rsp_addr);
info!("Response::Balance {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
}
}
pub fn process_requests(
&self,
reqs: Vec<(Request, SocketAddr)>,
) -> Vec<(Response, SocketAddr)> {
reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect()
}
fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
// Copy-paste of deserialize_requests() because I can't figure out how to
// route the lifetimes in a generic version.
pub fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
    /// Serialize a Response and its destination address into a blob.
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
pub fn process_request_packets(
&self,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
info!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);
let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();
let rsps = self.process_requests(reqs);
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
}

src/request_stage.rs (new file, 48 lines)

@@ -0,0 +1,48 @@
//! The `request_stage` processes thin client Request messages.
use packet;
use packet::SharedPackets;
use request_processor::RequestProcessor;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::thread::{spawn, JoinHandle};
use streamer;
pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
pub request_processor: Arc<RequestProcessor>,
}
impl RequestStage {
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop {
let e = request_processor_.process_request_packets(
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
});
RequestStage {
thread_hdl,
blob_receiver,
request_processor,
}
}
}

src/result.rs

@@ -1,10 +1,10 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.

+use bank;
use bincode;
use serde_json;
use std;
use std::any::Any;
-use accountant;

#[derive(Debug)]
pub enum Error {
@@ -15,9 +15,11 @@ pub enum Error {
    RecvError(std::sync::mpsc::RecvError),
    RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
    Serialize(std::boxed::Box<bincode::ErrorKind>),
-    AccountingError(accountant::AccountingError),
+    BankError(bank::BankError),
    SendError,
    Services,
+    CrdtTooSmall,
+    GenericError,
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -32,9 +34,9 @@ impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
        Error::RecvTimeoutError(e)
    }
}

-impl std::convert::From<accountant::AccountingError> for Error {
-    fn from(e: accountant::AccountingError) -> Error {
-        Error::AccountingError(e)
+impl std::convert::From<bank::BankError> for Error {
+    fn from(e: bank::BankError) -> Error {
+        Error::BankError(e)
    }
}

impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
@@ -77,9 +79,10 @@ mod tests {
    use std::io;
    use std::io::Write;
    use std::net::SocketAddr;
-    use std::sync::mpsc::channel;
+    use std::panic;
    use std::sync::mpsc::RecvError;
    use std::sync::mpsc::RecvTimeoutError;
+    use std::sync::mpsc::channel;
    use std::thread;

    fn addr_parse_error() -> Result<SocketAddr> {
@@ -88,6 +91,7 @@ mod tests {
    }

    fn join_error() -> Result<()> {
+        panic::set_hook(Box::new(|_info| {}));
        let r = thread::spawn(|| panic!("hi")).join()?;
        Ok(r)
    }

src/rpu.rs (new file, 55 lines)

@@ -0,0 +1,55 @@
//! The `rpu` module implements the Request Processing Unit, a
//! 5-stage transaction processing pipeline in software.
use bank::Bank;
use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::thread::JoinHandle;
use streamer;
pub struct Rpu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Rpu {
pub fn new(
bank: Arc<Bank>,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let blob_recycler = packet::BlobRecycler::default();
let request_processor = RequestProcessor::new(bank.clone());
let request_stage = RequestStage::new(
request_processor,
exit.clone(),
packet_receiver,
packet_recycler.clone(),
blob_recycler.clone(),
);
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
request_stage.blob_receiver,
);
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
Rpu { thread_hdls }
}
}

src/server.rs (new file, 77 lines)

@@ -0,0 +1,77 @@
//! The `server` module hosts all the server microservices.
use bank::Bank;
use crdt::ReplicatedData;
use hash::Hash;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::thread::JoinHandle;
use std::time::Duration;
use tpu::Tpu;
use tvu::Tvu;
pub struct Server {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Server {
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
start_hash: Hash,
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
events_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let tpu = Tpu::new(
bank.clone(),
start_hash,
tick_duration,
me,
events_socket,
broadcast_socket,
gossip_socket,
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);
Server { thread_hdls }
}
pub fn new_validator(
bank: Bank,
me: ReplicatedData,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
replicate_socket: UdpSocket,
gossip_socket: UdpSocket,
leader_repl_data: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let tvu = Tvu::new(
bank.clone(),
me,
gossip_socket,
replicate_socket,
leader_repl_data,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls);
Server { thread_hdls }
}
}

src/sig_verify_stage.rs (new file, 96 lines)

@@ -0,0 +1,96 @@
//! The `sig_verify_stage` implements the signature verification stage of the TPU.
use ecdsa;
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;
pub struct SigVerifyStage {
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl SigVerifyStage {
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
let (verified_sender, verified_receiver) = channel();
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
SigVerifyStage {
thread_hdls,
verified_receiver,
}
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}
fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> Result<()> {
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
let now = Instant::now();
let batch_len = batch.len();
let rand_id = thread_rng().gen_range(0, 100);
info!(
"@{:?} verifier: verifying: {} id: {}",
timing::timestamp(),
batch.len(),
rand_id
);
let verified_batch = Self::verify_batch(batch);
sendr
.lock()
.expect("lock in fn verify_batch in tpu")
.send(verified_batch)?;
let total_time_ms = timing::duration_as_ms(&now.elapsed());
let total_time_s = timing::duration_as_s(&now.elapsed());
info!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
batch_len,
total_time_ms,
rand_id,
len,
(len as f32 / total_time_s)
);
Ok(())
}
fn verifier_service(
exit: Arc<AtomicBool>,
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}
fn verifier_services(
exit: Arc<AtomicBool>,
packet_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packet_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.collect()
}
}
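
verifier_services fans one packet stream out to four identical workers by wrapping both channel ends in Arc<Mutex<...>>; each worker takes a turn on the receiver, verifies a batch, then forwards the result. A self-contained sketch of that worker-pool shape, with a trivial stand-in for ed25519_verify (all names here are hypothetical):

```rust
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};

// N workers sharing one input and one output channel via Arc<Mutex<...>>.
// The receiver lock is held across the blocking recv, so workers take
// turns pulling batches, exactly like the verifier threads above.
fn worker_pool(
    n: usize,
    receiver: Receiver<Vec<u8>>,
    sender: Sender<(Vec<u8>, bool)>,
) -> Vec<JoinHandle<()>> {
    let receiver = Arc::new(Mutex::new(receiver));
    let sender = Arc::new(Mutex::new(sender));
    (0..n)
        .map(|_| {
            let r = receiver.clone();
            let s = sender.clone();
            spawn(move || loop {
                let batch = match r.lock().unwrap().recv() {
                    Ok(b) => b,
                    Err(_) => break, // channel closed: all senders dropped
                };
                let ok = !batch.is_empty(); // stand-in for ed25519_verify
                if s.lock().unwrap().send((batch, ok)).is_err() {
                    break;
                }
            })
        })
        .collect()
}

fn main() {
    let (in_s, in_r) = channel();
    let (out_s, out_r) = channel();
    let workers = worker_pool(4, in_r, out_s);
    for i in 0..8u8 {
        in_s.send(vec![i]).unwrap();
    }
    drop(in_s); // close the input so the workers exit
    for _ in 0..8 {
        let (_batch, ok) = out_r.recv().unwrap();
        assert!(ok);
    }
    for t in workers {
        t.join().unwrap();
    }
}
```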

src/signature.rs (modified)

@@ -1,9 +1,14 @@
//! The `signature` module provides functionality for public and private keys.
-use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
+use generic_array::typenum::{U32, U64};
+use rand::{ChaChaRng, Rng, SeedableRng};
+use rayon::prelude::*;
+use ring::error::Unspecified;
+use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
+use std::cell::RefCell;
use untrusted;
pub type KeyPair = Ed25519KeyPair;
@@ -19,8 +24,10 @@ impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
-let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
-signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
+let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
+.expect("generate_pkcs8 in signature pb fn new");
+signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
+.expect("from_pcks8 in signature pb fn new")
}
/// Return the public key for the given keypair
@@ -41,3 +48,92 @@ impl SignatureUtil for GenericArray<u8, U64> {
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}
pub struct GenKeys {
// This is necessary because the rng needs to mutate its state to remain
// deterministic, and the fill trait requires an immutable reference to self
generator: RefCell<ChaChaRng>,
}
impl GenKeys {
pub fn new(seed: &[u8]) -> GenKeys {
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
let rng = ChaChaRng::from_seed(&seed32);
GenKeys {
generator: RefCell::new(rng),
}
}
pub fn new_key(&self) -> Vec<u8> {
KeyPair::generate_pkcs8(self).unwrap().to_vec()
}
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
let mut rng = self.generator.borrow_mut();
(0..n).map(|_| rng.gen()).collect()
}
pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
self.gen_n_seeds(n)
.into_par_iter()
.map(|seed| {
let pkcs8 = GenKeys::new(&seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
})
.collect()
}
}
impl SecureRandom for GenKeys {
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
let mut rng = self.generator.borrow_mut();
rng.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
#[test]
fn test_new_key_is_deterministic() {
let seed = [1, 2, 3, 4];
let rng0 = GenKeys::new(&seed);
let rng1 = GenKeys::new(&seed);
for _ in 0..100 {
assert_eq!(rng0.new_key(), rng1.new_key());
}
}
fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
GenKeys::new(&seed)
.gen_n_keypairs(n)
.into_iter()
.map(|x| x.pubkey())
.collect()
}
#[test]
fn test_gen_n_pubkeys_deterministic() {
let seed = [1, 2, 3, 4];
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use super::*;
#[bench]
fn bench_gen_keys(b: &mut Bencher) {
let seed: &[_] = &[1, 2, 3, 4];
let rnd = GenKeys::new(seed);
b.iter(|| rnd.gen_n_keypairs(1000));
}
}
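
GenKeys hangs determinism on one property: the same seed must yield the same stream of sub-seeds, one per keypair, which is what the tests above assert. A toy model of that property with a hand-rolled xorshift64* generator standing in for ChaChaRng (no real key material is produced; all names are hypothetical):

```rust
// Deterministic PRNG seeded from user input, used to derive one sub-seed
// per "key". The real GenKeys does the same dance with ChaChaRng + Ed25519.
struct ToyGenKeys {
    state: u64,
}

impl ToyGenKeys {
    fn new(seed: u64) -> Self {
        ToyGenKeys { state: seed.max(1) } // xorshift state must be nonzero
    }
    fn next(&mut self) -> u64 {
        // xorshift64* step: fully determined by the current state
        self.state ^= self.state >> 12;
        self.state ^= self.state << 25;
        self.state ^= self.state >> 27;
        self.state.wrapping_mul(0x2545F4914F6CDD1D)
    }
    fn gen_n_seeds(&mut self, n: usize) -> Vec<u64> {
        (0..n).map(|_| self.next()).collect()
    }
}

fn main() {
    let a = ToyGenKeys::new(1234).gen_n_seeds(50);
    let b = ToyGenKeys::new(1234).gen_n_seeds(50);
    assert_eq!(a, b); // same seed, same "keys": the property GenKeys needs
    let c = ToyGenKeys::new(9999).gen_n_seeds(50);
    assert_ne!(a, c); // different seed, different stream
}
```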

src/streamer.rs (modified)

@@ -1,15 +1,18 @@
//! The `streamer` module defines a set of services for efficiently pulling data from udp sockets.
+use crdt::Crdt;
+#[cfg(feature = "erasure")]
+use erasure;
-use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
+use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets};
-use result::Result;
+use result::{Error, Result};
use std::collections::VecDeque;
-use std::net::UdpSocket;
+use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
-use subscribers::Subscribers;
+pub const WINDOW_SIZE: usize = 2 * 1024;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
@@ -25,7 +28,10 @@ fn recv_loop(
let msgs = re.allocate();
let msgs_ = msgs.clone();
loop {
-match msgs.write().unwrap().recv_from(sock) {
+match msgs.write()
+.expect("write lock in fn recv_loop")
+.recv_from(sock)
+{
Ok(()) => {
channel.send(msgs_)?;
break;
@@ -45,14 +51,16 @@ pub fn receiver(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: PacketRecycler,
-channel: PacketSender,
-) -> Result<JoinHandle<()>> {
-let timer = Duration::new(1, 0);
-sock.set_read_timeout(Some(timer))?;
-Ok(spawn(move || {
-let _ = recv_loop(&sock, &exit, &recycler, &channel);
-()
-}))
+packet_sender: PacketSender,
+) -> JoinHandle<()> {
+let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
+if res.is_err() {
+panic!("streamer::receiver set_read_timeout error");
+}
+spawn(move || {
+let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
+()
+})
}
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
@@ -62,6 +70,25 @@ fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Res
Ok(())
}
pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut len = msgs.read().unwrap().packets.len();
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
len += more.read().unwrap().packets.len();
batch.push(more);
if len > 100_000 {
break;
}
}
debug!("batch len {}", batch.len());
Ok((batch, len))
}
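
recv_batch blocks for the first message, then greedily drains whatever else is already queued, capped so one call cannot hoard the channel. The same pattern, runnable with plain Vec<u8> packets standing in for SharedPackets:

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block for one message, then batch up everything already waiting,
// up to a size cap, so downstream stages work on large batches.
fn recv_batch(r: &Receiver<Vec<u8>>) -> Result<(Vec<Vec<u8>>, usize), RecvTimeoutError> {
    let first = r.recv_timeout(Duration::new(1, 0))?;
    let mut len = first.len();
    let mut batch = vec![first];
    while let Ok(more) = r.try_recv() {
        len += more.len();
        batch.push(more);
        if len > 100_000 {
            break; // cap the batch so one call can't starve the others
        }
    }
    Ok((batch, len))
}

fn main() {
    let (s, r) = channel();
    for i in 0..5u8 {
        s.send(vec![i; 10]).unwrap();
    }
    let (batch, len) = recv_batch(&r).unwrap();
    assert_eq!(batch.len(), 5); // everything queued came out in one batch
    assert_eq!(len, 50);
}
```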
pub fn responder(
sock: UdpSocket,
exit: Arc<AtomicBool>,
@@ -99,43 +126,99 @@ pub fn blob_receiver(
if exit.load(Ordering::Relaxed) {
break;
}
-let ret = recv_blobs(&recycler, &sock, &s);
-if ret.is_err() {
-break;
-}
+let _ = recv_blobs(&recycler, &sock, &s);
});
Ok(t)
}
fn find_next_missing(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
consumed: &mut usize,
received: &mut usize,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
if *received <= *consumed {
return Err(Error::GenericError);
}
let window = locked_window.read().unwrap();
let reqs: Vec<_> = (*consumed..*received)
.filter_map(|pix| {
let i = pix % WINDOW_SIZE;
if let &None = &window[i] {
let val = crdt.read().unwrap().window_index_request(pix as u64);
if let Ok((to, req)) = val {
return Some((to, req));
}
}
None
})
.collect();
Ok(reqs)
}
fn repair_window(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
last: &mut usize,
times: &mut usize,
consumed: &mut usize,
received: &mut usize,
) -> Result<()> {
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
//exponential backoff
if *last != *consumed {
*times = 0;
}
*last = *consumed;
*times += 1;
//retry only when times crosses a power of two (7 -> 8, 15 -> 16, ...); otherwise return Ok
if *times & (*times - 1) != 0 {
trace!("repair_window counter {} {}", *times, *consumed);
return Ok(());
}
info!("repair_window request {} {}", *consumed, *received);
let sock = UdpSocket::bind("0.0.0.0:0")?;
for (to, req) in reqs {
//todo cache socket
sock.send_to(&req, to)?;
}
Ok(())
}
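
The guard in repair_window fires a request only when `times` is a power of two, since `times & (times - 1)` is zero exactly then; that spaces repair requests exponentially while `consumed` is stuck. The check, isolated and verified:

```rust
// Power-of-two test used by repair_window's exponential backoff.
fn should_retry(times: usize) -> bool {
    times & (times - 1) == 0
}

fn main() {
    let fired: Vec<usize> = (1..=64).filter(|&t| should_retry(t)).collect();
    // requests go out on attempts 1, 2, 4, 8, 16, 32, 64: exponential backoff
    assert_eq!(fired, vec![1, 2, 4, 8, 16, 32, 64]);
}
```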
fn recv_window(
-window: &mut Vec<Option<SharedBlob>>,
-subs: &Arc<RwLock<Subscribers>>,
+locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
+crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
consumed: &mut usize,
+received: &mut usize,
r: &BlobReceiver,
s: &BlobSender,
retransmit: &BlobSender,
) -> Result<()> {
-let timer = Duration::new(1, 0);
+let timer = Duration::from_millis(200);
let mut dq = r.recv_timeout(timer)?;
+let leader_id = crdt.read()
+.expect("'crdt' read lock in fn recv_window")
+.leader_data()
+.id;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
-let rsubs = subs.read().unwrap();
for b in &dq {
-let p = b.read().unwrap();
+let p = b.read().expect("'b' read lock in fn recv_window");
//TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
-"idx: {} addr: {:?} leader: {:?}",
-p.get_index().unwrap(),
+"idx: {} addr: {:?} id: {:?} leader: {:?}",
+p.get_index().expect("get_index in fn recv_window"),
+p.get_id().expect("get_id in trace! fn recv_window"),
p.meta.addr(),
-rsubs.leader.addr
+leader_id
);
-if p.meta.addr() == rsubs.leader.addr {
+if p.get_id().expect("get_id in fn recv_window") == leader_id {
//TODO
//need to copy the retransmitted blob
//otherwise we get into races with which thread
@@ -145,7 +228,7 @@ fn recv_window(
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
-let mut mnv = nv.write().unwrap();
+let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
@@ -161,68 +244,187 @@ fn recv_window(
let mut contq = VecDeque::new();
while let Some(b) = dq.pop_front() {
let b_ = b.clone();
-let p = b.write().unwrap();
+let p = b.write().expect("'b' write lock in fn recv_window");
let pix = p.get_index()? as usize;
-let w = pix % NUM_BLOBS;
+if pix > *received {
+*received = pix;
+}
+let w = pix % WINDOW_SIZE;
//TODO, after the block are authenticated
//if we get different blocks at the same index
//that is a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size);
{
+let mut window = locked_window.write().unwrap();
if window[w].is_none() {
window[w] = Some(b_);
-} else {
-debug!("duplicate blob at index {:}", w);
+} else if let &Some(ref cblob) = &window[w] {
+if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
+warn!("overrun blob at index {:}", w);
+} else {
+debug!("duplicate blob at index {:}", w);
+}
}
loop {
-let k = *consumed % NUM_BLOBS;
+let k = *consumed % WINDOW_SIZE;
trace!("k: {} consumed: {}", k, *consumed);
if window[k].is_none() {
break;
}
-contq.push_back(window[k].clone().unwrap());
+contq.push_back(window[k].clone().expect("clone in fn recv_window"));
window[k] = None;
*consumed += 1;
}
}
}
{
let buf: Vec<_> = locked_window
.read()
.unwrap()
.iter()
.enumerate()
.map(|(i, v)| {
if i == (*consumed % WINDOW_SIZE) {
assert!(v.is_none());
"_"
} else if v.is_none() {
"0"
} else {
"1"
}
})
.collect();
trace!("WINDOW: {}", buf.join(""));
}
trace!("sending contq.len: {}", contq.len()); trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() { if !contq.is_empty() {
trace!("sending contq.len: {}", contq.len());
s.send(contq)?; s.send(contq)?;
} }
Ok(()) Ok(())
} }
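
recv_window treats the window as a fixed ring: a blob with index pix lands in slot pix % WINDOW_SIZE, and `consumed` only advances across a gap-free prefix, so out-of-order arrivals wait until the hole fills. A minimal model of that insert-and-drain logic, with u64 payloads standing in for blobs:

```rust
const WINDOW_SIZE: usize = 8;

// Place an arriving index into its ring slot, then drain every consecutive
// entry starting at `consumed`, exactly as the loop in recv_window does.
fn insert_and_drain(
    window: &mut [Option<u64>],
    consumed: &mut usize,
    pix: usize,
) -> Vec<u64> {
    let w = pix % WINDOW_SIZE;
    if window[w].is_none() {
        window[w] = Some(pix as u64);
    }
    let mut contq = vec![];
    loop {
        let k = *consumed % WINDOW_SIZE;
        match window[k].take() {
            Some(b) => {
                contq.push(b);
                *consumed += 1;
            }
            None => break, // hole: wait for the missing index
        }
    }
    contq
}

fn main() {
    let mut window = [None; WINDOW_SIZE];
    let mut consumed = 0;
    assert_eq!(insert_and_drain(&mut window, &mut consumed, 1), vec![]); // gap at 0
    assert_eq!(insert_and_drain(&mut window, &mut consumed, 0), vec![0, 1]);
    assert_eq!(insert_and_drain(&mut window, &mut consumed, 2), vec![2]);
}
```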
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
}
pub fn window(
exit: Arc<AtomicBool>,
-subs: Arc<RwLock<Subscribers>>,
+crdt: Arc<RwLock<Crdt>>,
+window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
) -> JoinHandle<()> {
spawn(move || {
-let mut window = vec![None; NUM_BLOBS];
let mut consumed = 0;
+let mut received = 0;
+let mut last = 0;
+let mut times = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
-&mut window,
-&subs,
+&window,
+&crdt,
&recycler,
&mut consumed,
+&mut received,
&r,
&s,
&retransmit,
);
let _ = repair_window(
&window,
&crdt,
&mut last,
&mut times,
&mut consumed,
&mut received,
);
}
})
}
fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
transmit_index: &mut u64,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
let mut blobs = dq.into_iter().collect();
/// appends codes to the list of blobs allowing us to reconstruct the stream
#[cfg(feature = "erasure")]
erasure::generate_coding(re, blobs, consumed);
Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
// keep the cache of blobs that are broadcast
{
let mut win = window.write().unwrap();
for b in &blobs {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
if let Some(x) = &win[pos] {
trace!(
"popped {} at {}",
x.read().unwrap().get_index().unwrap(),
pos
);
recycler.recycle(x.clone());
}
trace!("null {}", pos);
win[pos] = None;
assert!(win[pos].is_none());
}
while let Some(b) = blobs.pop() {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
trace!("caching {} at {}", ix, pos);
assert!(win[pos].is_none());
win[pos] = Some(b);
}
}
Ok(())
}
/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || {
let mut transmit_index = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
}
})
}
fn retransmit(
-subs: &Arc<RwLock<Subscribers>>,
+crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
@@ -233,10 +435,8 @@ fn retransmit(
dq.append(&mut nq);
}
{
-let wsubs = subs.read().unwrap();
for b in &dq {
-let mut mb = b.write().unwrap();
-wsubs.retransmit(&mut mb, sock)?;
+Crdt::retransmit(&crdt, b, sock)?;
}
}
while let Some(b) = dq.pop_front() {
@@ -246,26 +446,30 @@ fn retransmit(
}
/// Service to retransmit messages from the leader to layer 1 nodes.
-/// See `subscribers` for network layer definitions.
+/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1.
/// * `exit` - Boolean to signal system exit.
-/// * `subs` - Shared Subscriber structure. This structure needs to be updated and popualted by
-/// the accountant.
+/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
-subs: Arc<RwLock<Subscribers>>,
+crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
-spawn(move || loop {
-if exit.load(Ordering::Relaxed) {
-break;
-}
-let _ = retransmit(&subs, &recycler, &r, &sock);
+spawn(move || {
+trace!("retransmitter started");
+loop {
+if exit.load(Ordering::Relaxed) {
+break;
+}
+// TODO: handle this error
+let _ = retransmit(&crdt, &recycler, &r, &sock);
+}
+trace!("exiting retransmitter");
})
}
@@ -335,12 +539,14 @@ mod bench {
}
fn run_streamer_bench() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
+read.set_read_timeout(Some(Duration::new(1, 0)))?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
-let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader)?;
+let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
@@ -356,7 +562,7 @@ mod bench {
let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
let ftime = (time as f64) / 10000000000f64;
let fcount = (end_val - start_val) as f64;
-println!("performance: {:?}", fcount / ftime);
+trace!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
@@ -373,7 +579,11 @@ mod bench {
#[cfg(test)]
mod test {
+use crdt::{Crdt, ReplicatedData};
+use logger;
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
+use signature::KeyPair;
+use signature::KeyPairUtil;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
@@ -381,17 +591,17 @@ mod test {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
+use std::thread::sleep;
use std::time::Duration;
-use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
-PacketReceiver};
-use subscribers::{Node, Subscribers};
+use streamer::{default_window, BlobReceiver, PacketReceiver};
+use streamer::{blob_receiver, receiver, responder, retransmitter, window};
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
-e => println!("error {:?}", e),
+e => info!("error {:?}", e),
}
if *num == 10 {
break;
@@ -407,13 +617,15 @@ mod test {
#[test]
pub fn streamer_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
+read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
-let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader).unwrap();
+let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
@@ -445,7 +657,7 @@ mod test {
}
*num += m.len();
}
-e => println!("error {:?}", e),
+e => info!("error {:?}", e),
}
if *num == 10 {
break;
@@ -455,24 +667,36 @@ mod test {
#[test]
pub fn window_send_test() {
+let pubkey_me = KeyPair::new().pubkey();
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
+let event = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
-let subs = Arc::new(RwLock::new(Subscribers::new(
-Node::default(),
-Node::new([0; 8], 0, send.local_addr().unwrap()),
-&[],
-)));
+let rep_data = ReplicatedData::new(
+pubkey_me,
+read.local_addr().unwrap(),
+send.local_addr().unwrap(),
+serve.local_addr().unwrap(),
+event.local_addr().unwrap(),
+);
+let mut crdt_me = Crdt::new(rep_data);
+let me_id = crdt_me.my_data().id;
+crdt_me.set_leader(me_id);
+let subs = Arc::new(RwLock::new(crdt_me));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver =
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
+let win = default_window();
let t_window = window(
exit.clone(),
subs,
+win,
resp_recycler.clone(),
r_reader,
s_window,
@@ -487,6 +711,7 @@ mod test {
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
+w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
@@ -507,43 +732,110 @@ mod test {
t_window.join().expect("join"); t_window.join().expect("join");
} }
fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
let event = UdpSocket::bind("127.0.0.1:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
event.local_addr().unwrap(),
);
trace!("data: {:?}", d);
let crdt = Crdt::new(d);
(Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
}
#[test]
+#[ignore]
+//retransmit from leader to replicate target
pub fn retransmit() {
-let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
-let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+logger::setup();
+trace!("retransmit test start");
let exit = Arc::new(AtomicBool::new(false));
-let subs = Arc::new(RwLock::new(Subscribers::new(
-Node::default(),
-Node::default(),
-&[Node::new([0; 8], 1, read.local_addr().unwrap())],
-)));
+let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
+let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
+let leader_data = crdt_leader.read().unwrap().my_data().clone();
+crdt_leader.write().unwrap().insert(&leader_data);
+crdt_leader.write().unwrap().set_leader(leader_data.id);
+let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
+let window_leader = Arc::new(RwLock::new(vec![]));
+let t_crdt_leader_l = Crdt::listen(
+crdt_leader.clone(),
+window_leader,
+sock_gossip_leader,
+exit.clone(),
+);
+crdt_target.write().unwrap().insert(&leader_data);
+crdt_target.write().unwrap().set_leader(leader_data.id);
+let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
+let window_target = Arc::new(RwLock::new(vec![]));
+let t_crdt_target_l = Crdt::listen(
+crdt_target.clone(),
+window_target,
+sock_gossip_target,
+exit.clone(),
+);
+//leader retransmitter
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
-let saddr = send.local_addr().unwrap();
+let saddr = sock_leader.local_addr().unwrap();
let t_retransmit = retransmitter(
-send,
+sock_leader,
exit.clone(),
-subs,
+crdt_leader.clone(),
blob_recycler.clone(),
r_retransmit,
);
+//target receiver
+let (s_blob_receiver, r_blob_receiver) = channel();
+let t_receiver = blob_receiver(
+exit.clone(),
+blob_recycler.clone(),
+sock_replicate_target,
+s_blob_receiver,
+).unwrap();
+for _ in 0..10 {
+let done = crdt_target.read().unwrap().update_index == 2
+&& crdt_leader.read().unwrap().update_index == 2;
+if done {
+break;
+}
+let timer = Duration::new(1, 0);
+sleep(timer);
+}
+//send the data through
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
-let (s_blob_receiver, r_blob_receiver) = channel();
-let t_receiver =
-blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
-let mut oq = r_blob_receiver.recv().unwrap();
+let timer = Duration::new(5, 0);
+trace!("Waiting for timeout");
+let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
-t_receiver.join().expect("join");
-t_retransmit.join().expect("join");
+let threads = vec![
+t_receiver,
+t_retransmit,
+t_crdt_target_g,
+t_crdt_target_l,
+t_crdt_leader_g,
+t_crdt_leader_l,
+];
+for t in threads {
+t.join().unwrap();
+}
}
}

src/subscribers.rs (deleted, 149 lines)

@@ -1,149 +0,0 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};
use std::fmt;
#[derive(Clone, PartialEq)]
pub struct Node {
pub id: [u64; 8],
pub weight: u64,
pub addr: SocketAddr,
}
//sockaddr doesn't implement default
impl Default for Node {
fn default() -> Node {
Node {
id: [0; 8],
weight: 0,
addr: "0.0.0.0:0".parse().unwrap(),
}
}
}
impl Node {
pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
Node { id, weight, addr }
}
fn key(&self) -> i64 {
(self.weight as i64).checked_neg().unwrap()
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
}
}
pub struct Subscribers {
data: Vec<Node>,
pub me: Node,
pub leader: Node,
}
impl Subscribers {
pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
let mut h = Subscribers {
data: vec![],
me: me.clone(),
leader: leader.clone(),
};
h.insert(&[me, leader]);
h.insert(network);
h
}
/// retransmit messages from the leader to layer 1 nodes
pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
let errs: Vec<_> = self.data
.par_iter()
.map(|i| {
if self.me == *i {
return Ok(0);
}
if self.leader == *i {
return Ok(0);
}
trace!("retransmit blob to {}", i.addr);
s.send_to(&blob.data[..blob.meta.size], &i.addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
pub fn insert(&mut self, ns: &[Node]) {
self.data.extend_from_slice(ns);
self.data.sort_by_key(Node::key);
}
}
#[cfg(test)]
mod test {
use packet::Blob;
use rayon::prelude::*;
use std::net::UdpSocket;
use std::time::Duration;
use subscribers::{Node, Subscribers};
#[test]
pub fn subscriber() {
let mut me = Node::default();
me.weight = 10;
let mut leader = Node::default();
leader.weight = 11;
let mut s = Subscribers::new(me, leader, &[]);
assert_eq!(s.data.len(), 2);
assert_eq!(s.data[0].weight, 11);
assert_eq!(s.data[1].weight, 10);
let mut n = Node::default();
n.weight = 12;
s.insert(&[n]);
assert_eq!(s.data.len(), 3);
assert_eq!(s.data[0].weight, 12);
}
#[test]
pub fn retransmit() {
let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
s.insert(&[n3]);
let mut b = Blob::default();
b.meta.size = 10;
let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
s.retransmit(&mut b, &s4).unwrap();
let res: Vec<_> = [s1, s2, s3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
s.recv_from(&mut b.data).is_err()
})
.collect();
assert_eq!(res, [true, true, false]);
let mut n4 = Node::default();
n4.addr = "255.255.255.255:1".parse().unwrap();
s.insert(&[n4]);
assert!(s.retransmit(&mut b, &s4).is_err());
}
}

src/thin_client.rs (new file, 454 lines)

@@ -0,0 +1,454 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
use bincode::{deserialize, serialize};
use event::Event;
use futures::future::{ok, FutureResult};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;
pub struct ThinClient {
requests_addr: SocketAddr,
requests_socket: UdpSocket,
events_addr: SocketAddr,
events_socket: UdpSocket,
last_id: Option<Hash>,
transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl ThinClient {
/// Create a new ThinClient that will interface with Rpu
/// over `requests_socket` and `events_socket`. To receive responses, the caller must bind
/// `requests_socket` to a public address before invoking ThinClient methods.
pub fn new(
requests_addr: SocketAddr,
requests_socket: UdpSocket,
events_addr: SocketAddr,
events_socket: UdpSocket,
) -> Self {
let client = ThinClient {
requests_addr,
requests_socket,
events_addr,
events_socket,
last_id: None,
transaction_count: 0,
balances: HashMap::new(),
};
client
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
trace!("start recv_from");
self.requests_socket.recv_from(&mut buf)?;
trace!("end recv_from");
let resp = deserialize(&buf).expect("deserialize balance in thin_client");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
trace!("Response balance {:?} {:?}", key, val);
self.balances.insert(key, val);
}
Response::LastId { id } => {
info!("Response last_id {:?}", id);
self.last_id = Some(id);
}
Response::TransactionCount { transaction_count } => {
info!("Response transaction count {:?}", transaction_count);
self.transaction_count = transaction_count;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let event = Event::Transaction(tr);
let data = serialize(&event).expect("serialize Transaction in pub fn transfer_signed");
self.events_socket.send_to(&data, &self.events_addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
trace!("get_balance");
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_balance");
let mut done = false;
while !done {
let resp = self.recv_response()?;
trace!("recv_response {:?}", resp);
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will hang.
pub fn transaction_count(&mut self) -> u64 {
info!("transaction_count");
let req = Request::GetTransactionCount;
let data =
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn transaction_count");
let mut done = false;
while !done {
let resp = self.recv_response().expect("transaction count dropped");
info!("recv_response {:?}", resp);
if let &Response::TransactionCount { .. } = &resp {
done = true;
}
self.process_response(resp);
}
self.transaction_count
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
info!("get_last_id");
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
let mut done = false;
while !done {
let resp = self.recv_response().expect("get_last_id response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.expect("some last_id"))
}
}
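
get_balance and its siblings all follow one pattern: keep consuming responses, folding each one into the local cache, until the response that answers this particular request shows up. A tiny in-memory model of that loop (a channel stands in for the UDP socket; every type here is a toy, not the real protocol):

```rust
use std::collections::HashMap;
use std::sync::mpsc::{channel, Receiver};

enum Response {
    Balance { key: u8, val: i64 },
    LastId { id: u64 },
}

struct ToyClient {
    responses: Receiver<Response>,
    balances: HashMap<u8, i64>,
    last_id: Option<u64>,
}

impl ToyClient {
    // Fold any response into the cache, whether or not we asked for it.
    fn process_response(&mut self, resp: &Response) {
        match resp {
            Response::Balance { key, val } => {
                self.balances.insert(*key, *val);
            }
            Response::LastId { id } => self.last_id = Some(*id),
        }
    }
    fn get_balance(&mut self, pubkey: u8) -> i64 {
        loop {
            let resp = self.responses.recv().expect("socket closed");
            self.process_response(&resp);
            // Only return once the response for *our* key has arrived.
            if let Response::Balance { key, .. } = resp {
                if key == pubkey {
                    return self.balances[&pubkey];
                }
            }
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    tx.send(Response::LastId { id: 7 }).unwrap();
    tx.send(Response::Balance { key: 2, val: 10 }).unwrap();
    tx.send(Response::Balance { key: 1, val: 500 }).unwrap();
    let mut client = ToyClient { responses: rx, balances: HashMap::new(), last_id: None };
    assert_eq!(client.get_balance(1), 500);
    assert_eq!(client.balances[&2], 10); // cached in passing
    assert_eq!(client.last_id, Some(7));
}
```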
#[cfg(test)]
pub fn poll_get_balance(client: &mut ThinClient, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = client.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
balance
}
#[cfg(test)]
mod tests {
use super::*;
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use futures::Future;
use logger;
use mint::Mint;
use plan::Plan;
use server::Server;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::sleep;
use std::time::Duration;
use streamer::default_window;
use transaction::Instruction;
use tvu::tests::TestNode;
#[test]
fn test_thin_client() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
alice.last_id(),
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(900));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.events_addr,
events_socket,
);
let last_id = client.get_last_id().wait().unwrap();
let _sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
let balance = poll_get_balance(&mut client, &bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_bad_sig() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
alice.last_id(),
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(300));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.events_addr,
events_socket,
);
let last_id = client.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = client.transfer_signed(tr).unwrap();
let last_id = client.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::new_payment(502, bob_pubkey);
}
let _sig = client.transfer_signed(tr2).unwrap();
let balance = poll_get_balance(&mut client, &bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
fn validator(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
alice: &Mint,
threads: &mut Vec<JoinHandle<()>>,
) {
let validator = TestNode::new();
let replicant_bank = Bank::new(&alice);
let mut ts = Server::new_validator(
replicant_bank,
validator.data.clone(),
validator.sockets.requests,
validator.sockets.respond,
validator.sockets.replicate,
validator.sockets.gossip,
leader.clone(),
exit.clone(),
);
threads.append(&mut ts.thread_hdls);
}
fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let mut spy = TestNode::new();
let daddr = "0.0.0.0:0".parse().unwrap();
let me = spy.data.id.clone();
spy.data.replicate_addr = daddr;
spy.data.requests_addr = daddr;
let mut spy_crdt = Crdt::new(spy.data);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let t_spy_listen = Crdt::listen(
spy_ref.clone(),
spy_window,
spy.sockets.gossip,
exit.clone(),
);
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
let mut converged = false;
for _ in 0..30 {
let num = spy_ref.read().unwrap().convergence();
if num == num_nodes as u64 {
converged = true;
break;
}
sleep(Duration::new(1, 0));
}
assert!(converged);
threads.push(t_spy_listen);
threads.push(t_spy_gossip);
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.id != me)
.map(|x| x.clone())
.collect();
v.clone()
}
#[test]
fn test_multi_node() {
logger::setup();
const N: usize = 5;
trace!("test_multi_accountant_stub");
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_bank = Bank::new(&alice);
let server = Server::new_leader(
leader_bank,
alice.last_id(),
None,
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
let mut threads = server.thread_hdls;
for _ in 0..N {
validator(&leader.data, exit.clone(), &alice, &mut threads);
}
let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
//contains the leader addr as well
assert_eq!(servers.len(), N + 1);
//verify leader can do transfer
let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
assert_eq!(leader_balance, 500);
//verify validator has the same balance
let mut success = 0usize;
for server in servers.iter() {
let mut client = mk_client(server);
if let Ok(bal) = poll_get_balance(&mut client, &bob_pubkey) {
trace!("validator balance {}", bal);
if bal == leader_balance {
success += 1;
}
}
}
assert_eq!(success, servers.len());
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}
fn mk_client(leader: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
ThinClient::new(
leader.requests_addr,
requests_socket,
leader.events_addr,
events_socket,
)
}
fn tx_and_retry_get_balance(
leader: &ReplicatedData,
alice: &Mint,
bob_pubkey: &PublicKey,
) -> io::Result<i64> {
let mut client = mk_client(leader);
trace!("getting leader last_id");
let last_id = client.get_last_id().wait().unwrap();
info!("executing leader transer");
let _sig = client
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
.unwrap();
poll_get_balance(&mut client, bob_pubkey)
}
}

src/timing.rs (new file, 17 lines)

@@ -0,0 +1,17 @@
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_ms(d: &Duration) -> u64 {
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
}
pub fn duration_as_s(d: &Duration) -> f32 {
return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0);
}
pub fn timestamp() -> u64 {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("create timestamp in timing");
return duration_as_ms(&now);
}
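
A quick check of the conversions above on a sample duration (the helpers are restated locally so the snippet runs standalone):

```rust
use std::time::Duration;

fn duration_as_ms(d: &Duration) -> u64 {
    (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000)
}

fn duration_as_s(d: &Duration) -> f32 {
    d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}

fn main() {
    let d = Duration::new(1, 500_000_000); // 1s + 500ms
    assert_eq!(duration_as_ms(&d), 1500);
    assert!((duration_as_s(&d) - 1.5).abs() < 1e-6);
}
```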

src/tpu.rs (new file, 93 lines)

@@ -0,0 +1,93 @@
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.
use bank::Bank;
use banking_stage::BankingStage;
use crdt::{Crdt, ReplicatedData};
use hash::Hash;
use packet;
use record_stage::RecordStage;
use sig_verify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use write_stage::WriteStage;
pub struct Tpu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tpu {
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
start_hash: Hash,
tick_duration: Option<Duration>,
me: ReplicatedData,
events_socket: UdpSocket,
broadcast_socket: UdpSocket,
gossip: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
events_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let sig_verify_stage = SigVerifyStage::new(exit.clone(), packet_receiver);
let blob_recycler = packet::BlobRecycler::default();
let banking_stage = BankingStage::new(
bank.clone(),
exit.clone(),
sig_verify_stage.verified_receiver,
packet_recycler.clone(),
);
let record_stage =
RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration);
let write_stage = WriteStage::new(
bank.clone(),
exit.clone(),
blob_recycler.clone(),
Mutex::new(writer),
record_stage.entry_receiver,
);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
write_stage.blob_receiver,
);
let mut thread_hdls = vec![
t_receiver,
banking_stage.thread_hdl,
record_stage.thread_hdl,
write_stage.thread_hdl,
t_gossip,
t_listen,
t_broadcast,
];
thread_hdls.extend(sig_verify_stage.thread_hdls.into_iter());
Tpu { thread_hdls }
}
}
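
Each stage constructor here consumes the previous stage's receiver and exposes its own output receiver as a public field; that is how `record_stage.entry_receiver` ends up feeding WriteStage. The shape, reduced to toy stages passing u64s (DoubleStage is hypothetical):

```rust
use std::sync::mpsc::{channel, Receiver};
use std::thread::{spawn, JoinHandle};

// A stage owns its thread and publishes its output receiver, so stages
// chain by handing one stage's `output` to the next stage's constructor.
struct DoubleStage {
    pub output: Receiver<u64>,
    pub thread_hdl: JoinHandle<()>,
}

impl DoubleStage {
    fn new(input: Receiver<u64>) -> Self {
        let (tx, output) = channel();
        let thread_hdl = spawn(move || {
            for v in input {
                if tx.send(v * 2).is_err() {
                    break; // downstream hung up
                }
            }
        });
        DoubleStage { output, thread_hdl }
    }
}

fn main() {
    let (tx, rx) = channel();
    let stage1 = DoubleStage::new(rx);            // doubles
    let stage2 = DoubleStage::new(stage1.output); // doubles again
    tx.send(3).unwrap();
    assert_eq!(stage2.output.recv().unwrap(), 12);
    drop(tx); // closing the head shuts down the whole pipeline
    stage1.thread_hdl.join().unwrap();
    stage2.thread_hdl.join().unwrap();
}
```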

src/transaction.rs (modified)

@@ -12,37 +12,62 @@ pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub struct TransactionData {
+pub struct Contract {
pub tokens: i64,
-pub last_id: Hash,
pub plan: Plan,
}
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub enum Instruction {
+NewContract(Contract),
+ApplyTimestamp(DateTime<Utc>),
+ApplySignature(Signature),
+}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
pub sig: Signature,
pub from: PublicKey,
-pub data: TransactionData,
+pub instruction: Instruction,
+pub last_id: Hash,
}
impl Transaction {
-/// Create and sign a new Transaction. Used for unit-testing.
-pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
+fn new_from_instruction(
+from_keypair: &KeyPair,
+instruction: Instruction,
+last_id: Hash,
+) -> Self {
let from = from_keypair.pubkey();
-let plan = Plan::Pay(Payment { tokens, to });
let mut tr = Transaction {
sig: Signature::default(),
-data: TransactionData {
-plan,
-tokens,
-last_id,
-},
-from: from,
+instruction,
+last_id,
+from,
};
tr.sign(from_keypair);
tr
}
+/// Create and sign a new Transaction. Used for unit-testing.
+pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
+let plan = Plan::Pay(Payment { tokens, to });
+let instruction = Instruction::NewContract(Contract { plan, tokens });
+Self::new_from_instruction(from_keypair, instruction, last_id)
+}
+/// Create and sign a new Witness Timestamp. Used for unit-testing.
+pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
+let instruction = Instruction::ApplyTimestamp(dt);
+Self::new_from_instruction(from_keypair, instruction, last_id)
+}
+/// Create and sign a new Witness Signature. Used for unit-testing.
+pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
+let instruction = Instruction::ApplySignature(tx_sig);
+Self::new_from_instruction(from_keypair, instruction, last_id)
+}
/// Create and sign a postdated Transaction. Used for unit-testing.
pub fn new_on_date(
from_keypair: &KeyPair,
@@ -56,13 +81,11 @@ impl Transaction {
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
);
+let instruction = Instruction::NewContract(Contract { plan, tokens });
let mut tr = Transaction {
-data: TransactionData {
-plan,
-tokens,
-last_id,
-},
-from: from,
+instruction,
+from,
+last_id,
sig: Signature::default(),
};
tr.sign(from_keypair);
@@ -70,7 +93,10 @@ impl Transaction {
}
fn get_sign_data(&self) -> Vec<u8> {
-serialize(&(&self.data)).unwrap()
+let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
+let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
+data.extend_from_slice(&last_id_data);
+data
}
/// Sign this transaction.
@@ -84,7 +110,11 @@ impl Transaction {
}
pub fn verify_plan(&self) -> bool {
-self.data.plan.verify(self.data.tokens)
+if let Instruction::NewContract(contract) = &self.instruction {
+contract.plan.verify(contract.tokens)
+} else {
+true
+}
}
}
@@ -152,13 +182,11 @@ mod tests {
tokens: 0,
to: Default::default(),
});
+let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
let claim0 = Transaction {
-data: TransactionData {
-plan,
-tokens: 0,
-last_id: Default::default(),
-},
+instruction,
from: Default::default(),
+last_id: Default::default(),
sig: Default::default(),
};
let buf = serialize(&claim0).unwrap();
@@ -172,10 +200,12 @@ mod tests {
let keypair = KeyPair::new();
let pubkey = keypair.pubkey();
let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
-tr.data.tokens = 1_000_000; // <-- attack, part 1!
-if let Plan::Pay(ref mut payment) = tr.data.plan {
-payment.tokens = tr.data.tokens; // <-- attack, part 2!
-};
+if let Instruction::NewContract(contract) = &mut tr.instruction {
+contract.tokens = 1_000_000; // <-- attack, part 1!
+if let Plan::Pay(ref mut payment) = contract.plan {
+payment.tokens = contract.tokens; // <-- attack, part 2!
+}
+}
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
@@ -188,9 +218,11 @@ mod tests {
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
-if let Plan::Pay(ref mut payment) = tr.data.plan {
-payment.to = thief_keypair.pubkey(); // <-- attack!
-};
+if let Instruction::NewContract(contract) = &mut tr.instruction {
+if let Plan::Pay(ref mut payment) = contract.plan {
+payment.to = thief_keypair.pubkey(); // <-- attack!
+}
+}
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
@@ -210,14 +242,18 @@ mod tests {
         let keypair1 = KeyPair::new();
         let zero = Hash::default();
         let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.tokens = 2; // <-- attack!
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.tokens = 2; // <-- attack!
+            }
         }
         assert!(!tr.verify_plan());

         // Also, ensure all branches of the plan spend all tokens
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.tokens = 0; // <-- whoops!
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.tokens = 0; // <-- whoops!
+            }
         }
         assert!(!tr.verify_plan());
     }
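
For orientation, here is a minimal, self-contained sketch of the Transaction layout these hunks converge on. The names mirror the diff; the concrete types (fixed-size arrays standing in for keys, hashes, and signatures, and a single Pay variant for Plan) are simplifying assumptions, not the crate's real definitions.

// Sketch only: stand-in types so the snippet compiles on its own.
type Hash = [u8; 32];
type PublicKey = [u8; 32];
type Signature = [u8; 64];

// Assumed simplification: the real Plan also carries conditional payments.
struct Payment {
    tokens: i64,
    to: PublicKey,
}

enum Plan {
    Pay(Payment),
}

impl Plan {
    // A Pay plan is valid only if it spends exactly the contract's tokens.
    fn verify(&self, spendable: i64) -> bool {
        match self {
            Plan::Pay(payment) => payment.tokens == spendable,
        }
    }
}

struct Contract {
    plan: Plan,
    tokens: i64,
}

enum Instruction {
    NewContract(Contract),
    ApplySignature(Signature),
}

struct Transaction {
    instruction: Instruction,
    from: PublicKey,
    last_id: Hash,
    sig: Signature,
}

impl Transaction {
    // Matches the new verify_plan above: only NewContract carries a plan,
    // so every other instruction verifies trivially.
    fn verify_plan(&self) -> bool {
        if let Instruction::NewContract(contract) = &self.instruction {
            contract.plan.verify(contract.tokens)
        } else {
            true
        }
    }
}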

347 src/tvu.rs Normal file

@@ -0,0 +1,347 @@
//! The `tvu` module implements the Transaction Validation Unit, a
//! 5-stage transaction validation pipeline in software.
//! 1. streamer
//! - Incoming blobs are picked up from the replicate socket.
//! 2. verifier
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
//! with errors are dropped, or marked for slashing.
//! 3.a retransmit
//! - Blobs originating from the parent (the leader is currently the only parent) are retransmitted to all
//! the peers in the crdt. Peers are everyone who is not me or the leader and has a known replicate
//! address.
//! 3.b window
//! - Verified blobs are placed into a window, indexed by the counter set by the leader. This could
//! be the PoH counter if it is monotonically increasing in each blob. Erasure coding is used to
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
//! a missing packet.
//! 4. accountant
//! - Contiguous blobs are sent to the accountant for transaction processing
//! 5. validator
//! - TODO Validation messages are sent back to the leader
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use packet;
use replicate_stage::ReplicateStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
pub struct Tvu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `bank` - The bank state.
/// * `me` - my configuration
/// * `gossip` - my gossip socket
/// * `replicate` - my replicate socket
/// * `leader` - leader configuration
/// * `exit` - The exit signal.
pub fn new(
bank: Arc<Bank>,
me: ReplicatedData,
gossip: UdpSocket,
replicate: UdpSocket,
leader: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
//replicate pipeline
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock in pub fn replicate")
.set_leader(leader.id);
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&leader);
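// Start gossip: Crdt::gossip initiates exchanges with peers, Crdt::listen (below) answers them.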
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
// TODO pull this socket out through the public interface
// make sure we are on the same interface
let mut local = replicate.local_addr().expect("tvu: get local address");
local.set_port(0);
let write = UdpSocket::bind(local).expect("tvu: bind to local socket");
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
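// Stage 1 (streamer): pull raw blobs off the replicate socket.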
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
replicate,
blob_sender.clone(),
).expect("tvu: blob receiver creation");
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
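// Stage 3.a (retransmit): forward blobs that came from the leader to the rest of the crdt peers.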
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
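// Stage 3.b (window): order blobs by index, pass contiguous data along, and feed the retransmitter.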
let t_window = streamer::window(
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
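// Stage 4 (accountant): the replicate stage applies windowed entries to the bank.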
let replicate_stage = ReplicateStage::new(
bank.clone(),
exit.clone(),
window_receiver,
blob_recycler.clone(),
);
let threads = vec![
//replicate threads
t_blob_receiver,
t_retransmit,
t_window,
replicate_stage.thread_hdl,
t_gossip,
t_listen,
];
Tvu {
thread_hdls: threads,
}
}
}
#[cfg(test)]
use std::time::Duration;
#[cfg(test)]
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
use signature::{KeyPair, KeyPairUtil};
let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
requests_socket.local_addr().unwrap(),
events_socket.local_addr().unwrap(),
);
(d, gossip, replicate, requests_socket, events_socket)
}
#[cfg(test)]
pub mod tests {
use bank::Bank;
use bincode::serialize;
use crdt::Crdt;
use crdt::ReplicatedData;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use logger;
use mint::Mint;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer;
use tvu::Tvu;
/// Test that a message sent from the leader to target1 is replicated to target2
#[test]
fn test_replicate() {
logger::setup();
let leader = TestNode::new();
let target1 = TestNode::new();
let target2 = TestNode::new();
let exit = Arc::new(AtomicBool::new(false));
//start crdt_leader
let mut crdt_l = Crdt::new(leader.data.clone());
crdt_l.set_leader(leader.data.id);
let cref_l = Arc::new(RwLock::new(crdt_l));
let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
let window1 = streamer::default_window();
let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());
//start crdt2
let mut crdt2 = Crdt::new(target2.data.clone());
crdt2.insert(&leader.data);
crdt2.set_leader(leader.data.id);
let leader_id = leader.data.id;
let cref2 = Arc::new(RwLock::new(crdt2));
let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
let window2 = streamer::default_window();
let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target2.sockets.replicate,
s_reader,
).unwrap();
// simulate leader sending messages
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
leader.sockets.requests,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let mint = Mint::new(starting_balance);
let replicate_addr = target1.data.replicate_addr;
let bank = Arc::new(Bank::new(&mint));
let tvu = Tvu::new(
bank.clone(),
target1.data,
target1.sockets.gossip,
target1.sockets.replicate,
leader.data,
exit.clone(),
);
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
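// Build num_blobs blobs, each carrying two entries: one with no events and
// one with a single transfer of transfer_amount from the mint to Bob.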
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(leader_id).unwrap();
let entry0 = Entry::new(&cur_hash, i, vec![]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Event::new_transaction(
&mint.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tr1]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&replicate_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
for t in tvu.thread_hdls {
t.join().expect("join");
}
t2_gossip.join().expect("join");
t2_listen.join().expect("join");
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_l_gossip.join().expect("join");
t_l_listen.join().expect("join");
}
pub struct Sockets {
pub gossip: UdpSocket,
pub requests: UdpSocket,
pub replicate: UdpSocket,
pub event: UdpSocket,
pub respond: UdpSocket,
pub broadcast: UdpSocket,
}
pub struct TestNode {
pub data: ReplicatedData,
pub sockets: Sockets,
}
impl TestNode {
pub fn new() -> TestNode {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
let event = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let data = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
requests.local_addr().unwrap(),
event.local_addr().unwrap(),
);
TestNode {
data: data,
sockets: Sockets {
gossip,
requests,
replicate,
event,
respond,
broadcast,
},
}
}
}
}
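
To make the wiring in Tvu::new easier to follow, here is a stripped-down sketch of its channel topology, with plain threads standing in for the streamer, window, and replicate stages. The Blob type and the per-stage work are placeholders, not the crate's real ones; only the channel shape mirrors the code above.

use std::sync::mpsc::channel;
use std::thread;

// Toy stand-in for a blob of packet data.
type Blob = Vec<u8>;

fn main() {
    // streamer -> window -> replicate, mirroring Tvu::new.
    let (blob_sender, blob_receiver) = channel::<Blob>();
    let (window_sender, window_receiver) = channel::<Blob>();

    // Stage 1: the streamer would read the replicate socket; here we fake it.
    let t_streamer = thread::spawn(move || {
        for i in 0..3u8 {
            blob_sender.send(vec![i]).unwrap();
        }
        // blob_sender drops here, which shuts the pipeline down.
    });

    // Stage 3.b: the window re-orders blobs before passing them on.
    let t_window = thread::spawn(move || {
        while let Ok(blob) = blob_receiver.recv() {
            window_sender.send(blob).unwrap();
        }
    });

    // Stage 4: the replicate stage applies contiguous blobs to the bank.
    let t_replicate = thread::spawn(move || {
        while let Ok(blob) = window_receiver.recv() {
            println!("applying blob {:?}", blob);
        }
    });

    t_streamer.join().unwrap();
    t_window.join().unwrap();
    t_replicate.join().unwrap();
}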

71 src/write_stage.rs Normal file

@@ -0,0 +1,71 @@
//! The `write_stage` module implements the write stage of the RPU.
use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use packet;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use streamer;
pub struct WriteStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
}
impl WriteStage {
/// Create a new WriteStage that wraps the given Bank.
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
writer: Mutex<W>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (blob_sender, blob_receiver) = channel();
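// Write entries from the receiver to the ledger writer and forward them
// as blobs until the exit flag is set.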
let thread_hdl = spawn(move || loop {
let entry_writer = EntryWriter::new(&bank);
let _ = entry_writer.write_and_send_entries(
&blob_sender,
&blob_recycler,
&writer,
&entry_receiver,
);
if exit.load(Ordering::Relaxed) {
info!("broadcat_service exiting");
break;
}
});
WriteStage {
thread_hdl,
blob_receiver,
}
}
pub fn new_drain(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (_blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || {
let entry_writer = EntryWriter::new(&bank);
loop {
let _ = entry_writer.drain_entries(&entry_receiver);
if exit.load(Ordering::Relaxed) {
info!("drain_service exiting");
break;
}
}
});
WriteStage {
thread_hdl,
blob_receiver,
}
}
}
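
Both constructors follow the same shutdown idiom: do one unit of (possibly blocking) work, then poll the shared exit flag, so the thread stops within a single iteration of exit.store(true, ..). A minimal sketch of that pattern, with a sleep standing in for write_and_send_entries or drain_entries:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{sleep, spawn, JoinHandle};
use std::time::Duration;

// One unit of work per iteration, then check the flag.
fn service(exit: Arc<AtomicBool>) -> JoinHandle<()> {
    spawn(move || loop {
        sleep(Duration::from_millis(100)); // stand-in for the blocking work
        if exit.load(Ordering::Relaxed) {
            println!("service exiting");
            break;
        }
    })
}

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let handle = service(exit.clone());
    exit.store(true, Ordering::Relaxed);
    handle.join().expect("join service");
}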